From 147c5a95b19f267b96dfcd9c8e33f871d89baec4 Mon Sep 17 00:00:00 2001 From: Zhao Hang Date: Wed, 12 Feb 2025 10:47:35 +0800 Subject: [PATCH 1/4] [CVE] update to gcc-8.5.0-23 to #IBLPXQ update to gcc-8.5.0-23 for CVE-2020-11023 Project: TC2024080204 Signed-off-by: Zhao Hang --- ...-LoongArch-support-for-anolis-a8-gcc.patch | 208589 --------------- ...LoongArch-Fixup-configure-file-error.patch | 153 - 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch | 25 - ...rch-Rename-config-file-for-loongarch.patch | 18 - ...rch-fix-multilib-osdirnames-to-lib64.patch | 27 - Fix-dwarf2cfi-error.patch | 33 - ...ocessing-to-allow-in-function-argume.patch | 220 - LoongArch-Add-missing-headers.patch | 65 - LoongArch-Add-sanitizer-support.patch | 1100 - ...x-atomic_exchange-expanding-PR107713.patch | 164 - ...-NOOP_TRUNCATION-and-fix-extendsidi2.patch | 101 - Sync-to-gcc-8-vec-36.patch | 30492 --- gcc.spec | 95 +- libitm-Add-LoongArch-support.patch | 285 - 14 files changed, 16 insertions(+), 241351 deletions(-) delete mode 100644 0001-Add-LoongArch-support-for-anolis-a8-gcc.patch delete mode 100644 0001-LoongArch-Fixup-configure-file-error.patch delete mode 100644 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch delete mode 100644 0002-LoongArch-Rename-config-file-for-loongarch.patch delete mode 100644 0002-loongarch-fix-multilib-osdirnames-to-lib64.patch delete mode 100644 Fix-dwarf2cfi-error.patch delete mode 100644 Improve-specs-processing-to-allow-in-function-argume.patch delete mode 100644 LoongArch-Add-missing-headers.patch delete mode 100644 LoongArch-Add-sanitizer-support.patch delete mode 100644 LoongArch-Fix-atomic_exchange-expanding-PR107713.patch delete mode 100644 LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch delete mode 100644 Sync-to-gcc-8-vec-36.patch delete mode 100644 libitm-Add-LoongArch-support.patch diff --git a/0001-Add-LoongArch-support-for-anolis-a8-gcc.patch b/0001-Add-LoongArch-support-for-anolis-a8-gcc.patch deleted file mode 100644 index bcc40a4..0000000 --- a/0001-Add-LoongArch-support-for-anolis-a8-gcc.patch +++ /dev/null @@ -1,208589 +0,0 @@ -From fac81a61084fa8cc05a39c8d8553accf237cbe05 Mon Sep 17 00:00:00 2001 -From: yala -Date: Fri, 10 Jun 2022 14:17:05 +0800 -Subject: [PATCH 1/2] Add LoongArch support for anolis a8 gcc - -Change-Id: Ia517b342881e0486b1cd143725f4c064962c998d --- - config.guess | 3 + - config.sub | 8 +- - config/mt-loongnix-gnu | 1 + - config/picflag.m4 | 3 + - configure | 14 +- - configure.ac | 13 +- - .../config/loongarch/loongarch-common.c | 68 + - gcc/config.gcc | 196 +- - gcc/config.host | 12 + - gcc/config/host-linux.c | 2 + - gcc/config/loongarch/constraints.md | 389 + - gcc/config/loongarch/driver-native.c | 82 + - gcc/config/loongarch/elf.h | 50 + - gcc/config/loongarch/frame-header-opt.c | 292 + - gcc/config/loongarch/generic.md | 109 + - gcc/config/loongarch/genopt.sh | 110 + - gcc/config/loongarch/gnu-user.h | 132 + - gcc/config/loongarch/larchintrin.h | 386 + - gcc/config/loongarch/lasx.md | 4825 + - gcc/config/loongarch/lasxintrin.h | 5139 + - gcc/config/loongarch/linux-common.h | 68 + - gcc/config/loongarch/linux.h | 33 + - gcc/config/loongarch/loongarch-builtins.c | 3152 + - gcc/config/loongarch/loongarch-c.c | 135 + - gcc/config/loongarch/loongarch-cpus.def | 38 + - gcc/config/loongarch/loongarch-d.c | 31 + - gcc/config/loongarch/loongarch-ftypes.def | 719 + - gcc/config/loongarch/loongarch-modes.def | 64 + - gcc/config/loongarch/loongarch-opts.h | 34 + - gcc/config/loongarch/loongarch-protos.h | 290 + - 
gcc/config/loongarch/loongarch-tables.opt | 34 + - gcc/config/loongarch/loongarch.c | 10465 + - gcc/config/loongarch/loongarch.h | 2145 + - gcc/config/loongarch/loongarch.md | 4320 + - gcc/config/loongarch/loongarch.opt | 171 + - gcc/config/loongarch/lsx.md | 3181 + - gcc/config/loongarch/lsx2.md | 1091 + - gcc/config/loongarch/lsxintrin.h | 4980 + - gcc/config/loongarch/predicates.md | 639 + - gcc/config/loongarch/rtems.h | 39 + - gcc/config/loongarch/sde.opt | 28 + - gcc/config/loongarch/sync.md | 616 + - gcc/config/loongarch/t-linux | 23 + - gcc/config/loongarch/t-loongarch | 45 + - gcc/config/loongarch/x-native | 3 + - gcc/configure.ac | 35 +- - gcc/targhooks.c | 2 +- - gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C | 2 +- - gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C | 2 +- - gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C | 2 +- - gcc/testsuite/gcc.dg/20020312-2.c | 2 + - gcc/testsuite/gcc.dg/loop-8.c | 2 +- - gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c | 2 +- - .../loongarch/insn_correctness_check.c | 159432 +++++++++++++++ - .../gcc.target/loongarch/lasx-builtin.c | 1509 + - .../gcc.target/loongarch/loongarch.exp | 40 + - .../gcc.target/loongarch/lsx-builtin.c | 1461 + - .../gcc.target/loongarch/tst-asm-const.c | 16 + - .../mips/loongson-shift-count-truncated-1.c | 6 +- - gcc/testsuite/gcc.target/mips/loongson-simd.c | 4 +- - gcc/testsuite/gcc.target/mips/mips.exp | 23 + - gcc/testsuite/go.test/go-test.exp | 3 + - gcc/testsuite/lib/target-supports.exp | 64 +- - libgcc/config.host | 40 + - libgcc/config/loongarch/crtfastmath.c | 50 + - libgcc/config/loongarch/crti.S | 43 + - libgcc/config/loongarch/crtn.S | 39 + - libgcc/config/loongarch/gthr-loongnixsde.h | 237 + - libgcc/config/loongarch/lib2funcs.c | 23 + - libgcc/config/loongarch/linux-unwind.h | 81 + - libgcc/config/loongarch/sfp-machine.h | 148 + - libgcc/config/loongarch/t-crtstuff | 5 + - libgcc/config/loongarch/t-elf | 3 + - libgcc/config/loongarch/t-loongarch | 9 + - libgcc/config/loongarch/t-loongarch64 | 1 + - libgcc/config/loongarch/t-sdemtk | 3 + - libgcc/config/loongarch/t-softfp-tf | 3 + - libgcc/config/loongarch/t-vr | 0 - libgcc/configure.ac | 2 +- - libgomp/configure.tgt | 4 + - .../22_locale/time_get/get_date/wchar_t/4.cc | 24 +- - 81 files changed, 207456 insertions(+), 44 deletions(-) - create mode 100644 config/mt-loongnix-gnu - create mode 100644 gcc/common/config/loongarch/loongarch-common.c - create mode 100644 gcc/config/loongarch/constraints.md - create mode 100644 gcc/config/loongarch/driver-native.c - create mode 100644 gcc/config/loongarch/elf.h - create mode 100644 gcc/config/loongarch/frame-header-opt.c - create mode 100644 gcc/config/loongarch/generic.md - create mode 100644 gcc/config/loongarch/genopt.sh - create mode 100644 gcc/config/loongarch/gnu-user.h - create mode 100644 gcc/config/loongarch/larchintrin.h - create mode 100644 gcc/config/loongarch/lasx.md - create mode 100644 gcc/config/loongarch/lasxintrin.h - create mode 100644 gcc/config/loongarch/linux-common.h - create mode 100644 gcc/config/loongarch/linux.h - create mode 100644 gcc/config/loongarch/loongarch-builtins.c - create mode 100644 gcc/config/loongarch/loongarch-c.c - create mode 100644 gcc/config/loongarch/loongarch-cpus.def - create mode 100644 gcc/config/loongarch/loongarch-d.c - create mode 100644 gcc/config/loongarch/loongarch-ftypes.def - create mode 100644 gcc/config/loongarch/loongarch-modes.def - create mode 100644 gcc/config/loongarch/loongarch-opts.h - create mode 100644 gcc/config/loongarch/loongarch-protos.h - create mode 
100644 gcc/config/loongarch/loongarch-tables.opt - create mode 100644 gcc/config/loongarch/loongarch.c - create mode 100644 gcc/config/loongarch/loongarch.h - create mode 100644 gcc/config/loongarch/loongarch.md - create mode 100644 gcc/config/loongarch/loongarch.opt - create mode 100644 gcc/config/loongarch/lsx.md - create mode 100644 gcc/config/loongarch/lsx2.md - create mode 100644 gcc/config/loongarch/lsxintrin.h - create mode 100644 gcc/config/loongarch/predicates.md - create mode 100644 gcc/config/loongarch/rtems.h - create mode 100644 gcc/config/loongarch/sde.opt - create mode 100644 gcc/config/loongarch/sync.md - create mode 100644 gcc/config/loongarch/t-linux - create mode 100644 gcc/config/loongarch/t-loongarch - create mode 100644 gcc/config/loongarch/x-native - create mode 100644 gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c - create mode 100644 gcc/testsuite/gcc.target/loongarch/lasx-builtin.c - create mode 100644 gcc/testsuite/gcc.target/loongarch/loongarch.exp - create mode 100644 gcc/testsuite/gcc.target/loongarch/lsx-builtin.c - create mode 100644 gcc/testsuite/gcc.target/loongarch/tst-asm-const.c - create mode 100644 libgcc/config/loongarch/crtfastmath.c - create mode 100644 libgcc/config/loongarch/crti.S - create mode 100644 libgcc/config/loongarch/crtn.S - create mode 100644 libgcc/config/loongarch/gthr-loongnixsde.h - create mode 100644 libgcc/config/loongarch/lib2funcs.c - create mode 100644 libgcc/config/loongarch/linux-unwind.h - create mode 100644 libgcc/config/loongarch/sfp-machine.h - create mode 100644 libgcc/config/loongarch/t-crtstuff - create mode 100644 libgcc/config/loongarch/t-elf - create mode 100644 libgcc/config/loongarch/t-loongarch - create mode 100644 libgcc/config/loongarch/t-loongarch64 - create mode 100644 libgcc/config/loongarch/t-sdemtk - create mode 100644 libgcc/config/loongarch/t-softfp-tf - create mode 100644 libgcc/config/loongarch/t-vr - -diff --git a/config.guess b/config.guess -index 588fe82a4..edfd052ae 100755 ---- a/config.guess -+++ b/config.guess -@@ -985,6 +985,9 @@ EOF - mips64el:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} - exit ;; -+ loongarch64:Linux:*:*) -+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC} -+ exit ;; - openrisc*:Linux:*:*) - echo or1k-unknown-linux-${LIBC} - exit ;; -diff --git a/config.sub b/config.sub -index f2632cd8a..11ee24619 100755 ---- a/config.sub -+++ b/config.sub -@@ -2,7 +2,7 @@ - # Configuration validation subroutine script. - # Copyright 1992-2018 Free Software Foundation, Inc. - --timestamp='2018-01-01' -+timestamp='2020-01-04' - - # This file is free software; you can redistribute it and/or modify it - # under the terms of the GNU General Public License as published by -@@ -142,7 +142,7 @@ case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. 
- ;; -- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -+ -dec* | -mips* | -loongarch* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -@@ -288,6 +288,8 @@ case $basic_machine in - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ -+ | loongarch \ -+ | loongarch64 \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ -@@ -415,6 +417,8 @@ case $basic_machine in - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ -+ | loongarch-* \ -+ | loongarch64-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ -diff --git a/config/mt-loongnix-gnu b/config/mt-loongnix-gnu -new file mode 100644 -index 000000000..713c4e379 ---- /dev/null -+++ b/config/mt-loongnix-gnu -@@ -0,0 +1 @@ -+include $(srcdir)/config/mt-gnu -diff --git a/config/picflag.m4 b/config/picflag.m4 -index 8b106f9af..0aefcf619 100644 ---- a/config/picflag.m4 -+++ b/config/picflag.m4 -@@ -44,6 +44,9 @@ case "${$2}" in - # sets the default TLS model and affects inlining. - $1=-fPIC - ;; -+ loongarch*-*-*) -+ $1=-fpic -+ ;; - mips-sgi-irix6*) - # PIC is the default. - ;; -diff --git a/configure b/configure -index 060139551..633db33c5 100755 ---- a/configure -+++ b/configure -@@ -2974,7 +2974,7 @@ case "${ENABLE_GOLD}" in - # Check for target supported by gold. - case "${target}" in - i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \ -- | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-*) -+ | aarch64*-*-* | tilegx*-*-* | mips*-*-* | loongarch*-*-* | s390*-*-*) - configdirs="$configdirs gold" - if test x${ENABLE_GOLD} = xdefault; then - default_ld=gold -@@ -3507,6 +3507,9 @@ case "${target}" in - mips*-*-*) - libgloss_dir=mips - ;; -+ loongarch*-*-*) -+ libgloss_dir=loongarch -+ ;; - powerpc*-*-*) - libgloss_dir=rs6000 - ;; -@@ -3863,6 +3866,12 @@ case "${target}" in - wasm32-*-*) - noconfigdirs="$noconfigdirs ld" - ;; -+ loongarch*-*-linux*) -+ ;; -+ loongarch*-*-*) -+ noconfigdirs="$noconfigdirs gprof" -+ ;; -+ - esac - - # If we aren't building newlib, then don't build libgloss, since libgloss -@@ -6905,6 +6914,9 @@ case "${target}" in - mips*-*-*linux* | mips*-*-gnu*) - target_makefile_frag="config/mt-mips-gnu" - ;; -+ loongarch*-*-*linux* | loongarch*-*-gnu*) -+ target_makefile_frag="config/mt-loongarch-gnu" -+ ;; - nios2-*-elf*) - target_makefile_frag="config/mt-nios2-elf" - ;; -diff --git a/configure.ac b/configure.ac -index c34333365..59e0fc035 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -345,7 +345,7 @@ case "${ENABLE_GOLD}" in - # Check for target supported by gold. 
- case "${target}" in - i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \ -- | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-*) -+ | aarch64*-*-* | tilegx*-*-* | mips*-*-* | loongarch*-*-* | s390*-*-*) - configdirs="$configdirs gold" - if test x${ENABLE_GOLD} = xdefault; then - default_ld=gold -@@ -838,6 +838,9 @@ case "${target}" in - mips*-*-*) - libgloss_dir=mips - ;; -+ loongarch*-*-*) -+ libgloss_dir=loongarch -+ ;; - powerpc*-*-*) - libgloss_dir=rs6000 - ;; -@@ -1194,6 +1197,11 @@ case "${target}" in - wasm32-*-*) - noconfigdirs="$noconfigdirs ld" - ;; -+ loongarch*-*-linux*) -+ ;; -+ loongarch*-*-*) -+ noconfigdirs="$noconfigdirs gprof" -+ ;; - esac - - # If we aren't building newlib, then don't build libgloss, since libgloss -@@ -2499,6 +2507,9 @@ case "${target}" in - mips*-*-*linux* | mips*-*-gnu*) - target_makefile_frag="config/mt-mips-gnu" - ;; -+ loongarch*-*-*linux* | loongarch*-*-gnu*) -+ target_makefile_frag="config/mt-loongarch-gnu" -+ ;; - nios2-*-elf*) - target_makefile_frag="config/mt-nios2-elf" - ;; -diff --git a/gcc/common/config/loongarch/loongarch-common.c b/gcc/common/config/loongarch/loongarch-common.c -new file mode 100644 -index 000000000..afbbc3ad0 ---- /dev/null -+++ b/gcc/common/config/loongarch/loongarch-common.c -@@ -0,0 +1,68 @@ -+/* Common hooks for LARCH. -+ Copyright (C) 1989-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tm.h" -+#include "common/common-target.h" -+#include "common/common-target-def.h" -+#include "opts.h" -+#include "flags.h" -+ -+#undef TARGET_OPTION_OPTIMIZATION_TABLE -+#define TARGET_OPTION_OPTIMIZATION_TABLE loongarch_option_optimization_table -+ -+/* Set default optimization options. */ -+static const struct default_options loongarch_option_optimization_table[] = -+{ -+ { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, -+ { OPT_LEVELS_NONE, 0, NULL, 0 } -+}; -+ -+/* Implement TARGET_HANDLE_OPTION. 
*/ -+ -+static bool -+loongarch_handle_option (struct gcc_options *opts, -+ struct gcc_options *opts_set ATTRIBUTE_UNUSED, -+ const struct cl_decoded_option *decoded, -+ location_t loc ATTRIBUTE_UNUSED) -+{ -+ size_t code = decoded->opt_index; -+ -+ switch (code) -+ { -+ case OPT_mno_flush_func: -+ opts->x_loongarch_cache_flush_func = NULL; -+ return true; -+ -+ default: -+ return true; -+ } -+} -+ -+#undef TARGET_DEFAULT_TARGET_FLAGS -+#define TARGET_DEFAULT_TARGET_FLAGS \ -+ (TARGET_DEFAULT \ -+ | TARGET_CPU_DEFAULT \ -+ | MASK_CHECK_ZERO_DIV) -+#undef TARGET_HANDLE_OPTION -+#define TARGET_HANDLE_OPTION loongarch_handle_option -+ -+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER; -diff --git a/gcc/config.gcc b/gcc/config.gcc -index a6140f900..ba061efa4 100644 ---- a/gcc/config.gcc -+++ b/gcc/config.gcc -@@ -282,7 +282,7 @@ case ${target} in - | *-*-sysv* \ - | vax-*-vms* \ - ) -- echo "*** Configuration ${target} not supported" 1>&2 -+ echo "*** Configuration ${target} not supported " 1>&2 - exit 1 - ;; - esac -@@ -425,6 +425,13 @@ hppa*-*-*) - lm32*) - extra_options="${extra_options} g.opt" - ;; -+loongarch*-*-*) -+ cpu_type=loongarch -+ d_target_objs="loongarch-d.o" -+ extra_headers="lasxintrin.h lsxintrin.h larchintrin.h" -+ extra_objs="frame-header-opt.o loongarch-c.o loongarch-builtins.o" -+ extra_options="${extra_options} g.opt fused-madd.opt loongarch/loongarch-tables.opt" -+ ;; - m32r*-*-*) - cpu_type=m32r - extra_options="${extra_options} g.opt" -@@ -2185,6 +2192,55 @@ mips*-*-linux*) # Linux MIPS, either endian. - tmake_file="${tmake_file} mips/t-linux64" - fi - ;; -+loongarch*-*-linux*) -+ case ${with_abi} in -+ "") -+ echo "not specify ABI, default is lp64 for loongarch64" -+ with_abi=lp64 # for default -+ ;; -+ lpx32) -+ ;; -+ lp32) -+ ;; -+ lp64) -+ ;; -+ *) -+ echo "Unknown ABI used in --with-abi=$with_abi" -+ exit 1 -+ esac -+ -+ enable_multilib="yes" -+ loongarch_multilibs="${with_multilib_list}" -+ if test "$loongarch_multilibs" = "default"; then -+ loongarch_multilibs="${with_abi}" -+ fi -+ loongarch_multilibs=`echo $loongarch_multilibs | sed -e 's/,/ /g'` -+ for loongarch_multilib in ${loongarch_multilibs}; do -+ case ${loongarch_multilib} in -+ lp64 | lpx32 | lp32 ) -+ TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${loongarch_multilib}" -+ ;; -+ *) -+ echo "--with-multilib-list=${loongarch_multilib} not supported." -+ exit 1 -+ esac -+ done -+ TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'` -+ -+ if test `for one_abi in ${loongarch_multilibs}; do if [ x\$one_abi = x$with_abi ]; then echo 1; exit 0; fi; done; echo 0;` = "0"; then -+ echo "--with-abi=${with_abi} must be one of --with-multilib-list=${with_multilib_list}" -+ exit 1 -+ fi -+ -+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/linux-common.h" -+ extra_options="${extra_options} linux-android.opt" -+ tmake_file="${tmake_file} loongarch/t-linux" -+ gnu_ld=yes -+ gas=yes -+ # Force .init_array support. The configure script cannot always -+ # automatically detect that GAS supports it, yet we require it. 
-+ gcc_cv_initfini_array=yes -+ ;; - mips*-mti-elf*) - tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h mips/n32-elf.h mips/sde.h mips/mti-elf.h" - tmake_file="mips/t-mti-elf" -@@ -2239,6 +2295,31 @@ mips*-sde-elf*) - ;; - esac - ;; -+loongarch*-sde-elf*) -+ tm_file="elfos.h newlib-stdint.h ${tm_file} loongarch/elf.h loongarch/sde.h" -+# tmake_file="loongarch/t-sde" -+ extra_options="${extra_options} loongarch/sde.opt" -+ case "${with_newlib}" in -+ yes) -+ # newlib / libgloss. -+ ;; -+ *) -+ # MIPS toolkit libraries. -+ tm_file="$tm_file loongarch/sdemtk.h" -+ tmake_file="$tmake_file loongarch/t-sdemtk" -+ case ${enable_threads} in -+ "" | yes | loongarchsde) -+ thread_file='loongarchsde' -+ ;; -+ esac -+ ;; -+ esac -+ case ${target} in -+ loongarch*) -+ tm_defines="LARCH_ISA_DEFAULT=0 LARCH_ABI_DEFAULT=ABILP64" -+ ;; -+ esac -+ ;; - mipsisa32-*-elf* | mipsisa32el-*-elf* | \ - mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \ - mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \ -@@ -2524,7 +2605,7 @@ powerpc*-*-linux*) - esac - case ${target} in - powerpc64*-*-linux*spe* | powerpc64*-*-linux*paired*) -- echo "*** Configuration ${target} not supported" 1>&2 -+ echo "*** Configuration ${target} not supported " 1>&2 - exit 1 - ;; - powerpc*-*-linux*spe* | powerpc*-*-linux*paired*) -@@ -3143,7 +3224,7 @@ m32c-*-elf*) - cxx_target_objs="m32c-pragma.o" - ;; - *) -- echo "*** Configuration ${target} not supported" 1>&2 -+ echo "*** Configuration ${target} not supported " 1>&2 - exit 1 - ;; - esac -@@ -4174,7 +4255,31 @@ case "${target}" in - ;; - - mips*-*-*) -- supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci lxc1-sxc1 madd4" -+ supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci lxc1-sxc1 madd4 fix-loongson3-llsc" -+ -+ all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls lxc1-sxc1 madd4 fix-loongson3-llsc" -+ for option in $all_defaults -+ do -+ eval "val=\$with_"`echo $option | sed s/-/_/g` -+ if test -n "$val"; then -+ case " $supported_defaults " in -+ *" $option "*) -+ ;; -+ *) -+ echo "This target does not support --with-$option." 
2>&1 -+ echo "Valid --with options are: $supported_defaults" 2>&1 -+ exit 1 -+ ;; -+ esac -+ -+ if test "x$t" = x -+ then -+ t="{ \"$option\", \"$val\" }" -+ else -+ t="${t}, { \"$option\", \"$val\" }" -+ fi -+ fi -+ done - - case ${with_float} in - "" | soft | hard) -@@ -4327,6 +4432,75 @@ case "${target}" in - exit 1 - ;; - esac -+ -+ case ${with_fix_loongson3_llsc} in -+ yes) -+ with_fix_loongson3_llsc=fix-loongson3-llsc -+ ;; -+ no) -+ with_fix_loongson3_llsc=no-fix-loongson3-llsc -+ ;; -+ "") -+ ;; -+ *) -+ echo "Unknown fix-loongson3-llsc type used in --with-fix-loongson3-llsc" 1>&2 -+ exit 1 -+ ;; -+ esac -+ ;; -+ -+ loongarch*-*-*) -+ supported_defaults="abi arch float fpu tune" -+ -+ case ${with_arch} in -+ loongarch64 | loongarch32) -+ # OK -+ default_loongarch_arch=$with_arch -+ ;; -+ "") -+ # fallback -+ default_loongarch_arch=loongarch64 -+ ;; -+ *) -+ echo "Unknown arch given in --with-arch=$with_arch, available choices are: loongarch64" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ case ${with_abi} in -+ lp64 | lp32) -+ # OK -+ default_loongarch_abi=$with_abi -+ ;; -+ "") -+ # fallback -+ default_loongarch_abi=lp64 -+ ;; -+ *) -+ echo "Unknown ABI given in --with-abi=$with_abi, available choices are: lp32 lp64" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ case ${with_float} in -+ "" | soft | hard) -+ # OK -+ ;; -+ *) -+ echo "Unknown floating point type used in --with-float=$with_float" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ case ${with_fpu} in -+ "" | single | double) -+ # OK -+ ;; -+ *) -+ echo "Unknown fpu type used in --with-fpu=$with_fpu" 1>&2 -+ exit 1 -+ ;; -+ esac - ;; - - nds32*-*-*) -@@ -4760,6 +4934,18 @@ case ${target} in - tmake_file="mips/t-mips $tmake_file" - ;; - -+ loongarch*-*-*) -+ case ${default_loongarch_arch} in -+ loongarch64) tm_defines="$tm_defines LARCH_ISA_DEFAULT=0" ;; -+ loongarch32) tm_defines="$tm_defines LARCH_ISA_DEFAULT=1" ;; -+ esac -+ case ${default_loongarch_abi} in -+ lp64) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP64" ;; -+ lp32) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP32" ;; -+ esac -+ tmake_file="loongarch/t-loongarch $tmake_file" -+ ;; -+ - powerpc*-*-* | rs6000-*-*) - # FIXME: The PowerPC port uses the value set at compile time, - # although it's only cosmetic. 
-@@ -4822,7 +5008,7 @@ case ${target} in - esac - - t= --all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls lxc1-sxc1 madd4" -+all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls lxc1-sxc1 madd4 fix-loongson3-llsc" - for option in $all_defaults - do - eval "val=\$with_"`echo $option | sed s/-/_/g` -diff --git a/gcc/config.host b/gcc/config.host -index c65569da2..d23dae4ac 100644 ---- a/gcc/config.host -+++ b/gcc/config.host -@@ -139,6 +139,18 @@ case ${host} in - host_extra_gcc_objs="driver-native.o" - host_xmake_file="${host_xmake_file} mips/x-native" - ;; -+ loongarch*-*-linux*) -+ host_extra_gcc_objs="driver-native.o" -+ host_xmake_file="${host_xmake_file} loongarch/x-native" -+ ;; -+ esac -+ ;; -+ loongarch*-*-linux*) -+ case ${target} in -+ loongarch*-*-linux*) -+ host_extra_gcc_objs="driver-native.o" -+ host_xmake_file="${host_xmake_file} loongarch/x-native" -+ ;; - esac - ;; - rs6000-*-* \ -diff --git a/gcc/config/host-linux.c b/gcc/config/host-linux.c -index 4696e413a..95fc19196 100644 ---- a/gcc/config/host-linux.c -+++ b/gcc/config/host-linux.c -@@ -94,6 +94,8 @@ - # define TRY_EMPTY_VM_SPACE 0x60000000 - #elif defined(__mips__) && defined(__LP64__) - # define TRY_EMPTY_VM_SPACE 0x8000000000 -+#elif defined(__loongarch__) && defined(__LP64__) -+# define TRY_EMPTY_VM_SPACE 0x8000000000 - #elif defined(__mips__) - # define TRY_EMPTY_VM_SPACE 0x60000000 - #else -diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md -new file mode 100644 -index 000000000..ae8596107 ---- /dev/null -+++ b/gcc/config/loongarch/constraints.md -@@ -0,0 +1,389 @@ -+;; Constraint definitions for LARCH. -+;; Copyright (C) 2006-2018 Free Software Foundation, Inc. -+;; -+;; This file is part of GCC. -+;; -+;; GCC is free software; you can redistribute it and/or modify -+;; it under the terms of the GNU General Public License as published by -+;; the Free Software Foundation; either version 3, or (at your option) -+;; any later version. -+;; -+;; GCC is distributed in the hope that it will be useful, -+;; but WITHOUT ANY WARRANTY; without even the implied warranty of -+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+;; GNU General Public License for more details. -+;; -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . -+ -+;; Register constraints -+ -+;; "a" A constant call global and noplt address. -+;; "b" ALL_REGS -+;; "c" A constant call local address. -+;; "d" - -+;; "e" JALR_REGS -+;; "f" FP_REGS -+;; "g" * -+;; "h" A constant call plt address. -+;; "i" "Matches a general integer constant." -+;; "j" SIBCALL_REGS -+;; "k" * -+;; "l" "A signed 16-bit constant ." -+;; "m" "A memory operand whose address is formed by a base register and offset -+;; that is suitable for use in instructions with the same addressing mode -+;; as @code{st.w} and @code{ld.w}." -+;; "n" "Matches a non-symbolic integer constant." -+;; "o" "Matches an offsettable memory reference." -+;; "p" "Matches a general address." -+;; "q" CSR_REGS -+;; "r" GENERAL_REGS -+;; "s" "Matches a symbolic integer constant." -+;; "t" A constant call weak address -+;; "u" - -+;; "v" - -+;; "w" "Matches any valid memory." 
-+;; "x" - -+;; "y" GR_REGS -+;; "z" ST_REGS -+;; "A" - -+;; "B" - -+;; "C" - -+;; "D" - -+;; "E" "Matches a floating-point constant." -+;; "F" "Matches a floating-point constant." -+;; "G" "Floating-point zero." -+;; "H" - -+;; "I" "A signed 12-bit constant (for arithmetic instructions)." -+;; "J" "Integer zero." -+;; "K" "An unsigned 12-bit constant (for logic instructions)." -+;; "L" "A signed 32-bit constant in which the lower 12 bits are zero. -+;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu} or @code{ori}." -+;; "N" "A constant in the range -65535 to -1 (inclusive)." -+;; "O" "A signed 15-bit constant." -+;; "P" "A constant in the range 1 to 65535 (inclusive)." -+;; "Q" "A signed 12-bit constant" -+;; "R" "An address that can be used in a non-macro load or store." -+;; "S" "A constant call address." -+;; "T" - -+;; "U" - -+;; "V" "Matches a non-offsettable memory reference." -+;; "W" "A memory address based on a member of @code{BASE_REG_CLASS}. This is -+;; true for all references (although it can sometimes be implicit -+;; if @samp{!TARGET_EXPLICIT_RELOCS})." -+;; "X" "Matches anything." -+;; "Y" - -+;; "YG" -+;; "A vector zero." -+;; "YA" -+;; "An unsigned 6-bit constant." -+;; "YB" -+;; "A signed 10-bit constant." -+;; "Yb" -+;; "Yd" -+;; "A constant @code{move_operand} that can be safely loaded into @code{$25} -+;; using @code{la}." -+;; "Yh" -+;; "Yw" -+;; "Yx" -+;; "YI" -+;; "A replicated vector const in which the replicated value is in the range -+;; [-512,511]." -+;; "YC" -+;; "A replicated vector const in which the replicated value has a single -+;; bit set." -+;; "YZ" -+;; "A replicated vector const in which the replicated value has a single -+;; bit clear." -+;; "Z" - -+;; "ZC" -+;; "A memory operand whose address is formed by a base register and offset -+;; that is suitable for use in instructions with the same addressing mode -+;; as @code{ll.w} and @code{sc.w}." -+;; "ZD" -+;; "An address suitable for a @code{prefetch} instruction, or for any other -+;; instruction with the same addressing mode as @code{prefetch}." -+;; "ZR" -+;; "An address valid for loading/storing register exclusive" -+;; "ZB" -+;; "An address that is held in a general-purpose register. -+;; The offset is zero" -+ -+ -+(define_constraint "c" -+ "@internal -+ A constant call local address." -+ (match_operand 0 "is_const_call_local_symbol")) -+ -+(define_constraint "a" -+ "@internal -+ A constant call global and noplt address." -+ (match_operand 0 "is_const_call_global_noplt_symbol")) -+ -+(define_constraint "h" -+ "@internal -+ A constant call plt address." -+ (match_operand 0 "is_const_call_plt_symbol")) -+ -+(define_constraint "t" -+ "@internal -+ A constant call weak address." -+ (match_operand 0 "is_const_call_weak_symbol")) -+ -+(define_register_constraint "e" "JALR_REGS" -+ "@internal") -+ -+(define_register_constraint "q" "CSR_REGS" -+ "A general-purpose register except for $r0 and $r1 for csr.") -+ -+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" -+ "A floating-point register (if available).") -+ -+(define_register_constraint "b" "ALL_REGS" -+ "@internal") -+ -+(define_register_constraint "j" "SIBCALL_REGS" -+ "@internal") -+ -+(define_constraint "l" -+ "A signed 16-bit constant ." 
-+ (and (match_code "const_int") -+ (match_test "IMM16_OPERAND (ival)"))) -+ -+(define_register_constraint "y" "GR_REGS" -+ "Equivalent to @code{r}; retained for backwards compatibility.") -+ -+(define_register_constraint "z" "ST_REGS" -+ "A floating-point condition code register.") -+ -+(define_constraint "kf" -+ "@internal" -+ (match_operand 0 "force_to_mem_operand")) -+ -+;; This is a normal rather than a register constraint because we can -+;; never use the stack pointer as a reload register. -+(define_constraint "ks" -+ "@internal" -+ (and (match_code "reg") -+ (match_test "REGNO (op) == STACK_POINTER_REGNUM"))) -+ -+;; Integer constraints -+ -+(define_constraint "I" -+ "A signed 12-bit constant (for arithmetic instructions)." -+ (and (match_code "const_int") -+ (match_test "SMALL_OPERAND (ival)"))) -+ -+(define_constraint "J" -+ "Integer zero." -+ (and (match_code "const_int") -+ (match_test "ival == 0"))) -+ -+(define_constraint "K" -+ "An unsigned 12-bit constant (for logic instructions)." -+ (and (match_code "const_int") -+ (match_test "SMALL_OPERAND_UNSIGNED (ival)"))) -+ -+(define_constraint "u" -+ "An unsigned 12-bit constant (for logic instructions)." -+ (and (match_code "const_int") -+ (match_test "LU32I_OPERAND (ival)"))) -+ -+(define_constraint "v" -+ "An unsigned 12-bit constant (for logic instructions)." -+ (and (match_code "const_int") -+ (match_test "LU52I_OPERAND (ival)"))) -+ -+(define_constraint "L" -+ "A signed 32-bit constant in which the lower 12 bits are zero. -+ Such constants can be loaded using @code{lui}." -+ (and (match_code "const_int") -+ (match_test "LUI_OPERAND (ival)"))) -+ -+(define_constraint "M" -+ "A constant that cannot be loaded using @code{lui}, @code{addiu} -+ or @code{ori}." -+ (and (match_code "const_int") -+ (not (match_test "SMALL_OPERAND (ival)")) -+ (not (match_test "SMALL_OPERAND_UNSIGNED (ival)")) -+ (not (match_test "LUI_OPERAND (ival)")))) -+ -+(define_constraint "N" -+ "A constant in the range -65535 to -1 (inclusive)." -+ (and (match_code "const_int") -+ (match_test "ival >= -0xffff && ival < 0"))) -+ -+(define_constraint "O" -+ "A signed 15-bit constant." -+ (and (match_code "const_int") -+ (match_test "ival >= -0x4000 && ival < 0x4000"))) -+ -+(define_constraint "P" -+ "A constant in the range 1 to 65535 (inclusive)." -+ (and (match_code "const_int") -+ (match_test "ival > 0 && ival < 0x10000"))) -+ -+;; Floating-point constraints -+ -+(define_constraint "G" -+ "Floating-point zero." -+ (and (match_code "const_double") -+ (match_test "op == CONST0_RTX (mode)"))) -+ -+;; General constraints -+ -+(define_constraint "Q" -+ "@internal" -+ (match_operand 0 "const_arith_operand")) -+ -+(define_memory_constraint "R" -+ "An address that can be used in a non-macro load or store." -+ (and (match_code "mem") -+ (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) -+ -+(define_memory_constraint "m" -+ "A memory operand whose address is formed by a base register and offset -+ that is suitable for use in instructions with the same addressing mode -+ as @code{st.w} and @code{ld.w}." -+ (and (match_code "mem") -+ (match_test "loongarch_12bit_offset_address_p (XEXP (op, 0), mode)"))) -+ -+(define_constraint "S" -+ "@internal -+ A constant call address." -+ (and (match_operand 0 "call_insn_operand") -+ (match_test "CONSTANT_P (op)"))) -+ -+(define_memory_constraint "W" -+ "@internal -+ A memory address based on a member of @code{BASE_REG_CLASS}. 
This is -+ true for all references (although it can sometimes be implicit -+ if @samp{!TARGET_EXPLICIT_RELOCS})." -+ (and (match_code "mem") -+ (match_operand 0 "memory_operand") -+ (and (not (match_operand 0 "stack_operand")) -+ (not (match_test "CONSTANT_P (XEXP (op, 0))"))))) -+ -+(define_constraint "YG" -+ "@internal -+ A vector zero." -+ (and (match_code "const_vector") -+ (match_test "op == CONST0_RTX (mode)"))) -+ -+(define_constraint "YA" -+ "@internal -+ An unsigned 6-bit constant." -+ (and (match_code "const_int") -+ (match_test "UIMM6_OPERAND (ival)"))) -+ -+(define_constraint "YB" -+ "@internal -+ A signed 10-bit constant." -+ (and (match_code "const_int") -+ (match_test "IMM10_OPERAND (ival)"))) -+ -+(define_constraint "Yb" -+ "@internal" -+ (match_operand 0 "qi_mask_operand")) -+ -+(define_constraint "Yd" -+ "@internal -+ A constant @code{move_operand} that can be safely loaded into @code{$25} -+ using @code{la}." -+ (and (match_operand 0 "move_operand") -+ (match_test "CONSTANT_P (op)"))) -+ -+(define_constraint "Yh" -+ "@internal" -+ (match_operand 0 "hi_mask_operand")) -+ -+(define_constraint "Yw" -+ "@internal" -+ (match_operand 0 "si_mask_operand")) -+ -+(define_constraint "Yx" -+ "@internal" -+ (match_operand 0 "low_bitmask_operand")) -+ -+(define_constraint "YI" -+ "@internal -+ A replicated vector const in which the replicated value is in the range -+ [-512,511]." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_same_int_p (op, mode, -512, 511)"))) -+ -+(define_constraint "YC" -+ "@internal -+ A replicated vector const in which the replicated value has a single -+ bit set." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_bitimm_set_p (op, mode)"))) -+ -+(define_constraint "YZ" -+ "@internal -+ A replicated vector const in which the replicated value has a single -+ bit clear." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_bitimm_clr_p (op, mode)"))) -+ -+(define_constraint "Unv5" -+ "@internal -+ A replicated vector const in which the replicated value is in the range -+ [-31,0]." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_same_int_p (op, mode, -31, 0)"))) -+ -+(define_constraint "Uuv5" -+ "@internal -+ A replicated vector const in which the replicated value is in the range -+ [0,31]." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 31)"))) -+ -+(define_constraint "Usv5" -+ "@internal -+ A replicated vector const in which the replicated value is in the range -+ [-16,15]." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_same_int_p (op, mode, -16, 15)"))) -+ -+(define_constraint "Uuv6" -+ "@internal -+ A replicated vector const in which the replicated value is in the range -+ [0,63]." -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 63)"))) -+ -+(define_constraint "Urv8" -+ "@internal -+ A replicated vector const with replicated byte values as well as elements" -+ (and (match_code "const_vector") -+ (match_test "loongarch_const_vector_same_bytes_p (op, mode)"))) -+ -+(define_memory_constraint "ZC" -+ "A memory operand whose address is formed by a base register and offset -+ that is suitable for use in instructions with the same addressing mode -+ as @code{ll.w} and @code{sc.w}." 
-+ (and (match_code "mem") -+ (match_test "loongarch_14bit_shifted_offset_address_p (XEXP (op, 0), mode)"))) -+ -+;;(define_address_constraint "ZD" -+;; "An address suitable for a @code{prefetch} instruction, or for any other -+;; instruction with the same addressing mode as @code{prefetch}." -+;; (if_then_else (match_test "ISA_HAS_9BIT_DISPLACEMENT") -+;; (match_test "loongarch_9bit_offset_address_p (op, mode)") -+;; (match_test "loongarch_address_insns (op, mode, false)"))) -+ -+(define_memory_constraint "ZR" -+ "@internal -+ An address valid for loading/storing register exclusive" -+ (match_operand 0 "mem_noofs_operand")) -+ -+(define_memory_constraint "ZB" -+ "@internal -+ An address that is held in a general-purpose register. -+ The offset is zero" -+ (and (match_code "mem") -+ (match_test "GET_CODE(XEXP(op,0)) == REG"))) -+ -diff --git a/gcc/config/loongarch/driver-native.c b/gcc/config/loongarch/driver-native.c -new file mode 100644 -index 000000000..5484ee502 ---- /dev/null -+++ b/gcc/config/loongarch/driver-native.c -@@ -0,0 +1,82 @@ -+/* Subroutines for the gcc driver. -+ Copyright (C) 2008-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tm.h" -+ -+ -+/* This function must set to noinline. Otherwise the arg can not be passed. */ -+int loongson_cpucfg (int arg) -+{ -+ int ret; -+ __asm__ __volatile__ ("cpucfg %0,%1\n\t" /* cpucfg $2,$4. */ -+ :"=r"(ret) -+ :"r"(arg) -+ :); -+ return ret; -+} -+ -+/* This will be called by the spec parser in gcc.c when it sees -+ a %:local_cpu_detect(args) construct. Currently it will be called -+ with either "arch" or "tune" as argument depending on if -march=native -+ or -mtune=native is to be substituted. -+ -+ It returns a string containing new command line parameters to be -+ put at the place of the above two options, depending on what CPU -+ this is executed. E.g. "-march=loongson2f" on a Loongson 2F for -+ -march=native. If the routine can't detect a known processor, -+ the -march or -mtune option is discarded. -+ -+ ARGC and ARGV are set depending on the actual arguments given -+ in the spec. 
*/ -+const char * -+host_detect_local_cpu (int argc, const char **argv) -+{ -+ const char *cpu = NULL; -+ bool arch; -+ int cpucfg_arg; -+ int cpucfg_ret; -+ -+ if (argc < 1) -+ return NULL; -+ -+ arch = strcmp (argv[0], "arch") == 0; -+ if (!arch && strcmp (argv[0], "tune")) -+ return NULL; -+ -+ cpucfg_arg = 0; -+ cpucfg_ret = loongson_cpucfg (cpucfg_arg); -+ if (((cpucfg_ret >> 16) & 0xff) == 0x14) -+ { -+ if (((cpucfg_ret >> 8) & 0xff) == 0xc0) -+ cpu = "la464"; -+ else -+ cpu = NULL; -+ } -+ -+ -+ if (cpu == NULL) -+ return NULL; -+ -+ return concat ("-m", argv[0], "=", cpu, NULL); -+} -diff --git a/gcc/config/loongarch/elf.h b/gcc/config/loongarch/elf.h -new file mode 100644 -index 000000000..b7f938e31 ---- /dev/null -+++ b/gcc/config/loongarch/elf.h -@@ -0,0 +1,50 @@ -+/* Target macros for loongarch*-elf targets. -+ Copyright (C) 1994-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+/* LARCH assemblers don't have the usual .set foo,bar construct; -+ .set is used for assembler options instead. */ -+#undef SET_ASM_OP -+#define ASM_OUTPUT_DEF(FILE, LABEL1, LABEL2) \ -+ do \ -+ { \ -+ fputc ('\t', FILE); \ -+ assemble_name (FILE, LABEL1); \ -+ fputs (" = ", FILE); \ -+ assemble_name (FILE, LABEL2); \ -+ fputc ('\n', FILE); \ -+ } \ -+ while (0) -+ -+#undef ASM_DECLARE_OBJECT_NAME -+#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name -+ -+#undef ASM_FINISH_DECLARE_OBJECT -+#define ASM_FINISH_DECLARE_OBJECT loongarch_finish_declare_object -+ -+/* Leave the linker script to choose the appropriate libraries. */ -+#undef LIB_SPEC -+#define LIB_SPEC "" -+ -+#undef STARTFILE_SPEC -+#define STARTFILE_SPEC "crti%O%s crtbegin%O%s" -+ -+#undef ENDFILE_SPEC -+#define ENDFILE_SPEC "crtend%O%s crtn%O%s" -+ -+#define NO_IMPLICIT_EXTERN_C 1 -diff --git a/gcc/config/loongarch/frame-header-opt.c b/gcc/config/loongarch/frame-header-opt.c -new file mode 100644 -index 000000000..86e5d423d ---- /dev/null -+++ b/gcc/config/loongarch/frame-header-opt.c -@@ -0,0 +1,292 @@ -+/* Analyze functions to determine if callers need to allocate a frame header -+ on the stack. The frame header is used by callees to save their arguments. -+ This optimization is specific to TARGET_OLDABI targets. For TARGET_NEWABI -+ targets, if a frame header is required, it is allocated by the callee. -+ -+ -+ Copyright (C) 2015-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify it -+under the terms of the GNU General Public License as published by the -+Free Software Foundation; either version 3, or (at your option) any -+later version. -+ -+GCC is distributed in the hope that it will be useful, but WITHOUT -+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+for more details. 
-+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "context.h" -+#include "coretypes.h" -+#include "tree.h" -+#include "tree-core.h" -+#include "tree-pass.h" -+#include "target.h" -+#include "target-globals.h" -+#include "profile-count.h" -+#include "cfg.h" -+#include "cgraph.h" -+#include "function.h" -+#include "basic-block.h" -+#include "gimple.h" -+#include "gimple-iterator.h" -+#include "gimple-walk.h" -+ -+static unsigned int frame_header_opt (void); -+ -+namespace { -+ -+const pass_data pass_data_ipa_frame_header_opt = -+{ -+ IPA_PASS, /* type */ -+ "frame-header-opt", /* name */ -+ OPTGROUP_NONE, /* optinfo_flags */ -+ TV_CGRAPHOPT, /* tv_id */ -+ 0, /* properties_required */ -+ 0, /* properties_provided */ -+ 0, /* properties_destroyed */ -+ 0, /* todo_flags_start */ -+ 0, /* todo_flags_finish */ -+}; -+ -+class pass_ipa_frame_header_opt : public ipa_opt_pass_d -+{ -+public: -+ pass_ipa_frame_header_opt (gcc::context *ctxt) -+ : ipa_opt_pass_d (pass_data_ipa_frame_header_opt, ctxt, -+ NULL, /* generate_summary */ -+ NULL, /* write_summary */ -+ NULL, /* read_summary */ -+ NULL, /* write_optimization_summary */ -+ NULL, /* read_optimization_summary */ -+ NULL, /* stmt_fixup */ -+ 0, /* function_transform_todo_flags_start */ -+ NULL, /* function_transform */ -+ NULL) /* variable_transform */ -+ {} -+ -+ /* opt_pass methods: */ -+ virtual bool gate (function *) -+ { -+ /* This optimization has no affect if TARGET_NEWABI. If optimize -+ is not at least 1 then the data needed for the optimization is -+ not available and nothing will be done anyway. */ -+ return TARGET_OLDABI && flag_frame_header_optimization && optimize > 0; -+ } -+ -+ virtual unsigned int execute (function *) { return frame_header_opt (); } -+ -+}; // class pass_ipa_frame_header_opt -+ -+} // anon namespace -+ -+static ipa_opt_pass_d * -+make_pass_ipa_frame_header_opt (gcc::context *ctxt) -+{ -+ return new pass_ipa_frame_header_opt (ctxt); -+} -+ -+void -+loongarch_register_frame_header_opt (void) -+{ -+ opt_pass *p = make_pass_ipa_frame_header_opt (g); -+ struct register_pass_info f = { p, "comdats", 1, PASS_POS_INSERT_AFTER }; -+ register_pass (&f); -+} -+ -+ -+/* Return true if it is certain that this is a leaf function. False if it is -+ not a leaf function or if it is impossible to tell. */ -+ -+static bool -+is_leaf_function (function *fn) -+{ -+ basic_block bb; -+ gimple_stmt_iterator gsi; -+ -+ /* If we do not have a cfg for this function be conservative and assume -+ it is not a leaf function. */ -+ if (fn->cfg == NULL) -+ return false; -+ -+ FOR_EACH_BB_FN (bb, fn) -+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -+ if (is_gimple_call (gsi_stmt (gsi))) -+ return false; -+ return true; -+} -+ -+/* Return true if this function has inline assembly code or if we cannot -+ be certain that it does not. False if we know that there is no inline -+ assembly. */ -+ -+static bool -+has_inlined_assembly (function *fn) -+{ -+ basic_block bb; -+ gimple_stmt_iterator gsi; -+ -+ /* If we do not have a cfg for this function be conservative and assume -+ it is may have inline assembly. 
*/ -+ if (fn->cfg == NULL) -+ return true; -+ -+ FOR_EACH_BB_FN (bb, fn) -+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -+ if (gimple_code (gsi_stmt (gsi)) == GIMPLE_ASM) -+ return true; -+ -+ return false; -+} -+ -+/* Return true if this function will use the stack space allocated by its -+ caller or if we cannot determine for certain that it does not. */ -+ -+static bool -+needs_frame_header_p (function *fn) -+{ -+ tree t; -+ -+ if (fn->decl == NULL) -+ return true; -+ -+ if (fn->stdarg) -+ return true; -+ -+ for (t = DECL_ARGUMENTS (fn->decl); t; t = TREE_CHAIN (t)) -+ { -+ if (!use_register_for_decl (t)) -+ return true; -+ -+ /* Some 64-bit types may get copied to general registers using the frame -+ header, see loongarch_output_64bit_xfer. Checking for SImode only may be -+ overly restrictive but it is guaranteed to be safe. */ -+ if (DECL_MODE (t) != SImode) -+ return true; -+ } -+ -+ return false; -+} -+ -+/* Return true if the argument stack space allocated by function FN is used. -+ Return false if the space is needed or if the need for the space cannot -+ be determined. */ -+ -+static bool -+callees_functions_use_frame_header (function *fn) -+{ -+ basic_block bb; -+ gimple_stmt_iterator gsi; -+ gimple *stmt; -+ tree called_fn_tree; -+ function *called_fn; -+ -+ if (fn->cfg == NULL) -+ return true; -+ -+ FOR_EACH_BB_FN (bb, fn) -+ { -+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -+ { -+ stmt = gsi_stmt (gsi); -+ if (is_gimple_call (stmt)) -+ { -+ called_fn_tree = gimple_call_fndecl (stmt); -+ if (called_fn_tree != NULL) -+ { -+ called_fn = DECL_STRUCT_FUNCTION (called_fn_tree); -+ if (called_fn == NULL -+ || DECL_WEAK (called_fn_tree) -+ || has_inlined_assembly (called_fn) -+ || !is_leaf_function (called_fn) -+ || !called_fn->machine->does_not_use_frame_header) -+ return true; -+ } -+ else -+ return true; -+ } -+ } -+ } -+ return false; -+} -+ -+/* Set the callers_may_not_allocate_frame flag for any function which -+ function FN calls because FN may not allocate a frame header. */ -+ -+static void -+set_callers_may_not_allocate_frame (function *fn) -+{ -+ basic_block bb; -+ gimple_stmt_iterator gsi; -+ gimple *stmt; -+ tree called_fn_tree; -+ function *called_fn; -+ -+ if (fn->cfg == NULL) -+ return; -+ -+ FOR_EACH_BB_FN (bb, fn) -+ { -+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -+ { -+ stmt = gsi_stmt (gsi); -+ if (is_gimple_call (stmt)) -+ { -+ called_fn_tree = gimple_call_fndecl (stmt); -+ if (called_fn_tree != NULL) -+ { -+ called_fn = DECL_STRUCT_FUNCTION (called_fn_tree); -+ if (called_fn != NULL) -+ called_fn->machine->callers_may_not_allocate_frame = true; -+ } -+ } -+ } -+ } -+ return; -+} -+ -+/* Scan each function to determine those that need its frame headers. Perform -+ a second scan to determine if the allocation can be skipped because none of -+ their callees require the frame header. 
*/ -+ -+static unsigned int -+frame_header_opt () -+{ -+ struct cgraph_node *node; -+ function *fn; -+ -+ FOR_EACH_DEFINED_FUNCTION (node) -+ { -+ fn = node->get_fun (); -+ if (fn != NULL) -+ fn->machine->does_not_use_frame_header = !needs_frame_header_p (fn); -+ } -+ -+ FOR_EACH_DEFINED_FUNCTION (node) -+ { -+ fn = node->get_fun (); -+ if (fn != NULL) -+ fn->machine->optimize_call_stack -+ = !callees_functions_use_frame_header (fn) && !is_leaf_function (fn); -+ } -+ -+ FOR_EACH_DEFINED_FUNCTION (node) -+ { -+ fn = node->get_fun (); -+ if (fn != NULL && fn->machine->optimize_call_stack) -+ set_callers_may_not_allocate_frame (fn); -+ } -+ -+ return 0; -+} -diff --git a/gcc/config/loongarch/generic.md b/gcc/config/loongarch/generic.md -new file mode 100644 -index 000000000..321b8e561 ---- /dev/null -+++ b/gcc/config/loongarch/generic.md -@@ -0,0 +1,109 @@ -+;; Generic DFA-based pipeline description for LARCH targets -+;; Copyright (C) 2004-2018 Free Software Foundation, Inc. -+;; -+;; This file is part of GCC. -+ -+;; GCC is free software; you can redistribute it and/or modify it -+;; under the terms of the GNU General Public License as published -+;; by the Free Software Foundation; either version 3, or (at your -+;; option) any later version. -+ -+;; GCC is distributed in the hope that it will be useful, but WITHOUT -+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+;; License for more details. -+ -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . -+ -+ -+;; This file is derived from the old define_function_unit description. -+;; Each reservation can be overridden on a processor-by-processor basis. 
-+ -+(define_insn_reservation "generic_alu" 1 -+ (eq_attr "type" "unknown,prefetch,prefetchx,condmove,const,arith, -+ shift,slt,clz,trap,multi,nop,logical,signext,move") -+ "alu") -+ -+(define_insn_reservation "generic_load" 3 -+ (eq_attr "type" "load,fpload,fpidxload") -+ "alu") -+ -+(define_insn_reservation "generic_store" 1 -+ (eq_attr "type" "store,fpstore,fpidxstore") -+ "alu") -+ -+(define_insn_reservation "generic_xfer" 2 -+ (eq_attr "type" "mftg,mgtf") -+ "alu") -+ -+(define_insn_reservation "generic_branch" 1 -+ (eq_attr "type" "branch,jump,call") -+ "alu") -+ -+(define_insn_reservation "generic_imul" 17 -+ (eq_attr "type" "imul,imul3") -+ "imuldiv*17") -+ -+(define_insn_reservation "generic_fcvt" 1 -+ (eq_attr "type" "fcvt") -+ "alu") -+ -+(define_insn_reservation "generic_fmove" 2 -+ (eq_attr "type" "fabs,fneg,fmove") -+ "alu") -+ -+(define_insn_reservation "generic_fcmp" 3 -+ (eq_attr "type" "fcmp") -+ "alu") -+ -+(define_insn_reservation "generic_fadd" 4 -+ (eq_attr "type" "fadd") -+ "alu") -+ -+(define_insn_reservation "generic_fmul_single" 7 -+ (and (eq_attr "type" "fmul,fmadd") -+ (eq_attr "mode" "SF")) -+ "alu") -+ -+(define_insn_reservation "generic_fmul_double" 8 -+ (and (eq_attr "type" "fmul,fmadd") -+ (eq_attr "mode" "DF")) -+ "alu") -+ -+(define_insn_reservation "generic_fdiv_single" 23 -+ (and (eq_attr "type" "fdiv,frdiv") -+ (eq_attr "mode" "SF")) -+ "alu") -+ -+(define_insn_reservation "generic_fdiv_double" 36 -+ (and (eq_attr "type" "fdiv,frdiv") -+ (eq_attr "mode" "DF")) -+ "alu") -+ -+(define_insn_reservation "generic_fsqrt_single" 54 -+ (and (eq_attr "type" "fsqrt,frsqrt") -+ (eq_attr "mode" "SF")) -+ "alu") -+ -+(define_insn_reservation "generic_fsqrt_double" 112 -+ (and (eq_attr "type" "fsqrt,frsqrt") -+ (eq_attr "mode" "DF")) -+ "alu") -+ -+(define_insn_reservation "generic_atomic" 10 -+ (eq_attr "type" "atomic") -+ "alu") -+ -+;; Sync loop consists of (in order) -+;; (1) optional sync, -+;; (2) LL instruction, -+;; (3) branch and 1-2 ALU instructions, -+;; (4) SC instruction, -+;; (5) branch and ALU instruction. -+;; The net result of this reservation is a big delay with a flush of -+;; ALU pipeline. -+(define_insn_reservation "generic_sync_loop" 40 -+ (eq_attr "type" "syncloop") -+ "alu*39") -diff --git a/gcc/config/loongarch/genopt.sh b/gcc/config/loongarch/genopt.sh -new file mode 100644 -index 000000000..272aac51d ---- /dev/null -+++ b/gcc/config/loongarch/genopt.sh -@@ -0,0 +1,110 @@ -+#!/bin/sh -+# Generate loongarch-tables.opt from the list of CPUs in loongarch-cpus.def. -+# Copyright (C) 2011-2018 Free Software Foundation, Inc. -+# -+# This file is part of GCC. -+# -+# GCC is free software; you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation; either version 3, or (at your option) -+# any later version. -+# -+# GCC is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GCC; see the file COPYING3. If not see -+# . -+ -+cat <. 
-+ -+Enum -+Name(loongarch_arch_opt_value) Type(int) -+Known LARCH CPUs (for use with the -march= and -mtune= options): -+ -+EnumValue -+Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly -+ -+EOF -+ -+awk -F'[(, ]+' ' -+BEGIN { -+ value = 0 -+} -+ -+# Write an entry for a single string accepted as a -march= argument. -+ -+function write_one_arch_value(name, value, flags) -+{ -+ print "EnumValue" -+ print "Enum(loongarch_arch_opt_value) String(" name ") Value(" value ")" flags -+ print "" -+} -+ -+# The logic for matching CPU name variants should be the same as in GAS. -+ -+# Write an entry for a single string accepted as a -march= argument, -+# plus any variant with a final "000" replaced by "k". -+ -+function write_arch_value_maybe_k(name, value, flags) -+{ -+ write_one_arch_value(name, value, flags) -+ if (name ~ "000$") { -+ sub("000$", "k", name) -+ write_one_arch_value(name, value, "") -+ } -+} -+ -+# Write all the entries for a -march= argument. In addition to -+# replacement of a final "000" with "k", an argument starting with -+# "vr", "rm" or "r" followed by a number, or just a plain number, -+# matches a plain number or "r" followed by a plain number. -+ -+function write_all_arch_values(name, value) -+{ -+ write_arch_value_maybe_k(name, value, " Canonical") -+ cname = name -+ if (cname ~ "^vr") { -+ sub("^vr", "", cname) -+ } else if (cname ~ "^rm") { -+ sub("^rm", "", cname) -+ } else if (cname ~ "^r") { -+ sub("^r", "", cname) -+ } -+ if (cname ~ "^[0-9]") { -+ if (cname != name) -+ write_arch_value_maybe_k(cname, value, "") -+ rname = "r" cname -+ if (rname != name) -+ write_arch_value_maybe_k(rname, value, "") -+ } -+} -+ -+/^LARCH_CPU/ { -+ name = $2 -+ gsub("\"", "", name) -+ write_all_arch_values(name, value) -+ value++ -+}' $1/loongarch-cpus.def -diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h -new file mode 100644 -index 000000000..1304e2e97 ---- /dev/null -+++ b/gcc/config/loongarch/gnu-user.h -@@ -0,0 +1,132 @@ -+/* Definitions for LARCH systems using GNU userspace. -+ Copyright (C) 1998-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#undef WCHAR_TYPE -+#define WCHAR_TYPE "int" -+ -+#undef WCHAR_TYPE_SIZE -+#define WCHAR_TYPE_SIZE 32 -+ -+#undef ASM_DECLARE_OBJECT_NAME -+#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name -+ -+/* If we don't set MASK_ABICALLS, we can't default to PIC. */ -+/* #undef TARGET_DEFAULT */ -+/* #define TARGET_DEFAULT MASK_ABICALLS */ -+ -+#define TARGET_OS_CPP_BUILTINS() \ -+ do { \ -+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \ -+ /* The GNU C++ standard library requires this. */ \ -+ if (c_dialect_cxx ()) \ -+ builtin_define ("_GNU_SOURCE"); \ -+ } while (0) -+ -+#undef SUBTARGET_CPP_SPEC -+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" -+ -+/* A standard GNU/Linux mapping. 
On most targets, it is included in -+ CC1_SPEC itself by config/linux.h, but loongarch.h overrides CC1_SPEC -+ and provides this hook instead. */ -+#undef SUBTARGET_CC1_SPEC -+#define SUBTARGET_CC1_SPEC GNU_USER_TARGET_CC1_SPEC -+ -+/* -G is incompatible with -KPIC which is the default, so only allow objects -+ in the small data section if the user explicitly asks for it. */ -+#undef LARCH_DEFAULT_GVALUE -+#define LARCH_DEFAULT_GVALUE 0 -+ -+#undef GNU_USER_TARGET_LINK_SPEC -+#define GNU_USER_TARGET_LINK_SPEC "\ -+ %{G*} %{EB} %{EL} %{shared} \ -+ %{!shared: \ -+ %{!static: \ -+ %{rdynamic:-export-dynamic} \ -+ %{mabi=lp32: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP32 "} \ -+ %{mabi=lp64: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP64 "}} \ -+ %{static}} \ -+ %{mabi=lp32:-m" GNU_USER_LINK_EMULATION32 "} \ -+ %{mabi=lp64:-m" GNU_USER_LINK_EMULATION64 "}" -+ -+#undef LINK_SPEC -+#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC -+ -+/* The LARCH assembler has different syntax for .set. We set it to -+ .dummy to trap any errors. */ -+#undef SET_ASM_OP -+#define SET_ASM_OP "\t.dummy\t" -+ -+#undef ASM_OUTPUT_DEF -+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ -+ do { \ -+ fputc ( '\t', FILE); \ -+ assemble_name (FILE, LABEL1); \ -+ fputs ( " = ", FILE); \ -+ assemble_name (FILE, LABEL2); \ -+ fputc ( '\n', FILE); \ -+ } while (0) -+ -+/* The glibc _mcount stub will save $v0 for us. Don't mess with saving -+ it, since ASM_OUTPUT_REG_PUSH/ASM_OUTPUT_REG_POP do not work in the -+ presence of $gp-relative calls. */ -+#undef ASM_OUTPUT_REG_PUSH -+#undef ASM_OUTPUT_REG_POP -+ -+#undef LIB_SPEC -+#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC -+ -+#define NO_SHARED_SPECS "" -+ -+/* -march=native handling only makes sense with compiler running on -+ a LARCH chip. */ -+#if defined(__loongarch__) -+extern const char *host_detect_local_cpu (int argc, const char **argv); -+# define EXTRA_SPEC_FUNCTIONS \ -+ { "local_cpu_detect", host_detect_local_cpu }, -+ -+# define MARCH_MTUNE_NATIVE_SPECS \ -+ " %{march=native:%. 
*/ -+ -+#ifndef _GCC_LOONGARCH_BASE_INTRIN_H -+#define _GCC_LOONGARCH_BASE_INTRIN_H -+ -+#ifdef __cplusplus -+extern "C"{ -+#endif -+ -+typedef struct drdtime{ -+ unsigned long dvalue; -+ unsigned long dtimeid; -+} __drdtime_t; -+ -+typedef struct rdtime{ -+ unsigned int value; -+ unsigned int timeid; -+} __rdtime_t; -+ -+#ifdef __loongarch64 -+extern __inline __drdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__builtin_loongarch_rdtime_d (void) -+{ -+ __drdtime_t drdtime; -+ __asm__ volatile ( -+ "rdtime.d\t%[val],%[tid]\n\t" -+ : [val]"=&r"(drdtime.dvalue),[tid]"=&r"(drdtime.dtimeid) -+ : -+ ); -+ return drdtime; -+} -+#define __rdtime_d __builtin_loongarch_rdtime_d -+#endif -+ -+extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__builtin_loongarch_rdtimeh_w (void) -+{ -+ __rdtime_t rdtime; -+ __asm__ volatile ( -+ "rdtimeh.w\t%[val],%[tid]\n\t" -+ : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) -+ : -+ ); -+ return rdtime; -+} -+#define __rdtimel_w __builtin_loongarch_rdtimel_w -+ -+extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__builtin_loongarch_rdtimel_w (void) -+{ -+ __rdtime_t rdtime; -+ __asm__ volatile ( -+ "rdtimel.w\t%[val],%[tid]\n\t" -+ : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) -+ : -+ ); -+ return rdtime; -+} -+#define __rdtimeh_w __builtin_loongarch_rdtimeh_w -+ -+/* Assembly instruction format: rj, fcsr */ -+/* Data types in instruction templates: USI, UQI */ -+#define __movfcsr2gr(/*ui5*/_1) __builtin_loongarch_movfcsr2gr((_1)); -+ -+/* Assembly instruction format: 0, fcsr, rj */ -+/* Data types in instruction templates: VOID, UQI, USI */ -+#define __movgr2fcsr(/*ui5*/ _1, _2) __builtin_loongarch_movgr2fcsr((unsigned short)_1, (unsigned int)_2); -+ -+#ifdef __loongarch32 -+/* Assembly instruction format: ui5, rj, si12 */ -+/* Data types in instruction templates: VOID, USI, USI, SI */ -+#define __cacop(/*ui5*/ _1, /*unsigned int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_cacop((_1), (unsigned int)(_2), (_3))) -+#elif defined __loongarch64 -+/* Assembly instruction format: ui5, rj, si12 */ -+/* Data types in instruction templates: VOID, USI, UDI, SI */ -+#define __dcacop(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_dcacop((_1), (unsigned long int)(_2), (_3))) -+#else -+# error "Don't support this ABI." 
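-+
-+/* A minimal usage sketch for the rdtime wrappers above (this assumes
-+   __loongarch64 so that __rdtime_d is available; the helper name
-+   stable_counter_delta is hypothetical, not part of this header):
-+
-+     unsigned long
-+     stable_counter_delta (void)
-+     {
-+       __drdtime_t start = __rdtime_d ();
-+       __drdtime_t end = __rdtime_d ();
-+       return end.dvalue - start.dvalue;
-+     }
-+*/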
-+#endif -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: USI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+unsigned int __cpucfg(unsigned int _1) -+{ -+ return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1); -+} -+ -+#ifdef __loongarch64 -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: DI, DI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __asrtle_d(long int _1, long int _2) -+{ -+ __builtin_loongarch_asrtle_d((long int)_1, (long int)_2); -+} -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: DI, DI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __asrtgt_d(long int _1, long int _2) -+{ -+ __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2); -+} -+#endif -+ -+#ifdef __loongarch32 -+/* Assembly instruction format: rd, rj, ui5 */ -+/* Data types in instruction templates: SI, SI, UQI */ -+#define __lddir(/*int*/ _1, /*ui5*/ _2) ((int)__builtin_loongarch_lddir((int)(_1), (_2))) -+#elif defined __loongarch64 -+/* Assembly instruction format: rd, rj, ui5 */ -+/* Data types in instruction templates: DI, DI, UQI */ -+#define __dlddir(/*long int*/ _1, /*ui5*/ _2) ((long int)__builtin_loongarch_dlddir((long int)(_1), (_2))) -+#else -+# error "Don't support this ABI." -+#endif -+ -+#ifdef __loongarch32 -+/* Assembly instruction format: rj, ui5 */ -+/* Data types in instruction templates: VOID, SI, UQI */ -+#define __ldpte(/*int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_ldpte((int)(_1), (_2))) -+#elif defined __loongarch64 -+/* Assembly instruction format: rj, ui5 */ -+/* Data types in instruction templates: VOID, DI, UQI */ -+#define __dldpte(/*long int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_dldpte((long int)(_1), (_2))) -+#else -+# error "Don't support this ABI." 
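-+
-+/* A minimal usage sketch for __cpucfg above: each call reads one word of
-+   the CPU configuration space; the index 0 used here is only an
-+   illustrative assumption.
-+
-+     unsigned int word0 = __cpucfg (0U);
-+*/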
-+#endif -+ -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, QI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crc_w_b_w(char _1, int _2) -+{ -+ return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2); -+} -+ -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, HI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crc_w_h_w(short _1, int _2) -+{ -+ return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2); -+} -+ -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, SI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crc_w_w_w(int _1, int _2) -+{ -+ return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2); -+} -+ -+#ifdef __loongarch64 -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, DI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crc_w_d_w(long int _1, int _2) -+{ -+ return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2); -+} -+#endif -+ -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, QI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crcc_w_b_w(char _1, int _2) -+{ -+ return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2); -+} -+ -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, HI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crcc_w_h_w(short _1, int _2) -+{ -+ return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2); -+} -+ -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, SI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crcc_w_w_w(int _1, int _2) -+{ -+ return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2); -+} -+ -+#ifdef __loongarch64 -+/* Assembly instruction format: rd, rj, rk */ -+/* Data types in instruction templates: SI, DI, SI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+int __crcc_w_d_w(long int _1, int _2) -+{ -+ return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2); -+} -+#endif -+ -+/* Assembly instruction format: rd, ui14 */ -+/* Data types in instruction templates: USI, USI */ -+#define __csrrd(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd((_1))) -+ -+/* Assembly instruction format: rd, ui14 */ -+/* Data types in instruction templates: USI, USI, USI */ -+#define __csrwr(/*unsigned int*/ _1, /*ui14*/ _2) ((unsigned int)__builtin_loongarch_csrwr((unsigned int)(_1), (_2))) -+ -+/* Assembly instruction format: rd, rj, ui14 */ -+/* Data types in instruction templates: USI, USI, USI, USI */ -+#define __csrxchg(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) ((unsigned int)__builtin_loongarch_csrxchg((unsigned int)(_1), (unsigned int)(_2), (_3))) -+ -+#ifdef __loongarch64 -+/* Assembly instruction format: rd, ui14 */ -+/* Data types in instruction templates: UDI, USI */ -+#define __dcsrrd(/*ui14*/ _1) ((unsigned long int)__builtin_loongarch_dcsrrd((_1))) -+ -+/* Assembly instruction format: rd, ui14 */ -+/* Data types in instruction templates: UDI, UDI, USI */ -+#define __dcsrwr(/*unsigned long int*/ _1, 
/*ui14*/ _2) ((unsigned long int)__builtin_loongarch_dcsrwr((unsigned long int)(_1), (_2))) -+ -+/* Assembly instruction format: rd, rj, ui14 */ -+/* Data types in instruction templates: UDI, UDI, UDI, USI */ -+#define __dcsrxchg(/*unsigned long int*/ _1, /*unsigned long int*/ _2, /*ui14*/ _3) ((unsigned long int)__builtin_loongarch_dcsrxchg((unsigned long int)(_1), (unsigned long int)(_2), (_3))) -+#endif -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: UQI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+unsigned char __iocsrrd_b(unsigned int _1) -+{ -+ return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1); -+} -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: UHI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+unsigned short __iocsrrd_h(unsigned int _1) -+{ -+ return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1); -+} -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: USI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+unsigned int __iocsrrd_w(unsigned int _1) -+{ -+ return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1); -+} -+ -+#ifdef __loongarch64 -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: UDI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+unsigned long int __iocsrrd_d(unsigned int _1) -+{ -+ return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1); -+} -+#endif -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: VOID, UQI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __iocsrwr_b(unsigned char _1, unsigned int _2) -+{ -+ return (void)__builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2); -+} -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: VOID, UHI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __iocsrwr_h(unsigned short _1, unsigned int _2) -+{ -+ return (void)__builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2); -+} -+ -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: VOID, USI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __iocsrwr_w(unsigned int _1, unsigned int _2) -+{ -+ return (void)__builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2); -+} -+ -+#ifdef __loongarch64 -+/* Assembly instruction format: rd, rj */ -+/* Data types in instruction templates: VOID, UDI, USI */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __iocsrwr_d(unsigned long int _1, unsigned int _2) -+{ -+ return (void)__builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2); -+} -+#endif -+ -+/* Assembly instruction format: ui15 */ -+/* Data types in instruction templates: UQI */ -+#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1)) -+ -+/* Assembly instruction format: ui15 */ -+/* Data types in instruction templates: UQI */ -+#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1)) -+ -+#define __builtin_loongarch_syscall(a) \ -+{ \ -+ __asm__ volatile ("syscall %0\n\t" \ -+ ::"I"(a)); \ -+} -+#define __syscall 
__builtin_loongarch_syscall
-+
-+#define __builtin_loongarch_break(a) \
-+{ \
-+  __asm__ volatile ("break %0\n\t" \
-+                    ::"I"(a)); \
-+}
-+#define __break __builtin_loongarch_break
-+
-+
-+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__builtin_loongarch_tlbsrch (void)
-+{
-+  __asm__ volatile ("tlbsrch\n\t");
-+}
-+#define __tlbsrch __builtin_loongarch_tlbsrch
-+
-+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__builtin_loongarch_tlbrd (void)
-+{
-+  __asm__ volatile ("tlbrd\n\t");
-+}
-+#define __tlbrd __builtin_loongarch_tlbrd
-+
-+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__builtin_loongarch_tlbwr (void)
-+{
-+  __asm__ volatile ("tlbwr\n\t");
-+}
-+#define __tlbwr __builtin_loongarch_tlbwr
-+
-+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__builtin_loongarch_tlbfill (void)
-+{
-+  __asm__ volatile ("tlbfill\n\t");
-+}
-+#define __tlbfill __builtin_loongarch_tlbfill
-+
-+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__builtin_loongarch_tlbclr (void)
-+{
-+  __asm__ volatile ("tlbclr\n\t");
-+}
-+#define __tlbclr __builtin_loongarch_tlbclr
-+
-+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__builtin_loongarch_tlbflush (void)
-+{
-+  __asm__ volatile ("tlbflush\n\t");
-+}
-+#define __tlbflush __builtin_loongarch_tlbflush
-+
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+#endif /* _GCC_LOONGARCH_BASE_INTRIN_H */
-diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
-new file mode 100644
-index 000000000..24757aaa1
---- /dev/null
-+++ b/gcc/config/loongarch/lasx.md
-@@ -0,0 +1,4825 @@
-+;; Machine Description for LARCH Loongson ASX ASE
-+;;
-+;; Copyright (C) 2018 Free Software Foundation, Inc.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify
-+;; it under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful,
-+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+;; GNU General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3.  If not see
-+;; <http://www.gnu.org/licenses/>.
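-+
-+;; As a minimal sketch of what the patterns in this file implement (the
-+;; array names are illustrative, assuming a compiler configured with LASX
-+;; support):
-+;;
-+;;   int a[8], b[8], c[8];
-+;;   void f (void) { for (int i = 0; i < 8; i++) c[i] = a[i] + b[i]; }
-+;;
-+;; with 256-bit vectorization the loop body maps onto the V8SI add pattern
-+;; below, i.e. a single xvadd.w per eight elements.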
-+;; -+ -+(define_c_enum "unspec" [ -+ UNSPEC_LASX_XVABSD_S -+ UNSPEC_LASX_XVABSD_U -+ UNSPEC_LASX_XVAVG_S -+ UNSPEC_LASX_XVAVG_U -+ UNSPEC_LASX_XVAVGR_S -+ UNSPEC_LASX_XVAVGR_U -+ UNSPEC_LASX_XVBITCLR -+ UNSPEC_LASX_XVBITCLRI -+ UNSPEC_LASX_XVBITREV -+ UNSPEC_LASX_XVBITREVI -+ UNSPEC_LASX_XVBITSET -+ UNSPEC_LASX_XVBITSETI -+ UNSPEC_LASX_XVFCMP_CAF -+ UNSPEC_LASX_XVFCLASS -+ UNSPEC_LASX_XVFCMP_CUNE -+ UNSPEC_LASX_XVFCVT -+ UNSPEC_LASX_XVFCVTH -+ UNSPEC_LASX_XVFCVTL -+ UNSPEC_LASX_XVFLOGB -+ UNSPEC_LASX_XVFRECIP -+ UNSPEC_LASX_XVFRINT -+ UNSPEC_LASX_XVFRSQRT -+ UNSPEC_LASX_XVFCMP_SAF -+ UNSPEC_LASX_XVFCMP_SEQ -+ UNSPEC_LASX_XVFCMP_SLE -+ UNSPEC_LASX_XVFCMP_SLT -+ UNSPEC_LASX_XVFCMP_SNE -+ UNSPEC_LASX_XVFCMP_SOR -+ UNSPEC_LASX_XVFCMP_SUEQ -+ UNSPEC_LASX_XVFCMP_SULE -+ UNSPEC_LASX_XVFCMP_SULT -+ UNSPEC_LASX_XVFCMP_SUN -+ UNSPEC_LASX_XVFCMP_SUNE -+ UNSPEC_LASX_XVFTINT_S -+ UNSPEC_LASX_XVFTINT_U -+ UNSPEC_LASX_XVCLO -+ UNSPEC_LASX_XVSAT_S -+ UNSPEC_LASX_XVSAT_U -+ UNSPEC_LASX_XVREPLVE0 -+ UNSPEC_LASX_XVREPL128VEI -+ UNSPEC_LASX_XVSRAR -+ UNSPEC_LASX_XVSRARI -+ UNSPEC_LASX_XVSRLR -+ UNSPEC_LASX_XVSRLRI -+ UNSPEC_LASX_XVSSUB_S -+ UNSPEC_LASX_XVSSUB_U -+ UNSPEC_LASX_XVSHUF -+ UNSPEC_LASX_XVSHUF_B -+ UNSPEC_LASX_BRANCH -+ UNSPEC_LASX_BRANCH_V -+ -+ UNSPEC_LASX_XVMUH_S -+ UNSPEC_LASX_XVMUH_U -+ UNSPEC_LASX_MXVEXTW_U -+ UNSPEC_LASX_XVSLLWIL_S -+ UNSPEC_LASX_XVSLLWIL_U -+ UNSPEC_LASX_XVSRAN -+ UNSPEC_LASX_XVSSRAN_S -+ UNSPEC_LASX_XVSSRAN_U -+ UNSPEC_LASX_XVSRARN -+ UNSPEC_LASX_XVSSRARN_S -+ UNSPEC_LASX_XVSSRARN_U -+ UNSPEC_LASX_XVSRLN -+ UNSPEC_LASX_XVSSRLN_U -+ UNSPEC_LASX_XVSRLRN -+ UNSPEC_LASX_XVSSRLRN_U -+ UNSPEC_LASX_XVFRSTPI -+ UNSPEC_LASX_XVFRSTP -+ UNSPEC_LASX_XVSHUF4I -+ UNSPEC_LASX_XVBSRL_V -+ UNSPEC_LASX_XVBSLL_V -+ UNSPEC_LASX_XVEXTRINS -+ UNSPEC_LASX_XVMSKLTZ -+ UNSPEC_LASX_XVSIGNCOV -+ UNSPEC_LASX_XVFTINTRNE_W_S -+ UNSPEC_LASX_XVFTINTRNE_L_D -+ UNSPEC_LASX_XVFTINTRP_W_S -+ UNSPEC_LASX_XVFTINTRP_L_D -+ UNSPEC_LASX_XVFTINTRM_W_S -+ UNSPEC_LASX_XVFTINTRM_L_D -+ UNSPEC_LASX_XVFTINT_W_D -+ UNSPEC_LASX_XVFFINT_S_L -+ UNSPEC_LASX_XVFTINTRZ_W_D -+ UNSPEC_LASX_XVFTINTRP_W_D -+ UNSPEC_LASX_XVFTINTRM_W_D -+ UNSPEC_LASX_XVFTINTRNE_W_D -+ UNSPEC_LASX_XVFTINTH_L_S -+ UNSPEC_LASX_XVFTINTL_L_S -+ UNSPEC_LASX_XVFFINTH_D_W -+ UNSPEC_LASX_XVFFINTL_D_W -+ UNSPEC_LASX_XVFTINTRZH_L_S -+ UNSPEC_LASX_XVFTINTRZL_L_S -+ UNSPEC_LASX_XVFTINTRPH_L_S -+ UNSPEC_LASX_XVFTINTRPL_L_S -+ UNSPEC_LASX_XVFTINTRMH_L_S -+ UNSPEC_LASX_XVFTINTRML_L_S -+ UNSPEC_LASX_XVFTINTRNEL_L_S -+ UNSPEC_LASX_XVFTINTRNEH_L_S -+ UNSPEC_LASX_XVFRINTRNE_S -+ UNSPEC_LASX_XVFRINTRNE_D -+ UNSPEC_LASX_XVFRINTRZ_S -+ UNSPEC_LASX_XVFRINTRZ_D -+ UNSPEC_LASX_XVFRINTRP_S -+ UNSPEC_LASX_XVFRINTRP_D -+ UNSPEC_LASX_XVFRINTRM_S -+ UNSPEC_LASX_XVFRINTRM_D -+ UNSPEC_LASX_XVREPLVE0_Q -+ UNSPEC_LASX_XVPERM_W -+ UNSPEC_LASX_XVPERMI_Q -+ UNSPEC_LASX_XVPERMI_D -+ -+ UNSPEC_LASX_XVADDWEV -+ UNSPEC_LASX_XVADDWEV2 -+ UNSPEC_LASX_XVADDWEV3 -+ UNSPEC_LASX_XVSUBWEV -+ UNSPEC_LASX_XVSUBWEV2 -+ UNSPEC_LASX_XVMULWEV -+ UNSPEC_LASX_XVMULWEV2 -+ UNSPEC_LASX_XVMULWEV3 -+ UNSPEC_LASX_XVADDWOD -+ UNSPEC_LASX_XVADDWOD2 -+ UNSPEC_LASX_XVADDWOD3 -+ UNSPEC_LASX_XVSUBWOD -+ UNSPEC_LASX_XVSUBWOD2 -+ UNSPEC_LASX_XVMULWOD -+ UNSPEC_LASX_XVMULWOD2 -+ UNSPEC_LASX_XVMULWOD3 -+ UNSPEC_LASX_XVMADDWEV -+ UNSPEC_LASX_XVMADDWEV2 -+ UNSPEC_LASX_XVMADDWEV3 -+ UNSPEC_LASX_XVMADDWOD -+ UNSPEC_LASX_XVMADDWOD2 -+ UNSPEC_LASX_XVMADDWOD3 -+ UNSPEC_LASX_XVHADDW_Q_D -+ UNSPEC_LASX_XVHSUBW_Q_D -+ UNSPEC_LASX_XVHADDW_QU_DU -+ UNSPEC_LASX_XVHSUBW_QU_DU -+ UNSPEC_LASX_XVROTR -+ UNSPEC_LASX_XVADD_Q -+ 
UNSPEC_LASX_XVSUB_Q
-+  UNSPEC_LASX_XVREPLVE
-+  UNSPEC_LASX_XVSHUF4
-+  UNSPEC_LASX_XVMSKGEZ
-+  UNSPEC_LASX_XVMSKNZ
-+  UNSPEC_LASX_XVEXTH_Q_D
-+  UNSPEC_LASX_XVEXTH_QU_DU
-+  UNSPEC_LASX_XVROTRI
-+  UNSPEC_LASX_XVEXTL_Q_D
-+  UNSPEC_LASX_XVSRLNI
-+  UNSPEC_LASX_XVSRLRNI
-+  UNSPEC_LASX_XVSSRLNI
-+  UNSPEC_LASX_XVSSRLNI2
-+  UNSPEC_LASX_XVSSRLRNI
-+  UNSPEC_LASX_XVSSRLRNI2
-+  UNSPEC_LASX_XVSRANI
-+  UNSPEC_LASX_XVSRARNI
-+  UNSPEC_LASX_XVSSRANI
-+  UNSPEC_LASX_XVSSRANI2
-+  UNSPEC_LASX_XVSSRARNI
-+  UNSPEC_LASX_XVSSRARNI2
-+  UNSPEC_LASX_XVPERMI
-+  UNSPEC_LASX_XVINSVE0
-+  UNSPEC_LASX_XVPICKVE
-+  UNSPEC_LASX_XVSSRLN
-+  UNSPEC_LASX_XVSSRLRN
-+  UNSPEC_LASX_XVEXTL_QU_DU
-+  UNSPEC_LASX_XVLDI
-+  UNSPEC_LASX_XVLDX
-+  UNSPEC_LASX_XVSTX
-+])
-+
-+;; All vector modes with 256 bits.
-+(define_mode_iterator LASX      [V4DF V8SF V4DI V8SI V16HI V32QI])
-+
-+;; Same as LASX.  Used by vcond to iterate two modes.
-+(define_mode_iterator LASX_2    [V4DF V8SF V4DI V8SI V16HI V32QI])
-+
-+;; Only used for splitting insert_d and copy_{u,s}.d.
-+(define_mode_iterator LASX_D    [V4DI V4DF])
-+
-+;; Only used for splitting insert_d and copy_{u,s}.d.
-+(define_mode_iterator LASX_WD   [V4DI V4DF V8SI V8SF])
-+
-+;; Only used for copy256_{u,s}.w.
-+(define_mode_iterator LASX_W    [V8SI V8SF])
-+
-+;; Only integer modes in LASX.
-+(define_mode_iterator ILASX     [V4DI V8SI V16HI V32QI])
-+
-+;; As ILASX but excludes V32QI.
-+(define_mode_iterator ILASX_DWH [V4DI V8SI V16HI])
-+
-+;; As ILASX but excludes V4DI.
-+(define_mode_iterator ILASX_WHB [V8SI V16HI V32QI])
-+
-+;; Only integer modes equal to or larger than a word.
-+(define_mode_iterator ILASX_DW  [V4DI V8SI])
-+
-+;; Only integer modes smaller than a word.
-+(define_mode_iterator ILASX_HB  [V16HI V32QI])
-+
-+;; Only floating-point modes in LASX.
-+(define_mode_iterator FLASX     [V4DF V8SF])
-+
-+;; Only used for the immediate set and shuffle-elements instructions.
-+(define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF])
-+
-+;; The attribute gives the integer vector mode with the same size in
-+;; Loongson ASX.
-+(define_mode_attr VIMODE256
-+  [(V4DF "V4DI")
-+   (V8SF "V8SI")
-+   (V4DI "V4DI")
-+   (V8SI "V8SI")
-+   (V16HI "V16HI")
-+   (V32QI "V32QI")])
-+
-+;; The attribute gives the mode with half-width elements (same vector size)
-+;; for vector modes.
-+(define_mode_attr VHSMODE256
-+  [(V16HI "V32QI")
-+   (V8SI "V16HI")
-+   (V4DI "V8SI")])
-+
-+;; The attribute gives half-size modes for vector modes.
-+(define_mode_attr VHMODE256
-+  [(V32QI "V16QI")
-+   (V16HI "V8HI")
-+   (V8SI "V4SI")
-+   (V4DI "V2DI")])
-+
-+;; The attribute gives half-size float modes for vector modes.
-+(define_mode_attr VFHMODE256
-+  [(V8SF "V4SF")
-+   (V4DF "V2DF")])
-+
-+;; The attribute gives double modes for vector modes in LASX.
-+(define_mode_attr VDMODE256
-+  [(V8SI "V4DI")
-+   (V16HI "V8SI")
-+   (V32QI "V16HI")])
-+
-+;; Extended from VDMODE256.
-+(define_mode_attr VDMODEEXD256
-+  [(V4DI "V4DI")
-+   (V8SI "V4DI")
-+   (V16HI "V8SI")
-+   (V32QI "V16HI")])
-+
-+;; The attribute gives half modes with the same number of elements for
-+;; vector modes.
-+(define_mode_attr VTRUNCMODE256
-+  [(V16HI "V16QI")
-+   (V8SI "V8HI")
-+   (V4DI "V4SI")])
-+
-+;; This attribute gives the mode of the result for "copy_s_b, copy_u_b" etc.
-+(define_mode_attr VRES256
-+  [(V4DF "DF")
-+   (V8SF "SF")
-+   (V4DI "DI")
-+   (V8SI "SI")
-+   (V16HI "SI")
-+   (V32QI "SI")])
-+
-+;; Only used with the LASX_D iterator.
-+(define_mode_attr lasx_d
-+  [(V4DI "reg_or_0")
-+   (V4DF "register")])
-+
-+;; This attribute gives the 256-bit integer vector mode of the same size.
-+(define_mode_attr mode256_i
-+  [(V4DF "v4di")
-+   (V8SF "v8si")
-+   (V4DI "v4di")
-+   (V8SI "v8si")
-+   (V16HI "v16hi")
-+   (V32QI "v32qi")])
-+
-+
-+;; This attribute gives the 256-bit float vector mode of the same size.
-+(define_mode_attr mode256_f
-+  [(V4DF "v4df")
-+   (V8SF "v8sf")
-+   (V4DI "v4df")
-+   (V8SI "v8sf")])
-+
-+;; This attribute gives the suffix for LASX instructions.
-+(define_mode_attr lasxfmt
-+  [(V4DF "d")
-+   (V8SF "w")
-+   (V4DI "d")
-+   (V8SI "w")
-+   (V16HI "h")
-+   (V32QI "b")])
-+
-+(define_mode_attr flasxfmt
-+  [(V4DF "d")
-+   (V8SF "s")])
-+
-+(define_mode_attr lasxfmt_u
-+  [(V4DF "du")
-+   (V8SF "wu")
-+   (V4DI "du")
-+   (V8SI "wu")
-+   (V16HI "hu")
-+   (V32QI "bu")])
-+
-+(define_mode_attr ilasxfmt
-+  [(V4DF "l")
-+   (V8SF "w")])
-+
-+(define_mode_attr ilasxfmt_u
-+  [(V4DF "lu")
-+   (V8SF "wu")])
-+
-+;; This attribute gives the suffix for integers in VHMODE256.
-+(define_mode_attr hlasxfmt
-+  [(V4DI "w")
-+   (V8SI "h")
-+   (V16HI "b")])
-+
-+(define_mode_attr hlasxfmt_u
-+  [(V4DI "wu")
-+   (V8SI "hu")
-+   (V16HI "bu")])
-+
-+;; This attribute gives the suffix for integers in VHSMODE256.
-+(define_mode_attr hslasxfmt
-+  [(V4DI "w")
-+   (V8SI "h")
-+   (V16HI "b")])
-+
-+;; This attribute gives the define_insn suffix for LASX instructions that
-+;; need a distinction between integer and floating point.
-+(define_mode_attr lasxfmt_f
-+  [(V4DF "d_f")
-+   (V8SF "w_f")
-+   (V4DI "d")
-+   (V8SI "w")
-+   (V16HI "h")
-+   (V32QI "b")])
-+
-+(define_mode_attr flasxfmt_f
-+  [(V4DF "d_f")
-+   (V8SF "s_f")
-+   (V4DI "d")
-+   (V8SI "w")
-+   (V16HI "h")
-+   (V32QI "b")])
-+
-+;; This attribute gives the define_insn suffix for LASX instructions that
-+;; need a distinction between integer and floating point.
-+(define_mode_attr lasxfmt_f_wd
-+  [(V4DF "d_f")
-+   (V8SF "w_f")
-+   (V4DI "d")
-+   (V8SI "w")])
-+
-+;; This attribute gives the suffix for integers in VDMODE256.
-+(define_mode_attr dlasxfmt
-+  [(V8SI "d")
-+   (V16HI "w")
-+   (V32QI "h")])
-+
-+(define_mode_attr dlasxfmt_u
-+  [(V8SI "du")
-+   (V16HI "wu")
-+   (V32QI "hu")])
-+
-+;; For VDMODEEXD256.
-+(define_mode_attr dlasxqfmt
-+  [(V4DI "q")
-+   (V8SI "d")
-+   (V16HI "w")
-+   (V32QI "h")])
-+
-+;; This is used to form an immediate operand constraint using
-+;; "const_<indeximm256>_operand".
-+(define_mode_attr indeximm256
-+  [(V4DF "0_to_3")
-+   (V8SF "0_to_7")
-+   (V4DI "0_to_3")
-+   (V8SI "0_to_7")
-+   (V16HI "uimm4")
-+   (V32QI "uimm5")])
-+
-+;; This is used to form an immediate operand constraint referring to the
-+;; high half, using "const_<indeximm_hi>_operand".
-+(define_mode_attr indeximm_hi
-+  [(V4DF "2_or_3")
-+   (V8SF "4_to_7")
-+   (V4DI "2_or_3")
-+   (V8SI "4_to_7")
-+   (V16HI "8_to_15")
-+   (V32QI "16_to_31")])
-+
-+;; This is used to form an immediate operand constraint referring to the
-+;; low half, using "const_<indeximm_lo>_operand".
-+(define_mode_attr indeximm_lo
-+  [(V4DF "0_or_1")
-+   (V8SF "0_to_3")
-+   (V4DI "0_or_1")
-+   (V8SI "0_to_3")
-+   (V16HI "uimm3")
-+   (V32QI "uimm4")])
-+
-+;; This attribute represents the bitmask needed for vec_merge in LASX,
-+;; using "const_<bitmask256>_operand".
-+(define_mode_attr bitmask256
-+  [(V4DF "exp_4")
-+   (V8SF "exp_8")
-+   (V4DI "exp_4")
-+   (V8SI "exp_8")
-+   (V16HI "exp_16")
-+   (V32QI "exp_32")])
-+
-+;; This attribute represents the bitmask needed for vec_merge referring to
-+;; the low half, using "const_<bitmask_lo>_operand".
-+(define_mode_attr bitmask_lo
-+  [(V4DF "exp_2")
-+   (V8SF "exp_4")
-+   (V4DI "exp_2")
-+   (V8SI "exp_4")
-+   (V16HI "exp_8")
-+   (V32QI "exp_16")])
-+
-+
-+;; This attribute is used to form an immediate operand constraint using
-+;; "const_<bitimm256>_operand".
-+(define_mode_attr bitimm256 -+ [(V32QI "uimm3") -+ (V16HI "uimm4") -+ (V8SI "uimm5") -+ (V4DI "uimm6")]) -+ -+ -+(define_mode_attr d2lasxfmt -+ [(V8SI "q") -+ (V16HI "d") -+ (V32QI "w")]) -+ -+(define_mode_attr d2lasxfmt_u -+ [(V8SI "qu") -+ (V16HI "du") -+ (V32QI "wu")]) -+ -+(define_mode_attr VD2MODE256 -+ [(V8SI "V4DI") -+ (V16HI "V4DI") -+ (V32QI "V8SI")]) -+ -+(define_mode_attr lasxfmt_wd -+ [(V4DI "d") -+ (V8SI "w") -+ (V16HI "w") -+ (V32QI "w")]) -+ -+(define_expand "vec_init" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand:LASX 1 "")] -+ "ISA_HAS_LASX" -+{ -+ loongarch_expand_vector_init (operands[0], operands[1]); -+ DONE; -+}) -+ -+;; FIXME: Delete. -+(define_insn "vec_pack_trunc_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vec_concat: -+ (truncate: -+ (match_operand:ILASX_DWH 1 "register_operand" "f")) -+ (truncate: -+ (match_operand:ILASX_DWH 2 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "") -+ (set_attr "can_delay" "no") -+ (set_attr "length" "8")]) -+ -+(define_expand "vec_unpacks_hi_v8sf" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (float_extend:V4DF -+ (vec_select:V4SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_dup 2))))] -+ "ISA_HAS_LASX" -+{ -+ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, true/*high_p*/); -+}) -+ -+(define_expand "vec_unpacks_lo_v8sf" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (float_extend:V4DF -+ (vec_select:V4SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_dup 2))))] -+ "ISA_HAS_LASX" -+{ -+ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, false/*high_p*/); -+}) -+ -+ -+(define_expand "vec_unpacks_hi_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX_WHB 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, true/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_unpacks_lo_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX_WHB 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_unpacku_hi_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX_WHB 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_unpacku_lo_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX_WHB 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/); -+ DONE; -+}) -+ -+(define_insn "lasx_xvinsgr2vr_" -+ [(set (match_operand:LASX_WD 0 "register_operand" "=f") -+ (vec_merge:LASX_WD -+ (vec_duplicate:LASX_WD -+ (match_operand: 1 "reg_or_0_operand" "rJ")) -+ (match_operand:LASX_WD 2 "register_operand" "0") -+ (match_operand 3 "const__operand" "")))] -+ "ISA_HAS_LASX" -+{ -+#if 0 -+ if (!TARGET_64BIT && (mode == V4DImode || mode == V4DFmode)) -+ return "#"; -+ else -+#endif -+ return "xvinsgr2vr.\t%u0,%z1,%y3"; -+} -+ [(set_attr "type" "simd_insert") -+ (set_attr "mode" "")]) -+ -+(define_insn "vec_concatv4di" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (vec_concat:V4DI -+ (match_operand:V2DI 1 "register_operand" "0") -+ (match_operand:V2DI 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+{ -+ return "xvpermi.q\t%u0,%u2,0x20"; -+} -+ 
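;; Control 0x20 builds the result from the low 128-bit lanes of the two
-+ ;; inputs: operand 1 stays in the low half and operand 2's lane becomes
-+ ;; the high half of the destination.
-+ 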
[(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "vec_concatv8si" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "0") -+ (match_operand:V4SI 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+{ -+ return "xvpermi.q\t%u0,%u2,0x20"; -+} -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "vec_concatv16hi" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "0") -+ (match_operand:V8HI 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+{ -+ return "xvpermi.q\t%u0,%u2,0x20"; -+} -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "vec_concatv32qi" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "0") -+ (match_operand:V16QI 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+{ -+ return "xvpermi.q\t%u0,%u2,0x20"; -+} -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "vec_concatv4df" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (vec_concat:V4DF -+ (match_operand:V2DF 1 "register_operand" "0") -+ (match_operand:V2DF 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+{ -+ return "xvpermi.q\t%u0,%u2,0x20"; -+} -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "vec_concatv8sf" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_concat:V8SF -+ (match_operand:V4SF 1 "register_operand" "0") -+ (match_operand:V4SF 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+{ -+ return "xvpermi.q\t%u0,%u2,0x20"; -+} -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+;; xshuf.w -+(define_insn "lasx_xvperm_w" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI -+ [(match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVPERM_W))] -+ "ISA_HAS_LASX" -+ "xvperm.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V8SI")]) -+ -+;; xvpermi.d -+(define_insn "lasx_xvpermi_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI -+ [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand 2 "const_uimm8_operand")] -+ UNSPEC_LASX_XVPERMI_D))] -+ "ISA_HAS_LASX" -+ "xvpermi.d\t%u0,%u1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+;; xvpermi.q -+(define_insn "lasx_xvpermi_q_" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (unspec:LASX -+ [(match_operand:LASX 1 "register_operand" "0") -+ (match_operand:LASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand")] -+ UNSPEC_LASX_XVPERMI_Q))] -+ "ISA_HAS_LASX" -+ "xvpermi.q\t%u0,%u2,%3" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvpickve2gr_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (any_extend:DI -+ (vec_select:DI -+ (match_operand:V4DI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_0_to_3_operand" "")]))))] -+ "ISA_HAS_LASX" -+ "xvpickve2gr.d\t%0,%u1,%2" -+ [(set_attr "type" "simd_copy") -+ (set_attr "mode" "V4DI")]) -+ -+(define_expand "vec_extract" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX 1 "register_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LASX" -+{ -+ if (mode == SImode || mode == DImode) -+ { -+ emit_insn(gen_lasx_xvpickve2gr_ (operands[0], operands[1], operands[2])); -+ } -+ else 
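-+      /* Sub-word element: extract through the low 128 bits with an LSX
-+         vpickve2gr, first moving the register's high 128-bit half into
-+         reach with xvpermi.q when the index selects it.  */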
-+ { -+ HOST_WIDE_INT size_0 = GET_MODE_SIZE (GET_MODE (operands[0])); -+ HOST_WIDE_INT size_1 = GET_MODE_SIZE (GET_MODE (operands[1])); -+ HOST_WIDE_INT val = INTVAL (operands[2]); -+ -+ /* High part */ -+ if (val >= size_1/size_0/2 ) -+ { -+ rtx dest1 = gen_reg_rtx (GET_MODE (operands[1])); -+ rtx pos = GEN_INT( val - size_1/size_0/2); -+ emit_insn (gen_lasx_xvpermi_q_ (dest1, dest1, operands[1], GEN_INT(1))); -+ rtx dest2 = gen_reg_rtx (SImode); -+ emit_insn (gen_lsx_vpickve2gr_ (dest2, -+ gen_lowpart(mode, dest1), -+ pos)); -+ emit_move_insn (operands[0], -+ gen_lowpart (mode, dest2)); -+ } -+ else -+ { -+ rtx dest1 = gen_reg_rtx (SImode); -+ emit_insn (gen_lsx_vpickve2gr_ (dest1, -+ gen_lowpart(mode, operands[1]), -+ operands[2])); -+ emit_move_insn (operands[0], -+ gen_lowpart (mode, dest1)); -+ } -+ } -+ DONE; -+}) -+ -+(define_expand "vec_extract" -+ [(match_operand: 0 "register_operand") -+ (match_operand:FLASX 1 "register_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx temp; -+ HOST_WIDE_INT val = INTVAL (operands[2]); -+ -+ if (val == 0) -+ temp = operands[1]; -+ else -+ { -+ temp = gen_reg_rtx (mode); -+ emit_insn (gen_lasx_xvpickve_ (temp, operands[1], operands[2])); -+ } -+ emit_insn (gen_lasx_vec_extract_ (operands[0], temp)); -+ DONE; -+}) -+ -+(define_insn_and_split "lasx_vec_extract_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vec_select: -+ (match_operand:FLASX 1 "register_operand" "f") -+ (parallel [(const_int 0)])))] -+ "ISA_HAS_LASX" -+ "#" -+ "&& reload_completed" -+ [(set (match_dup 0) (match_dup 1))] -+{ -+ operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); -+} -+ [(set_attr "move_type" "fmove") -+ (set_attr "mode" "")]) -+ -+;; FIXME: 256?? -+(define_expand "vcondu" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand:LASX 1 "reg_or_m1_operand") -+ (match_operand:LASX 2 "reg_or_0_operand") -+ (match_operator 3 "" -+ [(match_operand:ILASX 4 "register_operand") -+ (match_operand:ILASX 5 "register_operand")])] -+ "ISA_HAS_LASX -+ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" -+{ -+ loongarch_expand_vec_cond_expr (mode, mode, operands); -+ DONE; -+}) -+ -+;; FIXME: 256?? 
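-+;; A hedged sketch of the element-wise select that the vcond/vcondu
-+;; expanders implement (names illustrative):
-+;;
-+;;   for (int i = 0; i < 8; i++)
-+;;     r[i] = (x[i] < y[i]) ? a[i] : b[i];
-+;;
-+;; loongarch_expand_vec_cond_expr emits the vector compare and then merges
-+;; the two arms under the resulting mask.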
-+(define_expand "vcond" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand:LASX 1 "reg_or_m1_operand") -+ (match_operand:LASX 2 "reg_or_0_operand") -+ (match_operator 3 "" -+ [(match_operand:LASX_2 4 "register_operand") -+ (match_operand:LASX_2 5 "register_operand")])] -+ "ISA_HAS_LASX -+ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" -+{ -+ loongarch_expand_vec_cond_expr (mode, mode, operands); -+ DONE; -+}) -+ -+;; Same as vcond_ -+(define_expand "vcond_mask" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand:LASX 1 "reg_or_m1_operand") -+ (match_operand:LASX 2 "reg_or_0_operand") -+ (match_operator 3 "" -+ [(match_operand:LASX_2 4 "register_operand") -+ (match_operand:LASX_2 5 "register_operand")])] -+ "ISA_HAS_LASX -+ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" -+{ -+ loongarch_expand_vec_cond_expr (mode, mode, operands); -+ DONE; -+}) -+ -+(define_expand "lasx_xvrepli" -+ [(match_operand:ILASX 0 "register_operand") -+ (match_operand 1 "const_imm10_operand")] -+ "ISA_HAS_LASX" -+{ -+ if (mode == V32QImode) -+ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), -+ mode)); -+ emit_move_insn (operands[0], -+ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); -+ DONE; -+}) -+ -+(define_expand "mov" -+ [(set (match_operand:LASX 0) -+ (match_operand:LASX 1))] -+ "ISA_HAS_LASX" -+{ -+ if (loongarch_legitimize_move (mode, operands[0], operands[1])) -+ DONE; -+}) -+ -+ -+(define_expand "movmisalign" -+ [(set (match_operand:LASX 0) -+ (match_operand:LASX 1))] -+ "ISA_HAS_LASX" -+{ -+ if (loongarch_legitimize_move (mode, operands[0], operands[1])) -+ DONE; -+}) -+ -+;; 256-bit LASX modes can only exist in LASX registers or memory. -+(define_insn "mov_lasx" -+ [(set (match_operand:LASX 0 "nonimmediate_operand" "=f,f,R,*r,*f") -+ (match_operand:LASX 1 "move_operand" "fYGYI,R,f,*f,*r"))] -+ "ISA_HAS_LASX" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") -+ (set_attr "mode" "") -+ (set_attr "can_delay" "no,yes,yes,yes,yes") -+ (set_attr "length" "8,4,4,4,4")]) -+ -+ -+(define_split -+ [(set (match_operand:LASX 0 "nonimmediate_operand") -+ (match_operand:LASX 1 "move_operand"))] -+ "reload_completed && ISA_HAS_LASX -+ && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ [(const_int 0)] -+{ -+ loongarch_split_move_insn (operands[0], operands[1], curr_insn); -+ DONE; -+}) -+ -+;; Offset load -+(define_expand "lasx_mxld_" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq10_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); -+ DONE; -+}) -+ -+;; Offset store -+(define_expand "lasx_mxst_" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq10_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]); -+ DONE; -+}) -+ -+ -+ -+ -+ -+ -+;; LASX -+(define_insn "add3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") -+ (plus:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] -+ "ISA_HAS_LASX" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ 
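/* Alternative 0: both sources live in LASX registers.  Alternatives 1
-+         and 2 below fold a same-valued immediate vector into xvsubi (with
-+         the value negated) or xvaddi respectively.  */
-+      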
return "xvadd.\t%u0,%u1,%u2"; -+ case 1: -+ { -+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0)); -+ -+ operands[2] = GEN_INT (-val); -+ return "xvsubi.\t%u0,%u1,%d2"; -+ } -+ case 2: -+ return "xvaddi.\t%u0,%u1,%E2"; -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "alu_type" "simd_add") -+ (set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "sub3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (minus:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvsub.\t%u0,%u1,%u2 -+ xvsubi.\t%u0,%u1,%E2" -+ [(set_attr "alu_type" "simd_add") -+ (set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "mul3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (mult:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvmul.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_mul") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvmadd_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (plus:ILASX (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand:ILASX 3 "register_operand" "f")) -+ (match_operand:ILASX 1 "register_operand" "0")))] -+ "ISA_HAS_LASX" -+ "xvmadd.\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_mul") -+ (set_attr "mode" "")]) -+ -+ -+ -+(define_insn "lasx_xvmsub_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (minus:ILASX (match_operand:ILASX 1 "register_operand" "0") -+ (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand:ILASX 3 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvmsub.\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_mul") -+ (set_attr "mode" "")]) -+ -+(define_insn "div3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (div:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ { return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "udiv3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (udiv:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ { return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "mod3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (mod:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ { return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "umod3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (umod:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ { return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "xor3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") -+ (xor:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvxor.v\t%u0,%u1,%u2 -+ xvbitrevi.%v0\t%u0,%u1,%V2 -+ 
xvxori.b\t%u0,%u1,%B2" -+ [(set_attr "type" "simd_logic,simd_bit,simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "ior3" -+ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") -+ (ior:LASX -+ (match_operand:LASX 1 "register_operand" "f,f,f") -+ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvor.v\t%u0,%u1,%u2 -+ xvbitseti.%v0\t%u0,%u1,%V2 -+ xvori.b\t%u0,%u1,%B2" -+ [(set_attr "type" "simd_logic,simd_bit,simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "and3" -+ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") -+ (and:LASX -+ (match_operand:LASX 1 "register_operand" "f,f,f") -+ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] -+ "ISA_HAS_LASX" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "xvand.v\t%u0,%u1,%u2"; -+ case 1: -+ { -+ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); -+ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); -+ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); -+ return "xvbitclri.%v0\t%u0,%u1,%V2"; -+ } -+ case 2: -+ return "xvandi.b\t%u0,%u1,%B2"; -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "type" "simd_logic,simd_bit,simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "one_cmpl2" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (not:ILASX (match_operand:ILASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvnor.v\t%u0,%u1,%u1" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "OI")]) -+ -+;; LASX -+(define_insn "vlshr3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (lshiftrt:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvsrl.\t%u0,%u1,%u2 -+ xvsrli.\t%u0,%u1,%E2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+;; LASX ">>" -+(define_insn "vashr3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (ashiftrt:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvsra.\t%u0,%u1,%u2 -+ xvsrai.\t%u0,%u1,%E2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+;; LASX "<<" -+(define_insn "vashl3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (ashift:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvsll.\t%u0,%u1,%u2 -+ xvslli.\t%u0,%u1,%E2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "add3" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (plus:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfadd.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "sub3" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (minus:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfsub.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "mul3" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (mult:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfmul.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fmul") -+ (set_attr "mode" "")]) -+ -+(define_insn 
"div3" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (div:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfdiv.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "fma4" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f") -+ (match_operand:FLASX 3 "register_operand" "0")))] -+ "ISA_HAS_LASX" -+ "xvfmadd.\t%u0,%u1,%u2,%u0" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "fnma4" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (fma:FLASX (neg:FLASX (match_operand:FLASX 1 "register_operand" "f")) -+ (match_operand:FLASX 2 "register_operand" "f") -+ (match_operand:FLASX 3 "register_operand" "0")))] -+ "ISA_HAS_LASX" -+ "xvfnmsub.\t%u0,%u1,%u2,%u0" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "sqrt2" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfsqrt.\t%u0,%u1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvadda_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (plus:ILASX (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")) -+ (abs:ILASX (match_operand:ILASX 2 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvadda.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "ssadd3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (ss_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvsadd.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "usadd3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (us_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvsadd.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvabsd_s_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVABSD_S))] -+ "ISA_HAS_LASX" -+ "xvabsd.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvabsd_u_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVABSD_U))] -+ "ISA_HAS_LASX" -+ "xvabsd.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvavg_s_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVAVG_S))] -+ "ISA_HAS_LASX" -+ "xvavg.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvavg_u_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVAVG_U))] -+ "ISA_HAS_LASX" -+ "xvavg.\t%u0,%u1,%u2" -+ [(set_attr "type" 
"simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvavgr_s_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVAVGR_S))] -+ "ISA_HAS_LASX" -+ "xvavgr.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvavgr_u_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVAVGR_U))] -+ "ISA_HAS_LASX" -+ "xvavgr.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitclr_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVBITCLR))] -+ "ISA_HAS_LASX" -+ "xvbitclr.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitclri_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVBITCLRI))] -+ "ISA_HAS_LASX" -+ "xvbitclri.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitrev_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVBITREV))] -+ "ISA_HAS_LASX" -+ "xvbitrev.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitrevi_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVBITREVI))] -+ "ISA_HAS_LASX" -+ "xvbitrevi.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitsel_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (ior:ILASX (and:ILASX (not:ILASX -+ (match_operand:ILASX 3 "register_operand" "f")) -+ (match_operand:ILASX 1 "register_operand" "f")) -+ (and:ILASX (match_dup 3) -+ (match_operand:ILASX 2 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvbitsel.v\t%u0,%u1,%u2,%u3" -+ [(set_attr "type" "simd_bitmov") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitseli_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (ior:V32QI (and:V32QI (not:V32QI -+ (match_operand:V32QI 1 "register_operand" "0")) -+ (match_operand:V32QI 2 "register_operand" "f")) -+ (and:V32QI (match_dup 1) -+ (match_operand:V32QI 3 "const_vector_same_val_operand" "Urv8"))))] -+ "ISA_HAS_LASX" -+ "xvbitseli.b\t%u0,%u2,%B3" -+ [(set_attr "type" "simd_bitmov") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvbitset_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVBITSET))] -+ "ISA_HAS_LASX" -+ "xvbitset.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbitseti_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVBITSETI))] -+ "ISA_HAS_LASX" -+ 
"xvbitseti.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvs_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (ICC:ILASX -+ (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvs.\t%u0,%u1,%u2 -+ xvs.\t%u0,%u1,%E2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_expand "vec_cmp" -+ [(set (match_operand:ILASX 0 "register_operand") -+ (match_operator:ILASX 1 "" -+ [(match_operand:ILASX 2 "register_operand") -+ (match_operand:ILASX 3 "register_operand")]))] -+ "ISA_HAS_LASX" -+{ -+ bool ok = loongarch_expand_int_vec_cmp (operands); -+ gcc_assert (ok); -+ DONE; -+}) -+ -+(define_expand "vec_cmp" -+ [(set (match_operand:FLASX 0 "register_operand") -+ (match_operator:FLASX 1 "" -+ [(match_operand:FLASX 2 "register_operand") -+ (match_operand:FLASX 3 "register_operand")]))] -+ "ISA_HAS_LASX" -+{ -+ bool ok = loongarch_expand_fp_vec_cmp (operands); -+ gcc_assert (ok); -+ DONE; -+}) -+ -+(define_insn "lasx_xvfclass_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFCLASS))] -+ "ISA_HAS_LASX" -+ "xvfclass.\t%u0,%u1" -+ [(set_attr "type" "simd_fclass") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfcmp_caf_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFCMP_CAF))] -+ "ISA_HAS_LASX" -+ "xvfcmp.caf.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfcmp_cune_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFCMP_CUNE))] -+ "ISA_HAS_LASX" -+ "xvfcmp.cune.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+ -+ -+(define_int_iterator FSC256_UNS [UNSPEC_LASX_XVFCMP_SAF UNSPEC_LASX_XVFCMP_SUN UNSPEC_LASX_XVFCMP_SOR -+ UNSPEC_LASX_XVFCMP_SEQ UNSPEC_LASX_XVFCMP_SNE UNSPEC_LASX_XVFCMP_SUEQ -+ UNSPEC_LASX_XVFCMP_SUNE UNSPEC_LASX_XVFCMP_SULE UNSPEC_LASX_XVFCMP_SULT -+ UNSPEC_LASX_XVFCMP_SLE UNSPEC_LASX_XVFCMP_SLT]) -+ -+(define_int_attr fsc256 -+ [(UNSPEC_LASX_XVFCMP_SAF "saf") -+ (UNSPEC_LASX_XVFCMP_SUN "sun") -+ (UNSPEC_LASX_XVFCMP_SOR "sor") -+ (UNSPEC_LASX_XVFCMP_SEQ "seq") -+ (UNSPEC_LASX_XVFCMP_SNE "sne") -+ (UNSPEC_LASX_XVFCMP_SUEQ "sueq") -+ (UNSPEC_LASX_XVFCMP_SUNE "sune") -+ (UNSPEC_LASX_XVFCMP_SULE "sule") -+ (UNSPEC_LASX_XVFCMP_SULT "sult") -+ (UNSPEC_LASX_XVFCMP_SLE "sle") -+ (UNSPEC_LASX_XVFCMP_SLT "slt")]) -+ -+(define_insn "lasx_xvfcmp__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vfcond: (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfcmp..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "lasx_xvfcmp__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")] -+ FSC256_UNS))] -+ "ISA_HAS_LASX" -+ "xvfcmp..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+ -+(define_mode_attr fint256 -+ [(V8SF "v8si") -+ (V4DF "v4di")]) -+ -+(define_mode_attr FINTCNV256 -+ [(V8SF "I2S") -+ (V4DF "I2D")]) -+ -+(define_mode_attr 
FINTCNV256_2 -+ [(V8SF "S2I") -+ (V4DF "D2I")]) -+ -+(define_insn "float2" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (float:FLASX (match_operand: 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvffint..\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "floatuns2" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unsigned_float:FLASX -+ (match_operand: 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvffint..\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_mode_attr FFQ256 -+ [(V4SF "V16HI") -+ (V2DF "V8SI")]) -+ -+(define_insn "lasx_xvreplgr2vr_" -+ [(set (match_operand:LASX 0 "register_operand" "=f,f") -+ (vec_duplicate:LASX -+ (match_operand: 1 "reg_or_0_operand" "r,J")))] -+ "ISA_HAS_LASX" -+{ -+ if (which_alternative == 1) -+ return "xvldi.b\t%u0,0" ; -+ -+ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) -+ return "#"; -+ else -+ return "xvreplgr2vr.\t%u0,%z1"; -+} -+ [(set_attr "type" "simd_fill") -+ (set_attr "mode" "") -+ (set_attr "can_delay" "no") -+ (set_attr "length" "8")]) -+ -+(define_insn "lasx_xvflogb_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFLOGB))] -+ "ISA_HAS_LASX" -+ "xvflogb.\t%u0,%u1" -+ [(set_attr "type" "simd_flog2") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "smax3" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (smax:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfmax.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfmaxa_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (if_then_else:FLASX -+ (gt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) -+ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "ISA_HAS_LASX" -+ "xvfmaxa.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "smin3" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (smin:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfmin.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfmina_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (if_then_else:FLASX -+ (lt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) -+ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "ISA_HAS_LASX" -+ "xvfmina.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfrecip_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRECIP))] -+ "ISA_HAS_LASX" -+ "xvfrecip.\t%u0,%u1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfrint_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINT))] -+ "ISA_HAS_LASX" -+ "xvfrint.\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfrsqrt_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 
1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRSQRT))] -+ "ISA_HAS_LASX" -+ "xvfrsqrt.\t%u0,%u1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvftint_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINT_S))] -+ "ISA_HAS_LASX" -+ "xvftint..\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvftint_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINT_U))] -+ "ISA_HAS_LASX" -+ "xvftint..\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+ -+ -+(define_insn "fix_trunc2" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (fix: (match_operand:FLASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvftintrz..\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "fixuns_trunc2" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unsigned_fix: (match_operand:FLASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvftintrz..\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvhw_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (addsub:V16HI -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))) -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)])))))] -+ "ISA_HAS_LASX" -+ "xvhw.h.b\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvhw_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (addsub:V8SI -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))))] -+ "ISA_HAS_LASX" -+ "xvhw.w.h\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvhw_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (addsub:V4DI -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LASX" -+ "xvhw.d.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ 
-+(define_insn "lasx_xvpackev_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_select:V32QI -+ (vec_concat:V64QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 32) -+ (const_int 2) (const_int 34) -+ (const_int 4) (const_int 36) -+ (const_int 6) (const_int 38) -+ (const_int 8) (const_int 40) -+ (const_int 10) (const_int 42) -+ (const_int 12) (const_int 44) -+ (const_int 14) (const_int 46) -+ (const_int 16) (const_int 48) -+ (const_int 18) (const_int 50) -+ (const_int 20) (const_int 52) -+ (const_int 22) (const_int 54) -+ (const_int 24) (const_int 56) -+ (const_int 26) (const_int 58) -+ (const_int 28) (const_int 60) -+ (const_int 30) (const_int 62)])))] -+ "ISA_HAS_LASX" -+ "xvpackev.b\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V32QI")]) -+ -+ -+(define_insn "lasx_xvpackev_h" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_select:V16HI -+ (vec_concat:V32HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (match_operand:V16HI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 16) -+ (const_int 2) (const_int 18) -+ (const_int 4) (const_int 20) -+ (const_int 6) (const_int 22) -+ (const_int 8) (const_int 24) -+ (const_int 10) (const_int 26) -+ (const_int 12) (const_int 28) -+ (const_int 14) (const_int 30)])))] -+ "ISA_HAS_LASX" -+ "xvpackev.h\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvpackev_w" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_select:V8SI -+ (vec_concat:V16SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 8) -+ (const_int 2) (const_int 10) -+ (const_int 4) (const_int 12) -+ (const_int 6) (const_int 14)])))] -+ "ISA_HAS_LASX" -+ "xvpackev.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvpackev_w_f" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_select:V8SF -+ (vec_concat:V16SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 8) -+ (const_int 2) (const_int 10) -+ (const_int 4) (const_int 12) -+ (const_int 6) (const_int 14)])))] -+ "ISA_HAS_LASX" -+ "xvpackev.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvilvh_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_select:V32QI -+ (vec_concat:V64QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 "register_operand" "f")) -+ (parallel [(const_int 8) (const_int 40) -+ (const_int 9) (const_int 41) -+ (const_int 10) (const_int 42) -+ (const_int 11) (const_int 43) -+ (const_int 12) (const_int 44) -+ (const_int 13) (const_int 45) -+ (const_int 14) (const_int 46) -+ (const_int 15) (const_int 47) -+ (const_int 24) (const_int 56) -+ (const_int 25) (const_int 57) -+ (const_int 26) (const_int 58) -+ (const_int 27) (const_int 59) -+ (const_int 28) (const_int 60) -+ (const_int 29) (const_int 61) -+ (const_int 30) (const_int 62) -+ (const_int 31) (const_int 63)])))] -+ "ISA_HAS_LASX" -+ "xvilvh.b\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvilvh_h" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_select:V16HI -+ (vec_concat:V32HI -+ (match_operand:V16HI 1 
"register_operand" "f") -+ (match_operand:V16HI 2 "register_operand" "f")) -+ (parallel [(const_int 4) (const_int 20) -+ (const_int 5) (const_int 21) -+ (const_int 6) (const_int 22) -+ (const_int 7) (const_int 23) -+ (const_int 12) (const_int 28) -+ (const_int 13) (const_int 29) -+ (const_int 14) (const_int 30) -+ (const_int 15) (const_int 31)])))] -+ "ISA_HAS_LASX" -+ "xvilvh.h\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvilvh_w" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_select:V8SI -+ (vec_concat:V16SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")) -+ (parallel [(const_int 2) (const_int 10) -+ (const_int 3) (const_int 11) -+ (const_int 6) (const_int 14) -+ (const_int 7) (const_int 15)])))] -+ "ISA_HAS_LASX" -+ "xvilvh.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvilvh_w_f" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_select:V8SF -+ (vec_concat:V16SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")) -+ (parallel [(const_int 2) (const_int 10) -+ (const_int 3) (const_int 11) -+ (const_int 6) (const_int 14) -+ (const_int 7) (const_int 15)])))] -+ "ISA_HAS_LASX" -+ "xvilvh.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SF")]) -+ -+ -+(define_insn "lasx_xvilvh_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (vec_select:V4DI -+ (vec_concat:V8DI -+ (match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 5) -+ (const_int 3) (const_int 7)])))] -+ "ISA_HAS_LASX" -+ "xvilvh.d\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lasx_xvilvh_d_f" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (vec_select:V4DF -+ (vec_concat:V8DF -+ (match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 5) -+ (const_int 3) (const_int 7)])))] -+ "ISA_HAS_LASX" -+ "xvilvh.d\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvpackod_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_select:V32QI -+ (vec_concat:V64QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 33) -+ (const_int 3) (const_int 35) -+ (const_int 5) (const_int 37) -+ (const_int 7) (const_int 39) -+ (const_int 9) (const_int 41) -+ (const_int 11) (const_int 43) -+ (const_int 13) (const_int 45) -+ (const_int 15) (const_int 47) -+ (const_int 17) (const_int 49) -+ (const_int 19) (const_int 51) -+ (const_int 21) (const_int 53) -+ (const_int 23) (const_int 55) -+ (const_int 25) (const_int 57) -+ (const_int 27) (const_int 59) -+ (const_int 29) (const_int 61) -+ (const_int 31) (const_int 63)])))] -+ "ISA_HAS_LASX" -+ "xvpackod.b\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V32QI")]) -+ -+ -+(define_insn "lasx_xvpackod_h" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_select:V16HI -+ (vec_concat:V32HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (match_operand:V16HI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 17) -+ (const_int 3) (const_int 19) -+ (const_int 5) (const_int 21) -+ (const_int 7) (const_int 23) -+ 
(const_int 9) (const_int 25) -+ (const_int 11) (const_int 27) -+ (const_int 13) (const_int 29) -+ (const_int 15) (const_int 31)])))] -+ "ISA_HAS_LASX" -+ "xvpackod.h\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16HI")]) -+ -+ -+(define_insn "lasx_xvpackod_w" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_select:V8SI -+ (vec_concat:V16SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 9) -+ (const_int 3) (const_int 11) -+ (const_int 5) (const_int 13) -+ (const_int 7) (const_int 15)])))] -+ "ISA_HAS_LASX" -+ "xvpackod.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SI")]) -+ -+ -+(define_insn "lasx_xvpackod_w_f" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_select:V8SF -+ (vec_concat:V16SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 9) -+ (const_int 3) (const_int 11) -+ (const_int 5) (const_int 13) -+ (const_int 7) (const_int 15)])))] -+ "ISA_HAS_LASX" -+ "xvpackod.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvilvl_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_select:V32QI -+ (vec_concat:V64QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 32) -+ (const_int 1) (const_int 33) -+ (const_int 2) (const_int 34) -+ (const_int 3) (const_int 35) -+ (const_int 4) (const_int 36) -+ (const_int 5) (const_int 37) -+ (const_int 6) (const_int 38) -+ (const_int 7) (const_int 39) -+ (const_int 16) (const_int 48) -+ (const_int 17) (const_int 49) -+ (const_int 18) (const_int 50) -+ (const_int 19) (const_int 51) -+ (const_int 20) (const_int 52) -+ (const_int 21) (const_int 53) -+ (const_int 22) (const_int 54) -+ (const_int 23) (const_int 55)])))] -+ "ISA_HAS_LASX" -+ "xvilvl.b\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvilvl_h" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_select:V16HI -+ (vec_concat:V32HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (match_operand:V16HI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 16) -+ (const_int 1) (const_int 17) -+ (const_int 2) (const_int 18) -+ (const_int 3) (const_int 19) -+ (const_int 8) (const_int 24) -+ (const_int 9) (const_int 25) -+ (const_int 10) (const_int 26) -+ (const_int 11) (const_int 27)])))] -+ "ISA_HAS_LASX" -+ "xvilvl.h\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvilvl_w" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_select:V8SI -+ (vec_concat:V16SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 8) -+ (const_int 1) (const_int 9) -+ (const_int 4) (const_int 12) -+ (const_int 5) (const_int 13)])))] -+ "ISA_HAS_LASX" -+ "xvilvl.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvilvl_w_f" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_select:V8SF -+ (vec_concat:V16SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 8) -+ (const_int 1) (const_int 9) -+ (const_int 4) (const_int 
12) -+ (const_int 5) (const_int 13)])))] -+ "ISA_HAS_LASX" -+ "xvilvl.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvilvl_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (vec_select:V4DI -+ (vec_concat:V8DI -+ (match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 4) -+ (const_int 2) (const_int 6)])))] -+ "ISA_HAS_LASX" -+ "xvilvl.d\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvilvl_d_f" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (vec_select:V4DF -+ (vec_concat:V8DF -+ (match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 4) -+ (const_int 2) (const_int 6)])))] -+ "ISA_HAS_LASX" -+ "xvilvl.d\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "smax3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (smax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvmax.\t%u0,%u1,%u2 -+ xvmaxi.\t%u0,%u1,%E2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "umax3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (umax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvmax.\t%u0,%u1,%u2 -+ xvmaxi.\t%u0,%u1,%B2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "smin3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (smin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvmin.\t%u0,%u1,%u2 -+ xvmini.\t%u0,%u1,%E2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "umin3" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (umin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") -+ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] -+ "ISA_HAS_LASX" -+ "@ -+ xvmin.\t%u0,%u1,%u2 -+ xvmini.\t%u0,%u1,%B2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvclo_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVCLO))] -+ "ISA_HAS_LASX" -+ "xvclo.\t%u0,%u1" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "clz2" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (clz:ILASX (match_operand:ILASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvclz.\t%u0,%u1" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvnor_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (and:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f,f")) -+ (not:ILASX (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] -+ "ISA_HAS_LASX" -+ "@ -+ xvnor.v\t%u0,%u1,%u2 -+ xvnori.b\t%u0,%u1,%B2" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvpickev_b" -+[(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_select:V32QI -+ (vec_concat:V64QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 
"register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 32) (const_int 34) -+ (const_int 36) (const_int 38) -+ (const_int 40) (const_int 42) -+ (const_int 44) (const_int 46) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30) -+ (const_int 48) (const_int 50) -+ (const_int 52) (const_int 54) -+ (const_int 56) (const_int 58) -+ (const_int 60) (const_int 62)])))] -+ "ISA_HAS_LASX" -+ "xvpickev.b\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvpickev_h" -+[(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_select:V16HI -+ (vec_concat:V32HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (match_operand:V16HI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)])))] -+ "ISA_HAS_LASX" -+ "xvpickev.h\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvpickev_w" -+[(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_select:V8SI -+ (vec_concat:V16SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 8) (const_int 10) -+ (const_int 4) (const_int 6) -+ (const_int 12) (const_int 14)])))] -+ "ISA_HAS_LASX" -+ "xvpickev.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvpickev_w_f" -+[(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_select:V8SF -+ (vec_concat:V16SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 8) (const_int 10) -+ (const_int 4) (const_int 6) -+ (const_int 12) (const_int 14)])))] -+ "ISA_HAS_LASX" -+ "xvpickev.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvpickod_b" -+[(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_select:V32QI -+ (vec_concat:V64QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 33) (const_int 35) -+ (const_int 37) (const_int 39) -+ (const_int 41) (const_int 43) -+ (const_int 45) (const_int 47) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31) -+ (const_int 49) (const_int 51) -+ (const_int 53) (const_int 55) -+ (const_int 57) (const_int 59) -+ (const_int 61) (const_int 63)])))] -+ "ISA_HAS_LASX" -+ "xvpickod.b\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvpickod_h" -+[(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_select:V16HI -+ (vec_concat:V32HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (match_operand:V16HI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ 
(const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)])))] -+ "ISA_HAS_LASX" -+ "xvpickod.h\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvpickod_w" -+[(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_select:V8SI -+ (vec_concat:V16SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 9) (const_int 11) -+ (const_int 5) (const_int 7) -+ (const_int 13) (const_int 15)])))] -+ "ISA_HAS_LASX" -+ "xvpickod.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvpickod_w_f" -+[(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_select:V8SF -+ (vec_concat:V16SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 9) (const_int 11) -+ (const_int 5) (const_int 7) -+ (const_int 13) (const_int 15)])))] -+ "ISA_HAS_LASX" -+ "xvpickod.w\t%u0,%u2,%u1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "popcount2" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (popcount:ILASX (match_operand:ILASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvpcnt.\t%u0,%u1" -+ [(set_attr "type" "simd_pcnt") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "lasx_xvsat_s_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVSAT_S))] -+ "ISA_HAS_LASX" -+ "xvsat.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_sat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsat_u_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVSAT_U))] -+ "ISA_HAS_LASX" -+ "xvsat.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_sat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvshuf4i_" -+ [(set (match_operand:LASX_WHB_W 0 "register_operand" "=f") -+ (unspec:LASX_WHB_W [(match_operand:LASX_WHB_W 1 "register_operand" "f") -+ (match_operand 2 "const_uimm8_operand")] -+ UNSPEC_LASX_XVSHUF4I))] -+ "ISA_HAS_LASX" -+ "xvshuf4i.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shf") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "lasx_xvsrar_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSRAR))] -+ "ISA_HAS_LASX" -+ "xvsrar.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrari_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVSRARI))] -+ "ISA_HAS_LASX" -+ "xvsrari.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrlr_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSRLR))] -+ "ISA_HAS_LASX" -+ "xvsrlr.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrlri_" -+ [(set (match_operand:ILASX 0 
"register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVSRLRI))] -+ "ISA_HAS_LASX" -+ "xvsrlri.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssub_s_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSUB_S))] -+ "ISA_HAS_LASX" -+ "xvssub.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssub_u_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSUB_U))] -+ "ISA_HAS_LASX" -+ "xvssub.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvshuf_" -+ [(set (match_operand:ILASX_DWH 0 "register_operand" "=f") -+ (unspec:ILASX_DWH [(match_operand: 1 "register_operand" "0") -+ (match_operand:ILASX_DWH 2 "register_operand" "f") -+ (match_operand:ILASX_DWH 3 "register_operand" "f")] -+ UNSPEC_LASX_XVSHUF))] -+ "ISA_HAS_LASX" -+ "xvshuf.\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_sld") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvshuf_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f") -+ (match_operand:V32QI 2 "register_operand" "f") -+ (match_operand:V32QI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVSHUF_B))] -+ "ISA_HAS_LASX" -+ "xvshuf.b\t%u0,%u1,%u2,%u3" -+ [(set_attr "type" "simd_sld") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvreplve0_" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (vec_duplicate:LASX -+ (vec_select: -+ (match_operand:LASX 1 "register_operand" "f") -+ (parallel [(const_int 0)]))))] -+ "ISA_HAS_LASX" -+ "xvreplve0.\t%u0,%u1" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvrepl128vei_b_internal" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (vec_duplicate:V32QI -+ (vec_select:V32QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_uimm4_operand" "") -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_operand 3 "const_16_to_31_operand" "") -+ (match_dup 3) (match_dup 3) (match_dup 3) -+ (match_dup 3) (match_dup 3) (match_dup 3) -+ (match_dup 3) (match_dup 3) (match_dup 3) -+ (match_dup 3) (match_dup 3) (match_dup 3) -+ (match_dup 3) (match_dup 3) (match_dup 3)]))))] -+ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 16)" -+ "xvrepl128vei.b\t%u0,%u1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvrepl128vei_h_internal" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (vec_duplicate:V16HI -+ (vec_select:V16HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_uimm3_operand" "") -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_dup 2) -+ (match_operand 3 "const_8_to_15_operand" "") -+ (match_dup 3) (match_dup 3) (match_dup 3) -+ (match_dup 3) (match_dup 3) (match_dup 3) -+ (match_dup 3)]))))] -+ "ISA_HAS_LASX && ((INTVAL 
(operands[3]) - INTVAL (operands[2])) == 8)" -+ "xvrepl128vei.h\t%u0,%u1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvrepl128vei_w_internal" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (vec_duplicate:V8SI -+ (vec_select:V8SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_0_to_3_operand" "") -+ (match_dup 2) (match_dup 2) (match_dup 2) -+ (match_operand 3 "const_4_to_7_operand" "") -+ (match_dup 3) (match_dup 3) (match_dup 3)]))))] -+ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 4)" -+ "xvrepl128vei.w\t%u0,%u1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvrepl128vei_d_internal" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (vec_duplicate:V4DI -+ (vec_select:V4DI -+ (match_operand:V4DI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_0_or_1_operand" "") -+ (match_dup 2) -+ (match_operand 3 "const_2_or_3_operand" "") -+ (match_dup 3)]))))] -+ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 2)" -+ "xvrepl128vei.d\t%u0,%u1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvrepl128vei_" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVREPL128VEI))] -+ "ISA_HAS_LASX" -+ "xvrepl128vei.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+ (define_insn "lasx_xvreplve0__scalar" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand: 1 "register_operand" "f")] -+ UNSPEC_LASX_XVREPLVE0))] -+ "ISA_HAS_LASX" -+ "xvreplve0.\t%u0,%u1" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvreplve0_q" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVREPLVE0_Q))] -+ "ISA_HAS_LASX" -+ "xvreplve0.q\t%u0,%u1" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvfcvt_h_s" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (unspec:V16HI [(match_operand:V8SF 1 "register_operand" "f") -+ (match_operand:V8SF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFCVT))] -+ "ISA_HAS_LASX" -+ "xvfcvt.h.s\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvfcvt_s_d" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFCVT))] -+ "ISA_HAS_LASX" -+ "xvfcvt.s.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "vec_pack_trunc_v4df" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (vec_concat:V8SF -+ (float_truncate:V4SF (match_operand:V4DF 1 "register_operand" "f")) -+ (float_truncate:V4SF (match_operand:V4DF 2 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8SF") -+ (set_attr "can_delay" "no") -+ (set_attr "length" "8")]) -+ -+;; Define for builtin function. 
-+(define_insn "lasx_xvfcvth_s_h" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFCVTH))] -+ "ISA_HAS_LASX" -+ "xvfcvth.s.h\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8SF")]) -+ -+;; Define for builtin function. -+(define_insn "lasx_xvfcvth_d_s" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFCVTH))] -+ "ISA_HAS_LASX" -+ "xvfcvth.d.s\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DF")]) -+ -+;; Define for gen insn. -+(define_insn "lasx_xvfcvth_d_insn" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (float_extend:V4DF -+ (vec_select:V4SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (parallel [(const_int 4) (const_int 5) -+ (const_int 6) (const_int 7)]))))] -+ "ISA_HAS_LASX" -+ "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DF") -+ (set_attr "can_delay" "no") -+ (set_attr "length" "12")]) -+ -+;; Define for builtin function. -+(define_insn "lasx_xvfcvtl_s_h" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFCVTL))] -+ "ISA_HAS_LASX" -+ "xvfcvtl.s.h\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8SF")]) -+ -+;; Define for builtin function. -+(define_insn "lasx_xvfcvtl_d_s" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFCVTL))] -+ "ISA_HAS_LASX" -+ "xvfcvtl.d.s\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DF")]) -+ -+;; Define for gen insn. 
-+(define_insn "lasx_xvfcvtl_d_insn" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (float_extend:V4DF -+ (vec_select:V4SF -+ (match_operand:V8SF 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3)]))))] -+ "ISA_HAS_LASX" -+ "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DF") -+ (set_attr "can_delay" "no") -+ (set_attr "length" "8")]) -+ -+(define_code_attr lasxbr -+ [(eq "xbz") -+ (ne "xbnz")]) -+ -+(define_code_attr lasxeq_v -+ [(eq "eqz") -+ (ne "nez")]) -+ -+(define_code_attr lasxne_v -+ [(eq "nez") -+ (ne "eqz")]) -+ -+(define_code_attr lasxeq -+ [(eq "anyeqz") -+ (ne "allnez")]) -+ -+(define_code_attr lasxne -+ [(eq "allnez") -+ (ne "anyeqz")]) -+ -+(define_insn "lasx__" -+ [(set (pc) (if_then_else -+ (equality_op -+ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] -+ UNSPEC_LASX_BRANCH) -+ (match_operand:SI 2 "const_0_operand")) -+ (label_ref (match_operand 0)) -+ (pc))) -+ (clobber (match_scratch:FCC 3 "=z"))] -+ "ISA_HAS_LASX" -+{ -+ return loongarch_output_conditional_branch (insn, operands, -+ "xvset.\t%Z3%u1\n\tbcnez\t%Z3%0", -+ "xvset.\t%z3%u1\n\tbcnez\t%Z3%0"); -+} -+ [(set_attr "type" "simd_branch") -+ (set_attr "mode" "") -+ (set_attr "compact_form" "never")]) -+ -+(define_insn "lasx__v_" -+ [(set (pc) (if_then_else -+ (equality_op -+ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] -+ UNSPEC_LASX_BRANCH_V) -+ (match_operand:SI 2 "const_0_operand")) -+ (label_ref (match_operand 0)) -+ (pc))) -+ (clobber (match_scratch:FCC 3 "=z"))] -+ "ISA_HAS_LASX" -+{ -+ return loongarch_output_conditional_branch (insn, operands, -+ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0", -+ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0"); -+} -+ [(set_attr "type" "simd_branch") -+ (set_attr "mode" "") -+ (set_attr "compact_form" "never")]) -+ -+ -+ -+ -+;; loongson-asx. 
-+(define_insn "lasx_vext2xv_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3) -+ (const_int 4) (const_int 5) -+ (const_int 6) (const_int 7) -+ (const_int 8) (const_int 9) -+ (const_int 10) (const_int 11) -+ (const_int 12) (const_int 13) -+ (const_int 14) (const_int 15)]))))] -+ "ISA_HAS_LASX" -+ "vext2xv.h.b\t%u0,%u1" -+[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_vext2xv_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3) -+ (const_int 4) (const_int 5) -+ (const_int 6) (const_int 7)]))))] -+ "ISA_HAS_LASX" -+ "vext2xv.w.h\t%u0,%u1" -+[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_vext2xv_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3)]))))] -+ "ISA_HAS_LASX" -+ "vext2xv.d.w\t%u0,%u1" -+[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_vext2xv_w_b" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (any_extend:V8SI -+ (vec_select:V8QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3) -+ (const_int 4) (const_int 5) -+ (const_int 6) (const_int 7)]))))] -+ "ISA_HAS_LASX" -+ "vext2xv.w.b\t%u0,%u1" -+[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_vext2xv_d_h" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (any_extend:V4DI -+ (vec_select:V4HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3)]))))] -+ "ISA_HAS_LASX" -+ "vext2xv.d.h\t%u0,%u1" -+[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_vext2xv_d_b" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (any_extend:V4DI -+ (vec_select:V4QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3)]))))] -+ "ISA_HAS_LASX" -+ "vext2xv.d.b\t%u0,%u1" -+[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DI")]) -+ -+ -+;; Extend loongson-sx to loongson-asx. 
-+(define_insn "xvandn3" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (and:LASX (not:LASX (match_operand:LASX 1 "register_operand" "f")) -+ (match_operand:LASX 2 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvandn.v\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "abs2" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvsigncov.\t%u0,%u1,%u1" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "neg2" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (neg:ILASX (match_operand:ILASX 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvneg.\t%u0,%u1" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvmuh_s_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMUH_S))] -+ "ISA_HAS_LASX" -+ "xvmuh.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvmuh_u_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMUH_U))] -+ "ISA_HAS_LASX" -+ "xvmuh.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_mxvextw_u_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SI 1 "register_operand" "f")] -+ UNSPEC_LASX_MXVEXTW_U))] -+ "ISA_HAS_LASX" -+ "mxvextw_u.d\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvsllwil_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVSLLWIL_S))] -+ "ISA_HAS_LASX" -+ "xvsllwil..\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsllwil_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVSLLWIL_U))] -+ "ISA_HAS_LASX" -+ "xvsllwil..\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsran__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSRAN))] -+ "ISA_HAS_LASX" -+ "xvsran..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssran_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRAN_S))] -+ "ISA_HAS_LASX" -+ "xvssran..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssran_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRAN_U))] -+ "ISA_HAS_LASX" -+ "xvssran..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrarn__" -+ [(set (match_operand: 0 
"register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSRARN))] -+ "ISA_HAS_LASX" -+ "xvsrarn..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrarn_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRARN_S))] -+ "ISA_HAS_LASX" -+ "xvssrarn..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrarn_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRARN_U))] -+ "ISA_HAS_LASX" -+ "xvssrarn..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrln__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSRLN))] -+ "ISA_HAS_LASX" -+ "xvsrln..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrln_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRLN_U))] -+ "ISA_HAS_LASX" -+ "xvssrln..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrlrn__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSRLRN))] -+ "ISA_HAS_LASX" -+ "xvsrlrn..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrlrn_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRLRN_U))] -+ "ISA_HAS_LASX" -+ "xvssrlrn..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfrstpi_" -+ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") -+ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") -+ (match_operand:ILASX_HB 2 "register_operand" "f") -+ (match_operand 3 "const_uimm5_operand" "")] -+ UNSPEC_LASX_XVFRSTPI))] -+ "ISA_HAS_LASX" -+ "xvfrstpi.\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvfrstp_" -+ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") -+ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") -+ (match_operand:ILASX_HB 2 "register_operand" "f") -+ (match_operand:ILASX_HB 3 "register_operand" "f")] -+ UNSPEC_LASX_XVFRSTP))] -+ "ISA_HAS_LASX" -+ "xvfrstp.\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvshuf4i_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand")] -+ UNSPEC_LASX_XVSHUF4I))] -+ "ISA_HAS_LASX" -+ "xvshuf4i.d\t%u0,%u2,%3" -+ [(set_attr "type" "simd_sld") -+ (set_attr "mode" "V4DI")]) -+ 
-+(define_insn "lasx_xvbsrl_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const_uimm5_operand" "")] -+ UNSPEC_LASX_XVBSRL_V))] -+ "ISA_HAS_LASX" -+ "xvbsrl.v\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvbsll_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const_uimm5_operand" "")] -+ UNSPEC_LASX_XVBSLL_V))] -+ "ISA_HAS_LASX" -+ "xvbsll.v\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvextrins_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVEXTRINS))] -+ "ISA_HAS_LASX" -+ "xvextrins.\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvmskltz_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVMSKLTZ))] -+ "ISA_HAS_LASX" -+ "xvmskltz.\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsigncov_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSIGNCOV))] -+ "ISA_HAS_LASX" -+ "xvsigncov.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_expand "copysign3" -+ [(set (match_dup 4) -+ (and:FLASX -+ (not:FLASX (match_dup 3)) -+ (match_operand:FLASX 1 "register_operand"))) -+ (set (match_dup 5) -+ (and:FLASX (match_dup 3) -+ (match_operand:FLASX 2 "register_operand"))) -+ (set (match_operand:FLASX 0 "register_operand") -+ (ior:FLASX (match_dup 4) (match_dup 5)))] -+ "ISA_HAS_LASX" -+{ -+ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); -+ -+ operands[4] = gen_reg_rtx (mode); -+ operands[5] = gen_reg_rtx (mode); -+}) -+ -+ -+(define_insn "absv4df2" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (abs:V4DF (match_operand:V4DF 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvbitclri.d\t%u0,%u1,63" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "absv8sf2" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (abs:V8SF (match_operand:V8SF 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvbitclri.w\t%u0,%u1,31" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "negv4df2" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (neg:V4DF (match_operand:V4DF 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvbitrevi.d\t%u0,%u1,63" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "negv8sf2" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (neg:V8SF (match_operand:V8SF 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvbitrevi.w\t%u0,%u1,31" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "xvfmadd4" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f") -+ (match_operand:FLASX 3 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvfmadd.\t%u0,%u1,$u2,%u3" -+ [(set_attr "type" 
"simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "xvfmsub4" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f") -+ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvfmsub.\t%u0,%u1,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "xvfnmsub4_nmsub4" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (neg:FLASX -+ (fma:FLASX -+ (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f") -+ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f")))))] -+ "ISA_HAS_LASX" -+ "xvfnmsub.\t%u0,%u1,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "xvfnmadd4_nmadd4" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (neg:FLASX -+ (fma:FLASX -+ (match_operand:FLASX 1 "register_operand" "f") -+ (match_operand:FLASX 2 "register_operand" "f") -+ (match_operand:FLASX 3 "register_operand" "f"))))] -+ "ISA_HAS_LASX" -+ "xvfnmadd.\t%u0,%u1,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvftintrne_w_s" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRNE_W_S))] -+ "ISA_HAS_LASX" -+ "xvftintrne.w.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrne_l_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRNE_L_D))] -+ "ISA_HAS_LASX" -+ "xvftintrne.l.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftintrp_w_s" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRP_W_S))] -+ "ISA_HAS_LASX" -+ "xvftintrp.w.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrp_l_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRP_L_D))] -+ "ISA_HAS_LASX" -+ "xvftintrp.l.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftintrm_w_s" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRM_W_S))] -+ "ISA_HAS_LASX" -+ "xvftintrm.w.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrm_l_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRM_L_D))] -+ "ISA_HAS_LASX" -+ "xvftintrm.l.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftint_w_d" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINT_W_D))] -+ "ISA_HAS_LASX" -+ "xvftint.w.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvffint_s_l" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 
"register_operand" "f")] -+ UNSPEC_LASX_XVFFINT_S_L))] -+ "ISA_HAS_LASX" -+ "xvffint.s.l\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvftintrz_w_d" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRZ_W_D))] -+ "ISA_HAS_LASX" -+ "xvftintrz.w.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftintrp_w_d" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRP_W_D))] -+ "ISA_HAS_LASX" -+ "xvftintrp.w.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftintrm_w_d" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRM_W_D))] -+ "ISA_HAS_LASX" -+ "xvftintrm.w.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftintrne_w_d" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") -+ (match_operand:V4DF 2 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRNE_W_D))] -+ "ISA_HAS_LASX" -+ "xvftintrne.w.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvftinth_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTH_L_S))] -+ "ISA_HAS_LASX" -+ "xvftinth.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintl_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTL_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintl.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvffinth_d_w" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFFINTH_D_W))] -+ "ISA_HAS_LASX" -+ "xvffinth.d.w\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvffintl_d_w" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFFINTL_D_W))] -+ "ISA_HAS_LASX" -+ "xvffintl.d.w\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvftintrzh_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRZH_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrzh.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrzl_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRZL_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrzl.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lasx_xvftintrph_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 
"register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRPH_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrph.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lasx_xvftintrpl_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRPL_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrpl.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrmh_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRMH_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrmh.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrml_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRML_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrml.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrneh_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRNEH_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrneh.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvftintrnel_l_s" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFTINTRNEL_L_S))] -+ "ISA_HAS_LASX" -+ "xvftintrnel.l.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvfrintrne_s" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRNE_S))] -+ "ISA_HAS_LASX" -+ "xvfrintrne.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvfrintrne_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRNE_D))] -+ "ISA_HAS_LASX" -+ "xvfrintrne.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvfrintrz_s" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRZ_S))] -+ "ISA_HAS_LASX" -+ "xvfrintrz.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvfrintrz_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRZ_D))] -+ "ISA_HAS_LASX" -+ "xvfrintrz.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvfrintrp_s" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRP_S))] -+ "ISA_HAS_LASX" -+ "xvfrintrp.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvfrintrp_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRP_D))] -+ "ISA_HAS_LASX" -+ "xvfrintrp.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+(define_insn "lasx_xvfrintrm_s" -+ [(set (match_operand:V8SI 0 
"register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRM_S))] -+ "ISA_HAS_LASX" -+ "xvfrintrm.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "lasx_xvfrintrm_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINTRM_D))] -+ "ISA_HAS_LASX" -+ "xvfrintrm.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+;; Offset load and broadcast -+(define_expand "lasx_xvldrepl_" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand 2 "aq12_operand") -+ (match_operand 1 "pmode_register_operand")] -+ "ISA_HAS_LASX" -+{ -+ emit_insn (gen_lasx_xvldrepl__insn -+ (operands[0], operands[1], operands[2])); -+ DONE; -+}) -+ -+(define_insn "lasx_xvldrepl__insn" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (vec_duplicate:LASX -+ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand 2 "aq12_operand" )))))] -+ "ISA_HAS_LASX" -+{ -+ return "xvldrepl.\t%u0,%1,%2"; -+} -+ [(set_attr "type" "simd_load") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ -+;;XVADDWEV.H.B XVSUBWEV.H.B XVMULWEV.H.B -+;;XVADDWEV.H.BU XVSUBWEV.H.BU XVMULWEV.H.BU -+(define_insn "lasx_xvwev_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (addsubmul:V16HI -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)]))) -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)])))))] -+ "ISA_HAS_LASX" -+ "xvwev.h.b\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVADDWEV.W.H XVSUBWEV.W.H XVMULWEV.W.H -+;;XVADDWEV.W.HU XVSUBWEV.W.HU XVMULWEV.W.HU -+(define_insn "lasx_xvwev_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (addsubmul:V8SI -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))))] -+ "ISA_HAS_LASX" -+ "xvwev.w.h\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVADDWEV.D.W XVSUBWEV.D.W XVMULWEV.D.W -+;;XVADDWEV.D.WU XVSUBWEV.D.WU XVMULWEV.D.WU -+(define_insn "lasx_xvwev_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (addsubmul:V4DI -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ 
(const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LASX" -+ "xvwev.d.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWEV.Q.D -+;;TODO2 -+(define_insn "lasx_xvaddwev_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADDWEV))] -+ "ISA_HAS_LASX" -+ "xvaddwev.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVSUBWEV.Q.D -+;;TODO2 -+(define_insn "lasx_xvsubwev_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSUBWEV))] -+ "ISA_HAS_LASX" -+ "xvsubwev.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMULWEV.Q.D -+;;TODO2 -+(define_insn "lasx_xvmulwev_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMULWEV))] -+ "ISA_HAS_LASX" -+ "xvmulwev.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+ -+;;XVADDWOD.H.B XVSUBWOD.H.B XVMULWOD.H.B -+;;XVADDWOD.H.BU XVSUBWOD.H.BU XVMULWOD.H.BU -+(define_insn "lasx_xvwod_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (addsubmul:V16HI -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))) -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)])))))] -+ "ISA_HAS_LASX" -+ "xvwod.h.b\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVADDWOD.W.H XVSUBWOD.W.H XVMULWOD.W.H -+;;XVADDWOD.W.HU XVSUBWOD.W.HU XVMULWOD.W.HU -+(define_insn "lasx_xvwod_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (addsubmul:V8SI -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)])))))] -+ "ISA_HAS_LASX" -+ "xvwod.w.h\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8SI")]) -+ -+ -+;;XVADDWOD.D.W XVSUBWOD.D.W XVMULWOD.D.W -+;;XVADDWOD.D.WU XVSUBWOD.D.WU XVMULWOD.D.WU -+(define_insn "lasx_xvwod_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (addsubmul:V4DI -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (any_extend:V4DI -+ (vec_select:V4SI -+ 
(match_operand:V8SI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)])))))] -+ "ISA_HAS_LASX" -+ "xvwod.d.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWOD.Q.D -+;;TODO2 -+(define_insn "lasx_xvaddwod_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADDWOD))] -+ "ISA_HAS_LASX" -+ "xvaddwod.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVSUBWOD.Q.D -+;;TODO2 -+(define_insn "lasx_xvsubwod_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSUBWOD))] -+ "ISA_HAS_LASX" -+ "xvsubwod.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMULWOD.Q.D -+;;TODO2 -+(define_insn "lasx_xvmulwod_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMULWOD))] -+ "ISA_HAS_LASX" -+ "xvmulwod.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWEV.Q.DU -+;;TODO2 -+(define_insn "lasx_xvaddwev_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADDWEV2))] -+ "ISA_HAS_LASX" -+ "xvaddwev.q.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVSUBWEV.Q.DU -+;;TODO2 -+(define_insn "lasx_xvsubwev_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSUBWEV2))] -+ "ISA_HAS_LASX" -+ "xvsubwev.q.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMULWEV.Q.DU -+;;TODO2 -+(define_insn "lasx_xvmulwev_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMULWEV2))] -+ "ISA_HAS_LASX" -+ "xvmulwev.q.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWOD.Q.DU -+;;TODO2 -+(define_insn "lasx_xvaddwod_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADDWOD2))] -+ "ISA_HAS_LASX" -+ "xvaddwod.q.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVSUBWOD.Q.DU -+;;TODO2 -+(define_insn "lasx_xvsubwod_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSUBWOD2))] -+ "ISA_HAS_LASX" -+ "xvsubwod.q.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMULWOD.Q.DU -+;;TODO2 -+(define_insn "lasx_xvmulwod_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ 
UNSPEC_LASX_XVMULWOD2))] -+ "ISA_HAS_LASX" -+ "xvmulwod.q.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWEV.H.BU.B XVMULWEV.H.BU.B -+(define_insn "lasx_xvwev_h_bu_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (addmul:V16HI -+ (zero_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)]))) -+ (sign_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)])))))] -+ "ISA_HAS_LASX" -+ "xvwev.h.bu.b\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVADDWEV.W.HU.H XVMULWEV.W.HU.H -+(define_insn "lasx_xvwev_w_hu_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (addmul:V8SI -+ (zero_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (sign_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))))] -+ "ISA_HAS_LASX" -+ "xvwev.w.hu.h\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVADDWEV.D.WU.W XVMULWEV.D.WU.W -+(define_insn "lasx_xvwev_d_wu_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (addmul:V4DI -+ (zero_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (sign_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LASX" -+ "xvwev.d.wu.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWOD.H.BU.B XVMULWOD.H.BU.B -+(define_insn "lasx_xvwod_h_bu_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (addmul:V16HI -+ (zero_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))) -+ (sign_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)])))))] -+ "ISA_HAS_LASX" -+ "xvwod.h.bu.b\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVADDWOD.W.HU.H XVMULWOD.W.HU.H -+(define_insn 
"lasx_xvwod_w_hu_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (addmul:V8SI -+ (zero_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (sign_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)])))))] -+ "ISA_HAS_LASX" -+ "xvwod.w.hu.h\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVADDWOD.D.WU.W XVMULWOD.D.WU.W -+(define_insn "lasx_xvwod_d_wu_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (addmul:V4DI -+ (zero_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (sign_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)])))))] -+ "ISA_HAS_LASX" -+ "xvwod.d.wu.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWEV.H.B XVMADDWEV.H.BU -+(define_insn "lasx_xvmaddwev_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (plus:V16HI -+ (match_operand:V16HI 1 "register_operand" "0") -+ (mult:V16HI -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)]))) -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.h.b\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVMADDWEV.W.H XVMADDWEV.W.HU -+(define_insn "lasx_xvmaddwev_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (plus:V8SI -+ (match_operand:V8SI 1 "register_operand" "0") -+ (mult:V8SI -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.w.h\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVMADDWEV.D.W XVMADDWEV.D.WU -+(define_insn "lasx_xvmaddwev_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (plus:V4DI -+ (match_operand:V4DI 1 "register_operand" "0") -+ (mult:V4DI -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (any_extend:V4DI -+ (vec_select:V4SI -+ 
(match_operand:V8SI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.d.w\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWEV.Q.D -+;;TODO2 -+(define_insn "lasx_xvmaddwev_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand:V4DI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVMADDWEV))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.q.d\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWOD.H.B XVMADDWOD.H.BU -+(define_insn "lasx_xvmaddwod_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (plus:V16HI -+ (match_operand:V16HI 1 "register_operand" "0") -+ (mult:V16HI -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))) -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.h.b\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVMADDWOD.W.H XVMADDWOD.W.HU -+(define_insn "lasx_xvmaddwod_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (plus:V8SI -+ (match_operand:V8SI 1 "register_operand" "0") -+ (mult:V8SI -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.w.h\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVMADDWOD.D.W XVMADDWOD.D.WU -+(define_insn "lasx_xvmaddwod_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (plus:V4DI -+ (match_operand:V4DI 1 "register_operand" "0") -+ (mult:V4DI -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.d.w\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWOD.Q.D -+;;TODO2 -+(define_insn "lasx_xvmaddwod_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand:V4DI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVMADDWOD))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.q.d\t%u0,%u2,%u3" -+ 
[(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWEV.Q.DU -+;;TODO2 -+(define_insn "lasx_xvmaddwev_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand:V4DI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVMADDWEV2))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.q.du\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWOD.Q.DU -+;;TODO2 -+(define_insn "lasx_xvmaddwod_q_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand:V4DI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVMADDWOD2))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.q.du\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWEV.H.BU.B -+(define_insn "lasx_xvmaddwev_h_bu_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (plus:V16HI -+ (match_operand:V16HI 1 "register_operand" "0") -+ (mult:V16HI -+ (zero_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)]))) -+ (sign_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.h.bu.b\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVMADDWEV.W.HU.H -+(define_insn "lasx_xvmaddwev_w_hu_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (plus:V8SI -+ (match_operand:V8SI 1 "register_operand" "0") -+ (mult:V8SI -+ (zero_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (sign_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.w.hu.h\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVMADDWEV.D.WU.W -+(define_insn "lasx_xvmaddwev_d_wu_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (plus:V4DI -+ (match_operand:V4DI 1 "register_operand" "0") -+ (mult:V4DI -+ (zero_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (sign_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.d.wu.w\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWEV.Q.DU.D -+;;TODO2 -+(define_insn "lasx_xvmaddwev_q_du_d" -+ [(set (match_operand:V4DI 0 
"register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand:V4DI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVMADDWEV3))] -+ "ISA_HAS_LASX" -+ "xvmaddwev.q.du.d\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWOD.H.BU.B -+(define_insn "lasx_xvmaddwod_h_bu_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (plus:V16HI -+ (match_operand:V16HI 1 "register_operand" "0") -+ (mult:V16HI -+ (zero_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))) -+ (sign_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.h.bu.b\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V16HI")]) -+ -+;;XVMADDWOD.W.HU.H -+(define_insn "lasx_xvmaddwod_w_hu_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (plus:V8SI -+ (match_operand:V8SI 1 "register_operand" "0") -+ (mult:V8SI -+ (zero_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (sign_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.w.hu.h\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V8SI")]) -+ -+;;XVMADDWOD.D.WU.W -+(define_insn "lasx_xvmaddwod_d_wu_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (plus:V4DI -+ (match_operand:V4DI 1 "register_operand" "0") -+ (mult:V4DI -+ (zero_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (sign_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))))))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.d.wu.w\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMADDWOD.Q.DU.D -+;;TODO2 -+(define_insn "lasx_xvmaddwod_q_du_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") -+ (match_operand:V4DI 2 "register_operand" "f") -+ (match_operand:V4DI 3 "register_operand" "f")] -+ UNSPEC_LASX_XVMADDWOD3))] -+ "ISA_HAS_LASX" -+ "xvmaddwod.q.du.d\t%u0,%u2,%u3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVHADDW.Q.D -+;;TODO2 -+(define_insn "lasx_xvhaddw_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVHADDW_Q_D))] -+ 
"ISA_HAS_LASX" -+ "xvhaddw.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVHSUBW.Q.D -+;;TODO2 -+(define_insn "lasx_xvhsubw_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVHSUBW_Q_D))] -+ "ISA_HAS_LASX" -+ "xvhsubw.q.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVHADDW.QU.DU -+;;TODO2 -+(define_insn "lasx_xvhaddw_qu_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVHADDW_QU_DU))] -+ "ISA_HAS_LASX" -+ "xvhaddw.qu.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVHSUBW.QU.DU -+;;TODO2 -+(define_insn "lasx_xvhsubw_qu_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVHSUBW_QU_DU))] -+ "ISA_HAS_LASX" -+ "xvhsubw.qu.du\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVROTR.B XVROTR.H XVROTR.W XVROTR.D -+;;TODO-478 -+(define_insn "lasx_xvrotr_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand:ILASX 2 "register_operand" "f")] -+ UNSPEC_LASX_XVROTR))] -+ "ISA_HAS_LASX" -+ "xvrotr.\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+;;XVADD.Q -+;;TODO2 -+(define_insn "lasx_xvadd_q" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADD_Q))] -+ "ISA_HAS_LASX" -+ "xvadd.q\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVSUB.Q -+;;TODO2 -+(define_insn "lasx_xvsub_q" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSUB_Q))] -+ "ISA_HAS_LASX" -+ "xvsub.q\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVSSRLN.B.H XVSSRLN.H.W XVSSRLN.W.D -+(define_insn "lasx_xvssrln__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -+ (match_operand:ILASX_DWH 2 "register_operand" "f")] -+ UNSPEC_LASX_XVSSRLN))] -+ "ISA_HAS_LASX" -+ "xvssrln..\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+;;XVREPLVE.B XVREPLVE.H XVREPLVE.W XVREPLVE.D -+(define_insn "lasx_xvreplve_" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") -+ (match_operand:SI 2 "register_operand" "r")] -+ UNSPEC_LASX_XVREPLVE))] -+ "ISA_HAS_LASX" -+ "xvreplve.\t%u0,%u1,%z2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+;;XVADDWEV.Q.DU.D -+(define_insn "lasx_xvaddwev_q_du_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADDWEV3))] -+ "ISA_HAS_LASX" -+ "xvaddwev.q.du.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVADDWOD.Q.DU.D 
-+(define_insn "lasx_xvaddwod_q_du_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVADDWOD3))] -+ "ISA_HAS_LASX" -+ "xvaddwod.q.du.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMULWEV.Q.DU.D -+(define_insn "lasx_xvmulwev_q_du_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMULWEV3))] -+ "ISA_HAS_LASX" -+ "xvmulwev.q.du.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;;XVMULWOD.Q.DU.D -+(define_insn "lasx_xvmulwod_q_du_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") -+ (match_operand:V4DI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVMULWOD3))] -+ "ISA_HAS_LASX" -+ "xvmulwod.q.du.d\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvpickve2gr_w" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (any_extend:SI -+ (vec_select:SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_0_to_7_operand" "")]))))] -+ "ISA_HAS_LASX" -+ "xvpickve2gr.w\t%0,%u1,%2" -+ [(set_attr "type" "simd_copy") -+ (set_attr "mode" "V8SI")]) -+ -+ -+(define_insn "lasx_xvmskgez_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVMSKGEZ))] -+ "ISA_HAS_LASX" -+ "xvmskgez.b\t%u0,%u1" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvmsknz_b" -+ [(set (match_operand:V32QI 0 "register_operand" "=f") -+ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVMSKNZ))] -+ "ISA_HAS_LASX" -+ "xvmsknz.b\t%u0,%u1" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "V32QI")]) -+ -+(define_insn "lasx_xvexth_h_b" -+ [(set (match_operand:V16HI 0 "register_operand" "=f") -+ (any_extend:V16HI -+ (vec_select:V16QI -+ (match_operand:V32QI 1 "register_operand" "f") -+ (parallel [(const_int 16) (const_int 17) -+ (const_int 18) (const_int 19) -+ (const_int 20) (const_int 21) -+ (const_int 22) (const_int 23) -+ (const_int 24) (const_int 25) -+ (const_int 26) (const_int 27) -+ (const_int 28) (const_int 29) -+ (const_int 30) (const_int 31)]))))] -+ "ISA_HAS_LASX" -+ "xvexth.h.b\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V16HI")]) -+ -+(define_insn "lasx_xvexth_w_h" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (any_extend:V8SI -+ (vec_select:V8HI -+ (match_operand:V16HI 1 "register_operand" "f") -+ (parallel [(const_int 8) (const_int 9) -+ (const_int 10) (const_int 11) -+ (const_int 12) (const_int 13) -+ (const_int 14) (const_int 15)]))))] -+ "ISA_HAS_LASX" -+ "xvexth.w.h\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8SI")]) -+ -+(define_insn "lasx_xvexth_d_w" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "f") -+ (parallel [(const_int 4) (const_int 5) -+ (const_int 6) (const_int 7)]))))] -+ "ISA_HAS_LASX" -+ "xvexth.d.w\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvexth_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI 
[(match_operand:V4DI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVEXTH_Q_D))] -+ "ISA_HAS_LASX" -+ "xvexth.q.d\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvexth_qu_du" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVEXTH_QU_DU))] -+ "ISA_HAS_LASX" -+ "xvexth.qu.du\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvrotri_" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LASX_XVROTRI))] -+ "ISA_HAS_LASX" -+ "xvrotri.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shf") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvextl_q_d" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] -+ UNSPEC_LASX_XVEXTL_Q_D))] -+ "ISA_HAS_LASX" -+ "xvextl.q.d\t%u0,%u1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4DI")]) -+ -+(define_insn "lasx_xvsrlni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSRLNI))] -+ "ISA_HAS_LASX" -+ "xvsrlni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrlrni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSRLRNI))] -+ "ISA_HAS_LASX" -+ "xvsrlrni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrlni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRLNI))] -+ "ISA_HAS_LASX" -+ "xvssrlni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrlni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRLNI2))] -+ "ISA_HAS_LASX" -+ "xvssrlni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrlrni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRLRNI))] -+ "ISA_HAS_LASX" -+ "xvssrlrni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrlrni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRLRNI2))] -+ "ISA_HAS_LASX" -+ "xvssrlrni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrani__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX 
[(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSRANI))] -+ "ISA_HAS_LASX" -+ "xvsrani..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvsrarni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSRARNI))] -+ "ISA_HAS_LASX" -+ "xvsrarni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrani__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRANI))] -+ "ISA_HAS_LASX" -+ "xvssrani..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrani__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRANI2))] -+ "ISA_HAS_LASX" -+ "xvssrani..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrarni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRARNI))] -+ "ISA_HAS_LASX" -+ "xvssrarni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvssrarni__" -+ [(set (match_operand:ILASX 0 "register_operand" "=f") -+ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") -+ (match_operand:ILASX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVSSRARNI2))] -+ "ISA_HAS_LASX" -+ "xvssrarni..\t%u0,%u2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvpermi_w" -+ [(set (match_operand:V8SI 0 "register_operand" "=f") -+ (unspec:V8SI [(match_operand:V8SI 1 "register_operand" "0") -+ (match_operand:V8SI 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVPERMI))] -+ "ISA_HAS_LASX" -+ "xvpermi.w\t%u0,%u2,%3" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "V8SI")]) -+ -+(define_expand "lasx_xvld" -+ [(match_operand:V32QI 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq12b_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ loongarch_emit_move (operands[0], gen_rtx_MEM (V32QImode, addr)); -+ DONE; -+}) -+ -+(define_expand "lasx_xvst" -+ [(match_operand:V32QI 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq12b_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ loongarch_emit_move (gen_rtx_MEM (V32QImode, addr), operands[0]); -+ DONE; -+}) -+ -+(define_expand "lasx_xvstelm_" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand 3 "const__operand") -+ (match_operand 2 "aq8_operand") -+ (match_operand 1 "pmode_register_operand")] -+ "ISA_HAS_LASX" 
-+{
-+  emit_insn (gen_lasx_xvstelm_<lasxfmt_f>_insn
-+             (operands[1], operands[2], operands[0], operands[3]));
-+  DONE;
-+})
-+
-+(define_insn "lasx_xvstelm_<lasxfmt_f>_insn"
-+  [(set (mem:<UNITMODE> (plus:DI (match_operand:DI 0 "register_operand" "r")
-+                                 (match_operand 1 "aq8<lasxfmt>_operand")))
-+    (vec_select:<UNITMODE>
-+      (match_operand:LASX 2 "register_operand" "f")
-+      (parallel [(match_operand 3 "const_<indeximm256>_operand" "")])))]
-+
-+  "ISA_HAS_LASX"
-+{
-+  return "xvstelm.<lasxfmt>\t%u2,%0,%1,%3";
-+}
-+  [(set_attr "type" "simd_store")
-+   (set_attr "mode" "<MODE>")
-+   (set_attr "length" "4")])
-+
-+(define_insn "lasx_xvinsve0_<lasxfmt_f>"
-+  [(set (match_operand:ILASX_DW 0 "register_operand" "=f")
-+    (unspec:ILASX_DW [(match_operand:ILASX_DW 1 "register_operand" "0")
-+                      (match_operand:ILASX_DW 2 "register_operand" "f")
-+                      (match_operand 3 "const_<indeximm256>_operand" "")]
-+                     UNSPEC_LASX_XVINSVE0))]
-+  "ISA_HAS_LASX"
-+  "xvinsve0.<lasxfmt>\t%u0,%u2,%3"
-+  [(set_attr "type" "simd_shf")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "lasx_xvpickve_<lasxfmt_f>"
-+  [(set (match_operand:LASX_WD 0 "register_operand" "=f")
-+    (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f")
-+                     (match_operand 2 "const_<indeximm256>_operand" "")]
-+                    UNSPEC_LASX_XVPICKVE))]
-+  "ISA_HAS_LASX"
-+  "xvpickve.<lasxfmt>\t%u0,%u1,%2"
-+  [(set_attr "type" "simd_shf")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "lasx_xvssrlrn_<hlasxfmt>_<lasxfmt>"
-+  [(set (match_operand:<VHMODE256_ALL> 0 "register_operand" "=f")
-+    (unspec:<VHMODE256_ALL> [(match_operand:ILASX_DWH 1 "register_operand" "f")
-+                             (match_operand:ILASX_DWH 2 "register_operand" "f")]
-+                            UNSPEC_LASX_XVSSRLRN))]
-+  "ISA_HAS_LASX"
-+  "xvssrlrn.<hlasxfmt>.<lasxfmt>\t%u0,%u1,%u2"
-+  [(set_attr "type" "simd_int_arith")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "xvorn<mode>3"
-+  [(set (match_operand:ILASX 0 "register_operand" "=f")
-+    (ior:ILASX (not:ILASX (match_operand:ILASX 2 "register_operand" "f"))
-+               (match_operand:ILASX 1 "register_operand" "f")))]
-+  "ISA_HAS_LASX"
-+  "xvorn.v\t%u0,%u1,%u2"
-+  [(set_attr "type" "simd_logic")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "lasx_xvextl_qu_du"
-+  [(set (match_operand:V4DI 0 "register_operand" "=f")
-+    (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
-+                 UNSPEC_LASX_XVEXTL_QU_DU))]
-+  "ISA_HAS_LASX"
-+  "xvextl.qu.du\t%u0,%u1"
-+  [(set_attr "type" "simd_bit")
-+   (set_attr "mode" "V4DI")])
-+
-+(define_insn "lasx_xvldi"
-+  [(set (match_operand:V4DI 0 "register_operand" "=f")
-+    (unspec:V4DI [(match_operand 1 "const_imm13_operand")]
-+                 UNSPEC_LASX_XVLDI))]
-+  "ISA_HAS_LASX"
-+{
-+  HOST_WIDE_INT val = INTVAL (operands[1]);
-+  if (val < 0)
-+    {
-+      /* Bit 13 (1-based) is set: bits 12...9 select the vldi mode,
-+         and only modes 0000 ~ 1100 are defined.  */
-+      HOST_WIDE_INT modeVal = (val & 0xf00) >> 8;
-+      if (modeVal < 13)
-+        return "xvldi\t%u0,%1";
-+      else
-+        sorry ("for const_imm13_operand, only 0000 ~ 1100 in bits 12...9 "
-+               "is supported when bit 13 is 1");
-+    }
-+  else
-+    return "xvldi\t%u0,%1";
-+}
-+  [(set_attr "type" "simd_load")
-+   (set_attr "mode" "V4DI")])
-+
-+(define_insn "lasx_xvldx"
-+  [(set (match_operand:V32QI 0 "register_operand" "=f")
-+    (unspec:V32QI [(match_operand:DI 1 "register_operand" "r")
-+                   (match_operand:DI 2 "reg_or_0_operand" "rJ")]
-+                  UNSPEC_LASX_XVLDX))]
-+  "ISA_HAS_LASX"
-+{
-+  return "xvldx\t%u0,%1,%z2";
-+}
-+  [(set_attr "type" "simd_load")
-+   (set_attr "mode" "V32QI")])
-+
-+(define_insn "lasx_xvstx"
-+  [(set (mem:V32QI (plus:DI (match_operand:DI 1 "register_operand" "r")
-+                            (match_operand:DI 2 "reg_or_0_operand" "rJ")))
-+    (unspec:V32QI [(match_operand:V32QI 0 "register_operand" "f")]
-+                  UNSPEC_LASX_XVSTX))]
-+
-+  "ISA_HAS_LASX"
-+{
-+  return "xvstx\t%u0,%1,%z2";
-+}
-+  [(set_attr "type" "simd_store")
-+   (set_attr "mode" "DI")])
-+
-diff --git a/gcc/config/loongarch/lasxintrin.h
b/gcc/config/loongarch/lasxintrin.h -new file mode 100644 -index 000000000..185eee869 ---- /dev/null -+++ b/gcc/config/loongarch/lasxintrin.h -@@ -0,0 +1,5139 @@ -+/* LARCH Loongson ASX intrinsics include file. -+ -+ Copyright (C) 2018 Free Software Foundation, Inc. -+ -+ This file is part of GCC. -+ -+ GCC is free software; you can redistribute it and/or modify it -+ under the terms of the GNU General Public License as published -+ by the Free Software Foundation; either version 3, or (at your -+ option) any later version. -+ -+ GCC is distributed in the hope that it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+ License for more details. -+ -+ Under Section 7 of GPL version 3, you are granted additional -+ permissions described in the GCC Runtime Library Exception, version -+ 3.1, as published by the Free Software Foundation. -+ -+ You should have received a copy of the GNU General Public License and -+ a copy of the GCC Runtime Library Exception along with this program; -+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+ . */ -+ -+#ifndef _GCC_LOONGSON_ASXINTRIN_H -+#define _GCC_LOONGSON_ASXINTRIN_H 1 -+ -+#if defined(__loongarch_asx) -+ -+typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); -+typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); -+typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); -+typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); -+typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); -+typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); -+typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); -+typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); -+typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); -+typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32))); -+typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); -+typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); -+typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); -+typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4))); -+typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); -+typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); -+ -+typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); -+typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); -+ -+typedef float __m256 __attribute__ ((__vector_size__ (32), -+ __may_alias__)); -+typedef long long __m256i __attribute__ ((__vector_size__ (32), -+ __may_alias__)); -+typedef double __m256d __attribute__ ((__vector_size__ (32), -+ __may_alias__)); -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsll_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsll_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsll_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsll_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsll_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsll_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsll_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsll_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvslli_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvslli_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslli_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvslli_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsra_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsra_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsra_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsra_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsra_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsra_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsra_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsra_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. 
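-+
-+   A usage sketch (illustrative, not part of the original header): the
-+   immediate-count forms such as __lasx_xvsrai_b below are macros rather
-+   than inline functions because the underlying builtins require the
-+   shift amount to be a compile-time constant in the range shown by the
-+   ui3/ui4/ui5/ui6 comments, e.g.
-+
-+     __m256i r = __lasx_xvsrai_b(v, 3);   per-byte arithmetic >> 3
-+
-+   where v is any __m256i and 3 must be a literal in 0..7.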
*/ -+#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrai_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrai_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrai_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrai_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrar_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrar_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrar_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrar_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrar_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrar_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrar_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrar_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrari_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrari_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrari_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrari_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrl_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrl_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrl_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrl_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrl_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrl_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrl_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrl_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrli_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrli_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrli_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrli_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlr_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlr_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlr_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlr_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlr_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlr_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlr_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlr_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrlri_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. 
*/ -+#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrlri_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrlri_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrlri_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitclr_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitclr_b((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitclr_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitclr_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitclr_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitclr_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitclr_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitclr_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvbitclri_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ -+#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvbitclri_h((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ -+#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbitclri_w((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ -+#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvbitclri_d((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitset_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitset_b((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitset_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitset_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. 
*/ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitset_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitset_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitset_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitset_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvbitseti_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ -+#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvbitseti_h((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ -+#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbitseti_w((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ -+#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvbitseti_d((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitrev_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitrev_b((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitrev_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitrev_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitrev_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitrev_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitrev_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvbitrev_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ -+#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_h((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UQI. 
*/ -+#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_w((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ -+#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_d((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadd_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadd_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadd_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadd_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadd_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadd_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadd_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadd_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_bu((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_hu((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_wu((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_du((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsub_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsub_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsub_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsub_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsub_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsub_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsub_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsub_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_bu((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_hu((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_wu((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_du((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V32QI, V32QI, QI. */ -+#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V16HI, V16HI, QI. */ -+#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V8SI, V8SI, QI. */ -+#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V4DI, V4DI, QI. 
*/ -+#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmax_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmax_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_bu((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ -+#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_hu((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ -+#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_wu((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ -+#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_du((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V32QI, V32QI, QI. */ -+#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V16HI, V16HI, QI. */ -+#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V8SI, V8SI, QI. */ -+#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V4DI, V4DI, QI. */ -+#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmin_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmin_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_bu((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ -+#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_hu((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ -+#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_wu((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ -+#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_du((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvseq_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvseq_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvseq_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvseq_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvseq_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvseq_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvseq_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvseq_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V32QI, V32QI, QI. */ -+#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V16HI, V16HI, QI. */ -+#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V8SI, V8SI, QI. */ -+#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V4DI, V4DI, QI. */ -+#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V32QI, V32QI, QI. 
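-+
-+   Editor's note - a hedged usage sketch for the compare intrinsics above,
-+   not part of the original header. It assumes the usual SIMD mask
-+   convention (a true lane yields all ones, a false lane all zeros):
-+
-+     __m256i eq = __lasx_xvseq_w(a, b);  - per-word mask of a == b
-+     __m256i lt = __lasx_xvslt_w(a, b);  - per-word signed mask of a < b
-+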
*/ -+#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V16HI, V16HI, QI. */ -+#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V8SI, V8SI, QI. */ -+#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V4DI, V4DI, QI. */ -+#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvslt_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvslt_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, UV32QI, UQI. */ -+#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_bu((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, UV16HI, UQI. */ -+#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_hu((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, UV8SI, UQI. */ -+#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_wu((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V4DI, UV4DI, UQI. */ -+#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_du((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V32QI, V32QI, QI. */ -+#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V16HI, V16HI, QI. */ -+#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V8SI, V8SI, QI. */ -+#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, si5. */ -+/* Data types in instruction templates: V4DI, V4DI, QI. */ -+#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsle_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsle_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, UV32QI, UQI. */ -+#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_bu((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, UV16HI, UQI. 
*/ -+#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_hu((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, UV8SI, UQI. */ -+#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_wu((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V4DI, UV4DI, UQI. */ -+#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_du((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsat_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsat_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsat_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsat_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsat_bu((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ -+#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsat_hu((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ -+#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsat_wu((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ -+#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsat_du((v4u64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadda_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadda_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadda_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadda_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadda_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadda_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadda_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadda_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsadd_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsadd_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. 
*/ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavg_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavg_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvavgr_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvavgr_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. 
*/ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssub_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssub_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvabsd_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvabsd_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmul_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmul_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmul_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmul_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmul_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmul_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmul_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmul_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmadd_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmadd_b((v32i8)_1, (v32i8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmadd_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmadd_h((v16i16)_1, (v16i16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmadd_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmadd_w((v8i32)_1, (v8i32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmadd_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmadd_d((v4i64)_1, (v4i64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmsub_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmsub_b((v32i8)_1, (v32i8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmsub_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmsub_h((v16i16)_1, (v16i16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmsub_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmsub_w((v8i32)_1, (v8i32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. 
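-+
-+   Editor's note - the four-entry type lists above reflect that these
-+   multiply-accumulate intrinsics take the accumulator as their first
-+   argument. A hedged sketch, assuming acc + a*b and acc - a*b semantics:
-+
-+     acc = __lasx_xvmadd_w(acc, a, b);   - acc += a * b in each word lane
-+     acc = __lasx_xvmsub_w(acc, a, b);   - acc -= a * b in each word lane
-+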
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmsub_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmsub_d((v4i64)_1, (v4i64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvdiv_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvdiv_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_hu_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_hu_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_wu_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_wu_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_du_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_du_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_hu_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_hu_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_wu_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_wu_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_du_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_du_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmod_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmod_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui2. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. 
*/ -+#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui1. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickev_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickev_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickev_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickev_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickev_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickev_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickev_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickev_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickod_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickod_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickod_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickod_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickod_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickod_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpickod_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpickod_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvh_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvh_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvh_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvh_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvh_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvh_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvh_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvh_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvl_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvl_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvl_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvl_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvl_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvl_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvilvl_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvilvl_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackev_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackev_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackev_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackev_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackev_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackev_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackev_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackev_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. 
*/ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackod_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackod_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackod_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackod_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackod_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackod_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpackod_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvpackod_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvshuf_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvshuf_b((v32i8)_1, (v32i8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvshuf_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvshuf_h((v16i16)_1, (v16i16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvshuf_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvshuf_w((v8i32)_1, (v8i32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvshuf_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvshuf_d((v4i64)_1, (v4i64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvand_v(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvand_v((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvandi_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvor_v(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvor_v((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvori_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvnor_v(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvnor_v((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvnori_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvxor_v(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvxor_v((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ -+#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvxori_b((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvbitsel_v(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvbitsel_v((v32u8)_1, (v32u8)_2, (v32u8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, USI. */ -+#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvbitseli_b((v32u8)(_1), (v32u8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V32QI, V32QI, USI. */ -+#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvshuf4i_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V16HI, V16HI, USI. */ -+#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvshuf4i_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V8SI, V8SI, USI. */ -+#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvshuf4i_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, rj. */ -+/* Data types in instruction templates: V32QI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplgr2vr_b(int _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplgr2vr_b((int)_1); -+} -+ -+/* Assembly instruction format: xd, rj. */ -+/* Data types in instruction templates: V16HI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplgr2vr_h(int _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplgr2vr_h((int)_1); -+} -+ -+/* Assembly instruction format: xd, rj. 
*/ -+/* Data types in instruction templates: V8SI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplgr2vr_w(int _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplgr2vr_w((int)_1); -+} -+ -+/* Assembly instruction format: xd, rj. */ -+/* Data types in instruction templates: V4DI, DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplgr2vr_d(long int _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplgr2vr_d((long int)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpcnt_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvpcnt_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpcnt_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvpcnt_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpcnt_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvpcnt_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvpcnt_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvpcnt_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclo_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclo_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclo_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclo_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclo_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclo_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclo_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclo_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclz_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclz_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclz_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclz_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. 
*/ -+/* Data types in instruction templates: V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclz_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclz_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvclz_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvclz_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfadd_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfadd_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfadd_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfadd_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfsub_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfsub_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfsub_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfsub_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmul_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfmul_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmul_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfmul_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfdiv_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfdiv_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfdiv_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfdiv_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcvt_h_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcvt_h_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V4DF, V4DF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfcvt_s_d(__m256d _1, __m256d _2) -+{ -+ return (__m256)__builtin_lasx_xvfcvt_s_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmin_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfmin_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmin_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfmin_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmina_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfmina_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmina_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfmina_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmax_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfmax_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmax_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfmax_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmaxa_s(__m256 _1, __m256 _2) -+{ -+ return (__m256)__builtin_lasx_xvfmaxa_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmaxa_d(__m256d _1, __m256d _2) -+{ -+ return (__m256d)__builtin_lasx_xvfmaxa_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfclass_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvfclass_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfclass_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvfclass_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V8SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfsqrt_s(__m256 _1) -+{ -+ return (__m256)__builtin_lasx_xvfsqrt_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfsqrt_d(__m256d _1) -+{ -+ return (__m256d)__builtin_lasx_xvfsqrt_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfrecip_s(__m256 _1) -+{ -+ return (__m256)__builtin_lasx_xvfrecip_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfrecip_d(__m256d _1) -+{ -+ return (__m256d)__builtin_lasx_xvfrecip_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfrint_s(__m256 _1) -+{ -+ return (__m256)__builtin_lasx_xvfrint_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfrint_d(__m256d _1) -+{ -+ return (__m256d)__builtin_lasx_xvfrint_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfrsqrt_s(__m256 _1) -+{ -+ return (__m256)__builtin_lasx_xvfrsqrt_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfrsqrt_d(__m256d _1) -+{ -+ return (__m256d)__builtin_lasx_xvfrsqrt_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvflogb_s(__m256 _1) -+{ -+ return (__m256)__builtin_lasx_xvflogb_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvflogb_d(__m256d _1) -+{ -+ return (__m256d)__builtin_lasx_xvflogb_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfcvth_s_h(__m256i _1) -+{ -+ return (__m256)__builtin_lasx_xvfcvth_s_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfcvth_d_s(__m256 _1) -+{ -+ return (__m256d)__builtin_lasx_xvfcvth_d_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfcvtl_s_h(__m256i _1) -+{ -+ return (__m256)__builtin_lasx_xvfcvtl_s_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfcvtl_d_s(__m256 _1) -+{ -+ return (__m256d)__builtin_lasx_xvfcvtl_d_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftint_w_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftint_w_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftint_l_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftint_l_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftint_wu_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftint_wu_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftint_lu_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftint_lu_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrz_w_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrz_w_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrz_l_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrz_l_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrz_wu_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrz_wu_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrz_lu_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrz_lu_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SF, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvffint_s_w(__m256i _1) -+{ -+ return (__m256)__builtin_lasx_xvffint_s_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvffint_d_l(__m256i _1) -+{ -+ return (__m256d)__builtin_lasx_xvffint_d_l((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. 
*/ -+/* Data types in instruction templates: V8SF, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvffint_s_wu(__m256i _1) -+{ -+ return (__m256)__builtin_lasx_xvffint_s_wu((v8u32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvffint_d_lu(__m256i _1) -+{ -+ return (__m256d)__builtin_lasx_xvffint_d_lu((v4u64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, rk. */ -+/* Data types in instruction templates: V32QI, V32QI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve_b(__m256i _1, int _2) -+{ -+ return (__m256i)__builtin_lasx_xvreplve_b((v32i8)_1, (int)_2); -+} -+ -+/* Assembly instruction format: xd, xj, rk. */ -+/* Data types in instruction templates: V16HI, V16HI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve_h(__m256i _1, int _2) -+{ -+ return (__m256i)__builtin_lasx_xvreplve_h((v16i16)_1, (int)_2); -+} -+ -+/* Assembly instruction format: xd, xj, rk. */ -+/* Data types in instruction templates: V8SI, V8SI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve_w(__m256i _1, int _2) -+{ -+ return (__m256i)__builtin_lasx_xvreplve_w((v8i32)_1, (int)_2); -+} -+ -+/* Assembly instruction format: xd, xj, rk. */ -+/* Data types in instruction templates: V4DI, V4DI, SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve_d(__m256i _1, int _2) -+{ -+ return (__m256i)__builtin_lasx_xvreplve_d((v4i64)_1, (int)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvpermi_w((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvandn_v(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvandn_v((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvneg_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvneg_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvneg_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvneg_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvneg_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvneg_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvneg_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvneg_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmuh_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmuh_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V16HI, V32QI, UQI. */ -+#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsllwil_h_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V8SI, V16HI, UQI. */ -+#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsllwil_w_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V4DI, V8SI, UQI. */ -+#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsllwil_d_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui3. 
*/ -+/* Data types in instruction templates: UV16HI, UV32QI, UQI. */ -+#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsllwil_hu_bu((v32u8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV8SI, UV16HI, UQI. */ -+#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsllwil_wu_hu((v16u16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV4DI, UV8SI, UQI. */ -+#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsllwil_du_wu((v8u32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsran_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsran_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsran_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsran_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsran_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsran_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssran_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssran_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssran_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssran_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssran_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssran_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssran_bu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssran_bu_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssran_hu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssran_hu_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssran_wu_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssran_wu_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrarn_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrarn_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrarn_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrarn_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrarn_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrarn_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrarn_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrarn_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrarn_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrarn_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrarn_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrarn_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrarn_bu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrarn_bu_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrarn_hu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrarn_hu_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrarn_wu_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrarn_wu_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrln_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrln_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrln_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrln_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrln_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrln_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrln_bu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrln_bu_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrln_hu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrln_hu_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrln_wu_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrln_wu_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlrn_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlrn_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlrn_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlrn_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsrlrn_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsrlrn_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrlrn_bu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrlrn_bu_h((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrlrn_hu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrlrn_hu_w((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrlrn_wu_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrlrn_wu_d((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, UQI. */ -+#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvfrstpi_b((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, UQI. */ -+#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvfrstpi_h((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrstp_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvfrstp_b((v32i8)_1, (v32i8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrstp_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvfrstp_h((v16i16)_1, (v16i16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvshuf4i_d((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbsrl_v((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbsll_v((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_b((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_h((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_w((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ -+#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_d((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmskltz_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvmskltz_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmskltz_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvmskltz_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmskltz_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvmskltz_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmskltz_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvmskltz_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsigncov_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsigncov_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsigncov_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsigncov_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsigncov_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsigncov_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsigncov_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsigncov_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmadd_s(__m256 _1, __m256 _2, __m256 _3) -+{ -+ return (__m256)__builtin_lasx_xvfmadd_s((v8f32)_1, (v8f32)_2, (v8f32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmadd_d(__m256d _1, __m256d _2, __m256d _3) -+{ -+ return (__m256d)__builtin_lasx_xvfmadd_d((v4f64)_1, (v4f64)_2, (v4f64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfmsub_s(__m256 _1, __m256 _2, __m256 _3) -+{ -+ return (__m256)__builtin_lasx_xvfmsub_s((v8f32)_1, (v8f32)_2, (v8f32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfmsub_d(__m256d _1, __m256d _2, __m256d _3) -+{ -+ return (__m256d)__builtin_lasx_xvfmsub_d((v4f64)_1, (v4f64)_2, (v4f64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfnmadd_s(__m256 _1, __m256 _2, __m256 _3) -+{ -+ return (__m256)__builtin_lasx_xvfnmadd_s((v8f32)_1, (v8f32)_2, (v8f32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfnmadd_d(__m256d _1, __m256d _2, __m256d _3) -+{ -+ return (__m256d)__builtin_lasx_xvfnmadd_d((v4f64)_1, (v4f64)_2, (v4f64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvfnmsub_s(__m256 _1, __m256 _2, __m256 _3) -+{ -+ return (__m256)__builtin_lasx_xvfnmsub_s((v8f32)_1, (v8f32)_2, (v8f32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk, xa. */ -+/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvfnmsub_d(__m256d _1, __m256d _2, __m256d _3) -+{ -+ return (__m256d)__builtin_lasx_xvfnmsub_d((v4f64)_1, (v4f64)_2, (v4f64)_3); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrne_w_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrne_w_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrne_l_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrne_l_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrp_w_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrp_w_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrp_l_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrp_l_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. 
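(Aside: the four fused forms above map directly onto xvfmadd/xvfmsub/xvfnmadd/xvfnmsub, each performing the multiply and add or subtract with a single rounding. An illustrative sketch, names not from the patch, again assuming -mlasx:

    #include <lasxintrin.h>

    __m256 fma8(__m256 a, __m256 b, __m256 c)
    {
        return __lasx_xvfmadd_s(a, b, c);    /* a*b + c, fused.  */
    }

    __m256 fnma8(__m256 a, __m256 b, __m256 c)
    {
        return __lasx_xvfnmadd_s(a, b, c);   /* -(a*b + c), fused.  */
    }
)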
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrm_w_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrm_w_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrm_l_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrm_l_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftint_w_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvftint_w_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SF, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256 __lasx_xvffint_s_l(__m256i _1, __m256i _2) -+{ -+ return (__m256)__builtin_lasx_xvffint_s_l((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrz_w_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvftintrz_w_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrp_w_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvftintrp_w_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrm_w_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvftintrm_w_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrne_w_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvftintrne_w_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftinth_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftinth_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintl_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintl_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvffinth_d_w(__m256i _1) -+{ -+ return (__m256d)__builtin_lasx_xvffinth_d_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DF, V8SI. 
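(Aside: the xvftint* family encodes the rounding mode in the mnemonic, rne for nearest-even, rz toward zero, rp up and rm down, and the _w_d forms narrow two double vectors into one vector of 32-bit results. A small sketch with hypothetical names, -mlasx assumed:

    #include <lasxintrin.h>

    /* float -> int32, round to nearest even, eight lanes at once.  */
    __m256i to_i32_rne(__m256 x)
    {
        return __lasx_xvftintrne_w_s(x);
    }

    /* Two double vectors -> eight 32-bit ints, truncating; the two
       sources fill the two halves of the destination.  */
    __m256i pair_to_i32(__m256d a, __m256d b)
    {
        return __lasx_xvftintrz_w_d(a, b);
    }
)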
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256d __lasx_xvffintl_d_w(__m256i _1) -+{ -+ return (__m256d)__builtin_lasx_xvffintl_d_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrzh_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrzh_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrzl_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrzl_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrph_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrph_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrpl_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrpl_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrmh_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrmh_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrml_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrml_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrneh_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrneh_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvftintrnel_l_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvftintrnel_l_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrne_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrne_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrne_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrne_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrz_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrz_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. 
*/ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrz_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrz_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrp_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrp_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrp_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrp_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrm_s(__m256 _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrm_s((v8f32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfrintrm_d(__m256d _1) -+{ -+ return (__m256i)__builtin_lasx_xvfrintrm_d((v4f64)_1); -+} -+ -+/* Assembly instruction format: xd, rj, si12. */ -+/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ -+#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvld((void *)(_1), (_2))) -+ -+/* Assembly instruction format: xd, rj, si12. */ -+/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ -+#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) ((void)__builtin_lasx_xvst((v32i8)(_1), (void *)(_2), (_3))) -+ -+/* Assembly instruction format: xd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI, UQI. */ -+#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_b((v32i8)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: xd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V16HI, CVPOINTER, SI, UQI. */ -+#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_h((v16i16)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: xd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V8SI, CVPOINTER, SI, UQI. */ -+#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_w((v8i32)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: xd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V4DI, CVPOINTER, SI, UQI. */ -+#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_d((v4i64)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, UQI. */ -+#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) ((__m256i)__builtin_lasx_xvinsve0_w((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui2. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, UQI. 
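(Aside: xvld/xvst move a whole 256-bit vector with a signed 12-bit byte offset, while xvstelm stores a single lane. A round-trip sketch, illustrative and assuming -mlasx:

    #include <lasxintrin.h>

    int src[8], dst[8], one;

    void copy8(void)
    {
        __m256i v = __lasx_xvld(src, 0);       /* full 256-bit load.  */
        __lasx_xvst(v, dst, 0);                /* full 256-bit store.  */
        __lasx_xvstelm_w(v, &one, 0, 3);       /* store 32-bit lane 3 only.  */
    }
)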
*/ -+#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) ((__m256i)__builtin_lasx_xvinsve0_d((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvpickve_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui2. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) ((__m256i)__builtin_lasx_xvpickve_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrlrn_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrlrn_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrlrn_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrlrn_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrlrn_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrlrn_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrln_b_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrln_b_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrln_h_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrln_h_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvssrln_w_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvssrln_w_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvorn_v(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvorn_v((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, i13. */ -+/* Data types in instruction templates: V4DI, HI. */ -+#define __lasx_xvldi(/*i13*/ _1) ((__m256i)__builtin_lasx_xvldi((_1))) -+ -+/* Assembly instruction format: xd, rj, rk. */ -+/* Data types in instruction templates: V32QI, CVPOINTER, DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvldx(void * _1, long int _2) -+{ -+ return (__m256i)__builtin_lasx_xvldx((void *)_1, (long int)_2); -+} -+ -+/* Assembly instruction format: xd, rj, rk. */ -+/* Data types in instruction templates: VOID, V32QI, CVPOINTER, DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __lasx_xvstx(__m256i _1, void * _2, long int _3) -+{ -+ return (void)__builtin_lasx_xvstx((v32i8)_1, (void *)_2, (long int)_3); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvextl_qu_du(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvextl_qu_du((v4u64)_1); -+} -+ -+/* Assembly instruction format: xd, rj, ui3. */ -+/* Data types in instruction templates: V8SI, V8SI, SI, UQI. */ -+#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) ((__m256i)__builtin_lasx_xvinsgr2vr_w((v8i32)(_1), (int)(_2), (_3))) -+ -+/* Assembly instruction format: xd, rj, ui2. */ -+/* Data types in instruction templates: V4DI, V4DI, DI, UQI. */ -+#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) ((__m256i)__builtin_lasx_xvinsgr2vr_d((v4i64)(_1), (long int)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve0_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplve0_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve0_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplve0_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve0_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplve0_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve0_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplve0_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvreplve0_q(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvreplve0_q((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_h_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_h_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_w_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_w_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_d_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_d_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V32QI. 
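(Aside: xvinsgr2vr moves a general-purpose register into one lane and xvreplve0 broadcasts lane 0, so the two combine into a scalar splat. Sketch, -mlasx assumed, splat_w being an illustrative name:

    #include <lasxintrin.h>

    __m256i splat_w(int x)
    {
        __m256i v = __lasx_xvldi(0);           /* start from all zeros.  */
        v = __lasx_xvinsgr2vr_w(v, x, 0);      /* lane 0 = x.  */
        return __lasx_xvreplve0_w(v);          /* copy lane 0 everywhere.  */
    }
)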
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_w_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_w_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_d_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_d_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_d_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_d_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_hu_bu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_hu_bu((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_wu_hu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_wu_hu((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_du_wu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_du_wu((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_wu_bu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_wu_bu((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_du_hu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_du_hu((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_vext2xv_du_bu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_vext2xv_du_bu((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvpermi_q((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui8. */ -+/* Data types in instruction templates: V4DI, V4DI, USI. */ -+#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvpermi_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvperm_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvperm_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, rj, si12. */ -+/* Data types in instruction templates: V32QI, CVPOINTER, SI. 
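(Aside: unlike the xvexth forms further below, which work within each 128-bit half, vext2xv widens the lowest elements of the full 256-bit register in one step. For example, illustrative and -mlasx assumed:

    #include <lasxintrin.h>

    /* Sign-extend the low eight byte lanes to eight 32-bit lanes.  */
    __m256i widen_b_w(__m256i v)
    {
        return __lasx_vext2xv_w_b(v);
    }
)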
*/ -+#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvldrepl_b((void *)(_1), (_2))) -+ -+/* Assembly instruction format: xd, rj, si11. */ -+/* Data types in instruction templates: V16HI, CVPOINTER, SI. */ -+#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m256i)__builtin_lasx_xvldrepl_h((void *)(_1), (_2))) -+ -+/* Assembly instruction format: xd, rj, si10. */ -+/* Data types in instruction templates: V8SI, CVPOINTER, SI. */ -+#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m256i)__builtin_lasx_xvldrepl_w((void *)(_1), (_2))) -+ -+/* Assembly instruction format: xd, rj, si9. */ -+/* Data types in instruction templates: V4DI, CVPOINTER, SI. */ -+#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m256i)__builtin_lasx_xvldrepl_d((void *)(_1), (_2))) -+ -+/* Assembly instruction format: rd, xj, ui3. */ -+/* Data types in instruction templates: SI, V8SI, UQI. */ -+#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) ((int)__builtin_lasx_xvpickve2gr_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: rd, xj, ui3. */ -+/* Data types in instruction templates: USI, V8SI, UQI. */ -+#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) ((unsigned int)__builtin_lasx_xvpickve2gr_wu((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: rd, xj, ui2. */ -+/* Data types in instruction templates: DI, V4DI, UQI. */ -+#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) ((long int)__builtin_lasx_xvpickve2gr_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: rd, xj, ui2. */ -+/* Data types in instruction templates: UDI, V4DI, UQI. */ -+#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) ((unsigned long int)__builtin_lasx_xvpickve2gr_du((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_q_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_q_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
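(Aside: xvldrepl loads one element from memory and broadcasts it, and xvpickve2gr is the reverse direction, lane to GPR. A round-trip sketch, -mlasx assumed:

    #include <lasxintrin.h>

    int roundtrip(const int *p)
    {
        __m256i v = __lasx_xvldrepl_w(p, 0);   /* every lane = *p.  */
        return __lasx_xvpickve2gr_w(v, 5);     /* read lane 5 back: *p.  */
    }
)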
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_d_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_d_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_w_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_w_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_h_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_h_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_q_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_q_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_d_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_d_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_w_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_w_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwev_h_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwev_h_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_q_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_q_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_d_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_d_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_w_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_w_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_h_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_h_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. 
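(Aside: all of the wev forms read only the even-numbered source lanes and produce results of twice the width; their odd-lane counterparts follow below. E.g., illustrative, -mlasx assumed:

    #include <lasxintrin.h>

    /* 64-bit result lane i = (long)a[2*i] * (long)b[2*i].  */
    __m256i mul_even(__m256i a, __m256i b)
    {
        return __lasx_xvmulwev_d_w(a, b);
    }
)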
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_q_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_q_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_d_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_d_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_w_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_w_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_h_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_h_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_q_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_q_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_d_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_d_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_w_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_w_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsubwod_h_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsubwod_h_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_d_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_d_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_w_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_w_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_h_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_h_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_q_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_q_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_d_wu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_d_wu((v8u32)_1, (v8u32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_w_hu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_w_hu((v16u16)_1, (v16u16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_h_bu(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_h_bu((v32u8)_1, (v32u8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_d_wu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_d_wu_w((v8u32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_w_hu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_w_hu_h((v16u16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_h_bu_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_h_bu_b((v32u8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_d_wu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_d_wu_w((v8u32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_w_hu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_w_hu_h((v16u16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_h_bu_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_h_bu_b((v32u8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_d_wu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_d_wu_w((v8u32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, V16HI. 
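(Aside: pairing an even-lane form with its odd-lane twin covers every source lane, which is the usual idiom for a full widening multiply. Sketch, -mlasx assumed and names illustrative:

    #include <lasxintrin.h>

    /* Full 32x32 -> 64 widening multiply: even-lane products in *even,
       odd-lane products in *odd; together, all 8 lane pairs.  */
    void mul_full(__m256i a, __m256i b, __m256i *even, __m256i *odd)
    {
        *even = __lasx_xvmulwev_d_w(a, b);     /* source lanes 0,2,4,6.  */
        *odd  = __lasx_xvmulwod_d_w(a, b);     /* source lanes 1,3,5,7.  */
    }
)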
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_w_hu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_w_hu_h((v16u16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_h_bu_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_h_bu_b((v32u8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_d_wu_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_d_wu_w((v8u32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_w_hu_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_w_hu_h((v16u16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_h_bu_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_h_bu_b((v32u8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhaddw_qu_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhaddw_qu_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_q_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_q_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvhsubw_qu_du(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvhsubw_qu_du((v4u64)_1, (v4u64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_q_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_q_d((v4i64)_1, (v4i64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_d_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_d_w((v4i64)_1, (v8i32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_w_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_w_h((v8i32)_1, (v16i16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_h_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_h_b((v16i16)_1, (v32i8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_q_du(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_q_du((v4u64)_1, (v4u64)_2, (v4u64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_d_wu(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_d_wu((v4u64)_1, (v8u32)_2, (v8u32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_w_hu(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_w_hu((v8u32)_1, (v16u16)_2, (v16u16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_h_bu(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_h_bu((v16u16)_1, (v32u8)_2, (v32u8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_q_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_q_d((v4i64)_1, (v4i64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_d_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_d_w((v4i64)_1, (v8i32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_w_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_w_h((v8i32)_1, (v16i16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_h_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_h_b((v16i16)_1, (v32i8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_q_du(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_q_du((v4u64)_1, (v4u64)_2, (v4u64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_d_wu(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_d_wu((v4u64)_1, (v8u32)_2, (v8u32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_w_hu(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_w_hu((v8u32)_1, (v16u16)_2, (v16u16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_h_bu(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_h_bu((v16u16)_1, (v32u8)_2, (v32u8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_q_du_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_q_du_d((v4i64)_1, (v4u64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_d_wu_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w((v4i64)_1, (v8u32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_w_hu_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h((v8i32)_1, (v16u16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwev_h_bu_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b((v16i16)_1, (v32u8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_q_du_d(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_q_du_d((v4i64)_1, (v4u64)_2, (v4i64)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_d_wu_w(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w((v4i64)_1, (v8u32)_2, (v8i32)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_w_hu_h(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h((v8i32)_1, (v16u16)_2, (v16i16)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmaddwod_h_bu_b(__m256i _1, __m256i _2, __m256i _3) -+{ -+ return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b((v16i16)_1, (v32u8)_2, (v32i8)_3); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvrotr_b(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvrotr_b((v32i8)_1, (v32i8)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvrotr_h(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvrotr_h((v16i16)_1, (v16i16)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvrotr_w(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvrotr_w((v8i32)_1, (v8i32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvrotr_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvrotr_d((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvadd_q(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvadd_q((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI. 
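   Editorial sketch, not part of the original patch: although the template type reads V4DI, xvadd_q/xvsub_q appear to operate on 128-bit quantities, one per 128-bit lane, so a carry propagates across the 64-bit halves within each lane.  A hedged sketch; the function name is illustrative:

     // Two independent 128-bit additions, one per lane of the 256-bit register.
     static __m256i
     add_u128_lanes (__m256i a, __m256i b)
     {
       return __lasx_xvadd_q (a, b);
     }
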
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvsub_q(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvsub_q((v4i64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwev_q_du_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwev_q_du_d((v4u64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvaddwod_q_du_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvaddwod_q_du_d((v4u64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwev_q_du_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwev_q_du_d((v4u64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmulwod_q_du_d(__m256i _1, __m256i _2) -+{ -+ return (__m256i)__builtin_lasx_xvmulwod_q_du_d((v4u64)_1, (v4i64)_2); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmskgez_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvmskgez_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V32QI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvmsknz_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvmsknz_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V16HI, V32QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_h_b(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_h_b((v32i8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V8SI, V16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_w_h(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_w_h((v16i16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_d_w(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_d_w((v8i32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_q_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_q_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV16HI, UV32QI. 
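   Editorial sketch, not part of the original patch: the xvexth_* intrinsics widen the high-half source elements to twice their width; plain forms such as xvexth_h_b sign-extend, while the u-suffixed forms such as xvexth_hu_bu zero-extend.  A minimal sketch; the function name is illustrative:

     // Zero-extend the upper byte lanes of v to unsigned 16-bit lanes.
     static __m256i
     zext_high_bytes (__m256i v)
     {
       return __lasx_xvexth_hu_bu (v);
     }
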
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_hu_bu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_hu_bu((v32u8)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV8SI, UV16HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_wu_hu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_wu_hu((v16u16)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV4DI, UV8SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_du_wu(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_du_wu((v8u32)_1); -+} -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: UV4DI, UV4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvexth_qu_du(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvexth_qu_du((v4u64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V32QI, V32QI, UQI. */ -+#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvrotri_b((v32i8)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V16HI, V16HI, UQI. */ -+#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvrotri_h((v16i16)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V8SI, V8SI, UQI. */ -+#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvrotri_w((v8i32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V4DI, V4DI, UQI. */ -+#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvrotri_d((v4i64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj. */ -+/* Data types in instruction templates: V4DI, V4DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvextl_q_d(__m256i _1) -+{ -+ return (__m256i)__builtin_lasx_xvextl_q_d((v4i64)_1); -+} -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrlni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrlni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrlni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrlni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. 
*/ -+#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ -+#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlni_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ -+#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlni_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ -+#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlni_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ -+#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlni_du_q((v4u64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. 
*/ -+#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ -+#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ -+#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ -+#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ -+#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_du_q((v4u64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrani_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrani_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrani_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrani_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrarni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrarni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. 
*/ -+#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrarni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrarni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrani_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrani_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrani_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ -+#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrani_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ -+#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrani_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ -+#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrani_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ -+#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrani_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ -+#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrani_du_q((v4u64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ -+#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrarni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ -+#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrarni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ -+#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrarni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ -+#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrarni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui4. */ -+/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ -+#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrarni_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui5. */ -+/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ -+#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrarni_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui6. */ -+/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ -+#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrarni_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) -+ -+/* Assembly instruction format: xd, xj, ui7. */ -+/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ -+#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrarni_du_q((v4u64)(_1), (v4i64)(_2), (_3))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV32QI. */ -+#define __lasx_xbnz_b(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_b((v32u8)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV4DI. */ -+#define __lasx_xbnz_d(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_d((v4u64)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV16HI. */ -+#define __lasx_xbnz_h(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_h((v16u16)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV32QI. */ -+#define __lasx_xbnz_v(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_v((v32u8)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV8SI. */ -+#define __lasx_xbnz_w(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_w((v8u32)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV32QI. */ -+#define __lasx_xbz_b(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_b((v32u8)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV4DI. */ -+#define __lasx_xbz_d(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_d((v4u64)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV16HI. */ -+#define __lasx_xbz_h(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_h((v16u16)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV32QI. */ -+#define __lasx_xbz_v(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_v((v32u8)(_1))) -+ -+/* Assembly instruction format: cd, xj. */ -+/* Data types in instruction templates: SI, UV8SI. */ -+#define __lasx_xbz_w(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_w((v8u32)(_1))) -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_caf_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_caf_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. 
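   Editorial note, not part of the original patch: each xvfcmp_* intrinsic returns a per-lane mask, all ones where the predicate holds and all zeros elsewhere, which composes naturally with the xbz_*/xbnz_* whole-vector tests above.  A hedged sketch; handle_any_less is an illustrative placeholder:

     __m256i m = __lasx_xvfcmp_clt_s (a, b);  // lanes where a < b become ~0
     if (__lasx_xbnz_v (m))                   // nonzero if any bit of m is set
       handle_any_less ();
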
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_caf_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_caf_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_ceq_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_ceq_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_ceq_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_ceq_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cle_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cle_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cle_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cle_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_clt_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_clt_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_clt_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_clt_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cne_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cne_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cne_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cne_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cor_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cor_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cor_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cor_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cueq_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cueq_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cueq_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cueq_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cule_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cule_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cule_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cule_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cult_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cult_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cult_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cult_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cun_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cun_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cune_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cune_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cune_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cune_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_cun_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_cun_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_saf_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_saf_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_saf_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_saf_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_seq_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_seq_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_seq_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_seq_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sle_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sle_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sle_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sle_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_slt_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_slt_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_slt_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_slt_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sne_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sne_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sne_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sne_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sor_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sor_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sor_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sor_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sueq_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sueq_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sueq_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sueq_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sule_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sule_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sule_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sule_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sult_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sult_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sult_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sult_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sun_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sun_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V4DI, V4DF, V4DF. 
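   Editorial note, not part of the original patch: the c-prefixed predicates (ceq, clt, cun, ...) are quiet comparisons, while the s-prefixed ones (seq, slt, sun, ...) are signaling: a NaN operand additionally sets the IEEE invalid-operation flag.  A hedged contrast:

     __m256i q = __lasx_xvfcmp_cule_d (x, y);  // NaN lanes compare true; no flag raised
     __m256i s = __lasx_xvfcmp_sule_d (x, y);  // NaN lanes compare true and flag invalid
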
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sune_d(__m256d _1, __m256d _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sune_d((v4f64)_1, (v4f64)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sune_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sune_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, xk. */ -+/* Data types in instruction templates: V8SI, V8SF, V8SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m256i __lasx_xvfcmp_sun_s(__m256 _1, __m256 _2) -+{ -+ return (__m256i)__builtin_lasx_xvfcmp_sun_s((v8f32)_1, (v8f32)_2); -+} -+ -+/* Assembly instruction format: xd, xj, ui2. */ -+/* Data types in instruction templates: V4DF, V4DF, UQI. */ -+#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) ((__m256d)__builtin_lasx_xvpickve_d_f((v4f64)(_1), (_2))) -+ -+/* Assembly instruction format: xd, xj, ui3. */ -+/* Data types in instruction templates: V8SF, V8SF, UQI. */ -+#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) ((__m256)__builtin_lasx_xvpickve_w_f((v8f32)(_1), (_2))) -+ -+/* Assembly instruction format: xd, si10. */ -+/* Data types in instruction templates: V32QI, HI. */ -+#define __lasx_xvrepli_b(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_b((_1))) -+ -+/* Assembly instruction format: xd, si10. */ -+/* Data types in instruction templates: V4DI, HI. */ -+#define __lasx_xvrepli_d(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_d((_1))) -+ -+/* Assembly instruction format: xd, si10. */ -+/* Data types in instruction templates: V16HI, HI. */ -+#define __lasx_xvrepli_h(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_h((_1))) -+ -+/* Assembly instruction format: xd, si10. */ -+/* Data types in instruction templates: V8SI, HI. */ -+#define __lasx_xvrepli_w(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_w((_1))) -+ -+#endif /* defined(__loongarch_asx). */ -+#endif /* _GCC_LOONGSON_ASXINTRIN_H. */ -diff --git a/gcc/config/loongarch/linux-common.h b/gcc/config/loongarch/linux-common.h -new file mode 100644 -index 000000000..9e1a1b50f ---- /dev/null -+++ b/gcc/config/loongarch/linux-common.h -@@ -0,0 +1,68 @@ -+/* Definitions for LARCH running Linux-based GNU systems with ELF format. -+ Copyright (C) 2012-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+<http://www.gnu.org/licenses/>. */ -+ -+#undef TARGET_OS_CPP_BUILTINS -+#define TARGET_OS_CPP_BUILTINS() \ -+ do { \ -+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \ -+ /* The GNU C++ standard library requires this. 
*/ \ -+ if (c_dialect_cxx ()) \ -+ builtin_define ("_GNU_SOURCE"); \ -+ ANDROID_TARGET_OS_CPP_BUILTINS(); \ -+ } while (0) -+ -+#define EXTRA_TARGET_D_OS_VERSIONS() \ -+ ANDROID_TARGET_D_OS_VERSIONS(); -+ -+#undef LINK_SPEC -+#define LINK_SPEC \ -+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LINK_SPEC, \ -+ GNU_USER_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) -+ -+#undef SUBTARGET_CC1_SPEC -+#define SUBTARGET_CC1_SPEC \ -+ LINUX_OR_ANDROID_CC (GNU_USER_TARGET_CC1_SPEC, \ -+ GNU_USER_TARGET_CC1_SPEC " " ANDROID_CC1_SPEC) -+ -+#undef CC1PLUS_SPEC -+#define CC1PLUS_SPEC \ -+ LINUX_OR_ANDROID_CC ("", ANDROID_CC1PLUS_SPEC) -+ -+#undef LIB_SPEC -+#define LIB_SPEC \ -+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LIB_SPEC, \ -+ GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC " " ANDROID_LIB_SPEC) -+ -+#undef STARTFILE_SPEC -+#define STARTFILE_SPEC \ -+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_STARTFILE_SPEC, ANDROID_STARTFILE_SPEC) -+ -+#undef ENDFILE_SPEC -+#define ENDFILE_SPEC \ -+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_MATHFILE_SPEC " " \ -+ GNU_USER_TARGET_ENDFILE_SPEC, \ -+ GNU_USER_TARGET_MATHFILE_SPEC " " \ -+ ANDROID_ENDFILE_SPEC) -+ -+/* Define this to be nonzero if static stack checking is supported. */ -+#define STACK_CHECK_STATIC_BUILTIN 1 -+ -+/* FIXME*/ -+/* The default value isn't sufficient in 64-bit mode. */ -+#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) -diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h -new file mode 100644 -index 000000000..520a8ef32 ---- /dev/null -+++ b/gcc/config/loongarch/linux.h -@@ -0,0 +1,33 @@ -+/* Definitions for LARCH running Linux-based GNU systems with ELF format. -+ Copyright (C) 1998-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+<http://www.gnu.org/licenses/>. */ -+ -+#define GNU_USER_LINK_EMULATION32 "elf32loongarch" -+#define GNU_USER_LINK_EMULATION64 "elf64loongarch" -+ -+#define GLIBC_DYNAMIC_LINKERLP32 \ -+ "/lib32/ld.so.1" -+#define GLIBC_DYNAMIC_LINKERLP64 \ -+ "/lib64/ld.so.1" -+ -+#define GNU_USER_DYNAMIC_LINKERLP32 GLIBC_DYNAMIC_LINKERLP32 -+#define GNU_USER_DYNAMIC_LINKERLP64 GLIBC_DYNAMIC_LINKERLP64 -+ -+ -+#undef TARGET_ASM_FILE_END -+#define TARGET_ASM_FILE_END file_end_indicate_exec_stack -diff --git a/gcc/config/loongarch/loongarch-builtins.c b/gcc/config/loongarch/loongarch-builtins.c -new file mode 100644 -index 000000000..9fa68b11f ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-builtins.c -@@ -0,0 +1,3152 @@ -+ -+/* Subroutines used for expanding LOONGARCH builtins. -+ Copyright (C) 2011-2018 Free Software Foundation, Inc. -+ Contributed by Andrew Waterman (andrew@sifive.com). -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. 
-+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+<http://www.gnu.org/licenses/>. */ -+ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "backend.h" -+#include "target.h" -+#include "rtl.h" -+#include "tree.h" -+#include "memmodel.h" -+#include "gimple.h" -+#include "cfghooks.h" -+#include "df.h" -+#include "tm_p.h" -+#include "stringpool.h" -+#include "attribs.h" -+#include "optabs.h" -+#include "regs.h" -+#include "emit-rtl.h" -+#include "recog.h" -+#include "cgraph.h" -+#include "diagnostic.h" -+#include "insn-attr.h" -+#include "output.h" -+#include "alias.h" -+#include "fold-const.h" -+#include "varasm.h" -+#include "stor-layout.h" -+#include "calls.h" -+#include "explow.h" -+#include "expr.h" -+#include "libfuncs.h" -+#include "reload.h" -+#include "common/common-target.h" -+#include "langhooks.h" -+#include "cfgrtl.h" -+#include "cfganal.h" -+#include "sched-int.h" -+#include "gimplify.h" -+#include "target-globals.h" -+#include "tree-pass.h" -+#include "context.h" -+#include "builtins.h" -+#include "rtl-iter.h" -+ -+/* This file should be included last. */ -+#include "target-def.h" -+/* Macros to create an enumeration identifier for a function prototype. */ -+#define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B -+#define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C -+#define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D -+#define LARCH_FTYPE_NAME4(A, B, C, D, E) LARCH_##A##_FTYPE_##B##_##C##_##D##_##E -+ -+/* Classifies the prototype of a built-in function. */ -+enum loongarch_function_type { -+#define DEF_LARCH_FTYPE(NARGS, LIST) LARCH_FTYPE_NAME##NARGS LIST, -+#include "config/loongarch/loongarch-ftypes.def" -+#undef DEF_LARCH_FTYPE -+ LARCH_MAX_FTYPE_MAX -+}; -+ -+/* Specifies how a built-in function should be converted into rtl. */ -+enum loongarch_builtin_type { -+ /* The function corresponds directly to an .md pattern. The return -+ value is mapped to operand 0 and the arguments are mapped to -+ operands 1 and above. */ -+ LARCH_BUILTIN_DIRECT, -+ -+ /* The function corresponds directly to an .md pattern. There is no return -+ value and the arguments are mapped to operands 0 and above. */ -+ LARCH_BUILTIN_DIRECT_NO_TARGET, -+ -+ /* The function corresponds to an LSX conditional branch instruction -+ combined with a compare instruction. */ -+ LARCH_BUILTIN_LSX_TEST_BRANCH, -+ -+ /* For generating LoongArch LSX. */ -+ LARCH_BUILTIN_LSX, -+ -+ /* For generating LoongArch LASX. */ -+ LARCH_BUILTIN_LASX, -+ -+ /* The function corresponds to an LASX conditional branch instruction -+ combined with a compare instruction. */ -+ LARCH_BUILTIN_LASX_TEST_BRANCH, -+ -+}; -+ -+/* Invoke MACRO (COND) for each C.cond.fmt condition. */ -+#define LARCH_FP_CONDITIONS(MACRO) \ -+ MACRO (f), \ -+ MACRO (un), \ -+ MACRO (eq), \ -+ MACRO (ueq), \ -+ MACRO (olt), \ -+ MACRO (ult), \ -+ MACRO (ole), \ -+ MACRO (ule), \ -+ MACRO (sf), \ -+ MACRO (ngle), \ -+ MACRO (seq), \ -+ MACRO (ngl), \ -+ MACRO (lt), \ -+ MACRO (nge), \ -+ MACRO (le), \ -+ MACRO (ngt) -+ -+/* Enumerates the codes above as LARCH_FP_COND_<X>. 
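   Editorial note, not part of the original patch: LARCH_FP_CONDITIONS is an X-macro list; each use below supplies a different MACRO to stamp a parallel table out of the same condition names.  Roughly, the two expansions that follow produce:

     enum loongarch_fp_condition {
       LARCH_FP_COND_f, LARCH_FP_COND_un, LARCH_FP_COND_eq, ...
     };
     const char *const loongarch_fp_conditions[16] = {
       "f", "un", "eq", ...
     };
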
*/ -+#define DECLARE_LARCH_COND(X) LARCH_FP_COND_ ## X -+enum loongarch_fp_condition { -+ LARCH_FP_CONDITIONS (DECLARE_LARCH_COND) -+}; -+#undef DECLARE_LARCH_COND -+ -+/* Index X provides the string representation of LARCH_FP_COND_<X>. */ -+#define STRINGIFY(X) #X -+const char *const loongarch_fp_conditions[16] = { -+ LARCH_FP_CONDITIONS (STRINGIFY) -+}; -+#undef STRINGIFY -+/* Declare an availability predicate for built-in functions that require -+ * COND to be true. NAME is the main part of the predicate's name. */ -+#define AVAIL_ALL(NAME, COND) \ -+ static unsigned int \ -+ loongarch_builtin_avail_##NAME (void) \ -+ { \ -+ return (COND) ? 1 : 0; \ -+ } -+ -+static unsigned int -+loongarch_builtin_avail_default (void) -+{ -+ return 1; -+} -+/* This structure describes a single built-in function. */ -+struct loongarch_builtin_description { -+ /* The code of the main .md file instruction. See loongarch_builtin_type -+ for more information. */ -+ enum insn_code icode; -+ -+ /* The floating-point comparison code to use with ICODE, if any. */ -+ enum loongarch_fp_condition cond; -+ -+ /* The name of the built-in function. */ -+ const char *name; -+ -+ /* Specifies how the function should be expanded. */ -+ enum loongarch_builtin_type builtin_type; -+ -+ /* The function's prototype. */ -+ enum loongarch_function_type function_type; -+ -+ /* Whether the function is available. */ -+ unsigned int (*avail) (void); -+}; -+ -+AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) -+AVAIL_ALL (lsx, TARGET_LSX) -+AVAIL_ALL (lasx, TARGET_LASX) -+ -+/* Construct a loongarch_builtin_description from the given arguments. -+ -+ INSN is the name of the associated instruction pattern, without the -+ leading CODE_FOR_loongarch_. -+ -+ CODE is the floating-point condition code associated with the -+ function. It can be 'f' if the field is not applicable. -+ -+ NAME is the name of the function itself, without the leading -+ "__builtin_loongarch_". -+ -+ BUILTIN_TYPE and FUNCTION_TYPE are loongarch_builtin_description fields. -+ -+ AVAIL is the name of the availability predicate, without the leading -+ loongarch_builtin_avail_. */ -+#define LARCH_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \ -+ FUNCTION_TYPE, AVAIL) \ -+ { CODE_FOR_loongarch_ ## INSN, LARCH_FP_COND_ ## COND, \ -+ "__builtin_loongarch_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \ -+ loongarch_builtin_avail_ ## AVAIL } -+ -+/* Define __builtin_loongarch_<INSN>, which is a LARCH_BUILTIN_DIRECT function -+ mapped to instruction CODE_FOR_loongarch_<INSN>, FUNCTION_TYPE and AVAIL -+ are as for LARCH_BUILTIN. */ -+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ -+ LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) -+ -+/* Define __builtin_loongarch_<INSN>, which is a LARCH_BUILTIN_DIRECT_NO_TARGET -+ function mapped to instruction CODE_FOR_loongarch_<INSN>, FUNCTION_TYPE -+ and AVAIL are as for LARCH_BUILTIN. */ -+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ -+ LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ -+ FUNCTION_TYPE, AVAIL) -+ -+/* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_<INSN> -+ for instruction CODE_FOR_lsx_<INSN>. FUNCTION_TYPE is a builtin_description -+ field. */ -+#define LSX_BUILTIN(INSN, FUNCTION_TYPE) \ -+ { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ -+ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ -+ FUNCTION_TYPE, loongarch_builtin_avail_lsx } -+ -+ -+/* Define an LSX LARCH_BUILTIN_LSX_TEST_BRANCH function __builtin_lsx_<INSN> -+ for instruction CODE_FOR_lsx_<INSN>. FUNCTION_TYPE is a builtin_description -+ field. 
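   Editorial note, not part of the original patch: these wrappers paste INSN into both the insn_code enumerator and the user-visible builtin name via the ## and # operators.  For example, an entry such as the following (the LARCH_V16QI_FTYPE_V16QI_V16QI prototype name is illustrative) would expand to:

     LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI)
       => { CODE_FOR_lsx_vadd_b, LARCH_FP_COND_f, "__builtin_lsx_vadd_b",
            LARCH_BUILTIN_DIRECT, LARCH_V16QI_FTYPE_V16QI_V16QI,
            loongarch_builtin_avail_lsx }
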
*/ -+#define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ -+ { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ -+ "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH, \ -+ FUNCTION_TYPE, loongarch_builtin_avail_lsx } -+ -+/* Define an LSX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lsx_<INSN> -+ for instruction CODE_FOR_lsx_<INSN>. FUNCTION_TYPE is a builtin_description -+ field. */ -+#define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ -+ { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ -+ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ -+ FUNCTION_TYPE, loongarch_builtin_avail_lsx } -+ -+/* Define an LASX LARCH_BUILTIN_DIRECT function __builtin_lasx_<INSN> -+ for instruction CODE_FOR_lasx_<INSN>. FUNCTION_TYPE is a builtin_description -+ field. */ -+#define LASX_BUILTIN(INSN, FUNCTION_TYPE) \ -+ { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ -+ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX, \ -+ FUNCTION_TYPE, loongarch_builtin_avail_lasx } -+ -+/* Define an LASX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lasx_<INSN> -+ for instruction CODE_FOR_lasx_<INSN>. FUNCTION_TYPE is a builtin_description -+ field. */ -+#define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ -+ { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ -+ "__builtin_lasx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ -+ FUNCTION_TYPE, loongarch_builtin_avail_lasx } -+ -+/* Define an LASX LARCH_BUILTIN_LASX_TEST_BRANCH function __builtin_lasx_<INSN> -+ for instruction CODE_FOR_lasx_<INSN>. FUNCTION_TYPE is a builtin_description -+ field. */ -+#define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ -+ { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ -+ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH, \ -+ FUNCTION_TYPE, loongarch_builtin_avail_lasx } -+ -+/* LoongArch BASE instructions define CODE_FOR_loongarch_xxx */ -+#define CODE_FOR_loongarch_fmax_sf CODE_FOR_smaxsf3 -+#define CODE_FOR_loongarch_fmax_df CODE_FOR_smaxdf3 -+#define CODE_FOR_loongarch_fmin_sf CODE_FOR_sminsf3 -+#define CODE_FOR_loongarch_fmin_df CODE_FOR_smindf3 -+#define CODE_FOR_loongarch_fmaxa_sf CODE_FOR_smaxasf3 -+#define CODE_FOR_loongarch_fmaxa_df CODE_FOR_smaxadf3 -+#define CODE_FOR_loongarch_fmina_sf CODE_FOR_sminasf3 -+#define CODE_FOR_loongarch_fmina_df CODE_FOR_sminadf3 -+#define CODE_FOR_loongarch_fclass_s CODE_FOR_fclass_s -+#define CODE_FOR_loongarch_fclass_d CODE_FOR_fclass_d -+#define CODE_FOR_loongarch_frint_s CODE_FOR_frint_s -+#define CODE_FOR_loongarch_frint_d CODE_FOR_frint_d -+#define CODE_FOR_loongarch_bytepick_w CODE_FOR_bytepick_w -+#define CODE_FOR_loongarch_bytepick_d CODE_FOR_bytepick_d -+#define CODE_FOR_loongarch_bitrev_4b CODE_FOR_bitrev_4b -+#define CODE_FOR_loongarch_bitrev_8b CODE_FOR_bitrev_8b -+ -+/* LoongArch support crc */ -+#define CODE_FOR_loongarch_crc_w_b_w CODE_FOR_crc_w_b_w -+#define CODE_FOR_loongarch_crc_w_h_w CODE_FOR_crc_w_h_w -+#define CODE_FOR_loongarch_crc_w_w_w CODE_FOR_crc_w_w_w -+#define CODE_FOR_loongarch_crc_w_d_w CODE_FOR_crc_w_d_w -+#define CODE_FOR_loongarch_crcc_w_b_w CODE_FOR_crcc_w_b_w -+#define CODE_FOR_loongarch_crcc_w_h_w CODE_FOR_crcc_w_h_w -+#define CODE_FOR_loongarch_crcc_w_w_w CODE_FOR_crcc_w_w_w -+#define CODE_FOR_loongarch_crcc_w_d_w CODE_FOR_crcc_w_d_w -+ -+/* Privileged state instruction */ -+#define CODE_FOR_loongarch_cpucfg CODE_FOR_cpucfg -+#define CODE_FOR_loongarch_asrtle_d CODE_FOR_asrtle_d -+#define CODE_FOR_loongarch_asrtgt_d CODE_FOR_asrtgt_d -+#define CODE_FOR_loongarch_csrrd CODE_FOR_csrrd -+#define CODE_FOR_loongarch_dcsrrd CODE_FOR_dcsrrd -+#define CODE_FOR_loongarch_csrwr CODE_FOR_csrwr -+#define 
CODE_FOR_loongarch_dcsrwr CODE_FOR_dcsrwr -+#define CODE_FOR_loongarch_csrxchg CODE_FOR_csrxchg -+#define CODE_FOR_loongarch_dcsrxchg CODE_FOR_dcsrxchg -+#define CODE_FOR_loongarch_iocsrrd_b CODE_FOR_iocsrrd_b -+#define CODE_FOR_loongarch_iocsrrd_h CODE_FOR_iocsrrd_h -+#define CODE_FOR_loongarch_iocsrrd_w CODE_FOR_iocsrrd_w -+#define CODE_FOR_loongarch_iocsrrd_d CODE_FOR_iocsrrd_d -+#define CODE_FOR_loongarch_iocsrwr_b CODE_FOR_iocsrwr_b -+#define CODE_FOR_loongarch_iocsrwr_h CODE_FOR_iocsrwr_h -+#define CODE_FOR_loongarch_iocsrwr_w CODE_FOR_iocsrwr_w -+#define CODE_FOR_loongarch_iocsrwr_d CODE_FOR_iocsrwr_d -+#define CODE_FOR_loongarch_lddir CODE_FOR_lddir -+#define CODE_FOR_loongarch_dlddir CODE_FOR_dlddir -+#define CODE_FOR_loongarch_ldpte CODE_FOR_ldpte -+#define CODE_FOR_loongarch_dldpte CODE_FOR_dldpte -+#define CODE_FOR_loongarch_cacop CODE_FOR_cacop -+#define CODE_FOR_loongarch_dcacop CODE_FOR_dcacop -+#define CODE_FOR_loongarch_dbar CODE_FOR_dbar -+#define CODE_FOR_loongarch_ibar CODE_FOR_ibar -+ -+/* LoongArch SX define CODE_FOR_lsx_xxx */ -+#define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 -+#define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 -+#define CODE_FOR_lsx_vsadd_w CODE_FOR_ssaddv4si3 -+#define CODE_FOR_lsx_vsadd_d CODE_FOR_ssaddv2di3 -+#define CODE_FOR_lsx_vsadd_bu CODE_FOR_usaddv16qi3 -+#define CODE_FOR_lsx_vsadd_hu CODE_FOR_usaddv8hi3 -+#define CODE_FOR_lsx_vsadd_wu CODE_FOR_usaddv4si3 -+#define CODE_FOR_lsx_vsadd_du CODE_FOR_usaddv2di3 -+#define CODE_FOR_lsx_vadd_b CODE_FOR_addv16qi3 -+#define CODE_FOR_lsx_vadd_h CODE_FOR_addv8hi3 -+#define CODE_FOR_lsx_vadd_w CODE_FOR_addv4si3 -+#define CODE_FOR_lsx_vadd_d CODE_FOR_addv2di3 -+#define CODE_FOR_lsx_vaddi_bu CODE_FOR_addv16qi3 -+#define CODE_FOR_lsx_vaddi_hu CODE_FOR_addv8hi3 -+#define CODE_FOR_lsx_vaddi_wu CODE_FOR_addv4si3 -+#define CODE_FOR_lsx_vaddi_du CODE_FOR_addv2di3 -+#define CODE_FOR_lsx_vand_v CODE_FOR_andv16qi3 -+#define CODE_FOR_lsx_vandi_b CODE_FOR_andv16qi3 -+#define CODE_FOR_lsx_bnz_v CODE_FOR_lsx_bnz_v_b -+#define CODE_FOR_lsx_bz_v CODE_FOR_lsx_bz_v_b -+#define CODE_FOR_lsx_vbitsel_v CODE_FOR_lsx_vbitsel_b -+#define CODE_FOR_lsx_vseqi_b CODE_FOR_lsx_vseq_b -+#define CODE_FOR_lsx_vseqi_h CODE_FOR_lsx_vseq_h -+#define CODE_FOR_lsx_vseqi_w CODE_FOR_lsx_vseq_w -+#define CODE_FOR_lsx_vseqi_d CODE_FOR_lsx_vseq_d -+#define CODE_FOR_lsx_vslti_b CODE_FOR_lsx_vslt_b -+#define CODE_FOR_lsx_vslti_h CODE_FOR_lsx_vslt_h -+#define CODE_FOR_lsx_vslti_w CODE_FOR_lsx_vslt_w -+#define CODE_FOR_lsx_vslti_d CODE_FOR_lsx_vslt_d -+#define CODE_FOR_lsx_vslti_bu CODE_FOR_lsx_vslt_bu -+#define CODE_FOR_lsx_vslti_hu CODE_FOR_lsx_vslt_hu -+#define CODE_FOR_lsx_vslti_wu CODE_FOR_lsx_vslt_wu -+#define CODE_FOR_lsx_vslti_du CODE_FOR_lsx_vslt_du -+#define CODE_FOR_lsx_vslei_b CODE_FOR_lsx_vsle_b -+#define CODE_FOR_lsx_vslei_h CODE_FOR_lsx_vsle_h -+#define CODE_FOR_lsx_vslei_w CODE_FOR_lsx_vsle_w -+#define CODE_FOR_lsx_vslei_d CODE_FOR_lsx_vsle_d -+#define CODE_FOR_lsx_vslei_bu CODE_FOR_lsx_vsle_bu -+#define CODE_FOR_lsx_vslei_hu CODE_FOR_lsx_vsle_hu -+#define CODE_FOR_lsx_vslei_wu CODE_FOR_lsx_vsle_wu -+#define CODE_FOR_lsx_vslei_du CODE_FOR_lsx_vsle_du -+#define CODE_FOR_lsx_vdiv_b CODE_FOR_divv16qi3 -+#define CODE_FOR_lsx_vdiv_h CODE_FOR_divv8hi3 -+#define CODE_FOR_lsx_vdiv_w CODE_FOR_divv4si3 -+#define CODE_FOR_lsx_vdiv_d CODE_FOR_divv2di3 -+#define CODE_FOR_lsx_vdiv_bu CODE_FOR_udivv16qi3 -+#define CODE_FOR_lsx_vdiv_hu CODE_FOR_udivv8hi3 -+#define CODE_FOR_lsx_vdiv_wu CODE_FOR_udivv4si3 -+#define CODE_FOR_lsx_vdiv_du 
CODE_FOR_udivv2di3 -+#define CODE_FOR_lsx_vfadd_s CODE_FOR_addv4sf3 -+#define CODE_FOR_lsx_vfadd_d CODE_FOR_addv2df3 -+#define CODE_FOR_lsx_vftintrz_w_s CODE_FOR_fix_truncv4sfv4si2 -+#define CODE_FOR_lsx_vftintrz_l_d CODE_FOR_fix_truncv2dfv2di2 -+#define CODE_FOR_lsx_vftintrz_wu_s CODE_FOR_fixuns_truncv4sfv4si2 -+#define CODE_FOR_lsx_vftintrz_lu_d CODE_FOR_fixuns_truncv2dfv2di2 -+#define CODE_FOR_lsx_vffint_s_w CODE_FOR_floatv4siv4sf2 -+#define CODE_FOR_lsx_vffint_d_l CODE_FOR_floatv2div2df2 -+#define CODE_FOR_lsx_vffint_s_wu CODE_FOR_floatunsv4siv4sf2 -+#define CODE_FOR_lsx_vffint_d_lu CODE_FOR_floatunsv2div2df2 -+#define CODE_FOR_lsx_vfsub_s CODE_FOR_subv4sf3 -+#define CODE_FOR_lsx_vfsub_d CODE_FOR_subv2df3 -+#define CODE_FOR_lsx_vfmul_s CODE_FOR_mulv4sf3 -+#define CODE_FOR_lsx_vfmul_d CODE_FOR_mulv2df3 -+#define CODE_FOR_lsx_vfdiv_s CODE_FOR_divv4sf3 -+#define CODE_FOR_lsx_vfdiv_d CODE_FOR_divv2df3 -+#define CODE_FOR_lsx_vfmax_s CODE_FOR_smaxv4sf3 -+#define CODE_FOR_lsx_vfmax_d CODE_FOR_smaxv2df3 -+#define CODE_FOR_lsx_vfmin_s CODE_FOR_sminv4sf3 -+#define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3 -+#define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2 -+#define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2 -+#define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3 -+#define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3 -+#define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3 -+#define CODE_FOR_lsx_vmax_d CODE_FOR_smaxv2di3 -+#define CODE_FOR_lsx_vmaxi_b CODE_FOR_smaxv16qi3 -+#define CODE_FOR_lsx_vmaxi_h CODE_FOR_smaxv8hi3 -+#define CODE_FOR_lsx_vmaxi_w CODE_FOR_smaxv4si3 -+#define CODE_FOR_lsx_vmaxi_d CODE_FOR_smaxv2di3 -+#define CODE_FOR_lsx_vmax_bu CODE_FOR_umaxv16qi3 -+#define CODE_FOR_lsx_vmax_hu CODE_FOR_umaxv8hi3 -+#define CODE_FOR_lsx_vmax_wu CODE_FOR_umaxv4si3 -+#define CODE_FOR_lsx_vmax_du CODE_FOR_umaxv2di3 -+#define CODE_FOR_lsx_vmaxi_bu CODE_FOR_umaxv16qi3 -+#define CODE_FOR_lsx_vmaxi_hu CODE_FOR_umaxv8hi3 -+#define CODE_FOR_lsx_vmaxi_wu CODE_FOR_umaxv4si3 -+#define CODE_FOR_lsx_vmaxi_du CODE_FOR_umaxv2di3 -+#define CODE_FOR_lsx_vmin_b CODE_FOR_sminv16qi3 -+#define CODE_FOR_lsx_vmin_h CODE_FOR_sminv8hi3 -+#define CODE_FOR_lsx_vmin_w CODE_FOR_sminv4si3 -+#define CODE_FOR_lsx_vmin_d CODE_FOR_sminv2di3 -+#define CODE_FOR_lsx_vmini_b CODE_FOR_sminv16qi3 -+#define CODE_FOR_lsx_vmini_h CODE_FOR_sminv8hi3 -+#define CODE_FOR_lsx_vmini_w CODE_FOR_sminv4si3 -+#define CODE_FOR_lsx_vmini_d CODE_FOR_sminv2di3 -+#define CODE_FOR_lsx_vmin_bu CODE_FOR_uminv16qi3 -+#define CODE_FOR_lsx_vmin_hu CODE_FOR_uminv8hi3 -+#define CODE_FOR_lsx_vmin_wu CODE_FOR_uminv4si3 -+#define CODE_FOR_lsx_vmin_du CODE_FOR_uminv2di3 -+#define CODE_FOR_lsx_vmini_bu CODE_FOR_uminv16qi3 -+#define CODE_FOR_lsx_vmini_hu CODE_FOR_uminv8hi3 -+#define CODE_FOR_lsx_vmini_wu CODE_FOR_uminv4si3 -+#define CODE_FOR_lsx_vmini_du CODE_FOR_uminv2di3 -+#define CODE_FOR_lsx_vmod_b CODE_FOR_modv16qi3 -+#define CODE_FOR_lsx_vmod_h CODE_FOR_modv8hi3 -+#define CODE_FOR_lsx_vmod_w CODE_FOR_modv4si3 -+#define CODE_FOR_lsx_vmod_d CODE_FOR_modv2di3 -+#define CODE_FOR_lsx_vmod_bu CODE_FOR_umodv16qi3 -+#define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3 -+#define CODE_FOR_lsx_vmod_wu CODE_FOR_umodv4si3 -+#define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3 -+#define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3 -+#define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3 -+#define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3 -+#define CODE_FOR_lsx_vmul_d CODE_FOR_mulv2di3 -+#define CODE_FOR_lsx_vclz_b CODE_FOR_clzv16qi2 -+#define CODE_FOR_lsx_vclz_h CODE_FOR_clzv8hi2 -+#define CODE_FOR_lsx_vclz_w 
CODE_FOR_clzv4si2 -+#define CODE_FOR_lsx_vclz_d CODE_FOR_clzv2di2 -+#define CODE_FOR_lsx_vnor_v CODE_FOR_lsx_nor_b -+#define CODE_FOR_lsx_vor_v CODE_FOR_iorv16qi3 -+#define CODE_FOR_lsx_vori_b CODE_FOR_iorv16qi3 -+#define CODE_FOR_lsx_vnori_b CODE_FOR_lsx_nor_b -+#define CODE_FOR_lsx_vpcnt_b CODE_FOR_popcountv16qi2 -+#define CODE_FOR_lsx_vpcnt_h CODE_FOR_popcountv8hi2 -+#define CODE_FOR_lsx_vpcnt_w CODE_FOR_popcountv4si2 -+#define CODE_FOR_lsx_vpcnt_d CODE_FOR_popcountv2di2 -+#define CODE_FOR_lsx_vxor_v CODE_FOR_xorv16qi3 -+#define CODE_FOR_lsx_vxori_b CODE_FOR_xorv16qi3 -+#define CODE_FOR_lsx_vsll_b CODE_FOR_vashlv16qi3 -+#define CODE_FOR_lsx_vsll_h CODE_FOR_vashlv8hi3 -+#define CODE_FOR_lsx_vsll_w CODE_FOR_vashlv4si3 -+#define CODE_FOR_lsx_vsll_d CODE_FOR_vashlv2di3 -+#define CODE_FOR_lsx_vslli_b CODE_FOR_vashlv16qi3 -+#define CODE_FOR_lsx_vslli_h CODE_FOR_vashlv8hi3 -+#define CODE_FOR_lsx_vslli_w CODE_FOR_vashlv4si3 -+#define CODE_FOR_lsx_vslli_d CODE_FOR_vashlv2di3 -+#define CODE_FOR_lsx_vsra_b CODE_FOR_vashrv16qi3 -+#define CODE_FOR_lsx_vsra_h CODE_FOR_vashrv8hi3 -+#define CODE_FOR_lsx_vsra_w CODE_FOR_vashrv4si3 -+#define CODE_FOR_lsx_vsra_d CODE_FOR_vashrv2di3 -+#define CODE_FOR_lsx_vsrai_b CODE_FOR_vashrv16qi3 -+#define CODE_FOR_lsx_vsrai_h CODE_FOR_vashrv8hi3 -+#define CODE_FOR_lsx_vsrai_w CODE_FOR_vashrv4si3 -+#define CODE_FOR_lsx_vsrai_d CODE_FOR_vashrv2di3 -+#define CODE_FOR_lsx_vsrl_b CODE_FOR_vlshrv16qi3 -+#define CODE_FOR_lsx_vsrl_h CODE_FOR_vlshrv8hi3 -+#define CODE_FOR_lsx_vsrl_w CODE_FOR_vlshrv4si3 -+#define CODE_FOR_lsx_vsrl_d CODE_FOR_vlshrv2di3 -+#define CODE_FOR_lsx_vsrli_b CODE_FOR_vlshrv16qi3 -+#define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3 -+#define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3 -+#define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3 -+#define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3 -+#define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3 -+#define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3 -+#define CODE_FOR_lsx_vsub_d CODE_FOR_subv2di3 -+#define CODE_FOR_lsx_vsubi_bu CODE_FOR_subv16qi3 -+#define CODE_FOR_lsx_vsubi_hu CODE_FOR_subv8hi3 -+#define CODE_FOR_lsx_vsubi_wu CODE_FOR_subv4si3 -+#define CODE_FOR_lsx_vsubi_du CODE_FOR_subv2di3 -+ -+#define CODE_FOR_lsx_vpackod_d CODE_FOR_lsx_vilvh_d -+#define CODE_FOR_lsx_vpackev_d CODE_FOR_lsx_vilvl_d -+#define CODE_FOR_lsx_vpickod_d CODE_FOR_lsx_vilvh_d -+#define CODE_FOR_lsx_vpickev_d CODE_FOR_lsx_vilvl_d -+ -+#define CODE_FOR_lsx_vrepli_b CODE_FOR_lsx_vrepliv16qi -+#define CODE_FOR_lsx_vrepli_h CODE_FOR_lsx_vrepliv8hi -+#define CODE_FOR_lsx_vrepli_w CODE_FOR_lsx_vrepliv4si -+#define CODE_FOR_lsx_vrepli_d CODE_FOR_lsx_vrepliv2di -+#define CODE_FOR_lsx_vsat_b CODE_FOR_lsx_vsat_s_b -+#define CODE_FOR_lsx_vsat_h CODE_FOR_lsx_vsat_s_h -+#define CODE_FOR_lsx_vsat_w CODE_FOR_lsx_vsat_s_w -+#define CODE_FOR_lsx_vsat_d CODE_FOR_lsx_vsat_s_d -+#define CODE_FOR_lsx_vsat_bu CODE_FOR_lsx_vsat_u_bu -+#define CODE_FOR_lsx_vsat_hu CODE_FOR_lsx_vsat_u_hu -+#define CODE_FOR_lsx_vsat_wu CODE_FOR_lsx_vsat_u_wu -+#define CODE_FOR_lsx_vsat_du CODE_FOR_lsx_vsat_u_du -+#define CODE_FOR_lsx_vavg_b CODE_FOR_lsx_vavg_s_b -+#define CODE_FOR_lsx_vavg_h CODE_FOR_lsx_vavg_s_h -+#define CODE_FOR_lsx_vavg_w CODE_FOR_lsx_vavg_s_w -+#define CODE_FOR_lsx_vavg_d CODE_FOR_lsx_vavg_s_d -+#define CODE_FOR_lsx_vavg_bu CODE_FOR_lsx_vavg_u_bu -+#define CODE_FOR_lsx_vavg_hu CODE_FOR_lsx_vavg_u_hu -+#define CODE_FOR_lsx_vavg_wu CODE_FOR_lsx_vavg_u_wu -+#define CODE_FOR_lsx_vavg_du CODE_FOR_lsx_vavg_u_du -+#define CODE_FOR_lsx_vavgr_b CODE_FOR_lsx_vavgr_s_b -+#define 
CODE_FOR_lsx_vavgr_h CODE_FOR_lsx_vavgr_s_h -+#define CODE_FOR_lsx_vavgr_w CODE_FOR_lsx_vavgr_s_w -+#define CODE_FOR_lsx_vavgr_d CODE_FOR_lsx_vavgr_s_d -+#define CODE_FOR_lsx_vavgr_bu CODE_FOR_lsx_vavgr_u_bu -+#define CODE_FOR_lsx_vavgr_hu CODE_FOR_lsx_vavgr_u_hu -+#define CODE_FOR_lsx_vavgr_wu CODE_FOR_lsx_vavgr_u_wu -+#define CODE_FOR_lsx_vavgr_du CODE_FOR_lsx_vavgr_u_du -+#define CODE_FOR_lsx_vssub_b CODE_FOR_lsx_vssub_s_b -+#define CODE_FOR_lsx_vssub_h CODE_FOR_lsx_vssub_s_h -+#define CODE_FOR_lsx_vssub_w CODE_FOR_lsx_vssub_s_w -+#define CODE_FOR_lsx_vssub_d CODE_FOR_lsx_vssub_s_d -+#define CODE_FOR_lsx_vssub_bu CODE_FOR_lsx_vssub_u_bu -+#define CODE_FOR_lsx_vssub_hu CODE_FOR_lsx_vssub_u_hu -+#define CODE_FOR_lsx_vssub_wu CODE_FOR_lsx_vssub_u_wu -+#define CODE_FOR_lsx_vssub_du CODE_FOR_lsx_vssub_u_du -+#define CODE_FOR_lsx_vabsd_b CODE_FOR_lsx_vabsd_s_b -+#define CODE_FOR_lsx_vabsd_h CODE_FOR_lsx_vabsd_s_h -+#define CODE_FOR_lsx_vabsd_w CODE_FOR_lsx_vabsd_s_w -+#define CODE_FOR_lsx_vabsd_d CODE_FOR_lsx_vabsd_s_d -+#define CODE_FOR_lsx_vabsd_bu CODE_FOR_lsx_vabsd_u_bu -+#define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu -+#define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu -+#define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du -+#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s -+#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d -+#define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s -+#define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d -+#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3 -+#define CODE_FOR_lsx_vorn_v CODE_FOR_vornv16qi3 -+#define CODE_FOR_lsx_vneg_b CODE_FOR_vnegv16qi2 -+#define CODE_FOR_lsx_vneg_h CODE_FOR_vnegv8hi2 -+#define CODE_FOR_lsx_vneg_w CODE_FOR_vnegv4si2 -+#define CODE_FOR_lsx_vneg_d CODE_FOR_vnegv2di2 -+#define CODE_FOR_lsx_vshuf4i_d CODE_FOR_lsx_vshuf4i_d -+#define CODE_FOR_lsx_vbsrl_v CODE_FOR_lsx_vbsrl_b -+#define CODE_FOR_lsx_vbsll_v CODE_FOR_lsx_vbsll_b -+#define CODE_FOR_lsx_vfmadd_s CODE_FOR_vfmaddv4sf4 -+#define CODE_FOR_lsx_vfmadd_d CODE_FOR_vfmaddv2df4 -+#define CODE_FOR_lsx_vfmsub_s CODE_FOR_vfmsubv4sf4 -+#define CODE_FOR_lsx_vfmsub_d CODE_FOR_vfmsubv2df4 -+#define CODE_FOR_lsx_vfnmadd_s CODE_FOR_vfnmaddv4sf4_nmadd4 -+#define CODE_FOR_lsx_vfnmadd_d CODE_FOR_vfnmaddv2df4_nmadd4 -+#define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4 -+#define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4 -+ -+#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b -+#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h -+#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w -+#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d -+#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu -+#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu -+#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu -+#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du -+#define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b -+#define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h -+#define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w -+#define CODE_FOR_lsx_vsllwil_hu_bu CODE_FOR_lsx_vsllwil_u_hu_bu -+#define CODE_FOR_lsx_vsllwil_wu_hu CODE_FOR_lsx_vsllwil_u_wu_hu -+#define CODE_FOR_lsx_vsllwil_du_wu CODE_FOR_lsx_vsllwil_u_du_wu -+#define CODE_FOR_lsx_vssran_b_h CODE_FOR_lsx_vssran_s_b_h -+#define CODE_FOR_lsx_vssran_h_w CODE_FOR_lsx_vssran_s_h_w -+#define CODE_FOR_lsx_vssran_w_d CODE_FOR_lsx_vssran_s_w_d -+#define CODE_FOR_lsx_vssran_bu_h CODE_FOR_lsx_vssran_u_bu_h -+#define CODE_FOR_lsx_vssran_hu_w CODE_FOR_lsx_vssran_u_hu_w -+#define 
CODE_FOR_lsx_vssran_wu_d CODE_FOR_lsx_vssran_u_wu_d -+#define CODE_FOR_lsx_vssrarn_b_h CODE_FOR_lsx_vssrarn_s_b_h -+#define CODE_FOR_lsx_vssrarn_h_w CODE_FOR_lsx_vssrarn_s_h_w -+#define CODE_FOR_lsx_vssrarn_w_d CODE_FOR_lsx_vssrarn_s_w_d -+#define CODE_FOR_lsx_vssrarn_bu_h CODE_FOR_lsx_vssrarn_u_bu_h -+#define CODE_FOR_lsx_vssrarn_hu_w CODE_FOR_lsx_vssrarn_u_hu_w -+#define CODE_FOR_lsx_vssrarn_wu_d CODE_FOR_lsx_vssrarn_u_wu_d -+#define CODE_FOR_lsx_vssrln_bu_h CODE_FOR_lsx_vssrln_u_bu_h -+#define CODE_FOR_lsx_vssrln_hu_w CODE_FOR_lsx_vssrln_u_hu_w -+#define CODE_FOR_lsx_vssrln_wu_d CODE_FOR_lsx_vssrln_u_wu_d -+#define CODE_FOR_lsx_vssrlrn_bu_h CODE_FOR_lsx_vssrlrn_u_bu_h -+#define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w -+#define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d -+ -+/* LoongArch ASX define CODE_FOR_lasx_mxxx */ -+#define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3 -+#define CODE_FOR_lasx_xvsadd_h CODE_FOR_ssaddv16hi3 -+#define CODE_FOR_lasx_xvsadd_w CODE_FOR_ssaddv8si3 -+#define CODE_FOR_lasx_xvsadd_d CODE_FOR_ssaddv4di3 -+#define CODE_FOR_lasx_xvsadd_bu CODE_FOR_usaddv32qi3 -+#define CODE_FOR_lasx_xvsadd_hu CODE_FOR_usaddv16hi3 -+#define CODE_FOR_lasx_xvsadd_wu CODE_FOR_usaddv8si3 -+#define CODE_FOR_lasx_xvsadd_du CODE_FOR_usaddv4di3 -+#define CODE_FOR_lasx_xvadd_b CODE_FOR_addv32qi3 -+#define CODE_FOR_lasx_xvadd_h CODE_FOR_addv16hi3 -+#define CODE_FOR_lasx_xvadd_w CODE_FOR_addv8si3 -+#define CODE_FOR_lasx_xvadd_d CODE_FOR_addv4di3 -+#define CODE_FOR_lasx_xvaddi_bu CODE_FOR_addv32qi3 -+#define CODE_FOR_lasx_xvaddi_hu CODE_FOR_addv16hi3 -+#define CODE_FOR_lasx_xvaddi_wu CODE_FOR_addv8si3 -+#define CODE_FOR_lasx_xvaddi_du CODE_FOR_addv4di3 -+#define CODE_FOR_lasx_xvand_v CODE_FOR_andv32qi3 -+#define CODE_FOR_lasx_xvandi_b CODE_FOR_andv32qi3 -+#define CODE_FOR_lasx_xvbitsel_v CODE_FOR_lasx_xvbitsel_b -+#define CODE_FOR_lasx_xvseqi_b CODE_FOR_lasx_xvseq_b -+#define CODE_FOR_lasx_xvseqi_h CODE_FOR_lasx_xvseq_h -+#define CODE_FOR_lasx_xvseqi_w CODE_FOR_lasx_xvseq_w -+#define CODE_FOR_lasx_xvseqi_d CODE_FOR_lasx_xvseq_d -+#define CODE_FOR_lasx_xvslti_b CODE_FOR_lasx_xvslt_b -+#define CODE_FOR_lasx_xvslti_h CODE_FOR_lasx_xvslt_h -+#define CODE_FOR_lasx_xvslti_w CODE_FOR_lasx_xvslt_w -+#define CODE_FOR_lasx_xvslti_d CODE_FOR_lasx_xvslt_d -+#define CODE_FOR_lasx_xvslti_bu CODE_FOR_lasx_xvslt_bu -+#define CODE_FOR_lasx_xvslti_hu CODE_FOR_lasx_xvslt_hu -+#define CODE_FOR_lasx_xvslti_wu CODE_FOR_lasx_xvslt_wu -+#define CODE_FOR_lasx_xvslti_du CODE_FOR_lasx_xvslt_du -+#define CODE_FOR_lasx_xvslei_b CODE_FOR_lasx_xvsle_b -+#define CODE_FOR_lasx_xvslei_h CODE_FOR_lasx_xvsle_h -+#define CODE_FOR_lasx_xvslei_w CODE_FOR_lasx_xvsle_w -+#define CODE_FOR_lasx_xvslei_d CODE_FOR_lasx_xvsle_d -+#define CODE_FOR_lasx_xvslei_bu CODE_FOR_lasx_xvsle_bu -+#define CODE_FOR_lasx_xvslei_hu CODE_FOR_lasx_xvsle_hu -+#define CODE_FOR_lasx_xvslei_wu CODE_FOR_lasx_xvsle_wu -+#define CODE_FOR_lasx_xvslei_du CODE_FOR_lasx_xvsle_du -+#define CODE_FOR_lasx_xvdiv_b CODE_FOR_divv32qi3 -+#define CODE_FOR_lasx_xvdiv_h CODE_FOR_divv16hi3 -+#define CODE_FOR_lasx_xvdiv_w CODE_FOR_divv8si3 -+#define CODE_FOR_lasx_xvdiv_d CODE_FOR_divv4di3 -+#define CODE_FOR_lasx_xvdiv_bu CODE_FOR_udivv32qi3 -+#define CODE_FOR_lasx_xvdiv_hu CODE_FOR_udivv16hi3 -+#define CODE_FOR_lasx_xvdiv_wu CODE_FOR_udivv8si3 -+#define CODE_FOR_lasx_xvdiv_du CODE_FOR_udivv4di3 -+#define CODE_FOR_lasx_xvfadd_s CODE_FOR_addv8sf3 -+#define CODE_FOR_lasx_xvfadd_d CODE_FOR_addv4df3 -+#define CODE_FOR_lasx_xvftintrz_w_s 
CODE_FOR_fix_truncv8sfv8si2 -+#define CODE_FOR_lasx_xvftintrz_l_d CODE_FOR_fix_truncv4dfv4di2 -+#define CODE_FOR_lasx_xvftintrz_wu_s CODE_FOR_fixuns_truncv8sfv8si2 -+#define CODE_FOR_lasx_xvftintrz_lu_d CODE_FOR_fixuns_truncv4dfv4di2 -+#define CODE_FOR_lasx_xvffint_s_w CODE_FOR_floatv8siv8sf2 -+#define CODE_FOR_lasx_xvffint_d_l CODE_FOR_floatv4div4df2 -+#define CODE_FOR_lasx_xvffint_s_wu CODE_FOR_floatunsv8siv8sf2 -+#define CODE_FOR_lasx_xvffint_d_lu CODE_FOR_floatunsv4div4df2 -+#define CODE_FOR_lasx_xvfsub_s CODE_FOR_subv8sf3 -+#define CODE_FOR_lasx_xvfsub_d CODE_FOR_subv4df3 -+#define CODE_FOR_lasx_xvfmul_s CODE_FOR_mulv8sf3 -+#define CODE_FOR_lasx_xvfmul_d CODE_FOR_mulv4df3 -+#define CODE_FOR_lasx_xvfdiv_s CODE_FOR_divv8sf3 -+#define CODE_FOR_lasx_xvfdiv_d CODE_FOR_divv4df3 -+#define CODE_FOR_lasx_xvfmax_s CODE_FOR_smaxv8sf3 -+#define CODE_FOR_lasx_xvfmax_d CODE_FOR_smaxv4df3 -+#define CODE_FOR_lasx_xvfmin_s CODE_FOR_sminv8sf3 -+#define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3 -+#define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2 -+#define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2 -+#define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3 -+#define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3 -+#define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3 -+#define CODE_FOR_lasx_xvmax_d CODE_FOR_smaxv4di3 -+#define CODE_FOR_lasx_xvmaxi_b CODE_FOR_smaxv32qi3 -+#define CODE_FOR_lasx_xvmaxi_h CODE_FOR_smaxv16hi3 -+#define CODE_FOR_lasx_xvmaxi_w CODE_FOR_smaxv8si3 -+#define CODE_FOR_lasx_xvmaxi_d CODE_FOR_smaxv4di3 -+#define CODE_FOR_lasx_xvmax_bu CODE_FOR_umaxv32qi3 -+#define CODE_FOR_lasx_xvmax_hu CODE_FOR_umaxv16hi3 -+#define CODE_FOR_lasx_xvmax_wu CODE_FOR_umaxv8si3 -+#define CODE_FOR_lasx_xvmax_du CODE_FOR_umaxv4di3 -+#define CODE_FOR_lasx_xvmaxi_bu CODE_FOR_umaxv32qi3 -+#define CODE_FOR_lasx_xvmaxi_hu CODE_FOR_umaxv16hi3 -+#define CODE_FOR_lasx_xvmaxi_wu CODE_FOR_umaxv8si3 -+#define CODE_FOR_lasx_xvmaxi_du CODE_FOR_umaxv4di3 -+#define CODE_FOR_lasx_xvmin_b CODE_FOR_sminv32qi3 -+#define CODE_FOR_lasx_xvmin_h CODE_FOR_sminv16hi3 -+#define CODE_FOR_lasx_xvmin_w CODE_FOR_sminv8si3 -+#define CODE_FOR_lasx_xvmin_d CODE_FOR_sminv4di3 -+#define CODE_FOR_lasx_xvmini_b CODE_FOR_sminv32qi3 -+#define CODE_FOR_lasx_xvmini_h CODE_FOR_sminv16hi3 -+#define CODE_FOR_lasx_xvmini_w CODE_FOR_sminv8si3 -+#define CODE_FOR_lasx_xvmini_d CODE_FOR_sminv4di3 -+#define CODE_FOR_lasx_xvmin_bu CODE_FOR_uminv32qi3 -+#define CODE_FOR_lasx_xvmin_hu CODE_FOR_uminv16hi3 -+#define CODE_FOR_lasx_xvmin_wu CODE_FOR_uminv8si3 -+#define CODE_FOR_lasx_xvmin_du CODE_FOR_uminv4di3 -+#define CODE_FOR_lasx_xvmini_bu CODE_FOR_uminv32qi3 -+#define CODE_FOR_lasx_xvmini_hu CODE_FOR_uminv16hi3 -+#define CODE_FOR_lasx_xvmini_wu CODE_FOR_uminv8si3 -+#define CODE_FOR_lasx_xvmini_du CODE_FOR_uminv4di3 -+#define CODE_FOR_lasx_xvmod_b CODE_FOR_modv32qi3 -+#define CODE_FOR_lasx_xvmod_h CODE_FOR_modv16hi3 -+#define CODE_FOR_lasx_xvmod_w CODE_FOR_modv8si3 -+#define CODE_FOR_lasx_xvmod_d CODE_FOR_modv4di3 -+#define CODE_FOR_lasx_xvmod_bu CODE_FOR_umodv32qi3 -+#define CODE_FOR_lasx_xvmod_hu CODE_FOR_umodv16hi3 -+#define CODE_FOR_lasx_xvmod_wu CODE_FOR_umodv8si3 -+#define CODE_FOR_lasx_xvmod_du CODE_FOR_umodv4di3 -+#define CODE_FOR_lasx_xvmul_b CODE_FOR_mulv32qi3 -+#define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3 -+#define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3 -+#define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3 -+#define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2 -+#define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2 -+#define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2 
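The long runs of #define aliases above and below exist because the builtin tables later in this hunk build insn-code names by token pasting: an entry such as LSX_BUILTIN (vadd_b, ...) references CODE_FOR_lsx_vadd_b, but the machine description implements the operation under a generic pattern name (addv16qi3), so an alias bridges the two. A minimal compilable sketch of that mechanism follows; the enum value and the LSX_SKETCH_CODE macro are hypothetical stand-ins for GCC's generated insn codes, not part of this patch.

#include <stdio.h>

/* Hypothetical stand-in for the insn_code enumerators that GCC generates
   from its .md files; only here to make the sketch self-contained.  */
enum insn_code { CODE_FOR_addv16qi3 = 42 };

/* Same aliasing trick as the lists above: point the vendor-named pattern
   at the generic one so token pasting still resolves to a real code.  */
#define CODE_FOR_lsx_vadd_b CODE_FOR_addv16qi3

/* Simplified shape of the token pasting done by the LSX_BUILTIN macro.  */
#define LSX_SKETCH_CODE(INSN) CODE_FOR_lsx_ ## INSN

int
main (void)
{
  /* LSX_SKETCH_CODE (vadd_b) -> CODE_FOR_lsx_vadd_b -> CODE_FOR_addv16qi3.  */
  printf ("%d\n", (int) LSX_SKETCH_CODE (vadd_b));
  return 0;
}

The design choice keeps the builtin tables purely name-driven: every intrinsic row can paste CODE_FOR_lsx_/CODE_FOR_lasx_ onto its name without caring whether the pattern is vendor-specific or generic.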
-+#define CODE_FOR_lasx_xvclz_d CODE_FOR_clzv4di2 -+#define CODE_FOR_lasx_xvnor_v CODE_FOR_lasx_xvnor_b -+#define CODE_FOR_lasx_xvor_v CODE_FOR_iorv32qi3 -+#define CODE_FOR_lasx_xvori_b CODE_FOR_iorv32qi3 -+#define CODE_FOR_lasx_xvnori_b CODE_FOR_lasx_xvnor_b -+#define CODE_FOR_lasx_xvpcnt_b CODE_FOR_popcountv32qi2 -+#define CODE_FOR_lasx_xvpcnt_h CODE_FOR_popcountv16hi2 -+#define CODE_FOR_lasx_xvpcnt_w CODE_FOR_popcountv8si2 -+#define CODE_FOR_lasx_xvpcnt_d CODE_FOR_popcountv4di2 -+#define CODE_FOR_lasx_xvxor_v CODE_FOR_xorv32qi3 -+#define CODE_FOR_lasx_xvxori_b CODE_FOR_xorv32qi3 -+#define CODE_FOR_lasx_xvsll_b CODE_FOR_vashlv32qi3 -+#define CODE_FOR_lasx_xvsll_h CODE_FOR_vashlv16hi3 -+#define CODE_FOR_lasx_xvsll_w CODE_FOR_vashlv8si3 -+#define CODE_FOR_lasx_xvsll_d CODE_FOR_vashlv4di3 -+#define CODE_FOR_lasx_xvslli_b CODE_FOR_vashlv32qi3 -+#define CODE_FOR_lasx_xvslli_h CODE_FOR_vashlv16hi3 -+#define CODE_FOR_lasx_xvslli_w CODE_FOR_vashlv8si3 -+#define CODE_FOR_lasx_xvslli_d CODE_FOR_vashlv4di3 -+#define CODE_FOR_lasx_xvsra_b CODE_FOR_vashrv32qi3 -+#define CODE_FOR_lasx_xvsra_h CODE_FOR_vashrv16hi3 -+#define CODE_FOR_lasx_xvsra_w CODE_FOR_vashrv8si3 -+#define CODE_FOR_lasx_xvsra_d CODE_FOR_vashrv4di3 -+#define CODE_FOR_lasx_xvsrai_b CODE_FOR_vashrv32qi3 -+#define CODE_FOR_lasx_xvsrai_h CODE_FOR_vashrv16hi3 -+#define CODE_FOR_lasx_xvsrai_w CODE_FOR_vashrv8si3 -+#define CODE_FOR_lasx_xvsrai_d CODE_FOR_vashrv4di3 -+#define CODE_FOR_lasx_xvsrl_b CODE_FOR_vlshrv32qi3 -+#define CODE_FOR_lasx_xvsrl_h CODE_FOR_vlshrv16hi3 -+#define CODE_FOR_lasx_xvsrl_w CODE_FOR_vlshrv8si3 -+#define CODE_FOR_lasx_xvsrl_d CODE_FOR_vlshrv4di3 -+#define CODE_FOR_lasx_xvsrli_b CODE_FOR_vlshrv32qi3 -+#define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3 -+#define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3 -+#define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3 -+#define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3 -+#define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3 -+#define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3 -+#define CODE_FOR_lasx_xvsub_d CODE_FOR_subv4di3 -+#define CODE_FOR_lasx_xvsubi_bu CODE_FOR_subv32qi3 -+#define CODE_FOR_lasx_xvsubi_hu CODE_FOR_subv16hi3 -+#define CODE_FOR_lasx_xvsubi_wu CODE_FOR_subv8si3 -+#define CODE_FOR_lasx_xvsubi_du CODE_FOR_subv4di3 -+#define CODE_FOR_lasx_xvpackod_d CODE_FOR_lasx_xvilvh_d -+#define CODE_FOR_lasx_xvpackev_d CODE_FOR_lasx_xvilvl_d -+#define CODE_FOR_lasx_xvpickod_d CODE_FOR_lasx_xvilvh_d -+#define CODE_FOR_lasx_xvpickev_d CODE_FOR_lasx_xvilvl_d -+#define CODE_FOR_lasx_xvrepli_b CODE_FOR_lasx_xvrepliv32qi -+#define CODE_FOR_lasx_xvrepli_h CODE_FOR_lasx_xvrepliv16hi -+#define CODE_FOR_lasx_xvrepli_w CODE_FOR_lasx_xvrepliv8si -+#define CODE_FOR_lasx_xvrepli_d CODE_FOR_lasx_xvrepliv4di -+ -+#define CODE_FOR_lasx_xvandn_v CODE_FOR_xvandnv32qi3 -+#define CODE_FOR_lasx_xvorn_v CODE_FOR_xvornv32qi3 -+#define CODE_FOR_lasx_xvneg_b CODE_FOR_negv32qi2 -+#define CODE_FOR_lasx_xvneg_h CODE_FOR_negv16hi2 -+#define CODE_FOR_lasx_xvneg_w CODE_FOR_negv8si2 -+#define CODE_FOR_lasx_xvneg_d CODE_FOR_negv4di2 -+#define CODE_FOR_lasx_xvbsrl_v CODE_FOR_lasx_xvbsrl_b -+#define CODE_FOR_lasx_xvbsll_v CODE_FOR_lasx_xvbsll_b -+#define CODE_FOR_lasx_xvfmadd_s CODE_FOR_xvfmaddv8sf4 -+#define CODE_FOR_lasx_xvfmadd_d CODE_FOR_xvfmaddv4df4 -+#define CODE_FOR_lasx_xvfmsub_s CODE_FOR_xvfmsubv8sf4 -+#define CODE_FOR_lasx_xvfmsub_d CODE_FOR_xvfmsubv4df4 -+#define CODE_FOR_lasx_xvfnmadd_s CODE_FOR_xvfnmaddv8sf4_nmadd4 -+#define CODE_FOR_lasx_xvfnmadd_d CODE_FOR_xvfnmaddv4df4_nmadd4 -+#define 
CODE_FOR_lasx_xvfnmsub_s CODE_FOR_xvfnmsubv8sf4_nmsub4 -+#define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4 -+ -+#define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi -+#define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b -+#define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b -+ -+#define CODE_FOR_lasx_xvssub_b CODE_FOR_lasx_xvssub_s_b -+#define CODE_FOR_lasx_xvssub_h CODE_FOR_lasx_xvssub_s_h -+#define CODE_FOR_lasx_xvssub_w CODE_FOR_lasx_xvssub_s_w -+#define CODE_FOR_lasx_xvssub_d CODE_FOR_lasx_xvssub_s_d -+#define CODE_FOR_lasx_xvssub_bu CODE_FOR_lasx_xvssub_u_bu -+#define CODE_FOR_lasx_xvssub_hu CODE_FOR_lasx_xvssub_u_hu -+#define CODE_FOR_lasx_xvssub_wu CODE_FOR_lasx_xvssub_u_wu -+#define CODE_FOR_lasx_xvssub_du CODE_FOR_lasx_xvssub_u_du -+#define CODE_FOR_lasx_xvabsd_b CODE_FOR_lasx_xvabsd_s_b -+#define CODE_FOR_lasx_xvabsd_h CODE_FOR_lasx_xvabsd_s_h -+#define CODE_FOR_lasx_xvabsd_w CODE_FOR_lasx_xvabsd_s_w -+#define CODE_FOR_lasx_xvabsd_d CODE_FOR_lasx_xvabsd_s_d -+#define CODE_FOR_lasx_xvabsd_bu CODE_FOR_lasx_xvabsd_u_bu -+#define CODE_FOR_lasx_xvabsd_hu CODE_FOR_lasx_xvabsd_u_hu -+#define CODE_FOR_lasx_xvabsd_wu CODE_FOR_lasx_xvabsd_u_wu -+#define CODE_FOR_lasx_xvabsd_du CODE_FOR_lasx_xvabsd_u_du -+#define CODE_FOR_lasx_xvavg_b CODE_FOR_lasx_xvavg_s_b -+#define CODE_FOR_lasx_xvavg_h CODE_FOR_lasx_xvavg_s_h -+#define CODE_FOR_lasx_xvavg_w CODE_FOR_lasx_xvavg_s_w -+#define CODE_FOR_lasx_xvavg_d CODE_FOR_lasx_xvavg_s_d -+#define CODE_FOR_lasx_xvavg_bu CODE_FOR_lasx_xvavg_u_bu -+#define CODE_FOR_lasx_xvavg_hu CODE_FOR_lasx_xvavg_u_hu -+#define CODE_FOR_lasx_xvavg_wu CODE_FOR_lasx_xvavg_u_wu -+#define CODE_FOR_lasx_xvavg_du CODE_FOR_lasx_xvavg_u_du -+#define CODE_FOR_lasx_xvavgr_b CODE_FOR_lasx_xvavgr_s_b -+#define CODE_FOR_lasx_xvavgr_h CODE_FOR_lasx_xvavgr_s_h -+#define CODE_FOR_lasx_xvavgr_w CODE_FOR_lasx_xvavgr_s_w -+#define CODE_FOR_lasx_xvavgr_d CODE_FOR_lasx_xvavgr_s_d -+#define CODE_FOR_lasx_xvavgr_bu CODE_FOR_lasx_xvavgr_u_bu -+#define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu -+#define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu -+#define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du -+#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b -+#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h -+#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w -+#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d -+#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu -+#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu -+#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu -+#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du -+#define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h -+#define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w -+#define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d -+#define CODE_FOR_lasx_xvssran_bu_h CODE_FOR_lasx_xvssran_u_bu_h -+#define CODE_FOR_lasx_xvssran_hu_w CODE_FOR_lasx_xvssran_u_hu_w -+#define CODE_FOR_lasx_xvssran_wu_d CODE_FOR_lasx_xvssran_u_wu_d -+#define CODE_FOR_lasx_xvssrarn_b_h CODE_FOR_lasx_xvssrarn_s_b_h -+#define CODE_FOR_lasx_xvssrarn_h_w CODE_FOR_lasx_xvssrarn_s_h_w -+#define CODE_FOR_lasx_xvssrarn_w_d CODE_FOR_lasx_xvssrarn_s_w_d -+#define CODE_FOR_lasx_xvssrarn_bu_h CODE_FOR_lasx_xvssrarn_u_bu_h -+#define CODE_FOR_lasx_xvssrarn_hu_w CODE_FOR_lasx_xvssrarn_u_hu_w -+#define CODE_FOR_lasx_xvssrarn_wu_d CODE_FOR_lasx_xvssrarn_u_wu_d -+#define CODE_FOR_lasx_xvssrln_bu_h CODE_FOR_lasx_xvssrln_u_bu_h -+#define CODE_FOR_lasx_xvssrln_hu_w CODE_FOR_lasx_xvssrln_u_hu_w 
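Each LSX_BUILTIN (INSN, FTYPE) row in the table that follows registers a user-callable __builtin_lsx_INSN with the function type named by FTYPE. A usage sketch, assuming a compiler carrying this patch, a loongarch64 target, and -mlsx on the command line; the v16i8 typedef is the usual GNU vector extension, not something the patch defines, and the cross-compiler name is only a typical example:

/* Build with a patched compiler, e.g. loongarch64-linux-gnu-gcc -mlsx -O2 -S t.c  */
typedef signed char v16i8 __attribute__ ((vector_size (16)));

v16i8
add_bytes (v16i8 a, v16i8 b)
{
  /* Registered by LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI) in the
     table below; via the CODE_FOR_lsx_vadd_b alias it expands through the
     generic addv16qi3 pattern, normally a single vadd.b instruction.  */
  return __builtin_lsx_vadd_b (a, b);
}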
-+#define CODE_FOR_lasx_xvssrln_wu_d CODE_FOR_lasx_xvssrln_u_wu_d
-+#define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h
-+#define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w
-+#define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d
-+#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s
-+#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d
-+#define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s
-+#define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d
-+#define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b
-+#define CODE_FOR_lasx_xvsllwil_w_h CODE_FOR_lasx_xvsllwil_s_w_h
-+#define CODE_FOR_lasx_xvsllwil_d_w CODE_FOR_lasx_xvsllwil_s_d_w
-+#define CODE_FOR_lasx_xvsllwil_hu_bu CODE_FOR_lasx_xvsllwil_u_hu_bu
-+#define CODE_FOR_lasx_xvsllwil_wu_hu CODE_FOR_lasx_xvsllwil_u_wu_hu
-+#define CODE_FOR_lasx_xvsllwil_du_wu CODE_FOR_lasx_xvsllwil_u_du_wu
-+#define CODE_FOR_lasx_xvsat_b CODE_FOR_lasx_xvsat_s_b
-+#define CODE_FOR_lasx_xvsat_h CODE_FOR_lasx_xvsat_s_h
-+#define CODE_FOR_lasx_xvsat_w CODE_FOR_lasx_xvsat_s_w
-+#define CODE_FOR_lasx_xvsat_d CODE_FOR_lasx_xvsat_s_d
-+#define CODE_FOR_lasx_xvsat_bu CODE_FOR_lasx_xvsat_u_bu
-+#define CODE_FOR_lasx_xvsat_hu CODE_FOR_lasx_xvsat_u_hu
-+#define CODE_FOR_lasx_xvsat_wu CODE_FOR_lasx_xvsat_u_wu
-+#define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du
-+
-+static const struct loongarch_builtin_description loongarch_builtins[] = {
-+#define LARCH_MOVFCSR2GR 0
-+  DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float),
-+#define LARCH_MOVGR2FCSR 1
-+  DIRECT_NO_TARGET_BUILTIN (movgr2fcsr, LARCH_VOID_FTYPE_UQI_USI, hard_float),
-+
-+  DIRECT_NO_TARGET_BUILTIN (cacop, LARCH_VOID_FTYPE_USI_USI_SI, default),
-+  DIRECT_NO_TARGET_BUILTIN (dcacop, LARCH_VOID_FTYPE_USI_UDI_SI, default),
-+  DIRECT_NO_TARGET_BUILTIN (dbar, LARCH_VOID_FTYPE_USI, default),
-+  DIRECT_NO_TARGET_BUILTIN (ibar, LARCH_VOID_FTYPE_USI, default),
-+
-+  DIRECT_BUILTIN (fmax_sf, LARCH_SF_FTYPE_SF_SF, hard_float),
-+  DIRECT_BUILTIN (fmax_df, LARCH_DF_FTYPE_DF_DF, hard_float),
-+  DIRECT_BUILTIN (fmin_sf, LARCH_SF_FTYPE_SF_SF, hard_float),
-+  DIRECT_BUILTIN (fmin_df, LARCH_DF_FTYPE_DF_DF, hard_float),
-+  DIRECT_BUILTIN (fmaxa_sf, LARCH_SF_FTYPE_SF_SF, hard_float),
-+  DIRECT_BUILTIN (fmaxa_df, LARCH_DF_FTYPE_DF_DF, hard_float),
-+  DIRECT_BUILTIN (fmina_sf, LARCH_SF_FTYPE_SF_SF, hard_float),
-+  DIRECT_BUILTIN (fmina_df, LARCH_DF_FTYPE_DF_DF, hard_float),
-+  DIRECT_BUILTIN (fclass_s, LARCH_SF_FTYPE_SF, hard_float),
-+  DIRECT_BUILTIN (fclass_d, LARCH_DF_FTYPE_DF, hard_float),
-+  DIRECT_BUILTIN (frint_s, LARCH_SF_FTYPE_SF, hard_float),
-+  DIRECT_BUILTIN (frint_d, LARCH_DF_FTYPE_DF, hard_float),
-+  DIRECT_BUILTIN (bytepick_w, LARCH_SI_FTYPE_SI_SI_QI, default),
-+  DIRECT_BUILTIN (bytepick_d, LARCH_DI_FTYPE_DI_DI_QI, default),
-+  DIRECT_BUILTIN (bitrev_4b, LARCH_SI_FTYPE_SI, default),
-+  DIRECT_BUILTIN (bitrev_8b, LARCH_DI_FTYPE_DI, default),
-+  DIRECT_BUILTIN (cpucfg, LARCH_USI_FTYPE_USI, default),
-+  DIRECT_BUILTIN (asrtle_d, LARCH_VOID_FTYPE_DI_DI, default),
-+  DIRECT_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default),
-+  DIRECT_BUILTIN (dlddir, LARCH_DI_FTYPE_DI_UQI, default),
-+  DIRECT_BUILTIN (lddir, LARCH_SI_FTYPE_SI_UQI, default),
-+  DIRECT_NO_TARGET_BUILTIN (dldpte, LARCH_VOID_FTYPE_DI_UQI, default),
-+  DIRECT_NO_TARGET_BUILTIN (ldpte, LARCH_VOID_FTYPE_SI_UQI, default),
-+
-+  /* CRC Intrinsics */
-+
-+  DIRECT_BUILTIN (crc_w_b_w, LARCH_SI_FTYPE_QI_SI, default),
-+  DIRECT_BUILTIN (crc_w_h_w, 
LARCH_SI_FTYPE_HI_SI, default), -+ DIRECT_BUILTIN (crc_w_w_w, LARCH_SI_FTYPE_SI_SI, default), -+ DIRECT_BUILTIN (crc_w_d_w, LARCH_SI_FTYPE_DI_SI, default), -+ DIRECT_BUILTIN (crcc_w_b_w, LARCH_SI_FTYPE_QI_SI, default), -+ DIRECT_BUILTIN (crcc_w_h_w, LARCH_SI_FTYPE_HI_SI, default), -+ DIRECT_BUILTIN (crcc_w_w_w, LARCH_SI_FTYPE_SI_SI, default), -+ DIRECT_BUILTIN (crcc_w_d_w, LARCH_SI_FTYPE_DI_SI, default), -+ -+ DIRECT_BUILTIN (csrrd, LARCH_USI_FTYPE_USI, default), -+ DIRECT_BUILTIN (dcsrrd, LARCH_UDI_FTYPE_USI, default), -+ DIRECT_BUILTIN (csrwr, LARCH_USI_FTYPE_USI_USI, default), -+ DIRECT_BUILTIN (dcsrwr, LARCH_UDI_FTYPE_UDI_USI, default), -+ DIRECT_BUILTIN (csrxchg, LARCH_USI_FTYPE_USI_USI_USI, default), -+ DIRECT_BUILTIN (dcsrxchg, LARCH_UDI_FTYPE_UDI_UDI_USI, default), -+ DIRECT_BUILTIN (iocsrrd_b, LARCH_UQI_FTYPE_USI, default), -+ DIRECT_BUILTIN (iocsrrd_h, LARCH_UHI_FTYPE_USI, default), -+ DIRECT_BUILTIN (iocsrrd_w, LARCH_USI_FTYPE_USI, default), -+ DIRECT_BUILTIN (iocsrrd_d, LARCH_UDI_FTYPE_USI, default), -+ DIRECT_NO_TARGET_BUILTIN (iocsrwr_b, LARCH_VOID_FTYPE_UQI_USI, default), -+ DIRECT_NO_TARGET_BUILTIN (iocsrwr_h, LARCH_VOID_FTYPE_UHI_USI, default), -+ DIRECT_NO_TARGET_BUILTIN (iocsrwr_w, LARCH_VOID_FTYPE_USI_USI, default), -+ DIRECT_NO_TARGET_BUILTIN (iocsrwr_d, LARCH_VOID_FTYPE_UDI_USI, default), -+ -+ /* Built-in functions for LSX. */ -+ LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsll_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsll_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vslli_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vslli_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vslli_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vslli_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vsra_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsra_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsra_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsra_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsrai_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsrai_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsrai_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsrai_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vsrar_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsrar_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsrar_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsrar_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsrari_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsrari_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsrari_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsrari_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vsrl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsrl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsrl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsrl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsrli_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsrli_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsrli_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsrli_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vsrlr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsrlr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsrlr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsrlr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsrlri_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsrlri_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsrlri_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsrlri_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN 
(vbitclr_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vbitclr_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vbitclr_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vbitclr_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vbitclri_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vbitclri_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vbitclri_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vbitclri_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vbitset_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vbitset_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vbitset_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vbitset_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vbitseti_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vbitseti_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vbitseti_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vbitseti_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vbitrev_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vbitrev_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vbitrev_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vbitrev_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vbitrevi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vbitrevi_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vbitrevi_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vbitrevi_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vaddi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vaddi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vaddi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vaddi_du, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsubi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsubi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsubi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsubi_du, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vmax_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmax_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmax_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmax_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmaxi_b, LARCH_V16QI_FTYPE_V16QI_QI), -+ LSX_BUILTIN (vmaxi_h, LARCH_V8HI_FTYPE_V8HI_QI), -+ LSX_BUILTIN (vmaxi_w, LARCH_V4SI_FTYPE_V4SI_QI), -+ LSX_BUILTIN (vmaxi_d, LARCH_V2DI_FTYPE_V2DI_QI), -+ LSX_BUILTIN (vmax_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vmax_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vmax_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmax_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vmaxi_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vmaxi_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vmaxi_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vmaxi_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vmin_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmin_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmin_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmin_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmini_b, LARCH_V16QI_FTYPE_V16QI_QI), -+ LSX_BUILTIN (vmini_h, LARCH_V8HI_FTYPE_V8HI_QI), -+ LSX_BUILTIN (vmini_w, LARCH_V4SI_FTYPE_V4SI_QI), -+ LSX_BUILTIN (vmini_d, 
LARCH_V2DI_FTYPE_V2DI_QI), -+ LSX_BUILTIN (vmin_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vmin_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vmin_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmin_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vmini_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vmini_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vmini_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vmini_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vseq_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vseq_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vseq_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vseq_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vseqi_b, LARCH_V16QI_FTYPE_V16QI_QI), -+ LSX_BUILTIN (vseqi_h, LARCH_V8HI_FTYPE_V8HI_QI), -+ LSX_BUILTIN (vseqi_w, LARCH_V4SI_FTYPE_V4SI_QI), -+ LSX_BUILTIN (vseqi_d, LARCH_V2DI_FTYPE_V2DI_QI), -+ LSX_BUILTIN (vslti_b, LARCH_V16QI_FTYPE_V16QI_QI), -+ LSX_BUILTIN (vslt_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vslt_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vslt_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vslt_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vslti_h, LARCH_V8HI_FTYPE_V8HI_QI), -+ LSX_BUILTIN (vslti_w, LARCH_V4SI_FTYPE_V4SI_QI), -+ LSX_BUILTIN (vslti_d, LARCH_V2DI_FTYPE_V2DI_QI), -+ LSX_BUILTIN (vslt_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vslt_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vslt_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vslt_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vslti_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vslti_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vslti_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vslti_du, LARCH_V2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vsle_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsle_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsle_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsle_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vslei_b, LARCH_V16QI_FTYPE_V16QI_QI), -+ LSX_BUILTIN (vslei_h, LARCH_V8HI_FTYPE_V8HI_QI), -+ LSX_BUILTIN (vslei_w, LARCH_V4SI_FTYPE_V4SI_QI), -+ LSX_BUILTIN (vslei_d, LARCH_V2DI_FTYPE_V2DI_QI), -+ LSX_BUILTIN (vsle_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vsle_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vsle_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vsle_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vslei_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vslei_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vslei_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vslei_du, LARCH_V2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vsat_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsat_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsat_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsat_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vsat_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vsat_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vsat_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vsat_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), -+ LSX_BUILTIN (vadda_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vadda_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vadda_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vadda_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsadd_bu, 
LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vsadd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vsadd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vsadd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vavg_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vavg_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vavg_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vavg_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vavg_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vavg_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vavg_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vavg_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vavgr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vavgr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vavgr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vavgr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vavgr_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vavgr_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vavgr_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vavgr_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vssub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vssub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vssub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vssub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssub_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vssub_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vssub_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vssub_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vabsd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vabsd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vabsd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vabsd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vabsd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vabsd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vabsd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vabsd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vmul_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmul_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmul_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmul_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), -+ LSX_BUILTIN (vmadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), -+ LSX_BUILTIN (vmadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), -+ LSX_BUILTIN (vmadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), -+ LSX_BUILTIN (vmsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), -+ LSX_BUILTIN (vmsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), -+ LSX_BUILTIN (vmsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), -+ LSX_BUILTIN (vmsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), -+ LSX_BUILTIN (vdiv_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vdiv_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vdiv_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vdiv_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vdiv_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vdiv_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vdiv_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vdiv_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vhaddw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vhaddw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vhaddw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vhaddw_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vhaddw_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vhaddw_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vhsubw_h_b, 
LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vhsubw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vhsubw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vhsubw_hu_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vhsubw_wu_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vhsubw_du_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmod_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vmod_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vmod_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmod_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vreplve_b, LARCH_V16QI_FTYPE_V16QI_SI), -+ LSX_BUILTIN (vreplve_h, LARCH_V8HI_FTYPE_V8HI_SI), -+ LSX_BUILTIN (vreplve_w, LARCH_V4SI_FTYPE_V4SI_SI), -+ LSX_BUILTIN (vreplve_d, LARCH_V2DI_FTYPE_V2DI_SI), -+ LSX_BUILTIN (vreplvei_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vreplvei_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vreplvei_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vreplvei_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vpickev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vpickev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vpickev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vpickev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vpickod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vpickod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vpickod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vpickod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vilvh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vilvh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vilvh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vilvh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vilvl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vilvl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vilvl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vilvl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vpackev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vpackev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vpackev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vpackev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vpackod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vpackod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vpackod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vpackod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vshuf_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), -+ LSX_BUILTIN (vshuf_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), -+ LSX_BUILTIN (vshuf_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), -+ LSX_BUILTIN (vand_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vandi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vnor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vnori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vxor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vxori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vbitsel_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI), -+ LSX_BUILTIN (vbitseli_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_USI), -+ LSX_BUILTIN (vshuf4i_b, LARCH_V16QI_FTYPE_V16QI_USI), -+ LSX_BUILTIN (vshuf4i_h, LARCH_V8HI_FTYPE_V8HI_USI), -+ LSX_BUILTIN (vshuf4i_w, LARCH_V4SI_FTYPE_V4SI_USI), -+ LSX_BUILTIN (vreplgr2vr_b, 
LARCH_V16QI_FTYPE_SI), -+ LSX_BUILTIN (vreplgr2vr_h, LARCH_V8HI_FTYPE_SI), -+ LSX_BUILTIN (vreplgr2vr_w, LARCH_V4SI_FTYPE_SI), -+ LSX_BUILTIN (vreplgr2vr_d, LARCH_V2DI_FTYPE_DI), -+ LSX_BUILTIN (vpcnt_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vpcnt_h, LARCH_V8HI_FTYPE_V8HI), -+ LSX_BUILTIN (vpcnt_w, LARCH_V4SI_FTYPE_V4SI), -+ LSX_BUILTIN (vpcnt_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vclo_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vclo_h, LARCH_V8HI_FTYPE_V8HI), -+ LSX_BUILTIN (vclo_w, LARCH_V4SI_FTYPE_V4SI), -+ LSX_BUILTIN (vclo_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vclz_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vclz_h, LARCH_V8HI_FTYPE_V8HI), -+ LSX_BUILTIN (vclz_w, LARCH_V4SI_FTYPE_V4SI), -+ LSX_BUILTIN (vclz_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vpickve2gr_b, LARCH_SI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vpickve2gr_h, LARCH_SI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vpickve2gr_w, LARCH_SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vpickve2gr_d, LARCH_DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vpickve2gr_bu, LARCH_USI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vpickve2gr_hu, LARCH_USI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vpickve2gr_wu, LARCH_USI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vpickve2gr_du, LARCH_UDI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vinsgr2vr_b, LARCH_V16QI_FTYPE_V16QI_SI_UQI), -+ LSX_BUILTIN (vinsgr2vr_h, LARCH_V8HI_FTYPE_V8HI_SI_UQI), -+ LSX_BUILTIN (vinsgr2vr_w, LARCH_V4SI_FTYPE_V4SI_SI_UQI), -+ LSX_BUILTIN (vinsgr2vr_d, LARCH_V2DI_FTYPE_V2DI_DI_UQI), -+ LSX_BUILTIN_TEST_BRANCH (bnz_b, LARCH_SI_FTYPE_UV16QI), -+ LSX_BUILTIN_TEST_BRANCH (bnz_h, LARCH_SI_FTYPE_UV8HI), -+ LSX_BUILTIN_TEST_BRANCH (bnz_w, LARCH_SI_FTYPE_UV4SI), -+ LSX_BUILTIN_TEST_BRANCH (bnz_d, LARCH_SI_FTYPE_UV2DI), -+ LSX_BUILTIN_TEST_BRANCH (bz_b, LARCH_SI_FTYPE_UV16QI), -+ LSX_BUILTIN_TEST_BRANCH (bz_h, LARCH_SI_FTYPE_UV8HI), -+ LSX_BUILTIN_TEST_BRANCH (bz_w, LARCH_SI_FTYPE_UV4SI), -+ LSX_BUILTIN_TEST_BRANCH (bz_d, LARCH_SI_FTYPE_UV2DI), -+ LSX_BUILTIN_TEST_BRANCH (bz_v, LARCH_SI_FTYPE_UV16QI), -+ LSX_BUILTIN_TEST_BRANCH (bnz_v, LARCH_SI_FTYPE_UV16QI), -+ LSX_BUILTIN (vrepli_b, LARCH_V16QI_FTYPE_HI), -+ LSX_BUILTIN (vrepli_h, LARCH_V8HI_FTYPE_HI), -+ LSX_BUILTIN (vrepli_w, LARCH_V4SI_FTYPE_HI), -+ LSX_BUILTIN (vrepli_d, LARCH_V2DI_FTYPE_HI), -+ LSX_BUILTIN (vfcmp_caf_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_caf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_ceq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_ceq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_clt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_clt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_cule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_cule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_saf_s, 
LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_saf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_seq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_seq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_slt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_slt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcmp_sule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcmp_sule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfmul_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfmul_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfdiv_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfdiv_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfcvt_h_s, LARCH_V8HI_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfcvt_s_d, LARCH_V4SF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfmin_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfmin_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfmina_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfmina_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfmax_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfmax_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfmaxa_s, LARCH_V4SF_FTYPE_V4SF_V4SF), -+ LSX_BUILTIN (vfmaxa_d, LARCH_V2DF_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vfclass_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vfclass_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vfsqrt_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfsqrt_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfrecip_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrecip_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfrint_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrint_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfrsqrt_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrsqrt_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vflogb_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vflogb_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfcvth_s_h, LARCH_V4SF_FTYPE_V8HI), -+ LSX_BUILTIN (vfcvth_d_s, LARCH_V2DF_FTYPE_V4SF), -+ LSX_BUILTIN (vfcvtl_s_h, LARCH_V4SF_FTYPE_V8HI), -+ LSX_BUILTIN (vfcvtl_d_s, LARCH_V2DF_FTYPE_V4SF), -+ LSX_BUILTIN (vftint_w_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftint_l_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vftint_wu_s, LARCH_UV4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftint_lu_d, LARCH_UV2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vftintrz_w_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrz_l_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vftintrz_wu_s, LARCH_UV4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrz_lu_d, LARCH_UV2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vffint_s_w, LARCH_V4SF_FTYPE_V4SI), -+ LSX_BUILTIN 
(vffint_d_l, LARCH_V2DF_FTYPE_V2DI), -+ LSX_BUILTIN (vffint_s_wu, LARCH_V4SF_FTYPE_UV4SI), -+ LSX_BUILTIN (vffint_d_lu, LARCH_V2DF_FTYPE_UV2DI), -+ -+ -+ LSX_BUILTIN (vandn_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vneg_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vneg_h, LARCH_V8HI_FTYPE_V8HI), -+ LSX_BUILTIN (vneg_w, LARCH_V4SI_FTYPE_V4SI), -+ LSX_BUILTIN (vneg_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vmuh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmuh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmuh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmuh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmuh_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vmuh_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vmuh_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmuh_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vsllwil_h_b, LARCH_V8HI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vsllwil_w_h, LARCH_V4SI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vsllwil_d_w, LARCH_V2DI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vsllwil_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UQI), -+ LSX_BUILTIN (vsllwil_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UQI), -+ LSX_BUILTIN (vsllwil_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UQI), -+ LSX_BUILTIN (vsran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vssran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vssran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssran_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vssran_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vssran_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vsrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vssrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vssrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssrarn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vssrarn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vssrarn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vsrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssrln_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vssrln_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vssrln_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vsrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssrlrn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vssrlrn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vssrlrn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vfrstpi_b, LARCH_V16QI_FTYPE_V16QI_V16QI_UQI), -+ LSX_BUILTIN (vfrstpi_h, LARCH_V8HI_FTYPE_V8HI_V8HI_UQI), -+ LSX_BUILTIN (vfrstp_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), -+ LSX_BUILTIN (vfrstp_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), -+ LSX_BUILTIN (vshuf4i_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vbsrl_v, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vbsll_v, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vextrins_b, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vextrins_h, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vextrins_w, 
LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vextrins_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vmskltz_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vmskltz_h, LARCH_V8HI_FTYPE_V8HI), -+ LSX_BUILTIN (vmskltz_w, LARCH_V4SI_FTYPE_V4SI), -+ LSX_BUILTIN (vmskltz_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vsigncov_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsigncov_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsigncov_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsigncov_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vfmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), -+ LSX_BUILTIN (vfmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), -+ LSX_BUILTIN (vfmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), -+ LSX_BUILTIN (vfmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), -+ LSX_BUILTIN (vfnmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), -+ LSX_BUILTIN (vfnmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), -+ LSX_BUILTIN (vfnmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), -+ LSX_BUILTIN (vfnmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), -+ LSX_BUILTIN (vftintrne_w_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrne_l_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vftintrp_w_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrp_l_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vftintrm_w_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrm_l_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vftint_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vffint_s_l, LARCH_V4SF_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vftintrz_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vftintrp_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vftintrm_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vftintrne_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), -+ LSX_BUILTIN (vftintl_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftinth_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vffinth_d_w, LARCH_V2DF_FTYPE_V4SI), -+ LSX_BUILTIN (vffintl_d_w, LARCH_V2DF_FTYPE_V4SI), -+ LSX_BUILTIN (vftintrzl_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrzh_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrpl_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrph_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrml_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrne_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrne_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrz_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrz_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrp_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrp_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrm_s, LARCH_V4SI_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrm_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI), -+ LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI), -+ LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI), -+ LSX_NO_TARGET_BUILTIN (vstelm_d, LARCH_VOID_FTYPE_V2DI_CVPOINTER_SI_UQI), -+ LSX_BUILTIN (vaddwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vaddwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vaddwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vaddwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vaddwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vaddwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vaddwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vaddwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ 
LSX_BUILTIN (vaddwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vaddwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vaddwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vaddwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vaddwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), -+ LSX_BUILTIN (vaddwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), -+ LSX_BUILTIN (vaddwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), -+ LSX_BUILTIN (vaddwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), -+ LSX_BUILTIN (vaddwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), -+ LSX_BUILTIN (vaddwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), -+ LSX_BUILTIN (vsubwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsubwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsubwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsubwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vsubwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vsubwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vsubwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vsubwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vsubwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vsubwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vsubwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vsubwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vaddwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vaddwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vsubwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsubwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsubwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vsubwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vaddwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), -+ LSX_BUILTIN (vaddwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), -+ -+ LSX_BUILTIN (vmulwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmulwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmulwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmulwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vmulwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vmulwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vmulwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmulwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vmulwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vmulwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), -+ LSX_BUILTIN (vmulwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), -+ LSX_BUILTIN (vmulwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), -+ LSX_BUILTIN (vmulwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), -+ LSX_BUILTIN (vmulwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), -+ LSX_BUILTIN (vmulwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), -+ LSX_BUILTIN (vmulwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), -+ LSX_BUILTIN (vmulwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), -+ LSX_BUILTIN (vmulwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), -+ LSX_BUILTIN (vmulwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmulwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vmulwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vmulwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vmulwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), -+ LSX_BUILTIN (vmulwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), -+ LSX_BUILTIN (vhaddw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vhaddw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vhsubw_q_d, 
LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vhsubw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), -+ LSX_BUILTIN (vmaddwev_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), -+ LSX_BUILTIN (vmaddwev_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), -+ LSX_BUILTIN (vmaddwev_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), -+ LSX_BUILTIN (vmaddwev_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), -+ LSX_BUILTIN (vmaddwev_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), -+ LSX_BUILTIN (vmaddwev_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), -+ LSX_BUILTIN (vmaddwod_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), -+ LSX_BUILTIN (vmaddwod_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), -+ LSX_BUILTIN (vmaddwod_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), -+ LSX_BUILTIN (vmaddwod_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), -+ LSX_BUILTIN (vmaddwod_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), -+ LSX_BUILTIN (vmaddwod_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), -+ LSX_BUILTIN (vmaddwev_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), -+ LSX_BUILTIN (vmaddwev_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), -+ LSX_BUILTIN (vmaddwev_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), -+ LSX_BUILTIN (vmaddwod_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), -+ LSX_BUILTIN (vmaddwod_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), -+ LSX_BUILTIN (vmaddwod_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), -+ LSX_BUILTIN (vmaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), -+ LSX_BUILTIN (vmaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), -+ LSX_BUILTIN (vmaddwev_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), -+ LSX_BUILTIN (vmaddwod_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), -+ LSX_BUILTIN (vmaddwev_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), -+ LSX_BUILTIN (vmaddwod_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), -+ LSX_BUILTIN (vrotr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vrotr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vrotr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vrotr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vadd_q, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vsub_q, LARCH_V2DI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vldrepl_b, LARCH_V16QI_FTYPE_CVPOINTER_SI), -+ LSX_BUILTIN (vldrepl_h, LARCH_V8HI_FTYPE_CVPOINTER_SI), -+ LSX_BUILTIN (vldrepl_w, LARCH_V4SI_FTYPE_CVPOINTER_SI), -+ LSX_BUILTIN (vldrepl_d, LARCH_V2DI_FTYPE_CVPOINTER_SI), -+ -+ LSX_BUILTIN (vmskgez_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vmsknz_b, LARCH_V16QI_FTYPE_V16QI), -+ LSX_BUILTIN (vexth_h_b, LARCH_V8HI_FTYPE_V16QI), -+ LSX_BUILTIN (vexth_w_h, LARCH_V4SI_FTYPE_V8HI), -+ LSX_BUILTIN (vexth_d_w, LARCH_V2DI_FTYPE_V4SI), -+ LSX_BUILTIN (vexth_q_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vexth_hu_bu, LARCH_UV8HI_FTYPE_UV16QI), -+ LSX_BUILTIN (vexth_wu_hu, LARCH_UV4SI_FTYPE_UV8HI), -+ LSX_BUILTIN (vexth_du_wu, LARCH_UV2DI_FTYPE_UV4SI), -+ LSX_BUILTIN (vexth_qu_du, LARCH_UV2DI_FTYPE_UV2DI), -+ LSX_BUILTIN (vrotri_b, LARCH_V16QI_FTYPE_V16QI_UQI), -+ LSX_BUILTIN (vrotri_h, LARCH_V8HI_FTYPE_V8HI_UQI), -+ LSX_BUILTIN (vrotri_w, LARCH_V4SI_FTYPE_V4SI_UQI), -+ LSX_BUILTIN (vrotri_d, LARCH_V2DI_FTYPE_V2DI_UQI), -+ LSX_BUILTIN (vextl_q_d, LARCH_V2DI_FTYPE_V2DI), -+ LSX_BUILTIN (vsrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vsrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vsrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vsrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vsrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vsrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vsrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vsrlrni_d_q, 
LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vssrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vssrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vssrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vssrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vssrlni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), -+ LSX_BUILTIN (vssrlni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), -+ LSX_BUILTIN (vssrlni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), -+ LSX_BUILTIN (vssrlni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), -+ LSX_BUILTIN (vssrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vssrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vssrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vssrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vssrlrni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), -+ LSX_BUILTIN (vssrlrni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), -+ LSX_BUILTIN (vssrlrni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), -+ LSX_BUILTIN (vssrlrni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), -+ LSX_BUILTIN (vsrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vsrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vsrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vsrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vsrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vsrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vsrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vsrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vssrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vssrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vssrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vssrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vssrani_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), -+ LSX_BUILTIN (vssrani_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), -+ LSX_BUILTIN (vssrani_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), -+ LSX_BUILTIN (vssrani_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), -+ LSX_BUILTIN (vssrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), -+ LSX_BUILTIN (vssrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), -+ LSX_BUILTIN (vssrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vssrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), -+ LSX_BUILTIN (vssrarni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), -+ LSX_BUILTIN (vssrarni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), -+ LSX_BUILTIN (vssrarni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), -+ LSX_BUILTIN (vssrarni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), -+ LSX_BUILTIN (vpermi_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), -+ LSX_BUILTIN (vld, LARCH_V16QI_FTYPE_CVPOINTER_SI), -+ LSX_NO_TARGET_BUILTIN (vst, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI), -+ LSX_BUILTIN (vssrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vssrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vssrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vssrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), -+ LSX_BUILTIN (vssrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), -+ LSX_BUILTIN (vssrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), -+ LSX_BUILTIN (vorn_v, LARCH_V16QI_FTYPE_V16QI_V16QI), -+ LSX_BUILTIN (vldi, LARCH_V2DI_FTYPE_HI), -+ LSX_BUILTIN (vshuf_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), -+ LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI), -+ LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI), -+ LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI), -+ -+ /* Built-in functions for LASX */ -+ LASX_BUILTIN (xvsll_b, 
LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsll_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsll_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsll_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvslli_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvslli_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvslli_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvslli_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvsra_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsra_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsra_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsra_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsrai_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsrai_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsrai_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsrai_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvsrar_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsrar_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsrar_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsrar_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsrari_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsrari_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsrari_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsrari_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvsrl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsrl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsrl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsrl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsrli_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsrli_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsrli_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsrli_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvsrlr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsrlr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsrlr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsrlr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsrlri_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsrlri_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsrlri_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsrlri_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvbitclr_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvbitclr_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvbitclr_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvbitclr_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvbitclri_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvbitclri_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvbitclri_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvbitclri_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), -+ LASX_BUILTIN (xvbitset_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvbitset_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvbitset_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvbitset_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvbitseti_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvbitseti_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvbitseti_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvbitseti_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), -+ LASX_BUILTIN (xvbitrev_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvbitrev_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvbitrev_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvbitrev_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvbitrevi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ 
LASX_BUILTIN (xvbitrevi_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvbitrevi_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvbitrevi_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), -+ LASX_BUILTIN (xvadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvaddi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvaddi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvaddi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvaddi_du, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsubi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsubi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsubi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsubi_du, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvmax_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmax_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmax_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmax_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmaxi_b, LARCH_V32QI_FTYPE_V32QI_QI), -+ LASX_BUILTIN (xvmaxi_h, LARCH_V16HI_FTYPE_V16HI_QI), -+ LASX_BUILTIN (xvmaxi_w, LARCH_V8SI_FTYPE_V8SI_QI), -+ LASX_BUILTIN (xvmaxi_d, LARCH_V4DI_FTYPE_V4DI_QI), -+ LASX_BUILTIN (xvmax_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmax_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmax_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmax_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmaxi_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvmaxi_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvmaxi_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvmaxi_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), -+ LASX_BUILTIN (xvmin_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmin_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmin_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmin_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmini_b, LARCH_V32QI_FTYPE_V32QI_QI), -+ LASX_BUILTIN (xvmini_h, LARCH_V16HI_FTYPE_V16HI_QI), -+ LASX_BUILTIN (xvmini_w, LARCH_V8SI_FTYPE_V8SI_QI), -+ LASX_BUILTIN (xvmini_d, LARCH_V4DI_FTYPE_V4DI_QI), -+ LASX_BUILTIN (xvmin_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmin_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmin_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmin_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmini_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvmini_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvmini_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvmini_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), -+ LASX_BUILTIN (xvseq_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvseq_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvseq_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvseq_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvseqi_b, LARCH_V32QI_FTYPE_V32QI_QI), -+ LASX_BUILTIN (xvseqi_h, LARCH_V16HI_FTYPE_V16HI_QI), -+ LASX_BUILTIN (xvseqi_w, LARCH_V8SI_FTYPE_V8SI_QI), -+ LASX_BUILTIN (xvseqi_d, LARCH_V4DI_FTYPE_V4DI_QI), -+ LASX_BUILTIN (xvslt_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvslt_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvslt_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ 
LASX_BUILTIN (xvslt_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvslti_b, LARCH_V32QI_FTYPE_V32QI_QI), -+ LASX_BUILTIN (xvslti_h, LARCH_V16HI_FTYPE_V16HI_QI), -+ LASX_BUILTIN (xvslti_w, LARCH_V8SI_FTYPE_V8SI_QI), -+ LASX_BUILTIN (xvslti_d, LARCH_V4DI_FTYPE_V4DI_QI), -+ LASX_BUILTIN (xvslt_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvslt_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvslt_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvslt_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvslti_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvslti_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvslti_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvslti_du, LARCH_V4DI_FTYPE_UV4DI_UQI), -+ LASX_BUILTIN (xvsle_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsle_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsle_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsle_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvslei_b, LARCH_V32QI_FTYPE_V32QI_QI), -+ LASX_BUILTIN (xvslei_h, LARCH_V16HI_FTYPE_V16HI_QI), -+ LASX_BUILTIN (xvslei_w, LARCH_V8SI_FTYPE_V8SI_QI), -+ LASX_BUILTIN (xvslei_d, LARCH_V4DI_FTYPE_V4DI_QI), -+ LASX_BUILTIN (xvsle_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvsle_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvsle_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvsle_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvslei_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvslei_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvslei_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvslei_du, LARCH_V4DI_FTYPE_UV4DI_UQI), -+ -+ LASX_BUILTIN (xvsat_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsat_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsat_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsat_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvsat_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvsat_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvsat_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvsat_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), -+ -+ LASX_BUILTIN (xvadda_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvadda_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvadda_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvadda_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsadd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvsadd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvsadd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvsadd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ -+ LASX_BUILTIN (xvavg_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvavg_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvavg_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvavg_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvavg_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvavg_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvavg_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvavg_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ -+ LASX_BUILTIN (xvavgr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvavgr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvavgr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvavgr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvavgr_bu, 
LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvavgr_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvavgr_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvavgr_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ -+ LASX_BUILTIN (xvssub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvssub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvssub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvssub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssub_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvssub_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvssub_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvssub_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvabsd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvabsd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvabsd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvabsd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvabsd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvabsd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvabsd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvabsd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ -+ LASX_BUILTIN (xvmul_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmul_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmul_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmul_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), -+ LASX_BUILTIN (xvmadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), -+ LASX_BUILTIN (xvmadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), -+ LASX_BUILTIN (xvmadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), -+ LASX_BUILTIN (xvmsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), -+ LASX_BUILTIN (xvmsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), -+ LASX_BUILTIN (xvmsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), -+ LASX_BUILTIN (xvmsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), -+ LASX_BUILTIN (xvdiv_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvdiv_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvdiv_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvdiv_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvdiv_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvdiv_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvdiv_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvdiv_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvhaddw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvhaddw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvhaddw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvhaddw_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvhaddw_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvhaddw_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvhsubw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvhsubw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvhsubw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvhsubw_hu_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvhsubw_wu_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvhsubw_du_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmod_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmod_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmod_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN 
(xvmod_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ -+ -+ LASX_BUILTIN (xvrepl128vei_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvrepl128vei_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvrepl128vei_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvrepl128vei_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvpickev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvpickev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvpickev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvpickev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvpickod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvpickod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvpickod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvpickod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvilvh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvilvh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvilvh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvilvh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvilvl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvilvl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvilvl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvilvl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvpackev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvpackev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvpackev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvpackev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvpackod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvpackod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvpackod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvpackod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvshuf_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), -+ LASX_BUILTIN (xvshuf_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), -+ LASX_BUILTIN (xvshuf_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), -+ LASX_BUILTIN (xvshuf_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), -+ LASX_BUILTIN (xvand_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvandi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvnor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvnori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvxor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvxori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), -+ LASX_BUILTIN (xvbitsel_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_UV32QI), -+ LASX_BUILTIN (xvbitseli_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_USI), -+ -+ LASX_BUILTIN (xvshuf4i_b, LARCH_V32QI_FTYPE_V32QI_USI), -+ LASX_BUILTIN (xvshuf4i_h, LARCH_V16HI_FTYPE_V16HI_USI), -+ LASX_BUILTIN (xvshuf4i_w, LARCH_V8SI_FTYPE_V8SI_USI), -+ -+ LASX_BUILTIN (xvreplgr2vr_b, LARCH_V32QI_FTYPE_SI), -+ LASX_BUILTIN (xvreplgr2vr_h, LARCH_V16HI_FTYPE_SI), -+ LASX_BUILTIN (xvreplgr2vr_w, LARCH_V8SI_FTYPE_SI), -+ LASX_BUILTIN (xvreplgr2vr_d, LARCH_V4DI_FTYPE_DI), -+ LASX_BUILTIN (xvpcnt_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvpcnt_h, LARCH_V16HI_FTYPE_V16HI), -+ LASX_BUILTIN (xvpcnt_w, LARCH_V8SI_FTYPE_V8SI), -+ LASX_BUILTIN (xvpcnt_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvclo_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvclo_h, LARCH_V16HI_FTYPE_V16HI), -+ LASX_BUILTIN (xvclo_w, LARCH_V8SI_FTYPE_V8SI), -+ LASX_BUILTIN (xvclo_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvclz_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvclz_h, LARCH_V16HI_FTYPE_V16HI), -+ LASX_BUILTIN (xvclz_w, LARCH_V8SI_FTYPE_V8SI), -+ 
LASX_BUILTIN (xvclz_d, LARCH_V4DI_FTYPE_V4DI), -+ -+ LASX_BUILTIN (xvrepli_b, LARCH_V32QI_FTYPE_HI), -+ LASX_BUILTIN (xvrepli_h, LARCH_V16HI_FTYPE_HI), -+ LASX_BUILTIN (xvrepli_w, LARCH_V8SI_FTYPE_HI), -+ LASX_BUILTIN (xvrepli_d, LARCH_V4DI_FTYPE_HI), -+ LASX_BUILTIN (xvfcmp_caf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_caf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_ceq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_ceq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_clt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_clt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_cule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_cule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_saf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_saf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_seq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_seq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_slt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_slt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcmp_sule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcmp_sule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfmul_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfmul_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfdiv_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfdiv_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfcvt_h_s, LARCH_V16HI_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfcvt_s_d, LARCH_V8SF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfmin_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN 
(xvfmin_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfmina_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfmina_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfmax_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfmax_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfmaxa_s, LARCH_V8SF_FTYPE_V8SF_V8SF), -+ LASX_BUILTIN (xvfmaxa_d, LARCH_V4DF_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvfclass_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvfclass_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvfsqrt_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfsqrt_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrecip_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrecip_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrint_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrint_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrsqrt_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrsqrt_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvflogb_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvflogb_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfcvth_s_h, LARCH_V8SF_FTYPE_V16HI), -+ LASX_BUILTIN (xvfcvth_d_s, LARCH_V4DF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfcvtl_s_h, LARCH_V8SF_FTYPE_V16HI), -+ LASX_BUILTIN (xvfcvtl_d_s, LARCH_V4DF_FTYPE_V8SF), -+ LASX_BUILTIN (xvftint_w_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftint_l_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvftint_wu_s, LARCH_UV8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftint_lu_d, LARCH_UV4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvftintrz_w_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrz_l_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvftintrz_wu_s, LARCH_UV8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrz_lu_d, LARCH_UV4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvffint_s_w, LARCH_V8SF_FTYPE_V8SI), -+ LASX_BUILTIN (xvffint_d_l, LARCH_V4DF_FTYPE_V4DI), -+ LASX_BUILTIN (xvffint_s_wu, LARCH_V8SF_FTYPE_UV8SI), -+ LASX_BUILTIN (xvffint_d_lu, LARCH_V4DF_FTYPE_UV4DI), -+ -+ LASX_BUILTIN (xvreplve_b, LARCH_V32QI_FTYPE_V32QI_SI), -+ LASX_BUILTIN (xvreplve_h, LARCH_V16HI_FTYPE_V16HI_SI), -+ LASX_BUILTIN (xvreplve_w, LARCH_V8SI_FTYPE_V8SI_SI), -+ LASX_BUILTIN (xvreplve_d, LARCH_V4DI_FTYPE_V4DI_SI), -+ LASX_BUILTIN (xvpermi_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), -+ -+ LASX_BUILTIN (xvandn_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvneg_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvneg_h, LARCH_V16HI_FTYPE_V16HI), -+ LASX_BUILTIN (xvneg_w, LARCH_V8SI_FTYPE_V8SI), -+ LASX_BUILTIN (xvneg_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvmuh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmuh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmuh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmuh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmuh_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmuh_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmuh_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmuh_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvsllwil_h_b, LARCH_V16HI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvsllwil_w_h, LARCH_V8SI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvsllwil_d_w, LARCH_V4DI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvsllwil_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UQI), /* FIXME: U? 
*/ -+ LASX_BUILTIN (xvsllwil_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UQI), -+ LASX_BUILTIN (xvsllwil_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UQI), -+ LASX_BUILTIN (xvsran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvssran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvssran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssran_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvssran_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvssran_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvsrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvssrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvssrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssrarn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvssrarn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvssrarn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvsrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssrln_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvssrln_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvssrln_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvsrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssrlrn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvssrlrn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvssrlrn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvfrstpi_b, LARCH_V32QI_FTYPE_V32QI_V32QI_UQI), -+ LASX_BUILTIN (xvfrstpi_h, LARCH_V16HI_FTYPE_V16HI_V16HI_UQI), -+ LASX_BUILTIN (xvfrstp_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), -+ LASX_BUILTIN (xvfrstp_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), -+ LASX_BUILTIN (xvshuf4i_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), -+ LASX_BUILTIN (xvbsrl_v, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvbsll_v, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvextrins_b, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), -+ LASX_BUILTIN (xvextrins_h, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), -+ LASX_BUILTIN (xvextrins_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), -+ LASX_BUILTIN (xvextrins_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), -+ LASX_BUILTIN (xvmskltz_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvmskltz_h, LARCH_V16HI_FTYPE_V16HI), -+ LASX_BUILTIN (xvmskltz_w, LARCH_V8SI_FTYPE_V8SI), -+ LASX_BUILTIN (xvmskltz_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvsigncov_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsigncov_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsigncov_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsigncov_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvfmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), -+ LASX_BUILTIN (xvfmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), -+ LASX_BUILTIN (xvfmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), -+ LASX_BUILTIN (xvfmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), -+ LASX_BUILTIN (xvfnmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), -+ LASX_BUILTIN (xvfnmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), -+ LASX_BUILTIN (xvfnmsub_s, 
LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), -+ LASX_BUILTIN (xvfnmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), -+ LASX_BUILTIN (xvftintrne_w_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrne_l_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvftintrp_w_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrp_l_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvftintrm_w_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrm_l_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvftint_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvffint_s_l, LARCH_V8SF_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvftintrz_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvftintrp_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvftintrm_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvftintrne_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), -+ LASX_BUILTIN (xvftinth_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintl_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvffinth_d_w, LARCH_V4DF_FTYPE_V8SI), -+ LASX_BUILTIN (xvffintl_d_w, LARCH_V4DF_FTYPE_V8SI), -+ LASX_BUILTIN (xvftintrzh_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrzl_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrph_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrpl_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrmh_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrne_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrne_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrz_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrz_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrp_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrp_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrm_s, LARCH_V8SI_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrm_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI), -+ LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI), -+ LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI), -+ LASX_NO_TARGET_BUILTIN (xvstelm_h, LARCH_VOID_FTYPE_V16HI_CVPOINTER_SI_UQI), -+ LASX_NO_TARGET_BUILTIN (xvstelm_w, LARCH_VOID_FTYPE_V8SI_CVPOINTER_SI_UQI), -+ LASX_NO_TARGET_BUILTIN (xvstelm_d, LARCH_VOID_FTYPE_V4DI_CVPOINTER_SI_UQI), -+ LASX_BUILTIN (xvinsve0_w, LARCH_V8SI_FTYPE_V8SI_V8SI_UQI), -+ LASX_BUILTIN (xvinsve0_d, LARCH_V4DI_FTYPE_V4DI_V4DI_UQI), -+ LASX_BUILTIN (xvpickve_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvpickve_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvpickve_w_f, LARCH_V8SF_FTYPE_V8SF_UQI), -+ LASX_BUILTIN (xvpickve_d_f, LARCH_V4DF_FTYPE_V4DF_UQI), -+ LASX_BUILTIN (xvssrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvssrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvssrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvssrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvssrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvssrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvorn_v, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvldi, LARCH_V4DI_FTYPE_HI), -+ LASX_BUILTIN (xvldx, LARCH_V32QI_FTYPE_CVPOINTER_DI), -+ LASX_NO_TARGET_BUILTIN (xvstx, LARCH_VOID_FTYPE_V32QI_CVPOINTER_DI), -+ LASX_BUILTIN (xvextl_qu_du, LARCH_UV4DI_FTYPE_UV4DI), -+ -+ /* LASX */ -+ LASX_BUILTIN (xvinsgr2vr_w, LARCH_V8SI_FTYPE_V8SI_SI_UQI), -+ LASX_BUILTIN (xvinsgr2vr_d, LARCH_V4DI_FTYPE_V4DI_DI_UQI), -+ -+ LASX_BUILTIN (xvreplve0_b, 
LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvreplve0_h, LARCH_V16HI_FTYPE_V16HI), -+ LASX_BUILTIN (xvreplve0_w, LARCH_V8SI_FTYPE_V8SI), -+ LASX_BUILTIN (xvreplve0_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvreplve0_q, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (vext2xv_h_b, LARCH_V16HI_FTYPE_V32QI), -+ LASX_BUILTIN (vext2xv_w_h, LARCH_V8SI_FTYPE_V16HI), -+ LASX_BUILTIN (vext2xv_d_w, LARCH_V4DI_FTYPE_V8SI), -+ LASX_BUILTIN (vext2xv_w_b, LARCH_V8SI_FTYPE_V32QI), -+ LASX_BUILTIN (vext2xv_d_h, LARCH_V4DI_FTYPE_V16HI), -+ LASX_BUILTIN (vext2xv_d_b, LARCH_V4DI_FTYPE_V32QI), -+ LASX_BUILTIN (vext2xv_hu_bu, LARCH_V16HI_FTYPE_V32QI), -+ LASX_BUILTIN (vext2xv_wu_hu, LARCH_V8SI_FTYPE_V16HI), -+ LASX_BUILTIN (vext2xv_du_wu, LARCH_V4DI_FTYPE_V8SI), -+ LASX_BUILTIN (vext2xv_wu_bu, LARCH_V8SI_FTYPE_V32QI), -+ LASX_BUILTIN (vext2xv_du_hu, LARCH_V4DI_FTYPE_V16HI), -+ LASX_BUILTIN (vext2xv_du_bu, LARCH_V4DI_FTYPE_V32QI), -+ LASX_BUILTIN (xvpermi_q, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), -+ LASX_BUILTIN (xvpermi_d, LARCH_V4DI_FTYPE_V4DI_USI), -+ LASX_BUILTIN (xvperm_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN_TEST_BRANCH (xbz_b, LARCH_SI_FTYPE_UV32QI), -+ LASX_BUILTIN_TEST_BRANCH (xbz_h, LARCH_SI_FTYPE_UV16HI), -+ LASX_BUILTIN_TEST_BRANCH (xbz_w, LARCH_SI_FTYPE_UV8SI), -+ LASX_BUILTIN_TEST_BRANCH (xbz_d, LARCH_SI_FTYPE_UV4DI), -+ LASX_BUILTIN_TEST_BRANCH (xbnz_b, LARCH_SI_FTYPE_UV32QI), -+ LASX_BUILTIN_TEST_BRANCH (xbnz_h, LARCH_SI_FTYPE_UV16HI), -+ LASX_BUILTIN_TEST_BRANCH (xbnz_w, LARCH_SI_FTYPE_UV8SI), -+ LASX_BUILTIN_TEST_BRANCH (xbnz_d, LARCH_SI_FTYPE_UV4DI), -+ LASX_BUILTIN_TEST_BRANCH (xbz_v, LARCH_SI_FTYPE_UV32QI), -+ LASX_BUILTIN_TEST_BRANCH (xbnz_v, LARCH_SI_FTYPE_UV32QI), -+ LASX_BUILTIN (xvldrepl_b, LARCH_V32QI_FTYPE_CVPOINTER_SI), -+ LASX_BUILTIN (xvldrepl_h, LARCH_V16HI_FTYPE_CVPOINTER_SI), -+ LASX_BUILTIN (xvldrepl_w, LARCH_V8SI_FTYPE_CVPOINTER_SI), -+ LASX_BUILTIN (xvldrepl_d, LARCH_V4DI_FTYPE_CVPOINTER_SI), -+ LASX_BUILTIN (xvpickve2gr_w, LARCH_SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvpickve2gr_wu, LARCH_USI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvpickve2gr_d, LARCH_DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvpickve2gr_du, LARCH_UDI_FTYPE_V4DI_UQI), -+ -+ -+ LASX_BUILTIN (xvaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvaddwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvaddwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvaddwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvaddwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvaddwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvaddwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvaddwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvsubwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsubwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsubwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsubwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsubwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvsubwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvsubwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvsubwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmulwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmulwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmulwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmulwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmulwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmulwev_d_wu, 
LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmulwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmulwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvaddwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvaddwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvaddwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvaddwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvaddwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvaddwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvaddwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvsubwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsubwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvsubwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvsubwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvsubwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvsubwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvsubwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvsubwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmulwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvmulwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvmulwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvmulwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvmulwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmulwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmulwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmulwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), -+ LASX_BUILTIN (xvaddwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), -+ LASX_BUILTIN (xvaddwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), -+ LASX_BUILTIN (xvaddwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), -+ LASX_BUILTIN (xvmulwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), -+ LASX_BUILTIN (xvmulwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), -+ LASX_BUILTIN (xvmulwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), -+ LASX_BUILTIN (xvaddwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), -+ LASX_BUILTIN (xvaddwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), -+ LASX_BUILTIN (xvaddwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), -+ LASX_BUILTIN (xvmulwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), -+ LASX_BUILTIN (xvmulwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), -+ LASX_BUILTIN (xvmulwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), -+ LASX_BUILTIN (xvhaddw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvhaddw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvhsubw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvhsubw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), -+ LASX_BUILTIN (xvmaddwev_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), -+ LASX_BUILTIN (xvmaddwev_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), -+ LASX_BUILTIN (xvmaddwev_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), -+ LASX_BUILTIN (xvmaddwev_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmaddwev_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmaddwev_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmaddwev_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), -+ LASX_BUILTIN (xvmaddwod_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), -+ LASX_BUILTIN (xvmaddwod_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), -+ LASX_BUILTIN (xvmaddwod_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), -+ LASX_BUILTIN 
(xvmaddwod_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), -+ LASX_BUILTIN (xvmaddwod_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), -+ LASX_BUILTIN (xvmaddwod_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), -+ LASX_BUILTIN (xvmaddwod_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), -+ LASX_BUILTIN (xvmaddwev_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), -+ LASX_BUILTIN (xvmaddwev_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), -+ LASX_BUILTIN (xvmaddwev_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), -+ LASX_BUILTIN (xvmaddwev_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), -+ LASX_BUILTIN (xvmaddwod_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), -+ LASX_BUILTIN (xvmaddwod_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), -+ LASX_BUILTIN (xvmaddwod_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), -+ LASX_BUILTIN (xvmaddwod_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), -+ LASX_BUILTIN (xvrotr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), -+ LASX_BUILTIN (xvrotr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), -+ LASX_BUILTIN (xvrotr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), -+ LASX_BUILTIN (xvrotr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvadd_q, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvsub_q, LARCH_V4DI_FTYPE_V4DI_V4DI), -+ LASX_BUILTIN (xvaddwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), -+ LASX_BUILTIN (xvaddwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), -+ LASX_BUILTIN (xvmulwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), -+ LASX_BUILTIN (xvmulwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), -+ LASX_BUILTIN (xvmskgez_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvmsknz_b, LARCH_V32QI_FTYPE_V32QI), -+ LASX_BUILTIN (xvexth_h_b, LARCH_V16HI_FTYPE_V32QI), -+ LASX_BUILTIN (xvexth_w_h, LARCH_V8SI_FTYPE_V16HI), -+ LASX_BUILTIN (xvexth_d_w, LARCH_V4DI_FTYPE_V8SI), -+ LASX_BUILTIN (xvexth_q_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvexth_hu_bu, LARCH_UV16HI_FTYPE_UV32QI), -+ LASX_BUILTIN (xvexth_wu_hu, LARCH_UV8SI_FTYPE_UV16HI), -+ LASX_BUILTIN (xvexth_du_wu, LARCH_UV4DI_FTYPE_UV8SI), -+ LASX_BUILTIN (xvexth_qu_du, LARCH_UV4DI_FTYPE_UV4DI), -+ LASX_BUILTIN (xvrotri_b, LARCH_V32QI_FTYPE_V32QI_UQI), -+ LASX_BUILTIN (xvrotri_h, LARCH_V16HI_FTYPE_V16HI_UQI), -+ LASX_BUILTIN (xvrotri_w, LARCH_V8SI_FTYPE_V8SI_UQI), -+ LASX_BUILTIN (xvrotri_d, LARCH_V4DI_FTYPE_V4DI_UQI), -+ LASX_BUILTIN (xvextl_q_d, LARCH_V4DI_FTYPE_V4DI), -+ LASX_BUILTIN (xvsrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), -+ LASX_BUILTIN (xvsrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), -+ LASX_BUILTIN (xvsrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), -+ LASX_BUILTIN (xvsrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), -+ LASX_BUILTIN (xvsrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), -+ LASX_BUILTIN (xvsrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), -+ LASX_BUILTIN (xvsrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), -+ LASX_BUILTIN (xvsrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), -+ LASX_BUILTIN (xvssrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), -+ LASX_BUILTIN (xvssrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), -+ LASX_BUILTIN (xvssrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), -+ LASX_BUILTIN (xvssrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), -+ LASX_BUILTIN (xvssrlni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), -+ LASX_BUILTIN (xvssrlni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), -+ LASX_BUILTIN (xvssrlni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), -+ LASX_BUILTIN (xvssrlni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), -+ LASX_BUILTIN (xvssrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), -+ LASX_BUILTIN (xvssrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), -+ LASX_BUILTIN (xvssrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), -+ LASX_BUILTIN 
(xvssrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
-+  LASX_BUILTIN (xvssrlrni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
-+  LASX_BUILTIN (xvssrlrni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
-+  LASX_BUILTIN (xvssrlrni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
-+  LASX_BUILTIN (xvssrlrni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI),
-+  LASX_BUILTIN (xvsrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
-+  LASX_BUILTIN (xvsrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
-+  LASX_BUILTIN (xvsrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
-+  LASX_BUILTIN (xvsrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
-+  LASX_BUILTIN (xvsrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
-+  LASX_BUILTIN (xvsrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
-+  LASX_BUILTIN (xvsrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
-+  LASX_BUILTIN (xvsrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
-+  LASX_BUILTIN (xvssrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
-+  LASX_BUILTIN (xvssrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
-+  LASX_BUILTIN (xvssrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
-+  LASX_BUILTIN (xvssrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
-+  LASX_BUILTIN (xvssrani_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
-+  LASX_BUILTIN (xvssrani_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
-+  LASX_BUILTIN (xvssrani_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
-+  LASX_BUILTIN (xvssrani_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI),
-+  LASX_BUILTIN (xvssrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
-+  LASX_BUILTIN (xvssrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
-+  LASX_BUILTIN (xvssrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
-+  LASX_BUILTIN (xvssrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
-+  LASX_BUILTIN (xvssrarni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
-+  LASX_BUILTIN (xvssrarni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
-+  LASX_BUILTIN (xvssrarni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
-+  LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI),
-+};
-+
-+
-+/* MODE is a vector mode whose elements have type TYPE. Return the type
-+   of the vector itself. */
-+
-+static tree
-+loongarch_builtin_vector_type (tree type, machine_mode mode)
-+{
-+  static tree types[2 * (int) MAX_MACHINE_MODE];
-+  int mode_index;
-+
-+  mode_index = (int) mode;
-+
-+  if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
-+    mode_index += MAX_MACHINE_MODE;
-+
-+  if (types[mode_index] == NULL_TREE)
-+    types[mode_index] = build_vector_type_for_mode (type, mode);
-+  return types[mode_index];
-+}
-+
-+/* Return a type for 'const volatile void *'. */
-+
-+static tree
-+loongarch_build_cvpointer_type (void)
-+{
-+  static tree cache;
-+
-+  if (cache == NULL_TREE)
-+    cache = build_pointer_type (build_qualified_type
-+                                (void_type_node,
-+                                 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
-+  return cache;
-+}
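The two helpers above share one idiom: a lazily filled static cache indexed by an enum value, with the index space doubled so signed and unsigned element types get separate slots. The following standalone sketch (with hypothetical names; not part of the original patch) shows the same caching scheme in miniature:

#include <stdio.h>

/* Hypothetical stand-in for GCC's machine_mode enumeration. */
enum mode { M_V4SI, M_V2DI, MAX_MODE };

/* One slot per mode for signed element types, plus MAX_MODE more for
   unsigned ones -- the same layout as the types[] cache above. */
static const char *cache[2 * MAX_MODE];

static const char *
cached_vector_type (enum mode m, int unsignedp, const char *fresh)
{
  int idx = (int) m + (unsignedp ? MAX_MODE : 0);
  if (cache[idx] == NULL)
    cache[idx] = fresh;   /* built on first use, reused afterwards */
  return cache[idx];
}

int
main (void)
{
  printf ("%s\n", cached_vector_type (M_V4SI, 0, "v4si"));      /* builds */
  printf ("%s\n", cached_vector_type (M_V4SI, 0, "discarded")); /* cached */
  return 0;
}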
-+
-+/* Source-level argument types. */
-+#define LARCH_ATYPE_VOID void_type_node
-+#define LARCH_ATYPE_INT integer_type_node
-+#define LARCH_ATYPE_POINTER ptr_type_node
-+#define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type ()
-+#define LARCH_ATYPE_BOOLEAN boolean_type_node
-+/* Standard mode-based argument types. */
-+#define LARCH_ATYPE_QI intQI_type_node
-+#define LARCH_ATYPE_UQI unsigned_intQI_type_node
-+#define LARCH_ATYPE_HI intHI_type_node
-+#define LARCH_ATYPE_UHI unsigned_intHI_type_node
-+#define LARCH_ATYPE_SI intSI_type_node
-+#define LARCH_ATYPE_USI unsigned_intSI_type_node
-+#define LARCH_ATYPE_DI intDI_type_node
-+#define LARCH_ATYPE_UDI unsigned_intDI_type_node
-+#define LARCH_ATYPE_SF float_type_node
-+#define LARCH_ATYPE_DF double_type_node
-+
-+/* Vector argument types. */
-+#define LARCH_ATYPE_V2SF loongarch_builtin_vector_type (float_type_node, V2SFmode)
-+#define LARCH_ATYPE_V2HI loongarch_builtin_vector_type (intHI_type_node, V2HImode)
-+#define LARCH_ATYPE_V2SI loongarch_builtin_vector_type (intSI_type_node, V2SImode)
-+#define LARCH_ATYPE_V4QI loongarch_builtin_vector_type (intQI_type_node, V4QImode)
-+#define LARCH_ATYPE_V4HI loongarch_builtin_vector_type (intHI_type_node, V4HImode)
-+#define LARCH_ATYPE_V8QI loongarch_builtin_vector_type (intQI_type_node, V8QImode)
-+
-+#define LARCH_ATYPE_V2DI \
-+  loongarch_builtin_vector_type (long_long_integer_type_node, V2DImode)
-+#define LARCH_ATYPE_V4SI loongarch_builtin_vector_type (intSI_type_node, V4SImode)
-+#define LARCH_ATYPE_V8HI loongarch_builtin_vector_type (intHI_type_node, V8HImode)
-+#define LARCH_ATYPE_V16QI loongarch_builtin_vector_type (intQI_type_node, V16QImode)
-+#define LARCH_ATYPE_V2DF loongarch_builtin_vector_type (double_type_node, V2DFmode)
-+#define LARCH_ATYPE_V4SF loongarch_builtin_vector_type (float_type_node, V4SFmode)
-+
-+/* LoongArch ASX. */
-+#define LARCH_ATYPE_V4DI \
-+  loongarch_builtin_vector_type (long_long_integer_type_node, V4DImode)
-+#define LARCH_ATYPE_V8SI loongarch_builtin_vector_type (intSI_type_node, V8SImode)
-+#define LARCH_ATYPE_V16HI loongarch_builtin_vector_type (intHI_type_node, V16HImode)
-+#define LARCH_ATYPE_V32QI loongarch_builtin_vector_type (intQI_type_node, V32QImode)
-+#define LARCH_ATYPE_V4DF loongarch_builtin_vector_type (double_type_node, V4DFmode)
-+#define LARCH_ATYPE_V8SF loongarch_builtin_vector_type (float_type_node, V8SFmode)
-+
-+#define LARCH_ATYPE_UV2DI \
-+  loongarch_builtin_vector_type (long_long_unsigned_type_node, V2DImode)
-+#define LARCH_ATYPE_UV4SI \
-+  loongarch_builtin_vector_type (unsigned_intSI_type_node, V4SImode)
-+#define LARCH_ATYPE_UV8HI \
-+  loongarch_builtin_vector_type (unsigned_intHI_type_node, V8HImode)
-+#define LARCH_ATYPE_UV16QI \
-+  loongarch_builtin_vector_type (unsigned_intQI_type_node, V16QImode)
-+
-+#define LARCH_ATYPE_UV4DI \
-+  loongarch_builtin_vector_type (long_long_unsigned_type_node, V4DImode)
-+#define LARCH_ATYPE_UV8SI \
-+  loongarch_builtin_vector_type (unsigned_intSI_type_node, V8SImode)
-+#define LARCH_ATYPE_UV16HI \
-+  loongarch_builtin_vector_type (unsigned_intHI_type_node, V16HImode)
-+#define LARCH_ATYPE_UV32QI \
-+  loongarch_builtin_vector_type (unsigned_intQI_type_node, V32QImode)
-+
-+#define LARCH_ATYPE_UV2SI \
-+  loongarch_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
-+#define LARCH_ATYPE_UV4HI \
-+  loongarch_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
-+#define LARCH_ATYPE_UV8QI \
-+  loongarch_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
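The LARCH_ATYPE_* names above are resolved purely by token pasting: a prototype code such as SI or UV4SI only gains meaning once ## glues it onto the LARCH_ATYPE_ prefix inside the LARCH_FTYPE_ATYPESN macros that follow. A minimal self-contained illustration of that expansion, using made-up stand-in values rather than GCC tree nodes:

#include <stdio.h>

/* Stand-ins for two of the LARCH_ATYPE_* definitions. */
#define ATYPE_SI  "intSI_type_node"
#define ATYPE_USI "unsigned_intSI_type_node"

/* Same shape as LARCH_FTYPE_ATYPES2: three type codes in, three
   pasted ATYPE_* lookups out. */
#define FTYPE_ATYPES2(A, B, C) ATYPE_##A, ATYPE_##B, ATYPE_##C

int
main (void)
{
  /* FTYPE_ATYPES2 (SI, SI, USI) expands to the three strings above. */
  const char *args[] = { FTYPE_ATYPES2 (SI, SI, USI) };
  for (int i = 0; i < 3; i++)
    printf ("%s\n", args[i]);
  return 0;
}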
*/
-+#define LARCH_FTYPE_ATYPES1(A, B) \
-+  LARCH_ATYPE_##A, LARCH_ATYPE_##B
-+
-+#define LARCH_FTYPE_ATYPES2(A, B, C) \
-+  LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C
-+
-+#define LARCH_FTYPE_ATYPES3(A, B, C, D) \
-+  LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C, LARCH_ATYPE_##D
-+
-+#define LARCH_FTYPE_ATYPES4(A, B, C, D, E) \
-+  LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C, LARCH_ATYPE_##D, \
-+  LARCH_ATYPE_##E
-+
-+/* Index I is the function declaration for loongarch_builtins[I], or null if the
-+   function isn't defined on this target.  */
-+static GTY(()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)];
-+/* Maps the instruction code of a built-in to its index I in
-+   loongarch_builtin_decls; the entry is zero if the built-in is not
-+   defined for this target.  */
-+static GTY(()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES];
-+
-+/* Return the function type associated with function prototype TYPE.  */
-+
-+static tree
-+loongarch_build_function_type (enum loongarch_function_type type)
-+{
-+  static tree types[(int) LARCH_MAX_FTYPE_MAX];
-+
-+  if (types[(int) type] == NULL_TREE)
-+    switch (type)
-+      {
-+#define DEF_LARCH_FTYPE(NUM, ARGS) \
-+  case LARCH_FTYPE_NAME##NUM ARGS: \
-+    types[(int) type] \
-+      = build_function_type_list (LARCH_FTYPE_ATYPES##NUM ARGS, \
-+                                  NULL_TREE); \
-+    break;
-+#include "config/loongarch/loongarch-ftypes.def"
-+#undef DEF_LARCH_FTYPE
-+      default:
-+        gcc_unreachable ();
-+      }
-+
-+  return types[(int) type];
-+}
-+
-+/* Implement TARGET_INIT_BUILTINS.  */
-+
-+void
-+loongarch_init_builtins (void)
-+{
-+  const struct loongarch_builtin_description *d;
-+  unsigned int i;
-+
-+  /* Iterate through all of the bdesc arrays, initializing all of the
-+     builtin functions.  */
-+  for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++)
-+    {
-+      d = &loongarch_builtins[i];
-+      if (d->avail ())
-+        {
-+          loongarch_builtin_decls[i]
-+            = add_builtin_function (d->name,
-+                                    loongarch_build_function_type (d->function_type),
-+                                    i, BUILT_IN_MD, NULL, NULL);
-+          loongarch_get_builtin_decl_index[d->icode] = i;
-+        }
-+    }
-+}
-+
-+/* Implement TARGET_BUILTIN_DECL.  */
-+
-+tree
-+loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
-+{
-+  if (code >= ARRAY_SIZE (loongarch_builtins))
-+    return error_mark_node;
-+  return loongarch_builtin_decls[code];
-+}
-+
-+/* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION.  */
-+
-+tree
-+loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, tree type_in)
-+{
-+  machine_mode in_mode, out_mode;
-+  int in_n, out_n;
-+
-+  if (TREE_CODE (type_out) != VECTOR_TYPE
-+      || TREE_CODE (type_in) != VECTOR_TYPE
-+      || !ISA_HAS_LSX)
-+    return NULL_TREE;
-+
-+  out_mode = TYPE_MODE (TREE_TYPE (type_out));
-+  out_n = TYPE_VECTOR_SUBPARTS (type_out);
-+  in_mode = TYPE_MODE (TREE_TYPE (type_in));
-+  in_n = TYPE_VECTOR_SUBPARTS (type_in);
-+
-+  /* INSN is the name of the associated instruction pattern, without
-+     the leading CODE_FOR_.
*/ -+#define LARCH_GET_BUILTIN(INSN) \ -+ loongarch_builtin_decls[loongarch_get_builtin_decl_index[CODE_FOR_##INSN]] -+ -+ switch (fn) -+ { -+ case BUILT_IN_SQRT: -+ if (out_mode == DFmode && out_n == 2 -+ && in_mode == DFmode && in_n == 2) -+ return LARCH_GET_BUILTIN (lsx_vfsqrt_d); -+ break; -+ case BUILT_IN_SQRTF: -+ if (out_mode == SFmode && out_n == 4 -+ && in_mode == SFmode && in_n == 4) -+ return LARCH_GET_BUILTIN (lsx_vfsqrt_s); -+ break; -+ default: -+ break; -+ } -+ -+ return NULL_TREE; -+} -+ -+/* Take argument ARGNO from EXP's argument list and convert it into -+ an expand operand. Store the operand in *OP. */ -+ -+static void -+loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp, -+ unsigned int argno) -+{ -+ tree arg; -+ rtx value; -+ -+ arg = CALL_EXPR_ARG (exp, argno); -+ value = expand_normal (arg); -+ create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg))); -+} -+ -+/* Return a const_int vector of VAL with mode MODE. */ -+ -+rtx -+loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val) -+{ -+ rtx c = gen_int_mode (val, GET_MODE_INNER (mode)); -+ return gen_const_vec_duplicate (mode, c); -+} -+ -+/* Expand instruction ICODE as part of a built-in function sequence. -+ Use the first NOPS elements of OPS as the instruction's operands. -+ HAS_TARGET_P is true if operand 0 is a target; it is false if the -+ instruction has no target. -+ -+ Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */ -+ -+static rtx -+loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, -+ struct expand_operand *ops, bool has_target_p) -+{ -+ machine_mode imode; -+ int rangelo = 0, rangehi = 0, error_opno = 0; -+ rtx sireg; -+ -+ switch (icode) -+ { -+ case CODE_FOR_lsx_vaddi_bu: -+ case CODE_FOR_lsx_vaddi_hu: -+ case CODE_FOR_lsx_vaddi_wu: -+ case CODE_FOR_lsx_vaddi_du: -+ case CODE_FOR_lsx_vslti_bu: -+ case CODE_FOR_lsx_vslti_hu: -+ case CODE_FOR_lsx_vslti_wu: -+ case CODE_FOR_lsx_vslti_du: -+ case CODE_FOR_lsx_vslei_bu: -+ case CODE_FOR_lsx_vslei_hu: -+ case CODE_FOR_lsx_vslei_wu: -+ case CODE_FOR_lsx_vslei_du: -+ case CODE_FOR_lsx_vmaxi_bu: -+ case CODE_FOR_lsx_vmaxi_hu: -+ case CODE_FOR_lsx_vmaxi_wu: -+ case CODE_FOR_lsx_vmaxi_du: -+ case CODE_FOR_lsx_vmini_bu: -+ case CODE_FOR_lsx_vmini_hu: -+ case CODE_FOR_lsx_vmini_wu: -+ case CODE_FOR_lsx_vmini_du: -+ case CODE_FOR_lsx_vsubi_bu: -+ case CODE_FOR_lsx_vsubi_hu: -+ case CODE_FOR_lsx_vsubi_wu: -+ case CODE_FOR_lsx_vsubi_du: -+ case CODE_FOR_lasx_xvaddi_bu: -+ case CODE_FOR_lasx_xvaddi_hu: -+ case CODE_FOR_lasx_xvaddi_wu: -+ case CODE_FOR_lasx_xvaddi_du: -+ case CODE_FOR_lasx_xvslti_bu: -+ case CODE_FOR_lasx_xvslti_hu: -+ case CODE_FOR_lasx_xvslti_wu: -+ case CODE_FOR_lasx_xvslti_du: -+ case CODE_FOR_lasx_xvslei_bu: -+ case CODE_FOR_lasx_xvslei_hu: -+ case CODE_FOR_lasx_xvslei_wu: -+ case CODE_FOR_lasx_xvslei_du: -+ case CODE_FOR_lasx_xvmaxi_bu: -+ case CODE_FOR_lasx_xvmaxi_hu: -+ case CODE_FOR_lasx_xvmaxi_wu: -+ case CODE_FOR_lasx_xvmaxi_du: -+ case CODE_FOR_lasx_xvmini_bu: -+ case CODE_FOR_lasx_xvmini_hu: -+ case CODE_FOR_lasx_xvmini_wu: -+ case CODE_FOR_lasx_xvmini_du: -+ case CODE_FOR_lasx_xvsubi_bu: -+ case CODE_FOR_lasx_xvsubi_hu: -+ case CODE_FOR_lasx_xvsubi_wu: -+ case CODE_FOR_lasx_xvsubi_du: -+ gcc_assert (has_target_p && nops == 3); -+ /* We only generate a vector of constants iff the second argument -+ is an immediate. We also validate the range of the immediate. 
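-+
-+     For example, a call such as __builtin_lsx_vaddi_bu (v, 3) arrives
-+     here with ops[2].value as (const_int 3); since 3 lies in [0, 31],
-+     it is broadcast into a constant vector {3, 3, ...} of the result
-+     mode so that the .md pattern's vector-immediate operand predicate
-+     accepts it.  (Illustrative call; the exact user-level spelling of
-+     the built-in comes from lsxintrin.h.)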
*/
-+      if (CONST_INT_P (ops[2].value))
-+        {
-+          rangelo = 0;
-+          rangehi = 31;
-+          if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
-+            {
-+              ops[2].mode = ops[0].mode;
-+              ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
-+                                                             INTVAL (ops[2].value));
-+            }
-+          else
-+            error_opno = 2;
-+        }
-+      break;
-+
-+    case CODE_FOR_lsx_vseqi_b:
-+    case CODE_FOR_lsx_vseqi_h:
-+    case CODE_FOR_lsx_vseqi_w:
-+    case CODE_FOR_lsx_vseqi_d:
-+    case CODE_FOR_lsx_vslti_b:
-+    case CODE_FOR_lsx_vslti_h:
-+    case CODE_FOR_lsx_vslti_w:
-+    case CODE_FOR_lsx_vslti_d:
-+    case CODE_FOR_lsx_vslei_b:
-+    case CODE_FOR_lsx_vslei_h:
-+    case CODE_FOR_lsx_vslei_w:
-+    case CODE_FOR_lsx_vslei_d:
-+    case CODE_FOR_lsx_vmaxi_b:
-+    case CODE_FOR_lsx_vmaxi_h:
-+    case CODE_FOR_lsx_vmaxi_w:
-+    case CODE_FOR_lsx_vmaxi_d:
-+    case CODE_FOR_lsx_vmini_b:
-+    case CODE_FOR_lsx_vmini_h:
-+    case CODE_FOR_lsx_vmini_w:
-+    case CODE_FOR_lsx_vmini_d:
-+    case CODE_FOR_lasx_xvseqi_b:
-+    case CODE_FOR_lasx_xvseqi_h:
-+    case CODE_FOR_lasx_xvseqi_w:
-+    case CODE_FOR_lasx_xvseqi_d:
-+    case CODE_FOR_lasx_xvslti_b:
-+    case CODE_FOR_lasx_xvslti_h:
-+    case CODE_FOR_lasx_xvslti_w:
-+    case CODE_FOR_lasx_xvslti_d:
-+    case CODE_FOR_lasx_xvslei_b:
-+    case CODE_FOR_lasx_xvslei_h:
-+    case CODE_FOR_lasx_xvslei_w:
-+    case CODE_FOR_lasx_xvslei_d:
-+    case CODE_FOR_lasx_xvmaxi_b:
-+    case CODE_FOR_lasx_xvmaxi_h:
-+    case CODE_FOR_lasx_xvmaxi_w:
-+    case CODE_FOR_lasx_xvmaxi_d:
-+    case CODE_FOR_lasx_xvmini_b:
-+    case CODE_FOR_lasx_xvmini_h:
-+    case CODE_FOR_lasx_xvmini_w:
-+    case CODE_FOR_lasx_xvmini_d:
-+      gcc_assert (has_target_p && nops == 3);
-+      /* We only generate a vector of constants iff the second argument
-+         is an immediate.  We also validate the range of the immediate.  */
-+      if (CONST_INT_P (ops[2].value))
-+        {
-+          rangelo = -16;
-+          rangehi = 15;
-+          if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
-+            {
-+              ops[2].mode = ops[0].mode;
-+              ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
-+                                                             INTVAL (ops[2].value));
-+            }
-+          else
-+            error_opno = 2;
-+        }
-+      break;
-+
-+    case CODE_FOR_lsx_vandi_b:
-+    case CODE_FOR_lsx_vori_b:
-+    case CODE_FOR_lsx_vnori_b:
-+    case CODE_FOR_lsx_vxori_b:
-+    case CODE_FOR_lasx_xvandi_b:
-+    case CODE_FOR_lasx_xvori_b:
-+    case CODE_FOR_lasx_xvnori_b:
-+    case CODE_FOR_lasx_xvxori_b:
-+      gcc_assert (has_target_p && nops == 3);
-+      if (!CONST_INT_P (ops[2].value))
-+        break;
-+      ops[2].mode = ops[0].mode;
-+      ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
-+                                                     INTVAL (ops[2].value));
-+      break;
-+
-+    case CODE_FOR_lsx_vbitseli_b:
-+    case CODE_FOR_lasx_xvbitseli_b:
-+      gcc_assert (has_target_p && nops == 4);
-+      if (!CONST_INT_P (ops[3].value))
-+        break;
-+      ops[3].mode = ops[0].mode;
-+      ops[3].value = loongarch_gen_const_int_vector (ops[3].mode,
-+                                                     INTVAL (ops[3].value));
-+      break;
-+
-+    case CODE_FOR_lsx_vreplgr2vr_b:
-+    case CODE_FOR_lsx_vreplgr2vr_h:
-+    case CODE_FOR_lsx_vreplgr2vr_w:
-+    case CODE_FOR_lsx_vreplgr2vr_d:
-+    case CODE_FOR_lasx_xvreplgr2vr_b:
-+    case CODE_FOR_lasx_xvreplgr2vr_h:
-+    case CODE_FOR_lasx_xvreplgr2vr_w:
-+    case CODE_FOR_lasx_xvreplgr2vr_d:
-+      /* Map the built-ins to vector fill operations.  We need to fix up
-+         the mode for the element being inserted.
*/
-+      gcc_assert (has_target_p && nops == 2);
-+      imode = GET_MODE_INNER (ops[0].mode);
-+      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
-+      ops[1].mode = imode;
-+      break;
-+
-+    case CODE_FOR_lsx_vilvh_b:
-+    case CODE_FOR_lsx_vilvh_h:
-+    case CODE_FOR_lsx_vilvh_w:
-+    case CODE_FOR_lsx_vilvh_d:
-+    case CODE_FOR_lsx_vilvl_b:
-+    case CODE_FOR_lsx_vilvl_h:
-+    case CODE_FOR_lsx_vilvl_w:
-+    case CODE_FOR_lsx_vilvl_d:
-+    case CODE_FOR_lsx_vpackev_b:
-+    case CODE_FOR_lsx_vpackev_h:
-+    case CODE_FOR_lsx_vpackev_w:
-+    case CODE_FOR_lsx_vpackod_b:
-+    case CODE_FOR_lsx_vpackod_h:
-+    case CODE_FOR_lsx_vpackod_w:
-+    case CODE_FOR_lsx_vpickev_b:
-+    case CODE_FOR_lsx_vpickev_h:
-+    case CODE_FOR_lsx_vpickev_w:
-+    case CODE_FOR_lsx_vpickod_b:
-+    case CODE_FOR_lsx_vpickod_h:
-+    case CODE_FOR_lsx_vpickod_w:
-+    case CODE_FOR_lasx_xvilvh_b:
-+    case CODE_FOR_lasx_xvilvh_h:
-+    case CODE_FOR_lasx_xvilvh_w:
-+    case CODE_FOR_lasx_xvilvh_d:
-+    case CODE_FOR_lasx_xvilvl_b:
-+    case CODE_FOR_lasx_xvilvl_h:
-+    case CODE_FOR_lasx_xvilvl_w:
-+    case CODE_FOR_lasx_xvilvl_d:
-+    case CODE_FOR_lasx_xvpackev_b:
-+    case CODE_FOR_lasx_xvpackev_h:
-+    case CODE_FOR_lasx_xvpackev_w:
-+    case CODE_FOR_lasx_xvpackod_b:
-+    case CODE_FOR_lasx_xvpackod_h:
-+    case CODE_FOR_lasx_xvpackod_w:
-+    case CODE_FOR_lasx_xvpickev_b:
-+    case CODE_FOR_lasx_xvpickev_h:
-+    case CODE_FOR_lasx_xvpickev_w:
-+    case CODE_FOR_lasx_xvpickod_b:
-+    case CODE_FOR_lasx_xvpickod_h:
-+    case CODE_FOR_lasx_xvpickod_w:
-+      /* Swap operands 1 and 2 for interleave operations.  The built-ins
-+         follow the ISA convention, which has op1 as the higher component
-+         and op2 as the lower component.  However, the VEC_PERM op in trees
-+         and vec_concat in RTL expect the first operand to be the lower
-+         component, so the operands must be swapped for the built-ins.  */
-+      gcc_assert (has_target_p && nops == 3);
-+      std::swap (ops[1], ops[2]);
-+      break;
-+
-+    case CODE_FOR_lsx_vslli_b:
-+    case CODE_FOR_lsx_vslli_h:
-+    case CODE_FOR_lsx_vslli_w:
-+    case CODE_FOR_lsx_vslli_d:
-+    case CODE_FOR_lsx_vsrai_b:
-+    case CODE_FOR_lsx_vsrai_h:
-+    case CODE_FOR_lsx_vsrai_w:
-+    case CODE_FOR_lsx_vsrai_d:
-+    case CODE_FOR_lsx_vsrli_b:
-+    case CODE_FOR_lsx_vsrli_h:
-+    case CODE_FOR_lsx_vsrli_w:
-+    case CODE_FOR_lsx_vsrli_d:
-+    case CODE_FOR_lasx_xvslli_b:
-+    case CODE_FOR_lasx_xvslli_h:
-+    case CODE_FOR_lasx_xvslli_w:
-+    case CODE_FOR_lasx_xvslli_d:
-+    case CODE_FOR_lasx_xvsrai_b:
-+    case CODE_FOR_lasx_xvsrai_h:
-+    case CODE_FOR_lasx_xvsrai_w:
-+    case CODE_FOR_lasx_xvsrai_d:
-+    case CODE_FOR_lasx_xvsrli_b:
-+    case CODE_FOR_lasx_xvsrli_h:
-+    case CODE_FOR_lasx_xvsrli_w:
-+    case CODE_FOR_lasx_xvsrli_d:
-+      gcc_assert (has_target_p && nops == 3);
-+      if (CONST_INT_P (ops[2].value))
-+        {
-+          rangelo = 0;
-+          rangehi = GET_MODE_UNIT_BITSIZE (ops[0].mode) - 1;
-+          if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
-+            {
-+              ops[2].mode = ops[0].mode;
-+              ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
-+                                                             INTVAL (ops[2].value));
-+            }
-+          else
-+            error_opno = 2;
-+        }
-+      break;
-+
-+    case CODE_FOR_lsx_vinsgr2vr_b:
-+    case CODE_FOR_lsx_vinsgr2vr_h:
-+    case CODE_FOR_lsx_vinsgr2vr_w:
-+    case CODE_FOR_lsx_vinsgr2vr_d:
-+      /* Map the built-ins to insert operations.  We need to swap operands,
-+         fix up the mode for the element being inserted, and generate
-+         a bit mask for vec_merge.
*/
-+      gcc_assert (has_target_p && nops == 4);
-+      std::swap (ops[1], ops[2]);
-+      imode = GET_MODE_INNER (ops[0].mode);
-+      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
-+      ops[1].mode = imode;
-+      rangelo = 0;
-+      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
-+      if (CONST_INT_P (ops[3].value)
-+          && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
-+        ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
-+      else
-+        error_opno = 2;
-+      break;
-+
-+    case CODE_FOR_lsx_vshuf4i_b:
-+    case CODE_FOR_lsx_vshuf4i_h:
-+    case CODE_FOR_lsx_vshuf4i_w:
-+    case CODE_FOR_lsx_vshuf4i_w_f:
-+      gcc_assert (has_target_p && nops == 3);
-+      ops[2].value = loongarch_gen_const_int_vector_shuffle (ops[0].mode,
-+                                                             INTVAL (ops[2].value));
-+      break;
-+
-+    case CODE_FOR_lasx_xvinsgr2vr_w:
-+    case CODE_FOR_lasx_xvinsgr2vr_d:
-+      /* Map the built-ins to insert operations.  We need to swap operands,
-+         fix up the mode for the element being inserted, and generate
-+         a bit mask for vec_merge.  */
-+      gcc_assert (has_target_p && nops == 4);
-+      std::swap (ops[1], ops[2]);
-+      imode = GET_MODE_INNER (ops[0].mode);
-+      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
-+      ops[1].mode = imode;
-+      rangelo = 0;
-+      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
-+      if (CONST_INT_P (ops[3].value)
-+          && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
-+        ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
-+      else
-+        error_opno = 2;
-+      break;
-+
-+    default:
-+      break;
-+    }
-+
-+  if (error_opno != 0)
-+    {
-+      error ("argument %d to the built-in must be a constant"
-+             " in range %d to %d", error_opno, rangelo, rangehi);
-+      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
-+    }
-+  else if (!maybe_expand_insn (icode, nops, ops))
-+    {
-+      error ("invalid argument to built-in function");
-+      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
-+    }
-+  return has_target_p ? ops[0].value : const0_rtx;
-+}
-+
-+/* Expand a LARCH_BUILTIN_DIRECT or LARCH_BUILTIN_DIRECT_NO_TARGET function;
-+   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
-+   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
-+   suggests a good place to put the result.  */
-+
-+static rtx
-+loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
-+                                 bool has_target_p)
-+{
-+  struct expand_operand ops[MAX_RECOG_OPERANDS];
-+  int opno, argno;
-+
-+  /* Map any target to operand 0.  */
-+  opno = 0;
-+  if (has_target_p)
-+    create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
-+
-+  /* Map the arguments to the other operands.
*/
-+  gcc_assert (opno + call_expr_nargs (exp)
-+              == insn_data[icode].n_generator_args);
-+  for (argno = 0; argno < call_expr_nargs (exp); argno++)
-+    loongarch_prepare_builtin_arg (&ops[opno++], exp, argno);
-+
-+  return loongarch_expand_builtin_insn (icode, opno, ops, has_target_p);
-+}
-+
-+/* Expand an LSX built-in for a compare and branch instruction specified by
-+   ICODE, setting a general-purpose register to 1 if the branch was taken
-+   and to 0 otherwise.  */
-+
-+static rtx
-+loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp)
-+{
-+  struct expand_operand ops[3];
-+  rtx_insn *cbranch;
-+  rtx_code_label *true_label, *done_label;
-+  rtx cmp_result;
-+
-+  true_label = gen_label_rtx ();
-+  done_label = gen_label_rtx ();
-+
-+  create_input_operand (&ops[0], true_label, TYPE_MODE (TREE_TYPE (exp)));
-+  loongarch_prepare_builtin_arg (&ops[1], exp, 0);
-+  create_fixed_operand (&ops[2], const0_rtx);
-+
-+  /* Make sure that operand 1 is a REG.  */
-+  if (GET_CODE (ops[1].value) != REG)
-+    ops[1].value = force_reg (ops[1].mode, ops[1].value);
-+
-+  if ((cbranch = maybe_gen_insn (icode, 3, ops)) == NULL_RTX)
-+    error ("failed to expand built-in function");
-+
-+  cmp_result = gen_reg_rtx (SImode);
-+
-+  /* First assume that CMP_RESULT is false.  */
-+  loongarch_emit_move (cmp_result, const0_rtx);
-+
-+  /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise.  */
-+  emit_jump_insn (cbranch);
-+  emit_jump_insn (gen_jump (done_label));
-+  emit_barrier ();
-+
-+  /* Set CMP_RESULT to true if the branch was taken.  */
-+  emit_label (true_label);
-+  loongarch_emit_move (cmp_result, const1_rtx);
-+
-+  emit_label (done_label);
-+  return cmp_result;
-+}
-+
-+/* Implement TARGET_EXPAND_BUILTIN.  */
-+
-+rtx
-+loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
-+                          machine_mode mode, int ignore)
-+{
-+  tree fndecl;
-+  unsigned int fcode, avail;
-+  const struct loongarch_builtin_description *d;
-+
-+  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
-+  fcode = DECL_FUNCTION_CODE (fndecl);
-+  gcc_assert (fcode < ARRAY_SIZE (loongarch_builtins));
-+  d = &loongarch_builtins[fcode];
-+  avail = d->avail ();
-+  gcc_assert (avail != 0);
-+  switch (d->builtin_type)
-+    {
-+    case LARCH_BUILTIN_DIRECT:
-+    case LARCH_BUILTIN_LSX:
-+    case LARCH_BUILTIN_LASX:
-+      return loongarch_expand_builtin_direct (d->icode, target, exp, true);
-+
-+    case LARCH_BUILTIN_DIRECT_NO_TARGET:
-+      return loongarch_expand_builtin_direct (d->icode, target, exp, false);
-+
-+    case LARCH_BUILTIN_LSX_TEST_BRANCH:
-+    case LARCH_BUILTIN_LASX_TEST_BRANCH:
-+      return loongarch_expand_builtin_lsx_test_branch (d->icode, exp);
-+    }
-+  gcc_unreachable ();
-+}
-+
-+/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.
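-+
-+   The hook fills in three statement sequences used when expanding C11
-+   atomic compound assignments on floating-point values: *HOLD saves
-+   the FCSR and masks its enable/flag bits, *CLEAR re-applies the
-+   masked value, and *UPDATE re-raises whatever exceptions the atomic
-+   operation recorded.  At the source level the emitted trees behave
-+   roughly like the sketch below, where get_fcsr/set_fcsr stand for
-+   the movfcsr2gr/movgr2fcsr built-ins (an illustrative sketch, not
-+   code generated verbatim):
-+
-+     unsigned orig = get_fcsr (0);            *HOLD: save FCSR and
-+     set_fcsr (0, orig & 0xffe0ffe0);                mask enables/flags
-+     ... compare-and-exchange loop; on retry, *CLEAR re-runs
-+     set_fcsr (0, orig & 0xffe0ffe0) to drop stale flags ...
-+     unsigned exc = get_fcsr (0);             *UPDATE: read raised flags,
-+     set_fcsr (0, orig);                              restore the FCSR,
-+     __atomic_feraiseexcept ((int) exc);              and re-raise them.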
*/
-+
-+void
-+loongarch_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
-+{
-+  if (!TARGET_HARD_FLOAT_ABI)
-+    return;
-+  tree exceptions_var = create_tmp_var_raw (LARCH_ATYPE_USI);
-+  tree fcsr_orig_var = create_tmp_var_raw (LARCH_ATYPE_USI);
-+  tree fcsr_mod_var = create_tmp_var_raw (LARCH_ATYPE_USI);
-+  tree const0 = build_int_cst (LARCH_ATYPE_UQI, 0);
-+  tree get_fcsr = loongarch_builtin_decls[LARCH_MOVFCSR2GR];
-+  tree set_fcsr = loongarch_builtin_decls[LARCH_MOVGR2FCSR];
-+  tree get_fcsr_hold_call = build_call_expr (get_fcsr, 1, const0);
-+  tree hold_assign_orig = build4 (TARGET_EXPR, LARCH_ATYPE_USI,
-+                                  fcsr_orig_var, get_fcsr_hold_call,
-+                                  NULL, NULL);
-+  tree hold_mod_val = build2 (BIT_AND_EXPR, LARCH_ATYPE_USI, fcsr_orig_var,
-+                              build_int_cst (LARCH_ATYPE_USI, 0xffe0ffe0));
-+  tree hold_assign_mod = build4 (TARGET_EXPR, LARCH_ATYPE_USI,
-+                                 fcsr_mod_var, hold_mod_val, NULL, NULL);
-+  tree set_fcsr_hold_call = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var);
-+  tree hold_all = build2 (COMPOUND_EXPR, LARCH_ATYPE_USI,
-+                          hold_assign_orig, hold_assign_mod);
-+  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all,
-+                  set_fcsr_hold_call);
-+
-+  *clear = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var);
-+
-+  tree get_fcsr_update_call = build_call_expr (get_fcsr, 1, const0);
-+  *update = build4 (TARGET_EXPR, LARCH_ATYPE_USI, exceptions_var,
-+                    get_fcsr_update_call, NULL, NULL);
-+  tree set_fcsr_update_call = build_call_expr (set_fcsr, 2, const0, fcsr_orig_var);
-+  *update = build2 (COMPOUND_EXPR, void_type_node, *update,
-+                    set_fcsr_update_call);
-+  tree atomic_feraiseexcept
-+    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
-+  tree int_exceptions_var = fold_convert (integer_type_node,
-+                                          exceptions_var);
-+  tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept,
-+                                                    1, int_exceptions_var);
-+  *update = build2 (COMPOUND_EXPR, void_type_node, *update,
-+                    atomic_feraiseexcept_call);
-+}
-+
-+/* Implement TARGET_BUILTIN_VA_LIST.  */
-+
-+tree
-+loongarch_build_builtin_va_list (void)
-+{
-+  return ptr_type_node;
-+}
-+
-diff --git a/gcc/config/loongarch/loongarch-c.c b/gcc/config/loongarch/loongarch-c.c
-new file mode 100644
-index 000000000..6eac43bdf
---- /dev/null
-+++ b/gcc/config/loongarch/loongarch-c.c
-@@ -0,0 +1,135 @@
-+/* LoongArch-specific code for C family languages.
-+   Copyright (C) 2020-2021 Free Software Foundation, Inc.
-+   Contributed by Andrew Waterman (zhouyingkun@mail.loongson.cn).
-+
-+   This file is part of GCC.
-+
-+   GCC is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3, or (at your option)
-+   any later version.
-+
-+   GCC is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with GCC; see the file COPYING3.  If not see
-+   <http://www.gnu.org/licenses/>.  */
-+
-+#define IN_TARGET_CODE 1
-+
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "c-family/c-common.h"
-+#include "cpplib.h"
-+
-+#define preprocessing_asm_p() (cpp_get_options (pfile)->lang == CLK_ASM)
-+#define builtin_define(TXT) cpp_define (pfile, TXT)
-+#define builtin_assert(TXT) cpp_assert (pfile, TXT)
-+
-+/* Define the LoongArch-specific preprocessor macros and assertions.
-+   PFILE is the libcpp reader that the builtin_define/builtin_assert
-+   wrappers above operate on.
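-+
-+   User code typically keys on the macros defined here to select a
-+   vector code path, e.g. (an assumed usage sketch, not part of this
-+   file):
-+
-+     #if defined (__loongarch_asx)
-+       ... 256-bit LASX implementation ...
-+     #elif defined (__loongarch_sx)
-+       ... 128-bit LSX implementation ...
-+     #endif
-+
-+   with __loongarch_simd_width giving the widest enabled vector size
-+   in bits (128 or 256).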
*/ -+ -+void -+loongarch_cpu_cpp_builtins (cpp_reader *pfile) -+{ -+ builtin_assert ("machine=loongarch"); -+ builtin_assert ("cpu=loongarch"); -+ builtin_define ("__loongarch__"); -+ -+ if (TARGET_FLOAT64) -+ builtin_define ("__loongarch_fpr=64"); -+ else -+ builtin_define ("__loongarch_fpr=32"); -+ -+ if (ISA_HAS_LSX) -+ { -+ builtin_define ("__loongarch_simd"); -+ builtin_define ("__loongarch_sx"); -+ builtin_define ("__loongarch_sx_width=128"); -+ -+ if (!ISA_HAS_LASX) -+ builtin_define ("__loongarch_simd_width=128"); -+ } -+ -+ if (ISA_HAS_LASX) -+ { -+ builtin_define ("__loongarch_asx"); -+ builtin_define ("__loongarch_asx_width=256"); -+ builtin_define ("__loongarch_simd_width=256"); -+ } -+ -+ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", loongarch_arch_info); -+ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", loongarch_tune_info); -+ -+ -+ switch (loongarch_abi) -+ { -+ case ABILP32: -+ builtin_define ("_ABILP32=1"); -+ builtin_define ("_LOONGARCH_SIM=_ABILP32"); -+ builtin_define ("__loongarch32"); -+ break; -+ -+ case ABILPX32: -+ builtin_define ("_ABILPX32=2"); -+ builtin_define ("_LOONGARCH_SIM=_ABILPX32"); -+ break; -+ -+ case ABILP64: -+ builtin_define ("_ABILP64=3"); -+ builtin_define ("_LOONGARCH_SIM=_ABILP64"); -+ builtin_define ("__loongarch64"); -+ break; -+ } -+ -+ builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); -+ builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); -+ builtin_define_with_int_value ("_LOONGARCH_SZPTR", POINTER_SIZE); -+ builtin_define_with_int_value ("_LOONGARCH_FPSET", -+ 32 / MAX_FPRS_PER_FMT); -+ builtin_define_with_int_value ("_LOONGARCH_SPFPSET", -+ 32); -+ -+ /* These defines reflect the ABI in use, not whether the -+ FPU is directly accessible. */ -+ if (TARGET_NO_FLOAT) -+ builtin_define ("__loongarch_no_float"); -+ else if (TARGET_HARD_FLOAT_ABI) -+ builtin_define ("__loongarch_hard_float"); -+ else -+ builtin_define ("__loongarch_soft_float"); -+ -+ if (TARGET_SINGLE_FLOAT) -+ builtin_define ("__loongarch_single_float"); -+ -+ /* Macros dependent on the C dialect. */ -+ if (preprocessing_asm_p ()) -+ { -+ builtin_define_std ("LANGUAGE_ASSEMBLY"); -+ builtin_define ("_LANGUAGE_ASSEMBLY"); -+ } -+ else if (c_dialect_cxx ()) -+ { -+ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); -+ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); -+ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); -+ } -+ else -+ { -+ builtin_define_std ("LANGUAGE_C"); -+ builtin_define ("_LANGUAGE_C"); -+ } -+ -+ if (c_dialect_objc ()) -+ { -+ builtin_define ("_LANGUAGE_OBJECTIVE_C"); -+ builtin_define ("__LANGUAGE_OBJECTIVE_C"); -+ /* Bizarre, but retained for backwards compatibility. */ -+ builtin_define_std ("LANGUAGE_C"); -+ builtin_define ("_LANGUAGE_C"); -+ } -+} -diff --git a/gcc/config/loongarch/loongarch-cpus.def b/gcc/config/loongarch/loongarch-cpus.def -new file mode 100644 -index 000000000..7ce2508e3 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-cpus.def -@@ -0,0 +1,38 @@ -+/* LARCH CPU names. -+ Copyright (C) 1989-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. 
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+/* A table describing all the processors GCC knows about.  The first
-+   mention of an ISA level is taken as the canonical name for that
-+   ISA.
-+
-+   To ease comparison, please keep this table in the same order
-+   as GAS's loongarch_cpu_info_table.  Please also make sure that
-+   LARCH_ISA_LEVEL_SPEC and LARCH_ARCH_FLOAT_SPEC handle all -march
-+   options correctly.
-+
-+   Before including this file, define a macro:
-+
-+   LARCH_CPU (NAME, CPU, ISA, FLAGS)
-+
-+   where the arguments are the fields of struct loongarch_cpu_info.  */
-+
-+/* Entries for generic ISAs.  */
-+LARCH_CPU ("loongarch64", PROCESSOR_LOONGARCH64, 0, 0)
-+LARCH_CPU ("la464", PROCESSOR_LA464, 0, 0)
-+
-diff --git a/gcc/config/loongarch/loongarch-d.c b/gcc/config/loongarch/loongarch-d.c
-new file mode 100644
-index 000000000..971e5d33e
---- /dev/null
-+++ b/gcc/config/loongarch/loongarch-d.c
-@@ -0,0 +1,31 @@
-+/* Subroutines for the D front end on the LARCH architecture.
-+   Copyright (C) 2017 Free Software Foundation, Inc.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "d/d-target.h"
-+#include "d/d-target-def.h"
-+
-+/* Implement TARGET_D_CPU_VERSIONS for LARCH targets.  */
-+
-+void
-+loongarch_d_target_versions (void)
-+{
-+  /* TODO: no D version identifiers are defined for LoongArch yet.  */
-+}
-diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def
-new file mode 100644
-index 000000000..a10a025ba
---- /dev/null
-+++ b/gcc/config/loongarch/loongarch-ftypes.def
-@@ -0,0 +1,719 @@
-+/* Definitions of prototypes for LARCH built-in functions.  -*- C -*-
-+   Copyright (C) 2007-2018 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+
-+You should have received a copy of the GNU General Public License
-+along with GCC; see the file COPYING3.  If not see
-+<http://www.gnu.org/licenses/>.  */
-+
-+/* Invoke DEF_LARCH_FTYPE (NARGS, LIST) for each prototype used by
-+   LARCH built-in functions, where:
-+
-+   NARGS is the number of arguments.
-+   LIST contains the return-type code followed by the codes for each
-+   argument type.
-+
-+   Argument- and return-type codes are either modes or one of the following:
-+
-+   VOID for void_type_node
-+   INT for integer_type_node
-+   POINTER for ptr_type_node
-+
-+   (we don't use PTR because that's an ANSI-compatibility macro).
-+
-+   Please keep this list lexicographically sorted by the LIST argument.
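-+
-+   For example, the entry DEF_LARCH_FTYPE (2, (SI, DI, SI)) below
-+   describes built-ins whose prototype is roughly "int f (long long, int)":
-+   the first code in LIST is the return type and the remaining NARGS
-+   codes are the argument types.  (Worked example added for clarity.)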
*/ -+DEF_LARCH_FTYPE (1, (DF, DF)) -+DEF_LARCH_FTYPE (2, (DF, DF, DF)) -+DEF_LARCH_FTYPE (1, (DF, V2DF)) -+DEF_LARCH_FTYPE (1, (DF, V4DF)) -+ -+DEF_LARCH_FTYPE (1, (DI, DI)) -+DEF_LARCH_FTYPE (1, (DI, SI)) -+DEF_LARCH_FTYPE (1, (DI, UQI)) -+DEF_LARCH_FTYPE (1, (UDI, USI)) -+DEF_LARCH_FTYPE (1, (UQI, USI)) -+DEF_LARCH_FTYPE (1, (USI, UQI)) -+DEF_LARCH_FTYPE (1, (UHI, USI)) -+DEF_LARCH_FTYPE (2, (DI, DI, DI)) -+DEF_LARCH_FTYPE (2, (DI, DI, SI)) -+DEF_LARCH_FTYPE (2, (DI, DI, UQI)) -+DEF_LARCH_FTYPE (2, (VOID, DI, UQI)) -+DEF_LARCH_FTYPE (2, (VOID, SI, UQI)) -+DEF_LARCH_FTYPE (2, (UDI, UDI, USI)) -+DEF_LARCH_FTYPE (3, (DI, DI, SI, SI)) -+DEF_LARCH_FTYPE (3, (DI, DI, USI, USI)) -+DEF_LARCH_FTYPE (3, (DI, DI, DI, QI)) -+DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI)) -+DEF_LARCH_FTYPE (3, (DI, DI, V2HI, V2HI)) -+DEF_LARCH_FTYPE (3, (DI, DI, V4QI, V4QI)) -+DEF_LARCH_FTYPE (2, (DI, POINTER, SI)) -+DEF_LARCH_FTYPE (2, (DI, SI, SI)) -+DEF_LARCH_FTYPE (2, (DI, USI, USI)) -+DEF_LARCH_FTYPE (2, (DI, V2DI, UQI)) -+DEF_LARCH_FTYPE (2, (DI, V4DI, UQI)) -+ -+DEF_LARCH_FTYPE (2, (INT, DF, DF)) -+DEF_LARCH_FTYPE (2, (INT, SF, SF)) -+DEF_LARCH_FTYPE (2, (INT, V2SF, V2SF)) -+DEF_LARCH_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF)) -+ -+DEF_LARCH_FTYPE (1, (SF, SF)) -+DEF_LARCH_FTYPE (2, (SF, SF, SF)) -+DEF_LARCH_FTYPE (1, (SF, V2SF)) -+DEF_LARCH_FTYPE (1, (SF, V4SF)) -+ -+DEF_LARCH_FTYPE (2, (SI, DI, SI)) -+DEF_LARCH_FTYPE (2, (SI, POINTER, SI)) -+DEF_LARCH_FTYPE (1, (SI, SI)) -+DEF_LARCH_FTYPE (1, (USI, USI)) -+DEF_LARCH_FTYPE (1, (SI, UDI)) -+DEF_LARCH_FTYPE (2, (QI, QI, QI)) -+DEF_LARCH_FTYPE (2, (HI, HI, HI)) -+DEF_LARCH_FTYPE (2, (SI, QI, SI)) -+DEF_LARCH_FTYPE (2, (SI, HI, SI)) -+DEF_LARCH_FTYPE (2, (SI, SI, SI)) -+DEF_LARCH_FTYPE (2, (SI, SI, UQI)) -+DEF_LARCH_FTYPE (2, (USI, USI, USI)) -+DEF_LARCH_FTYPE (3, (SI, SI, SI, SI)) -+DEF_LARCH_FTYPE (3, (SI, SI, SI, QI)) -+DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) -+DEF_LARCH_FTYPE (1, (SI, UQI)) -+DEF_LARCH_FTYPE (1, (SI, UV16QI)) -+DEF_LARCH_FTYPE (1, (SI, UV32QI)) -+DEF_LARCH_FTYPE (1, (SI, UV2DI)) -+DEF_LARCH_FTYPE (1, (SI, UV4DI)) -+DEF_LARCH_FTYPE (1, (SI, UV4SI)) -+DEF_LARCH_FTYPE (1, (SI, UV8SI)) -+DEF_LARCH_FTYPE (1, (SI, UV8HI)) -+DEF_LARCH_FTYPE (1, (SI, UV16HI)) -+DEF_LARCH_FTYPE (2, (SI, V16QI, UQI)) -+DEF_LARCH_FTYPE (2, (SI, V32QI, UQI)) -+DEF_LARCH_FTYPE (1, (SI, V2HI)) -+DEF_LARCH_FTYPE (2, (SI, V2HI, V2HI)) -+DEF_LARCH_FTYPE (1, (SI, V4QI)) -+DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI)) -+DEF_LARCH_FTYPE (2, (SI, V4SI, UQI)) -+DEF_LARCH_FTYPE (2, (SI, V8SI, UQI)) -+DEF_LARCH_FTYPE (2, (SI, V8HI, UQI)) -+DEF_LARCH_FTYPE (1, (SI, VOID)) -+ -+DEF_LARCH_FTYPE (2, (UDI, UDI, UDI)) -+DEF_LARCH_FTYPE (2, (USI, V32QI, UQI)) -+DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI)) -+DEF_LARCH_FTYPE (2, (USI, V8SI, UQI)) -+DEF_LARCH_FTYPE (2, (UDI, V2DI, UQI)) -+DEF_LARCH_FTYPE (2, (USI, V16HI, UQI)) -+DEF_LARCH_FTYPE (2, (UDI, V4DI, UQI)) -+ -+DEF_LARCH_FTYPE (2, (USI, V16QI, UQI)) -+DEF_LARCH_FTYPE (2, (USI, V4SI, UQI)) -+DEF_LARCH_FTYPE (2, (USI, V8HI, UQI)) -+DEF_LARCH_FTYPE (1, (USI, VOID)) -+ -+DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UQI)) -+DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, USI)) -+DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UQI)) -+DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, USI)) -+DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, V16QI)) -+ -+DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UQI)) -+DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UV2DI)) -+DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UQI)) 
-+DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UV2DI)) -+DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, V2DI)) -+DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (1, (UV2DI, V2DF)) -+ -+DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UQI)) -+DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, USI)) -+DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UQI)) -+DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, USI)) -+DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, V32QI)) -+ -+DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UQI)) -+DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UV4DI)) -+DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UQI)) -+DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UV4DI)) -+DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, V4DI)) -+DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (1, (UV4DI, V4DF)) -+ -+DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UQI)) -+DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UV2SI)) -+ -+DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UQI)) -+DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, USI)) -+DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UV4HI)) -+DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, UQI)) -+DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, USI)) -+DEF_LARCH_FTYPE (1, (UV4HI, UV8QI)) -+DEF_LARCH_FTYPE (2, (UV4HI, UV8QI, UV8QI)) -+ -+DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UQI)) -+DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UQI)) -+DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, V4SI)) -+DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (1, (UV4SI, V4SF)) -+ -+DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UQI)) -+DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI)) -+DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, V8HI)) -+ -+DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UQI)) -+DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UQI)) -+DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, V8SI)) -+DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (1, (UV8SI, V8SF)) -+ -+DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UQI)) -+DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UQI)) -+DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, V16HI)) -+ -+DEF_LARCH_FTYPE (2, (UV8QI, UV4HI, UV4HI)) -+DEF_LARCH_FTYPE (1, (UV8QI, UV8QI)) -+DEF_LARCH_FTYPE (2, (UV8QI, UV8QI, UV8QI)) -+ -+DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, DI)) -+DEF_LARCH_FTYPE (1, (V16QI, HI)) -+DEF_LARCH_FTYPE (1, (V16QI, SI)) -+DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UQI)) -+DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (1, (V16QI, V16QI)) -+DEF_LARCH_FTYPE (2, (V16QI, V16QI, QI)) -+DEF_LARCH_FTYPE (2, (V16QI, V16QI, SI)) -+DEF_LARCH_FTYPE (2, (V16QI, V16QI, USI)) -+DEF_LARCH_FTYPE (2, (V16QI, V16QI, UQI)) -+DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, SI)) -+DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, V16QI)) -+DEF_LARCH_FTYPE (2, (V16QI, V16QI, V16QI)) -+DEF_LARCH_FTYPE 
(3, (V16QI, V16QI, V16QI, SI)) -+DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, UQI)) -+DEF_LARCH_FTYPE (4, (V16QI, V16QI, V16QI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, USI)) -+DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, V16QI)) -+ -+DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, DI)) -+DEF_LARCH_FTYPE (1, (V32QI, HI)) -+DEF_LARCH_FTYPE (1, (V32QI, SI)) -+DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UQI)) -+DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (1, (V32QI, V32QI)) -+DEF_LARCH_FTYPE (2, (V32QI, V32QI, QI)) -+DEF_LARCH_FTYPE (2, (V32QI, V32QI, SI)) -+DEF_LARCH_FTYPE (2, (V32QI, V32QI, UQI)) -+DEF_LARCH_FTYPE (2, (V32QI, V32QI, USI)) -+DEF_LARCH_FTYPE (3, (V32QI, V32QI, SI, UQI)) -+DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, V32QI)) -+DEF_LARCH_FTYPE (2, (V32QI, V32QI, V32QI)) -+DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, SI)) -+DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, UQI)) -+DEF_LARCH_FTYPE (4, (V32QI, V32QI, V32QI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, USI)) -+DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, V32QI)) -+ -+DEF_LARCH_FTYPE (1, (V2DF, DF)) -+DEF_LARCH_FTYPE (1, (V2DF, UV2DI)) -+DEF_LARCH_FTYPE (1, (V2DF, V2DF)) -+DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DF)) -+DEF_LARCH_FTYPE (3, (V2DF, V2DF, V2DF, V2DF)) -+DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DI)) -+DEF_LARCH_FTYPE (1, (V2DF, V2DI)) -+DEF_LARCH_FTYPE (1, (V2DF, V4SF)) -+DEF_LARCH_FTYPE (1, (V2DF, V4SI)) -+ -+DEF_LARCH_FTYPE (1, (V4DF, DF)) -+DEF_LARCH_FTYPE (1, (V4DF, UV4DI)) -+DEF_LARCH_FTYPE (1, (V4DF, V4DF)) -+DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DF)) -+DEF_LARCH_FTYPE (3, (V4DF, V4DF, V4DF, V4DF)) -+DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DI)) -+DEF_LARCH_FTYPE (1, (V4DF, V4DI)) -+DEF_LARCH_FTYPE (1, (V4DF, V8SF)) -+DEF_LARCH_FTYPE (1, (V4DF, V8SI)) -+ -+DEF_LARCH_FTYPE (2, (V2DI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (1, (V2DI, DI)) -+DEF_LARCH_FTYPE (1, (V2DI, HI)) -+DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UQI)) -+DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UV2DI)) -+DEF_LARCH_FTYPE (2, (V2DI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (1, (V2DI, V2DF)) -+DEF_LARCH_FTYPE (2, (V2DI, V2DF, V2DF)) -+DEF_LARCH_FTYPE (1, (V2DI, V2DI)) -+DEF_LARCH_FTYPE (1, (UV2DI, UV2DI)) -+DEF_LARCH_FTYPE (2, (V2DI, V2DI, QI)) -+DEF_LARCH_FTYPE (2, (V2DI, V2DI, SI)) -+DEF_LARCH_FTYPE (2, (V2DI, V2DI, UQI)) -+DEF_LARCH_FTYPE (2, (V2DI, V2DI, USI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, DI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, V2DI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (2, (V2DI, V2DI, V2DI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, SI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, UQI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, USI)) -+DEF_LARCH_FTYPE (4, (V2DI, V2DI, V2DI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, V2DI)) -+DEF_LARCH_FTYPE (3, (V2DI, V2DI, V4SI, V4SI)) -+DEF_LARCH_FTYPE (2, (V2DI, V4SI, V4SI)) -+ -+DEF_LARCH_FTYPE (2, (V4DI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (1, (V4DI, DI)) -+DEF_LARCH_FTYPE (1, (V4DI, HI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UQI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UV4DI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (1, (V4DI, V4DF)) -+DEF_LARCH_FTYPE (2, (V4DI, V4DF, V4DF)) -+DEF_LARCH_FTYPE (1, (V4DI, V4DI)) -+DEF_LARCH_FTYPE (1, (UV4DI, UV4DI)) -+DEF_LARCH_FTYPE (2, (V4DI, V4DI, QI)) -+DEF_LARCH_FTYPE (2, (V4DI, V4DI, SI)) -+DEF_LARCH_FTYPE (2, (V4DI, V4DI, UQI)) -+DEF_LARCH_FTYPE (2, (V4DI, V4DI, USI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, DI, UQI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, V4DI)) -+DEF_LARCH_FTYPE (3, (V4DI, 
V4DI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (2, (V4DI, V4DI, V4DI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, SI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, USI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, UQI)) -+DEF_LARCH_FTYPE (4, (V4DI, V4DI, V4DI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, V4DI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V8SI, V8SI)) -+DEF_LARCH_FTYPE (2, (V4DI, V8SI, V8SI)) -+ -+DEF_LARCH_FTYPE (1, (V2HI, SI)) -+DEF_LARCH_FTYPE (2, (V2HI, SI, SI)) -+DEF_LARCH_FTYPE (3, (V2HI, SI, SI, SI)) -+DEF_LARCH_FTYPE (1, (V2HI, V2HI)) -+DEF_LARCH_FTYPE (2, (V2HI, V2HI, SI)) -+DEF_LARCH_FTYPE (2, (V2HI, V2HI, V2HI)) -+DEF_LARCH_FTYPE (1, (V2HI, V4QI)) -+DEF_LARCH_FTYPE (2, (V2HI, V4QI, V2HI)) -+ -+DEF_LARCH_FTYPE (2, (V2SF, SF, SF)) -+DEF_LARCH_FTYPE (1, (V2SF, V2SF)) -+DEF_LARCH_FTYPE (2, (V2SF, V2SF, V2SF)) -+DEF_LARCH_FTYPE (3, (V2SF, V2SF, V2SF, INT)) -+DEF_LARCH_FTYPE (4, (V2SF, V2SF, V2SF, V2SF, V2SF)) -+ -+DEF_LARCH_FTYPE (2, (V2SI, V2SI, UQI)) -+DEF_LARCH_FTYPE (2, (V2SI, V2SI, V2SI)) -+DEF_LARCH_FTYPE (2, (V2SI, V4HI, V4HI)) -+ -+DEF_LARCH_FTYPE (2, (V4HI, V2SI, V2SI)) -+DEF_LARCH_FTYPE (2, (V4HI, V4HI, UQI)) -+DEF_LARCH_FTYPE (2, (V4HI, V4HI, USI)) -+DEF_LARCH_FTYPE (2, (V4HI, V4HI, V4HI)) -+DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, UQI)) -+DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, USI)) -+ -+DEF_LARCH_FTYPE (1, (V4QI, SI)) -+DEF_LARCH_FTYPE (2, (V4QI, V2HI, V2HI)) -+DEF_LARCH_FTYPE (1, (V4QI, V4QI)) -+DEF_LARCH_FTYPE (2, (V4QI, V4QI, SI)) -+DEF_LARCH_FTYPE (2, (V4QI, V4QI, V4QI)) -+ -+DEF_LARCH_FTYPE (1, (V4SF, SF)) -+DEF_LARCH_FTYPE (1, (V4SF, UV4SI)) -+DEF_LARCH_FTYPE (2, (V4SF, V2DF, V2DF)) -+DEF_LARCH_FTYPE (1, (V4SF, V4SF)) -+DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SF)) -+DEF_LARCH_FTYPE (3, (V4SF, V4SF, V4SF, V4SF)) -+DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SI)) -+DEF_LARCH_FTYPE (1, (V4SF, V4SI)) -+DEF_LARCH_FTYPE (1, (V4SF, V8HI)) -+DEF_LARCH_FTYPE (1, (V8SF, V16HI)) -+ -+DEF_LARCH_FTYPE (1, (V8SF, SF)) -+DEF_LARCH_FTYPE (1, (V8SF, UV8SI)) -+DEF_LARCH_FTYPE (2, (V8SF, V4DF, V4DF)) -+DEF_LARCH_FTYPE (1, (V8SF, V8SF)) -+DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SF)) -+DEF_LARCH_FTYPE (3, (V8SF, V8SF, V8SF, V8SF)) -+DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SI)) -+DEF_LARCH_FTYPE (1, (V8SF, V8SI)) -+DEF_LARCH_FTYPE (1, (V8SF, V8HI)) -+ -+DEF_LARCH_FTYPE (2, (V4SI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (1, (V4SI, HI)) -+DEF_LARCH_FTYPE (1, (V4SI, SI)) -+DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UQI)) -+DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (2, (V4SI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (2, (V4SI, V2DF, V2DF)) -+DEF_LARCH_FTYPE (2, (V8SI, V4DF, V4DF)) -+DEF_LARCH_FTYPE (1, (V4SI, V4SF)) -+DEF_LARCH_FTYPE (2, (V4SI, V4SF, V4SF)) -+DEF_LARCH_FTYPE (1, (V4SI, V4SI)) -+DEF_LARCH_FTYPE (2, (V4SI, V4SI, QI)) -+DEF_LARCH_FTYPE (2, (V4SI, V4SI, SI)) -+DEF_LARCH_FTYPE (2, (V4SI, V4SI, UQI)) -+DEF_LARCH_FTYPE (2, (V4SI, V4SI, USI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, SI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, V4SI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (2, (V4SI, V4SI, V4SI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, SI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, UQI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, USI)) -+DEF_LARCH_FTYPE (4, (V4SI, V4SI, V4SI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, V4SI)) -+DEF_LARCH_FTYPE (3, (V4SI, V4SI, V8HI, V8HI)) -+DEF_LARCH_FTYPE (2, (V4SI, V8HI, V8HI)) -+ -+DEF_LARCH_FTYPE (2, (V8SI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (1, (V8SI, HI)) -+DEF_LARCH_FTYPE (1, (V8SI, SI)) -+DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UQI)) 
-+DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (2, (V8SI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (V8SI, V2DF, V2DF)) -+DEF_LARCH_FTYPE (1, (V8SI, V8SF)) -+DEF_LARCH_FTYPE (2, (V8SI, V8SF, V8SF)) -+DEF_LARCH_FTYPE (1, (V8SI, V8SI)) -+DEF_LARCH_FTYPE (2, (V8SI, V8SI, QI)) -+DEF_LARCH_FTYPE (2, (V8SI, V8SI, SI)) -+DEF_LARCH_FTYPE (2, (V8SI, V8SI, UQI)) -+DEF_LARCH_FTYPE (2, (V8SI, V8SI, USI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, SI, UQI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, V8SI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (V8SI, V8SI, V8SI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, SI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, UQI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, USI)) -+DEF_LARCH_FTYPE (4, (V8SI, V8SI, V8SI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, V8SI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (V8SI, V16HI, V16HI)) -+ -+DEF_LARCH_FTYPE (2, (V8HI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (1, (V8HI, HI)) -+DEF_LARCH_FTYPE (1, (V8HI, SI)) -+DEF_LARCH_FTYPE (2, (V8HI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UQI)) -+DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (2, (V8HI, V16QI, V16QI)) -+DEF_LARCH_FTYPE (2, (V8HI, V4SF, V4SF)) -+DEF_LARCH_FTYPE (1, (V8HI, V8HI)) -+DEF_LARCH_FTYPE (2, (V8HI, V8HI, QI)) -+DEF_LARCH_FTYPE (2, (V8HI, V8HI, SI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, SI, UQI)) -+DEF_LARCH_FTYPE (2, (V8HI, V8HI, UQI)) -+DEF_LARCH_FTYPE (2, (V8HI, V8HI, USI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, SI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, V8HI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, UV16QI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, V16QI, V16QI)) -+DEF_LARCH_FTYPE (2, (V8HI, V8HI, V8HI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, SI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, UQI)) -+DEF_LARCH_FTYPE (4, (V8HI, V8HI, V8HI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, USI)) -+DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, V8HI)) -+ -+DEF_LARCH_FTYPE (2, (V16HI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (1, (V16HI, HI)) -+DEF_LARCH_FTYPE (1, (V16HI, SI)) -+DEF_LARCH_FTYPE (2, (V16HI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UQI)) -+DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (V16HI, V32QI, V32QI)) -+DEF_LARCH_FTYPE (2, (V16HI, V8SF, V8SF)) -+DEF_LARCH_FTYPE (1, (V16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (V16HI, V16HI, QI)) -+DEF_LARCH_FTYPE (2, (V16HI, V16HI, SI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, SI, UQI)) -+DEF_LARCH_FTYPE (2, (V16HI, V16HI, UQI)) -+DEF_LARCH_FTYPE (2, (V16HI, V16HI, USI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, SI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, V16HI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, V32QI, V32QI)) -+DEF_LARCH_FTYPE (2, (V16HI, V16HI, V16HI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, SI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, UQI)) -+DEF_LARCH_FTYPE (4, (V16HI, V16HI, V16HI, UQI, UQI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, USI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, V16HI)) -+ -+DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI)) -+DEF_LARCH_FTYPE (1, (V8QI, V8QI)) -+DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI)) -+ -+DEF_LARCH_FTYPE (2, (VOID, SI, CVPOINTER)) -+DEF_LARCH_FTYPE (2, (VOID, SI, SI)) -+DEF_LARCH_FTYPE (2, (VOID, DI, DI)) -+DEF_LARCH_FTYPE (2, (VOID, UQI, SI)) -+DEF_LARCH_FTYPE (1, (VOID, USI)) -+DEF_LARCH_FTYPE (2, (VOID, USI, UQI)) -+DEF_LARCH_FTYPE (1, (VOID, UHI)) -+DEF_LARCH_FTYPE (2, (VOID, UQI, USI)) -+DEF_LARCH_FTYPE (2, (VOID, UHI, 
USI)) -+DEF_LARCH_FTYPE (2, (VOID, USI, USI)) -+DEF_LARCH_FTYPE (2, (VOID, UDI, USI)) -+DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) -+DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI)) -+DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, DI)) -+DEF_LARCH_FTYPE (3, (VOID, V4DF, POINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V2DF, POINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V2DI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V4DI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (2, (VOID, V2HI, V2HI)) -+DEF_LARCH_FTYPE (2, (VOID, V4QI, V4QI)) -+DEF_LARCH_FTYPE (3, (VOID, V4SF, POINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V8SF, POINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V4SI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V8SI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V8HI, CVPOINTER, SI)) -+DEF_LARCH_FTYPE (3, (VOID, V16HI, CVPOINTER, SI)) -+ -+DEF_LARCH_FTYPE (1, (V16HI, V32QI)) -+DEF_LARCH_FTYPE (1, (UV16HI, UV32QI)) -+DEF_LARCH_FTYPE (1, (V8SI, V32QI)) -+DEF_LARCH_FTYPE (1, (V4DI, V32QI)) -+DEF_LARCH_FTYPE (1, (V8HI, V16QI)) -+DEF_LARCH_FTYPE (1, (V4SI, V16QI)) -+DEF_LARCH_FTYPE (1, (V2DI, V16QI)) -+DEF_LARCH_FTYPE (1, (UV8SI, UV16HI)) -+DEF_LARCH_FTYPE (1, (V8SI, V16HI)) -+DEF_LARCH_FTYPE (1, (V4DI, V16HI)) -+DEF_LARCH_FTYPE (1, (V4SI, V8HI)) -+DEF_LARCH_FTYPE (1, (V2DI, V8HI)) -+DEF_LARCH_FTYPE (1, (V2DI, V4SI)) -+DEF_LARCH_FTYPE (1, (V4DI, V8SI)) -+DEF_LARCH_FTYPE (1, (UV4DI, UV8SI)) -+DEF_LARCH_FTYPE (1, (UV16HI, V32QI)) -+DEF_LARCH_FTYPE (1, (UV8SI, V32QI)) -+DEF_LARCH_FTYPE (1, (UV4DI, V32QI)) -+DEF_LARCH_FTYPE (1, (UV8HI, V16QI)) -+DEF_LARCH_FTYPE (1, (UV4SI, V16QI)) -+DEF_LARCH_FTYPE (1, (UV2DI, V16QI)) -+DEF_LARCH_FTYPE (1, (UV8SI, V16HI)) -+DEF_LARCH_FTYPE (1, (UV4DI, V16HI)) -+DEF_LARCH_FTYPE (1, (UV4SI, V8HI)) -+DEF_LARCH_FTYPE (1, (UV2DI, V8HI)) -+DEF_LARCH_FTYPE (1, (UV2DI, V4SI)) -+DEF_LARCH_FTYPE (1, (UV4DI, V8SI)) -+DEF_LARCH_FTYPE (1, (UV8HI, UV16QI)) -+DEF_LARCH_FTYPE (1, (UV4SI, UV16QI)) -+DEF_LARCH_FTYPE (1, (UV2DI, UV16QI)) -+DEF_LARCH_FTYPE (1, (UV4DI, UV32QI)) -+DEF_LARCH_FTYPE (1, (UV4SI, UV8HI)) -+DEF_LARCH_FTYPE (1, (UV2DI, UV8HI)) -+DEF_LARCH_FTYPE (1, (UV2DI, UV4SI)) -+DEF_LARCH_FTYPE (2, (UV8HI, V16QI, V16QI)) -+DEF_LARCH_FTYPE (2, (UV4SI, V8HI, V8HI)) -+DEF_LARCH_FTYPE (2, (UV2DI, V4SI, V4SI)) -+DEF_LARCH_FTYPE (2, (V16HI, V32QI, UQI)) -+DEF_LARCH_FTYPE (2, (V8SI, V16HI, UQI)) -+DEF_LARCH_FTYPE (2, (V4DI, V8SI, UQI)) -+DEF_LARCH_FTYPE (2, (V8HI, V16QI, UQI)) -+DEF_LARCH_FTYPE (2, (V4SI, V8HI, UQI)) -+DEF_LARCH_FTYPE (2, (V2DI, V4SI, UQI)) -+DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UQI)) -+DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UQI)) -+DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UQI)) -+DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UQI)) -+DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UQI)) -+DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UQI)) -+DEF_LARCH_FTYPE (2, (V32QI, V16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (V16HI, V8SI, V8SI)) -+DEF_LARCH_FTYPE (2, (V8SI, V4DI, V4DI)) -+DEF_LARCH_FTYPE (2, (V16QI, V8HI, V8HI)) -+DEF_LARCH_FTYPE (2, (V8HI, V4SI, V4SI)) -+DEF_LARCH_FTYPE (2, (V4SI, V2DI, V2DI)) -+DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UV8SI)) -+DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UV4DI)) -+DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UV8HI)) -+DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UV4SI)) -+DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UV2DI)) -+DEF_LARCH_FTYPE (2, (V32QI, V16HI, UQI)) -+DEF_LARCH_FTYPE (2, (V16HI, V8SI, UQI)) -+DEF_LARCH_FTYPE (2, (V8SI, V4DI, UQI)) 
-+DEF_LARCH_FTYPE (2, (V16QI, V8HI, UQI))
-+DEF_LARCH_FTYPE (2, (V8HI, V4SI, UQI))
-+DEF_LARCH_FTYPE (2, (V4SI, V2DI, UQI))
-+DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UQI))
-+DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UQI))
-+DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UQI))
-+DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UQI))
-+DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UQI))
-+DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UQI))
-+DEF_LARCH_FTYPE (2, (V32QI, V32QI, DI))
-+DEF_LARCH_FTYPE (2, (V16QI, V16QI, DI))
-+DEF_LARCH_FTYPE (2, (V32QI, UQI, UQI))
-+DEF_LARCH_FTYPE (2, (V16QI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, UQI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, UQI))
-+DEF_LARCH_FTYPE (2, (V8SF, V4DI, V4DI))
-+DEF_LARCH_FTYPE (2, (V4SF, V2DI, V2DI))
-+DEF_LARCH_FTYPE (1, (V4DI, V8SF))
-+DEF_LARCH_FTYPE (1, (V2DI, V4SF))
-+DEF_LARCH_FTYPE (2, (V4DI, UQI, USI))
-+DEF_LARCH_FTYPE (2, (V2DI, UQI, USI))
-+DEF_LARCH_FTYPE (2, (V4DI, UQI, UQI))
-+DEF_LARCH_FTYPE (2, (V2DI, UQI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V16QI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V8HI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V4SI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V2DI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V16QI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V8HI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V4SI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V2DI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, V32QI, UQI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, V16HI, UQI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, V8SI, UQI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (4, (VOID, V4DI, UQI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (3, (VOID, V32QI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V32QI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V16HI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V8SI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V4DI, SI, CVPOINTER))
-+DEF_LARCH_FTYPE (1, (V32QI, POINTER))
-+DEF_LARCH_FTYPE (2, (VOID, V32QI, POINTER))
-+DEF_LARCH_FTYPE (2, (V8HI, UV16QI, V16QI))
-+DEF_LARCH_FTYPE (2, (V16QI, V16QI, UV16QI))
-+DEF_LARCH_FTYPE (2, (UV16QI, V16QI, UV16QI))
-+DEF_LARCH_FTYPE (2, (V8HI, V8HI, UV8HI))
-+DEF_LARCH_FTYPE (2, (UV8HI, V8HI, UV8HI))
-+DEF_LARCH_FTYPE (2, (V4SI, V4SI, UV4SI))
-+DEF_LARCH_FTYPE (2, (UV4SI, V4SI, UV4SI))
-+DEF_LARCH_FTYPE (2, (V4SI, V16QI, V16QI))
-+DEF_LARCH_FTYPE (2, (V4SI, UV16QI, V16QI))
-+DEF_LARCH_FTYPE (2, (UV4SI, UV16QI, UV16QI))
-+DEF_LARCH_FTYPE (2, (V2DI, V2DI, UV2DI))
-+DEF_LARCH_FTYPE (2, (UV2DI, UV8HI, UV8HI))
-+DEF_LARCH_FTYPE (2, (V4SI, UV8HI, V8HI))
-+DEF_LARCH_FTYPE (2, (V2DI, UV4SI, V4SI))
-+DEF_LARCH_FTYPE (2, (V2DI, UV2DI, V2DI))
-+DEF_LARCH_FTYPE (2, (V2DI, V8HI, V8HI))
-+DEF_LARCH_FTYPE (2, (V2DI, UV8HI, V8HI))
-+DEF_LARCH_FTYPE (2, (UV2DI, V2DI, UV2DI))
-+DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, V8HI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV2DI, V2DI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, V4SI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, V8HI, V8HI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV8HI, V8HI))
-+DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV8HI, UV8HI))
-+DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, V16QI))
-+DEF_LARCH_FTYPE (3, (V4SI, V4SI, V16QI, V16QI))
-+DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI))
-+DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI))
-+
-+
-+DEF_LARCH_FTYPE (2, (V4DI, V16HI, V16HI))
-+DEF_LARCH_FTYPE (2, (V4DI, UV4SI, V4SI))
-+DEF_LARCH_FTYPE (2, (V8SI, UV16HI, V16HI))
-+DEF_LARCH_FTYPE (2, (V16HI, UV32QI, V32QI))
-+DEF_LARCH_FTYPE (2, (V4DI, UV8SI, V8SI))
-+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V16HI, V16HI))
-+DEF_LARCH_FTYPE (2, (UV32QI, V32QI, UV32QI))
-+DEF_LARCH_FTYPE (2, (UV16HI, V16HI, UV16HI))
-+DEF_LARCH_FTYPE (2, (UV8SI, V8SI, UV8SI))
-+DEF_LARCH_FTYPE (2, (UV4DI, V4DI, UV4DI))
-+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV4DI, V4DI))
-+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV8SI, V8SI))
-+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, V16HI))
-+DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, V32QI))
-+DEF_LARCH_FTYPE (2, (V4DI, UV4DI, V4DI))
-+DEF_LARCH_FTYPE (2, (V8SI, V32QI, V32QI))
-+DEF_LARCH_FTYPE (2, (UV4DI, UV16HI, UV16HI))
-+DEF_LARCH_FTYPE (2, (V4DI, UV16HI, V16HI))
-+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V32QI, V32QI))
-+DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV32QI, UV32QI))
-+DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV16HI, UV16HI))
-+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV32QI, V32QI))
-+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV16HI, V16HI))
-+DEF_LARCH_FTYPE (2, (UV8SI, UV32QI, UV32QI))
-+DEF_LARCH_FTYPE (2, (V8SI, UV32QI, V32QI))
-+
-+DEF_LARCH_FTYPE (4, (VOID, V16QI, CVPOINTER, SI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, V8HI, CVPOINTER, SI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, V4SI, CVPOINTER, SI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, V2DI, CVPOINTER, SI, UQI))
-+
-+DEF_LARCH_FTYPE (2, (DI, V16QI, UQI))
-+DEF_LARCH_FTYPE (2, (DI, V8HI, UQI))
-+DEF_LARCH_FTYPE (2, (DI, V4SI, UQI))
-+DEF_LARCH_FTYPE (2, (UDI, V16QI, UQI))
-+DEF_LARCH_FTYPE (2, (UDI, V8HI, UQI))
-+DEF_LARCH_FTYPE (2, (UDI, V4SI, UQI))
-+
-+DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, V16QI, USI))
-+DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, V8HI, USI))
-+DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, V4SI, USI))
-+DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, V2DI, USI))
-+
-+DEF_LARCH_FTYPE (2, (DI, V8SI, UQI))
-+DEF_LARCH_FTYPE (2, (UDI, V8SI, UQI))
-+
-+DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, V32QI, USI))
-+DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, V16HI, USI))
-+DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, V8SI, USI))
-+DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, V4DI, USI))
-+
-+DEF_LARCH_FTYPE (4, (VOID, V32QI, CVPOINTER, SI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, V16HI, CVPOINTER, SI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, V8SI, CVPOINTER, SI, UQI))
-+DEF_LARCH_FTYPE (4, (VOID, V4DI, CVPOINTER, SI, UQI))
-+
-+DEF_LARCH_FTYPE (1, (BOOLEAN, V16QI))
-+DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, CVPOINTER))
-+DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, CVPOINTER))
-+DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, CVPOINTER))
-+DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, CVPOINTER))
-+
-+DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI))
-+DEF_LARCH_FTYPE (3, (V2DI, V2DI, DI, UQI))
-+DEF_LARCH_FTYPE (3, (V4SI, V4SI, SI, UQI))
-+
-+DEF_LARCH_FTYPE (2, (V8SF, V8SF, UQI))
-+DEF_LARCH_FTYPE (2, (V4DF, V4DF, UQI))
-diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def
-new file mode 100644
-index 000000000..fe5bc38d9
---- /dev/null
-+++ b/gcc/config/loongarch/loongarch-modes.def
-@@ -0,0 +1,64 @@
-+/* LARCH extra machine modes.
-+   Copyright (C) 2003-2018 Free Software Foundation, Inc.
-+
-+This file is part of GCC.
-+
-+GCC is free software; you can redistribute it and/or modify
-+it under the terms of the GNU General Public License as published by
-+the Free Software Foundation; either version 3, or (at your option)
-+any later version.
-+
-+GCC is distributed in the hope that it will be useful,
-+but WITHOUT ANY WARRANTY; without even the implied warranty of
-+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+GNU General Public License for more details.
-+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+FLOAT_MODE (TF, 16, ieee_quad_format); -+ -+/* Vector modes. */ -+VECTOR_MODES (INT, 4); /* V4QI V2HI */ -+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */ -+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */ -+ -+/* For LARCH LSX 128 bits. */ -+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */ -+VECTOR_MODES (FLOAT, 16); /* V4SF V2DF */ -+ -+/* For LARCH LASX 256 bits. */ -+VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI */ -+VECTOR_MODES (FLOAT, 32); /* V8SF V4DF */ -+ -+/* Double-sized vector modes for vec_concat. */ -+/* VECTOR_MODE (INT, QI, 32); V32QI */ -+/* VECTOR_MODE (INT, HI, 16); V16HI */ -+/* VECTOR_MODE (INT, SI, 8); V8SI */ -+/* VECTOR_MODE (INT, DI, 4); V4DI */ -+/* VECTOR_MODE (FLOAT, SF, 8); V8SF */ -+/* VECTOR_MODE (FLOAT, DF, 4); V4DF */ -+ -+VECTOR_MODE (INT, QI, 64); /* V64QI */ -+VECTOR_MODE (INT, HI, 32); /* V32HI */ -+VECTOR_MODE (INT, SI, 16); /* V16SI */ -+VECTOR_MODE (INT, DI, 8); /* V8DI */ -+VECTOR_MODE (FLOAT, SF, 16); /* V16SF */ -+VECTOR_MODE (FLOAT, DF, 8); /* V8DF */ -+ -+VECTOR_MODES (FRACT, 4); /* V4QQ V2HQ */ -+VECTOR_MODES (UFRACT, 4); /* V4UQQ V2UHQ */ -+VECTOR_MODES (ACCUM, 4); /* V2HA */ -+VECTOR_MODES (UACCUM, 4); /* V2UHA */ -+ -+/* For floating point conditions in FCC registers. */ -+CC_MODE (FCC); -+ -+INT_MODE (OI, 32); -+ -+/* Keep the OI modes from confusing the compiler into thinking -+ that these modes could actually be used for computation. They are -+ only holders for vectors during data movement. */ -+#define MAX_BITSIZE_MODE_ANY_INT (128) -+ -diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h -new file mode 100644 -index 000000000..21639fa74 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-opts.h -@@ -0,0 +1,34 @@ -+/* Definitions for option handling for LARCH. -+ Copyright (C) 1989-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#ifndef LARCH_OPTS_H -+#define LARCH_OPTS_H -+ -+#define LARCH_ARCH_OPTION_NATIVE -1 -+ -+ -+enum loongarch_code_model { -+ LARCH_CMODEL_NORMAL, -+ LARCH_CMODEL_TINY, -+ LARCH_CMODEL_TINY_STATIC, -+ LARCH_CMODEL_LARGE, -+ LARCH_CMODEL_EXTREME -+}; -+ -+#endif -diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h -new file mode 100644 -index 000000000..c36fdd37d ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-protos.h -@@ -0,0 +1,290 @@ -+/* Prototypes of target machine for GNU compiler. LARCH version. -+ Copyright (C) 1989-2018 Free Software Foundation, Inc. -+ Contributed by A. Lichnewsky (lich@inria.inria.fr). -+ Changed by Michael Meissner (meissner@osf.org). -+ 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and -+ Brendan Eich (brendan@microunity.com). -+ -+This file is part of GCC. 
-+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#ifndef GCC_LARCH_PROTOS_H -+#define GCC_LARCH_PROTOS_H -+ -+/* Describes how a symbol is used. -+ -+ SYMBOL_CONTEXT_CALL -+ The symbol is used as the target of a call instruction. -+ -+ SYMBOL_CONTEXT_LEA -+ The symbol is used in a load-address operation. -+ -+ SYMBOL_CONTEXT_MEM -+ The symbol is used as the address in a MEM. */ -+enum loongarch_symbol_context { -+ SYMBOL_CONTEXT_CALL, -+ SYMBOL_CONTEXT_LEA, -+ SYMBOL_CONTEXT_MEM -+}; -+ -+/* Classifies a SYMBOL_REF, LABEL_REF or UNSPEC address. -+ -+ SYMBOL_GOT_DISP -+ The symbol's value will be loaded directly from the GOT. -+ -+ SYMBOL_TLS -+ A thread-local symbol. -+ -+ SYMBOL_TLSGD -+ SYMBOL_TLSLDM -+ UNSPEC wrappers around SYMBOL_TLS, corresponding to the -+ thread-local storage relocation operators. -+ */ -+enum loongarch_symbol_type { -+ SYMBOL_GOT_DISP, -+ SYMBOL_TLS, -+ SYMBOL_TLSGD, -+ SYMBOL_TLSLDM, -+}; -+#define NUM_SYMBOL_TYPES (SYMBOL_TLSLDM + 1) -+ -+/* Classifies a type of call. -+ -+ LARCH_CALL_NORMAL -+ A normal call or call_value pattern. -+ -+ LARCH_CALL_SIBCALL -+ A sibcall or sibcall_value pattern. -+ -+ LARCH_CALL_EPILOGUE -+ A call inserted in the epilogue. */ -+enum loongarch_call_type { -+ LARCH_CALL_NORMAL, -+ LARCH_CALL_SIBCALL, -+ LARCH_CALL_EPILOGUE -+}; -+ -+/* Controls the conditions under which certain instructions are split. -+ -+ SPLIT_IF_NECESSARY -+ Only perform splits that are necessary for correctness -+ (because no unsplit version exists). -+ -+ SPLIT_FOR_SPEED -+ Perform splits that are necessary for correctness or -+ beneficial for code speed. -+ -+ SPLIT_FOR_SIZE -+ Perform splits that are necessary for correctness or -+ beneficial for code size. 
*/ -+enum loongarch_split_type { -+ SPLIT_IF_NECESSARY, -+ SPLIT_FOR_SPEED, -+ SPLIT_FOR_SIZE -+}; -+extern const char *const loongarch_fp_conditions[16]; -+ -+extern const char *loongarch_output_gpr_save (unsigned); -+extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int); -+extern void loongarch_expand_prologue (void); -+extern void loongarch_expand_epilogue (bool); -+extern bool loongarch_can_use_return_insn (void); -+extern rtx loongarch_function_value (const_tree, const_tree, enum machine_mode); -+extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_context, -+ enum loongarch_symbol_type *); -+extern int loongarch_regno_mode_ok_for_base_p (int, machine_mode, bool); -+extern bool loongarch_stack_address_p (rtx, machine_mode); -+extern int loongarch_address_insns (rtx, machine_mode, bool); -+extern int loongarch_const_insns (rtx); -+extern int loongarch_split_const_insns (rtx); -+extern int loongarch_split_128bit_const_insns (rtx); -+extern int loongarch_load_store_insns (rtx, rtx_insn *); -+extern int loongarch_idiv_insns (machine_mode); -+extern rtx loongarch_emit_move (rtx, rtx); -+#ifdef RTX_CODE -+extern void loongarch_emit_binary (enum rtx_code, rtx, rtx, rtx); -+#endif -+extern rtx loongarch_pic_base_register (rtx); -+extern bool loongarch_split_symbol (rtx, rtx, machine_mode, rtx *); -+extern rtx loongarch_unspec_address (rtx, enum loongarch_symbol_type); -+extern rtx loongarch_strip_unspec_address (rtx); -+extern void loongarch_move_integer (rtx, rtx, unsigned HOST_WIDE_INT); -+extern bool loongarch_legitimize_move (machine_mode, rtx, rtx); -+extern rtx loongarch_legitimize_call_address (rtx); -+ -+extern rtx loongarch_subword (rtx, bool); -+extern bool loongarch_split_move_p (rtx, rtx, enum loongarch_split_type); -+extern void loongarch_split_move (rtx, rtx, enum loongarch_split_type, rtx); -+extern bool loongarch_split_move_insn_p (rtx, rtx, rtx); -+extern void loongarch_split_move_insn (rtx, rtx, rtx); -+extern void loongarch_split_128bit_move (rtx, rtx); -+extern bool loongarch_split_128bit_move_p (rtx, rtx); -+extern void loongarch_split_256bit_move (rtx, rtx); -+extern bool loongarch_split_256bit_move_p (rtx, rtx); -+extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx)); -+extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx); -+extern void loongarch_split_lsx_fill_d (rtx, rtx); -+extern const char *loongarch_output_move (rtx, rtx); -+extern bool loongarch_cfun_has_cprestore_slot_p (void); -+extern bool loongarch_cprestore_address_p (rtx, bool); -+#ifdef RTX_CODE -+extern void loongarch_expand_scc (rtx *); -+extern bool loongarch_expand_int_vec_cmp (rtx *); -+extern bool loongarch_expand_fp_vec_cmp (rtx *); -+extern void loongarch_expand_conditional_branch (rtx *); -+extern void loongarch_expand_conditional_move (rtx *); -+extern void loongarch_expand_conditional_trap (rtx); -+#endif -+extern bool loongarch_get_pic_call_symbol (rtx *, int); -+extern void loongarch_set_return_address (rtx, rtx); -+extern bool loongarch_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); -+extern bool loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); -+extern bool loongarch_expand_block_move (rtx, rtx, rtx); -+ -+extern void loongarch_init_cumulative_args (CUMULATIVE_ARGS *, tree); -+extern bool loongarch_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT, -+ HOST_WIDE_INT, bool); -+extern bool loongarch_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT, -+ HOST_WIDE_INT); -+extern bool 
loongarch_mem_fits_mode_p (machine_mode mode, rtx x); -+extern HOST_WIDE_INT loongarch_debugger_offset (rtx, HOST_WIDE_INT); -+ -+extern void loongarch_push_asm_switch (struct loongarch_asm_switch *); -+extern void loongarch_pop_asm_switch (struct loongarch_asm_switch *); -+extern void loongarch_output_external (FILE *, tree, const char *); -+extern void loongarch_output_ascii (FILE *, const char *, size_t); -+extern void loongarch_output_aligned_decl_common (FILE *, tree, const char *, -+ unsigned HOST_WIDE_INT, -+ unsigned int); -+extern void loongarch_declare_common_object (FILE *, const char *, -+ const char *, unsigned HOST_WIDE_INT, -+ unsigned int, bool); -+extern void loongarch_declare_object (FILE *, const char *, const char *, -+ const char *, ...) ATTRIBUTE_PRINTF_4; -+extern void loongarch_declare_object_name (FILE *, const char *, tree); -+extern void loongarch_finish_declare_object (FILE *, tree, int, int); -+extern void loongarch_set_text_contents_type (FILE *, const char *, -+ unsigned long, bool); -+ -+extern bool loongarch_small_data_pattern_p (rtx); -+extern rtx loongarch_rewrite_small_data (rtx); -+extern rtx loongarch_return_addr (int, rtx); -+extern bool loongarch_must_initialize_gp_p (void); -+ -+extern bool loongarch_const_vector_same_val_p (rtx, machine_mode); -+extern bool loongarch_const_vector_same_bytes_p (rtx, machine_mode); -+extern bool loongarch_const_vector_same_int_p (rtx, machine_mode, HOST_WIDE_INT, -+ HOST_WIDE_INT); -+extern bool loongarch_const_vector_shuffle_set_p (rtx, machine_mode); -+extern bool loongarch_const_vector_bitimm_set_p (rtx, machine_mode); -+extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode); -+extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool); -+extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT); -+extern enum reg_class loongarch_secondary_reload_class (enum reg_class, -+ machine_mode, -+ rtx, bool); -+extern int loongarch_class_max_nregs (enum reg_class, machine_mode); -+ -+extern machine_mode loongarch_hard_regno_caller_save_mode (unsigned int, -+ unsigned int, -+ machine_mode); -+extern int loongarch_adjust_insn_length (rtx_insn *, int); -+extern const char *loongarch_output_conditional_branch (rtx_insn *, rtx *, -+ const char *, const char *); -+extern const char *loongarch_output_order_conditional_branch (rtx_insn *, rtx *, -+ bool); -+extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, rtx *, -+ bool); -+extern const char *loongarch_output_division (const char *, rtx *); -+extern const char *loongarch_lsx_output_division (const char *, rtx *); -+extern const char *loongarch_output_probe_stack_range (rtx, rtx, rtx); -+extern bool loongarch_hard_regno_rename_ok (unsigned int, unsigned int); -+extern bool loongarch_linked_madd_p (rtx_insn *, rtx_insn *); -+extern bool loongarch_store_data_bypass_p (rtx_insn *, rtx_insn *); -+extern int loongarch_dspalu_bypass_p (rtx, rtx); -+extern rtx loongarch_prefetch_cookie (rtx, rtx); -+ -+extern bool loongarch_global_symbol_p (const_rtx); -+extern bool loongarch_global_symbol_noweak_p (const_rtx); -+extern bool loongarch_weak_symbol_p (const_rtx); -+extern bool loongarch_symbol_binds_local_p (const_rtx); -+ -+extern const char *current_section_name (void); -+extern unsigned int current_section_flags (void); -+extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT); -+ -+extern bool and_operands_ok (machine_mode, rtx, rtx); -+extern bool loongarch_fmadd_bypass (rtx_insn *, rtx_insn *); -+ -+union 
loongarch_gen_fn_ptrs -+{ -+ rtx (*fn_8) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx); -+ rtx (*fn_7) (rtx, rtx, rtx, rtx, rtx, rtx, rtx); -+ rtx (*fn_6) (rtx, rtx, rtx, rtx, rtx, rtx); -+ rtx (*fn_5) (rtx, rtx, rtx, rtx, rtx); -+ rtx (*fn_4) (rtx, rtx, rtx, rtx); -+}; -+ -+extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs, -+ rtx, rtx, rtx, rtx, rtx); -+ -+extern void loongarch_expand_vector_init (rtx, rtx); -+extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); -+ -+extern int loongarch_ldst_scaled_shift (machine_mode); -+extern bool loongarch_signed_immediate_p (unsigned HOST_WIDE_INT, int, int); -+extern bool loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int); -+extern bool loongarch_load_store_pair_p (bool, rtx *); -+extern bool loongarch_movep_target_p (rtx, rtx); -+extern bool loongarch_12bit_offset_address_p (rtx, machine_mode); -+extern bool loongarch_14bit_shifted_offset_address_p (rtx, machine_mode); -+extern bool loongarch_9bit_offset_address_p (rtx, machine_mode); -+extern bool lwsp_swsp_address_p (rtx, machine_mode); -+extern rtx loongarch_expand_thread_pointer (rtx); -+ -+extern bool loongarch_eh_uses (unsigned int); -+extern bool loongarch_epilogue_uses (unsigned int); -+extern int loongarch_trampoline_code_size (void); -+extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool); -+extern bool loongarch_la464_128_store_p (rtx[]); -+extern bool loongarch_la464_128_load_p (rtx[]); -+extern void loongarch_la464_emit_128bit_store (rtx[]); -+extern void loongarch_la464_emit_128bit_load (rtx[]); -+extern bool loongarch_split_symbol_type (enum loongarch_symbol_type); -+ -+typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); -+ -+extern void loongarch_register_frame_header_opt (void); -+extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); -+ -+extern void loongarch_declare_function_name(FILE *, const char *, tree); -+/* Routines implemented in loongarch-d.c */ -+extern void loongarch_d_target_versions (void); -+ -+/* Routines implemented in loongarch-c.c. */ -+void loongarch_cpu_cpp_builtins (cpp_reader *); -+ -+extern void loongarch_init_builtins (void); -+extern void loongarch_atomic_assign_expand_fenv (tree *, tree *, tree *); -+extern tree loongarch_builtin_decl (unsigned int, bool); -+extern rtx loongarch_expand_builtin (tree, rtx, rtx subtarget ATTRIBUTE_UNUSED, -+ machine_mode, int); -+extern tree loongarch_builtin_vectorized_function (unsigned int, tree, tree); -+extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int); -+extern tree loongarch_build_builtin_va_list (void); -+ -+extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool); -+#endif /* ! GCC_LARCH_PROTOS_H */ -diff --git a/gcc/config/loongarch/loongarch-tables.opt b/gcc/config/loongarch/loongarch-tables.opt -new file mode 100644 -index 000000000..80794b564 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-tables.opt -@@ -0,0 +1,34 @@ -+; -*- buffer-read-only: t -*- -+; Generated automatically by genopt.sh from loongarch-cpus.def. -+ -+; Copyright (C) 2011-2018 Free Software Foundation, Inc. -+; -+; This file is part of GCC. -+; -+; GCC is free software; you can redistribute it and/or modify it under -+; the terms of the GNU General Public License as published by the Free -+; Software Foundation; either version 3, or (at your option) any later -+; version. 
-+; -+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY -+; WARRANTY; without even the implied warranty of MERCHANTABILITY or -+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+; for more details. -+; -+; You should have received a copy of the GNU General Public License -+; along with GCC; see the file COPYING3. If not see -+; . -+ -+Enum -+Name(loongarch_arch_opt_value) Type(int) -+Known LARCH CPUs (for use with the -march= and -mtune= options): -+ -+EnumValue -+Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly -+ -+EnumValue -+Enum(loongarch_arch_opt_value) String(loongarch64) Value(0) Canonical -+ -+EnumValue -+Enum(loongarch_arch_opt_value) String(la464) Value(1) Canonical -+ -diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c -new file mode 100644 -index 000000000..e556f81e4 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch.c -@@ -0,0 +1,10465 @@ -+/* Subroutines used for LARCH code generation. -+ Copyright (C) 1989-2018 Free Software Foundation, Inc. -+ Contributed by A. Lichnewsky, lich@inria.inria.fr. -+ Changes by Michael Meissner, meissner@osf.org. -+ 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and -+ Brendan Eich, brendan@microunity.com. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "backend.h" -+#include "target.h" -+#include "rtl.h" -+#include "tree.h" -+#include "memmodel.h" -+#include "gimple.h" -+#include "cfghooks.h" -+#include "df.h" -+#include "tm_p.h" -+#include "stringpool.h" -+#include "attribs.h" -+#include "optabs.h" -+#include "regs.h" -+#include "emit-rtl.h" -+#include "recog.h" -+#include "cgraph.h" -+#include "diagnostic.h" -+#include "insn-attr.h" -+#include "output.h" -+#include "alias.h" -+#include "fold-const.h" -+#include "varasm.h" -+#include "stor-layout.h" -+#include "calls.h" -+#include "explow.h" -+#include "expr.h" -+#include "libfuncs.h" -+#include "reload.h" -+#include "common/common-target.h" -+#include "langhooks.h" -+#include "cfgrtl.h" -+#include "cfganal.h" -+#include "sched-int.h" -+#include "gimplify.h" -+#include "target-globals.h" -+#include "tree-pass.h" -+#include "context.h" -+#include "builtins.h" -+#include "rtl-iter.h" -+ -+/* This file should be included last. */ -+#include "target-def.h" -+ -+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */ -+#define UNSPEC_ADDRESS_P(X) \ -+ (GET_CODE (X) == UNSPEC \ -+ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \ -+ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES) -+ -+/* Extract the symbol or label from UNSPEC wrapper X. */ -+#define UNSPEC_ADDRESS(X) \ -+ XVECEXP (X, 0, 0) -+ -+/* Extract the symbol type from UNSPEC wrapper X. 
*/ -+#define UNSPEC_ADDRESS_TYPE(X) \ -+ ((enum loongarch_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST)) -+ -+/* The maximum distance between the top of the stack frame and the -+ value $sp has when we save and restore registers. -+*/ -+#define LARCH_MAX_FIRST_STACK_STEP 0x7f0 -+ -+/* True if INSN is a loongarch.md pattern or asm statement. */ -+/* ??? This test exists through the compiler, perhaps it should be -+ moved to rtl.h. */ -+#define USEFUL_INSN_P(INSN) \ -+ (NONDEBUG_INSN_P (INSN) \ -+ && GET_CODE (PATTERN (INSN)) != USE \ -+ && GET_CODE (PATTERN (INSN)) != CLOBBER) -+ -+/* If INSN is a delayed branch sequence, return the first instruction -+ in the sequence, otherwise return INSN itself. */ -+#define SEQ_BEGIN(INSN) \ -+ (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ -+ ? as_a (XVECEXP (PATTERN (INSN), 0, 0)) \ -+ : (INSN)) -+ -+/* Likewise for the last instruction in a delayed branch sequence. */ -+#define SEQ_END(INSN) \ -+ (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ -+ ? as_a (XVECEXP (PATTERN (INSN), \ -+ 0, \ -+ XVECLEN (PATTERN (INSN), 0) - 1)) \ -+ : (INSN)) -+ -+/* Execute the following loop body with SUBINSN set to each instruction -+ between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */ -+#define FOR_EACH_SUBINSN(SUBINSN, INSN) \ -+ for ((SUBINSN) = SEQ_BEGIN (INSN); \ -+ (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \ -+ (SUBINSN) = NEXT_INSN (SUBINSN)) -+ -+/* True if bit BIT is set in VALUE. */ -+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0) -+ -+/* Classifies an address. -+ -+ ADDRESS_REG -+ A natural register + offset address. The register satisfies -+ loongarch_valid_base_register_p and the offset is a const_arith_operand. -+ -+ ADDRESS_CONST_INT -+ A signed 16-bit constant address. -+ -+ ADDRESS_SYMBOLIC: -+ A constant symbolic address. */ -+enum loongarch_address_type { -+ ADDRESS_REG, -+ ADDRESS_CONST_INT, -+ ADDRESS_SYMBOLIC -+}; -+ -+ -+/* A class used to control a comdat-style stub that we output in each -+ translation unit that needs it. */ -+class loongarch_one_only_stub { -+public: -+ virtual ~loongarch_one_only_stub () {} -+ -+ /* Return the name of the stub. */ -+ virtual const char *get_name () = 0; -+ -+ /* Output the body of the function to asm_out_file. */ -+ virtual void output_body () = 0; -+}; -+ -+/* Tuning information that is automatically derived from other sources -+ (such as the scheduler). */ -+static struct { -+ /* The architecture and tuning settings that this structure describes. */ -+ enum processor arch; -+ enum processor tune; -+ -+ /* True if the structure has been initialized. */ -+ bool initialized_p; -+ -+} loongarch_tuning_info; -+ -+/* Information about an address described by loongarch_address_type. -+ -+ ADDRESS_CONST_INT -+ No fields are used. -+ -+ ADDRESS_REG -+ REG is the base register and OFFSET is the constant offset. -+ -+ ADDRESS_SYMBOLIC -+ SYMBOL_TYPE is the type of symbol that the address references. */ -+struct loongarch_address_info { -+ enum loongarch_address_type type; -+ rtx reg; -+ rtx offset; -+ enum loongarch_symbol_type symbol_type; -+}; -+ -+/* Method to load immediate number fields. -+ -+ METHOD_NORMAL: -+ load immediate number 0-31 bit -+ -+ METHOD_LU32I: -+ load imm 32-51 bit -+ -+ METHOD_LU52I: -+ load imm 52-63 bit -+ -+ METHOD_INSV: -+ imm 0xfff00000fffffxxx -+ */ -+enum loongarch_load_imm_method { -+ METHOD_NORMAL, -+ METHOD_LU32I, -+ METHOD_LU52I, -+ METHOD_INSV -+}; -+ -+/* One stage in a constant building sequence. 
These sequences have
-+ the form:
-+
-+ A = VALUE[0]
-+ A = A CODE[1] VALUE[1]
-+ A = A CODE[2] VALUE[2]
-+ ...
-+
-+ where A is an accumulator, each CODE[i] is a binary rtl operation
-+ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
-+struct loongarch_integer_op {
-+ enum rtx_code code;
-+ unsigned HOST_WIDE_INT value;
-+ enum loongarch_load_imm_method method;
-+};
-+
-+/* The largest number of operations needed to load an integer constant.
-+ The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
-+ When the lowest bit is clear, we can try, but reject a sequence with
-+ an extra SLL at the end. */
-+#define LARCH_MAX_INTEGER_OPS 9
-+
-+/* Costs of various operations on the different architectures. */
-+
-+struct loongarch_rtx_cost_data
-+{
-+ unsigned short fp_add;
-+ unsigned short fp_mult_sf;
-+ unsigned short fp_mult_df;
-+ unsigned short fp_div_sf;
-+ unsigned short fp_div_df;
-+ unsigned short int_mult_si;
-+ unsigned short int_mult_di;
-+ unsigned short int_div_si;
-+ unsigned short int_div_di;
-+ unsigned short branch_cost;
-+ unsigned short memory_latency;
-+};
-+
-+/* Global variables for machine-dependent things. */
-+
-+/* The -G setting, or the configuration's default small-data limit if
-+ no -G option is given. */
-+static unsigned int loongarch_small_data_threshold;
-+
-+/* The number of file directives written by loongarch_output_filename. */
-+int num_source_filenames;
-+
-+/* The name that appeared in the last .file directive written by
-+ loongarch_output_filename, or "" if loongarch_output_filename hasn't
-+ written anything yet. */
-+const char *current_function_file = "";
-+
-+/* Arrays that map GCC register numbers to debugger register numbers. */
-+int loongarch_dbx_regno[FIRST_PSEUDO_REGISTER];
-+int loongarch_dwarf_regno[FIRST_PSEUDO_REGISTER];
-+
-+/* The current instruction-set architecture. */
-+enum processor loongarch_arch;
-+const struct loongarch_cpu_info *loongarch_arch_info;
-+
-+/* The processor that we should tune the code for. */
-+enum processor loongarch_tune;
-+const struct loongarch_cpu_info *loongarch_tune_info;
-+
-+/* The ISA level associated with loongarch_arch. */
-+int loongarch_isa;
-+
-+/* The ISA revision level. */
-+int loongarch_isa_rev;
-+
-+/* Which cost information to use. */
-+static const struct loongarch_rtx_cost_data *loongarch_cost;
-+
-+/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
-+static bool loongarch_hard_regno_mode_ok_p[MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
-+
-+/* Index C is true if character C is a valid PRINT_OPERAND punctuation
-+ character. */
-+static bool loongarch_print_operand_punct[256];
-+
-+static GTY (()) int loongarch_output_filename_first_time = 1;
-+
-+/* loongarch_use_pcrel_pool_p[X] is true if symbols of type X should be
-+ forced into a PC-relative constant pool. */
-+bool loongarch_use_pcrel_pool_p[NUM_SYMBOL_TYPES];
-+
-+/* Cached value of can_issue_more. This is cached in loongarch_variable_issue hook
-+ and returned from loongarch_sched_reorder2. */
-+static int cached_can_issue_more;
-+
-+/* Index R is the smallest register class that contains register R.
*/ -+const enum reg_class loongarch_regno_to_class[FIRST_PSEUDO_REGISTER] = { -+ GR_REGS, GR_REGS, GR_REGS, GR_REGS, -+ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -+ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -+ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, -+ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, -+ SIBCALL_REGS, GR_REGS, GR_REGS, JALR_REGS, -+ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -+ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -+ -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ ST_REGS, ST_REGS, ST_REGS, ST_REGS, -+ ST_REGS, ST_REGS, ST_REGS, ST_REGS, -+ FRAME_REGS, FRAME_REGS -+}; -+ -+static tree loongarch_handle_interrupt_attr (tree *, tree, tree, int, bool *); -+static tree loongarch_handle_use_shadow_register_set_attr (tree *, tree, tree, int, -+ bool *); -+ -+/* The value of TARGET_ATTRIBUTE_TABLE. */ -+static const struct attribute_spec loongarch_attribute_table[] = { -+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, -+ affects_type_identity, handler, exclude } */ -+ { "long_call", 0, 0, false, true, true, false, NULL, NULL }, -+ { "short_call", 0, 0, false, true, true, false, NULL, NULL }, -+ { "far", 0, 0, false, true, true, false, NULL, NULL }, -+ { "near", 0, 0, false, true, true, false, NULL, NULL }, -+ { "nocompression", 0, 0, true, false, false, false, NULL, NULL }, -+ /* Allow functions to be specified as interrupt handlers */ -+ { "interrupt", 0, 1, false, true, true, false, loongarch_handle_interrupt_attr, -+ NULL }, -+ { "use_shadow_register_set", 0, 1, false, true, true, false, -+ loongarch_handle_use_shadow_register_set_attr, NULL }, -+ { "keep_interrupts_masked", 0, 0, false, true, true, false, NULL, NULL }, -+ { "use_debug_exception_return", 0, 0, false, true, true, false, NULL, NULL }, -+ { NULL, 0, 0, false, false, false, false, NULL, NULL } -+}; -+ -+/* A table describing all the processors GCC knows about; see -+ loongarch-cpus.def for details. */ -+static const struct loongarch_cpu_info loongarch_cpu_info_table[] = { -+#define LARCH_CPU(NAME, CPU, ISA, FLAGS) \ -+ { NAME, CPU, ISA, FLAGS }, -+#include "loongarch-cpus.def" -+#undef LARCH_CPU -+}; -+ -+/* Default costs. If these are used for a processor we should look -+ up the actual costs. */ -+#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \ -+ COSTS_N_INSNS (7), /* fp_mult_sf */ \ -+ COSTS_N_INSNS (8), /* fp_mult_df */ \ -+ COSTS_N_INSNS (23), /* fp_div_sf */ \ -+ COSTS_N_INSNS (36), /* fp_div_df */ \ -+ COSTS_N_INSNS (10), /* int_mult_si */ \ -+ COSTS_N_INSNS (10), /* int_mult_di */ \ -+ COSTS_N_INSNS (69), /* int_div_si */ \ -+ COSTS_N_INSNS (69), /* int_div_di */ \ -+ 2, /* branch_cost */ \ -+ 4 /* memory_latency */ -+ -+/* Floating-point costs for processors without an FPU. Just assume that -+ all floating-point libcalls are very expensive. */ -+#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \ -+ COSTS_N_INSNS (256), /* fp_mult_sf */ \ -+ COSTS_N_INSNS (256), /* fp_mult_df */ \ -+ COSTS_N_INSNS (256), /* fp_div_sf */ \ -+ COSTS_N_INSNS (256) /* fp_div_df */ -+ -+/* Costs to use when optimizing for size. 
*/ -+static const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size = { -+ COSTS_N_INSNS (1), /* fp_add */ -+ COSTS_N_INSNS (1), /* fp_mult_sf */ -+ COSTS_N_INSNS (1), /* fp_mult_df */ -+ COSTS_N_INSNS (1), /* fp_div_sf */ -+ COSTS_N_INSNS (1), /* fp_div_df */ -+ COSTS_N_INSNS (1), /* int_mult_si */ -+ COSTS_N_INSNS (1), /* int_mult_di */ -+ COSTS_N_INSNS (1), /* int_div_si */ -+ COSTS_N_INSNS (1), /* int_div_di */ -+ 2, /* branch_cost */ -+ 4 /* memory_latency */ -+}; -+ -+/* Costs to use when optimizing for speed, indexed by processor. */ -+static const struct loongarch_rtx_cost_data -+ loongarch_rtx_cost_data[NUM_PROCESSOR_VALUES] = { -+ { /* loongarch */ -+ DEFAULT_COSTS -+ }, -+ { /* loongarch64 */ -+ DEFAULT_COSTS -+ }, -+ { /* la464 */ -+ DEFAULT_COSTS -+ } -+}; -+ -+/* Information about a single argument. */ -+struct loongarch_arg_info { -+ /* True if the argument is at least partially passed on the stack. */ -+ bool stack_p; -+ -+ /* The number of integer registers allocated to this argument. */ -+ unsigned int num_gprs; -+ -+ /* The offset of the first register used, provided num_gprs is nonzero. -+ If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS. */ -+ unsigned int gpr_offset; -+ -+ /* The number of floating-point registers allocated to this argument. */ -+ unsigned int num_fprs; -+ -+ /* The offset of the first register used, provided num_fprs is nonzero. */ -+ unsigned int fpr_offset; -+}; -+ -+ -+/* Emit a move from SRC to DEST. Assume that the move expanders can -+ handle all moves if !can_create_pseudo_p (). The distinction is -+ important because, unlike emit_move_insn, the move expanders know -+ how to force Pmode objects into the constant pool even when the -+ constant pool address is not itself legitimate. */ -+ -+rtx -+loongarch_emit_move (rtx dest, rtx src) -+{ -+ return (can_create_pseudo_p () -+ ? emit_move_insn (dest, src) -+ : emit_move_insn_1 (dest, src)); -+} -+ -+/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at -+ least PARM_BOUNDARY bits of alignment, but will be given anything up -+ to PREFERRED_STACK_BOUNDARY bits if the type requires it. */ -+ -+static unsigned int -+loongarch_function_arg_boundary (machine_mode mode, const_tree type) -+{ -+ unsigned int alignment; -+ -+ /* Use natural alignment if the type is not aggregate data. */ -+ if (type && !AGGREGATE_TYPE_P (type)) -+ alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type)); -+ else -+ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode); -+ -+ return MIN (PREFERRED_STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment)); -+} -+ -+/* If MODE represents an argument that can be passed or returned in -+ floating-point registers, return the number of registers, else 0. */ -+ -+static unsigned -+loongarch_pass_mode_in_fpr_p (machine_mode mode) -+{ -+ if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG) -+ { -+ if (GET_MODE_CLASS (mode) == MODE_FLOAT) -+ return 1; -+ -+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) -+ return 2; -+ } -+ -+ return 0; -+} -+ -+typedef struct { -+ const_tree type; -+ HOST_WIDE_INT offset; -+} loongarch_aggregate_field; -+ -+/* Identify subfields of aggregates that are candidates for passing in -+ floating-point registers. */ -+ -+static int -+loongarch_flatten_aggregate_field (const_tree type, -+ loongarch_aggregate_field fields[2], -+ int n, HOST_WIDE_INT offset, -+ const int use_vecarg_p) -+{ -+ switch (TREE_CODE (type)) -+ { -+ case RECORD_TYPE: -+ /* Can't handle incomplete types nor sizes that are not fixed. 
*/
-+ if (!COMPLETE_TYPE_P (type)
-+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
-+ || !tree_fits_uhwi_p (TYPE_SIZE (type)))
-+ return -1;
-+
-+ for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
-+ if (TREE_CODE (f) == FIELD_DECL)
-+ {
-+ if (!TYPE_P (TREE_TYPE (f)))
-+ return -1;
-+
-+ HOST_WIDE_INT pos = offset + int_byte_position (f);
-+ n = loongarch_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos, 0);
-+ if (n < 0)
-+ return -1;
-+ }
-+ return n;
-+
-+ case ARRAY_TYPE:
-+ {
-+ HOST_WIDE_INT n_elts;
-+ loongarch_aggregate_field subfields[2];
-+ tree index = TYPE_DOMAIN (type);
-+ tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
-+ int n_subfields = loongarch_flatten_aggregate_field (TREE_TYPE (type),
-+ subfields, 0, offset, 0);
-+
-+ /* Can't handle incomplete types nor sizes that are not fixed. */
-+ if (n_subfields <= 0
-+ || !COMPLETE_TYPE_P (type)
-+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
-+ || !index
-+ || !TYPE_MAX_VALUE (index)
-+ || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
-+ || !TYPE_MIN_VALUE (index)
-+ || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
-+ || !tree_fits_uhwi_p (elt_size))
-+ return -1;
-+
-+ n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
-+ - tree_to_uhwi (TYPE_MIN_VALUE (index));
-+ gcc_assert (n_elts >= 0);
-+
-+ for (HOST_WIDE_INT i = 0; i < n_elts; i++)
-+ for (int j = 0; j < n_subfields; j++)
-+ {
-+ if (n >= 2)
-+ return -1;
-+
-+ fields[n] = subfields[j];
-+ fields[n++].offset += i * tree_to_uhwi (elt_size);
-+ }
-+
-+ return n;
-+ }
-+
-+ case COMPLEX_TYPE:
-+ {
-+ /* A complex type consumes two fields, so n must be 0. */
-+ if (n != 0)
-+ return -1;
-+
-+ HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
-+
-+ if (elt_size <= UNITS_PER_FP_ARG)
-+ {
-+ fields[0].type = TREE_TYPE (type);
-+ fields[0].offset = offset;
-+ fields[1].type = TREE_TYPE (type);
-+ fields[1].offset = offset + elt_size;
-+
-+ return 2;
-+ }
-+
-+ return -1;
-+ }
-+
-+ default:
-+ if (n < 2
-+ && ((SCALAR_FLOAT_TYPE_P (type)
-+ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
-+ || (INTEGRAL_TYPE_P (type)
-+ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD))
-+ || (use_vecarg_p && VECTOR_TYPE_P (type)
-+ && ((ISA_HAS_LSX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LSX_REG)
-+ || (ISA_HAS_LASX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LASX_REG))))
-+ {
-+ fields[n].type = type;
-+ fields[n].offset = offset;
-+ return n + 1;
-+ }
-+ else
-+ return -1;
-+ }
-+}
-+
-+/* Identify candidate aggregates for passing in floating-point registers.
-+ Candidates have at most two fields after flattening. */
-+
-+static int
-+loongarch_flatten_aggregate_argument (const_tree type,
-+ loongarch_aggregate_field fields[2],
-+ const int use_vecarg_p)
-+{
-+ if (!type || !((TREE_CODE (type) == RECORD_TYPE)
-+ || (use_vecarg_p && TREE_CODE (type) == VECTOR_TYPE)))
-+ return -1;
-+
-+ return loongarch_flatten_aggregate_field (type, fields, 0, 0, use_vecarg_p);
-+}
-+
-+/* See whether TYPE is a record whose fields should be returned in one or
-+ two floating-point registers. If so, populate FIELDS accordingly. */
-+
-+static unsigned
-+loongarch_pass_aggregate_in_fpr_pair_p (const_tree type,
-+ loongarch_aggregate_field fields[2],
-+ const int use_vecarg_p)
-+{
-+ int n = loongarch_flatten_aggregate_argument (type, fields, use_vecarg_p);
-+
-+ for (int i = 0; i < n; i++)
-+ if (!SCALAR_FLOAT_TYPE_P (fields[i].type) && !VECTOR_TYPE_P (fields[i].type))
-+ return 0;
-+
-+ return n > 0 ?
n : 0;
-+}
-+
-+/* See whether TYPE is a record whose fields should be returned in one
-+ floating-point register and one integer register. If so, populate
-+ FIELDS accordingly. */
-+
-+static bool
-+loongarch_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
-+ loongarch_aggregate_field fields[2])
-+{
-+ unsigned num_int = 0, num_float = 0;
-+ int n = loongarch_flatten_aggregate_argument (type, fields, 0);
-+
-+ for (int i = 0; i < n; i++)
-+ {
-+ num_float += SCALAR_FLOAT_TYPE_P (fields[i].type);
-+ num_int += INTEGRAL_TYPE_P (fields[i].type);
-+ }
-+
-+ return num_int == 1 && num_float == 1;
-+}
-+
-+/* Return the representation of an argument passed or returned in an FPR
-+ when the value has mode VALUE_MODE and the type has TYPE_MODE. The
-+ two modes may be different for structures like:
-+
-+ struct __attribute__((packed)) foo { float f; }
-+
-+ where the SFmode value "f" is passed in REGNO but the struct itself
-+ has mode BLKmode. */
-+
-+static rtx
-+loongarch_pass_fpr_single (machine_mode type_mode, unsigned regno,
-+ machine_mode value_mode)
-+{
-+ rtx x = gen_rtx_REG (value_mode, regno);
-+
-+ if (type_mode != value_mode)
-+ {
-+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
-+ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
-+ }
-+ return x;
-+}
-+
-+/* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
-+ MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
-+ byte offset for the first value, likewise MODE2 and OFFSET2 for the
-+ second value. */
-+
-+static rtx
-+loongarch_pass_fpr_pair (machine_mode mode, unsigned regno1,
-+ machine_mode mode1, HOST_WIDE_INT offset1,
-+ unsigned regno2, machine_mode mode2,
-+ HOST_WIDE_INT offset2)
-+{
-+ return gen_rtx_PARALLEL
-+ (mode,
-+ gen_rtvec (2,
-+ gen_rtx_EXPR_LIST (VOIDmode,
-+ gen_rtx_REG (mode1, regno1),
-+ GEN_INT (offset1)),
-+ gen_rtx_EXPR_LIST (VOIDmode,
-+ gen_rtx_REG (mode2, regno2),
-+ GEN_INT (offset2))));
-+}
-+
-+/* Fill INFO with information about a single argument, and return an
-+ RTL pattern to pass or return the argument. CUM is the cumulative
-+ state for earlier arguments. MODE is the mode of this argument and
-+ TYPE is its type (if known). NAMED is true if this is a named
-+ (fixed) argument rather than a variable one. RETURN_P is true if
-+ returning the argument, or false if passing the argument. */
-+
-+static rtx
-+loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS *cum,
-+ machine_mode mode, const_tree type, bool named,
-+ bool return_p)
-+{
-+ unsigned num_bytes, num_words;
-+ unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
-+ unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
-+ unsigned alignment = loongarch_function_arg_boundary (mode, type);
-+
-+ int use_vecarg_p = TARGET_VECARG
-+ && (LSX_SUPPORTED_MODE_P (mode)
-+ || LASX_SUPPORTED_MODE_P (mode));
-+
-+ memset (info, 0, sizeof (*info));
-+ info->gpr_offset = cum->num_gprs;
-+ info->fpr_offset = cum->num_fprs;
-+
-+ if (named)
-+ {
-+ loongarch_aggregate_field fields[2];
-+ unsigned fregno = fpr_base + info->fpr_offset;
-+ unsigned gregno = gpr_base + info->gpr_offset;
-+
-+ /* Pass one- or two-element floating-point aggregates in FPRs.
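-+ For example, struct { float x; float y; } flattens to two SFmode
-+ fields and can travel in two FPRs, while struct { double d; }
-+ flattens to a single DFmode field and takes one.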
*/ -+ if ((info->num_fprs = loongarch_pass_aggregate_in_fpr_pair_p (type, fields, use_vecarg_p)) -+ && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS) -+ switch (info->num_fprs) -+ { -+ case 1: -+ return loongarch_pass_fpr_single (mode, fregno, -+ TYPE_MODE (fields[0].type)); -+ -+ case 2: -+ return loongarch_pass_fpr_pair (mode, fregno, -+ TYPE_MODE (fields[0].type), -+ fields[0].offset, -+ fregno + 1, -+ TYPE_MODE (fields[1].type), -+ fields[1].offset); -+ -+ default: -+ gcc_unreachable (); -+ } -+ -+ /* Pass real and complex floating-point numbers in FPRs. */ -+ if ((info->num_fprs = loongarch_pass_mode_in_fpr_p (mode)) -+ && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS) -+ switch (GET_MODE_CLASS (mode)) -+ { -+ case MODE_FLOAT: -+ return gen_rtx_REG (mode, fregno); -+ -+ case MODE_COMPLEX_FLOAT: -+ return loongarch_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0, -+ fregno + 1, GET_MODE_INNER (mode), -+ GET_MODE_UNIT_SIZE (mode)); -+ -+ default: -+ gcc_unreachable (); -+ } -+ -+ /* Pass structs with one float and one integer in an FPR and a GPR. */ -+ if (loongarch_pass_aggregate_in_fpr_and_gpr_p (type, fields) -+ && info->gpr_offset < MAX_ARGS_IN_REGISTERS -+ && info->fpr_offset < MAX_ARGS_IN_REGISTERS) -+ { -+ info->num_gprs = 1; -+ info->num_fprs = 1; -+ -+ if (!SCALAR_FLOAT_TYPE_P (fields[0].type)) -+ std::swap (fregno, gregno); -+ -+ return loongarch_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type), -+ fields[0].offset, -+ gregno, TYPE_MODE (fields[1].type), -+ fields[1].offset); -+ } -+ } -+ -+ /* Work out the size of the argument. */ -+ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode); -+ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; -+ -+ /* Doubleword-aligned varargs start on an even register boundary. */ -+ if (!named && num_bytes != 0 && alignment > BITS_PER_WORD) -+ info->gpr_offset += info->gpr_offset & 1; -+ -+ /* Partition the argument between registers and stack. */ -+ info->num_fprs = 0; -+ info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset); -+ info->stack_p = (num_words - info->num_gprs) != 0; -+ -+ if (info->num_gprs || return_p) -+ return gen_rtx_REG (mode, gpr_base + info->gpr_offset); -+ -+ return NULL_RTX; -+} -+ -+/* Implement TARGET_FUNCTION_ARG. */ -+ -+static rtx -+loongarch_function_arg (cumulative_args_t cum_v, machine_mode mode, -+ const_tree type, bool named) -+{ -+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); -+ struct loongarch_arg_info info; -+ -+ if (mode == VOIDmode) -+ return NULL; -+ -+ return loongarch_get_arg_info (&info, cum, mode, type, named, false); -+} -+ -+/* Implement TARGET_FUNCTION_ARG_ADVANCE. */ -+ -+static void -+loongarch_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, -+ const_tree type, bool named) -+{ -+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); -+ struct loongarch_arg_info info; -+ -+ loongarch_get_arg_info (&info, cum, mode, type, named, false); -+ -+ /* Advance the register count. This has the effect of setting -+ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned -+ argument required us to skip the final GPR and pass the whole -+ argument on the stack. */ -+ cum->num_fprs = info.fpr_offset + info.num_fprs; -+ cum->num_gprs = info.gpr_offset + info.num_gprs; -+} -+ -+/* Implement TARGET_ARG_PARTIAL_BYTES. 
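-+ This hook reports how many bytes of an argument travel in registers
-+ when the remainder of that argument has to be passed on the stack.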
*/
-+
-+static int
-+loongarch_arg_partial_bytes (cumulative_args_t cum,
-+ machine_mode mode, tree type, bool named)
-+{
-+ struct loongarch_arg_info arg;
-+
-+ loongarch_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false);
-+ return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
-+}
-+
-+/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
-+ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
-+ VALTYPE is null and MODE is the mode of the return value. */
-+
-+rtx
-+loongarch_function_value (const_tree type, const_tree func, machine_mode mode)
-+{
-+ struct loongarch_arg_info info;
-+ CUMULATIVE_ARGS args;
-+
-+ if (type)
-+ {
-+ int unsigned_p = TYPE_UNSIGNED (type);
-+
-+ mode = TYPE_MODE (type);
-+
-+ /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
-+ return values, promote the mode here too. */
-+ mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
-+ }
-+
-+ memset (&args, 0, sizeof args);
-+ return loongarch_get_arg_info (&info, &args, mode, type, true, true);
-+}
-+
-+/* Implement TARGET_PASS_BY_REFERENCE. */
-+
-+static bool
-+loongarch_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
-+ const_tree type, bool named)
-+{
-+ HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
-+ struct loongarch_arg_info info;
-+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
-+
-+ /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
-+ never pass variadic arguments in floating-point registers, so we can
-+ avoid the call to loongarch_get_arg_info in this case. */
-+ if (cum != NULL)
-+ {
-+ /* Don't pass by reference if we can use a floating-point register. */
-+ loongarch_get_arg_info (&info, cum, mode, type, named, false);
-+ if (info.num_fprs)
-+ return false;
-+ }
-+
-+ /* Pass by reference if the data do not fit in two integer registers. */
-+ return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
-+}
-+
-+/* Implement TARGET_RETURN_IN_MEMORY. */
-+
-+static bool
-+loongarch_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
-+{
-+ CUMULATIVE_ARGS args;
-+ cumulative_args_t cum = pack_cumulative_args (&args);
-+
-+ /* The rules for returning in memory are the same as for passing the
-+ first named argument by reference. */
-+ memset (&args, 0, sizeof args);
-+ return loongarch_pass_by_reference (cum, TYPE_MODE (type), type, true);
-+}
-+
-+/* Implement TARGET_SETUP_INCOMING_VARARGS. */
-+
-+static void
-+loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
-+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
-+ int no_rtl)
-+{
-+ CUMULATIVE_ARGS local_cum;
-+ int gp_saved;
-+
-+ /* The caller has advanced CUM up to, but not beyond, the last named
-+ argument. Advance a local copy of CUM past the last "real" named
-+ argument, to find out how many registers are left over. */
-+ local_cum = *get_cumulative_args (cum);
-+ loongarch_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
-+
-+ /* Find out how many registers we need to save.
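-+ These are the argument GPRs after the last named argument; they are
-+ dumped just below the incoming stack arguments so that va_arg can
-+ walk the whole argument list contiguously in memory.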
*/
-+ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
-+
-+ if (!no_rtl && gp_saved > 0)
-+ {
-+ rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
-+ REG_PARM_STACK_SPACE (cfun->decl)
-+ - gp_saved * UNITS_PER_WORD);
-+ rtx mem = gen_frame_mem (BLKmode, ptr);
-+ set_mem_alias_set (mem, get_varargs_alias_set ());
-+
-+ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
-+ mem, gp_saved);
-+ }
-+ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
-+ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
-+}
-+
-+/* Make the last instruction frame-related and note that it performs
-+ the operation described by FRAME_PATTERN. */
-+
-+static void
-+loongarch_set_frame_expr (rtx frame_pattern)
-+{
-+ rtx insn;
-+
-+ insn = get_last_insn ();
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-+ frame_pattern,
-+ REG_NOTES (insn));
-+}
-+
-+/* Return a frame-related rtx that stores REG at MEM.
-+ REG must be a single register. */
-+
-+static rtx
-+loongarch_frame_set (rtx mem, rtx reg)
-+{
-+ rtx set = gen_rtx_SET (mem, reg);
-+ RTX_FRAME_RELATED_P (set) = 1;
-+ return set;
-+}
-+
-+/* Return true if the current function must save register REGNO. */
-+
-+static bool
-+loongarch_save_reg_p (unsigned int regno)
-+{
-+ bool call_saved = !global_regs[regno] && !call_used_regs[regno];
-+ bool might_clobber = crtl->saves_all_registers
-+ || df_regs_ever_live_p (regno);
-+
-+ if (call_saved && might_clobber)
-+ return true;
-+
-+ if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
-+ return true;
-+
-+ if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
-+ return true;
-+
-+ return false;
-+}
-+
-+/* Determine whether to call GPR save/restore routines. */
-+static bool
-+loongarch_use_save_libcall (const struct loongarch_frame_info *frame)
-+{
-+ // FIXME: if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
-+ return false;
-+
-+}
-+
-+/* Determine which GPR save/restore routine to call. */
-+
-+static unsigned
-+loongarch_save_libcall_count (unsigned mask)
-+{
-+ for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
-+ if (BITSET_P (mask, n))
-+ return CALLEE_SAVED_REG_NUMBER (n) + 1;
-+ abort ();
-+}
-+
-+/* Populate the current function's loongarch_frame_info structure.
-+
-+ LARCH stack frames grow downward. High addresses are at the top.
-+
-+ +-------------------------------+
-+ | |
-+ | incoming stack arguments |
-+ | |
-+ +-------------------------------+ <-- incoming stack pointer
-+ | |
-+ | callee-allocated save area |
-+ | for arguments that are |
-+ | split between registers and |
-+ | the stack |
-+ | |
-+ +-------------------------------+ <-- arg_pointer_rtx
-+ | |
-+ | callee-allocated save area |
-+ | for register varargs |
-+ | |
-+ +-------------------------------+ <-- hard_frame_pointer_rtx;
-+ | | stack_pointer_rtx + gp_sp_offset
-+ | GPR save area | + UNITS_PER_WORD
-+ | |
-+ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
-+ | | + UNITS_PER_HWVALUE
-+ | FPR save area |
-+ | |
-+ +-------------------------------+ <-- frame_pointer_rtx (virtual)
-+ | |
-+ | local variables |
-+ | |
-+ P +-------------------------------+
-+ | |
-+ | outgoing stack arguments |
-+ | |
-+ +-------------------------------+ <-- stack_pointer_rtx
-+
-+ Dynamic stack allocations such as alloca insert data at point P.
-+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
-+ hard_frame_pointer_rtx unchanged.
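-+ (The epilogue therefore recomputes stack_pointer_rtx from
-+ hard_frame_pointer_rtx when the current function calls alloca.)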
*/
-+
-+static void
-+loongarch_compute_frame_info (void)
-+{
-+ struct loongarch_frame_info *frame;
-+ HOST_WIDE_INT offset;
-+ unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
-+
-+ frame = &cfun->machine->frame;
-+ memset (frame, 0, sizeof (*frame));
-+
-+ /* Find out which GPRs we need to save. */
-+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
-+ if (loongarch_save_reg_p (regno))
-+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
-+
-+ /* If this function calls eh_return, we must also save and restore the
-+ EH data registers. */
-+ if (crtl->calls_eh_return)
-+ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
-+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
-+
-+ /* Find out which FPRs we need to save. This loop must iterate over
-+ the same space as its companion in loongarch_for_each_saved_reg. */
-+ if (TARGET_HARD_FLOAT)
-+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
-+ if (loongarch_save_reg_p (regno))
-+ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
-+
-+ /* At the bottom of the frame are any outgoing stack arguments. */
-+ offset = LARCH_STACK_ALIGN (crtl->outgoing_args_size);
-+ /* Next are local stack variables. */
-+ offset += LARCH_STACK_ALIGN (get_frame_size ());
-+ /* The virtual frame pointer points above the local variables. */
-+ frame->frame_pointer_offset = offset;
-+ /* Next are the callee-saved FPRs. */
-+ if (frame->fmask)
-+ offset += LARCH_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
-+ frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
-+ /* Next are the callee-saved GPRs. */
-+ if (frame->mask)
-+ {
-+ unsigned x_save_size = LARCH_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
-+ unsigned num_save_restore = 1 + loongarch_save_libcall_count (frame->mask);
-+
-+ /* Only use save/restore routines if they don't alter the stack size. */
-+ if (LARCH_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
-+ frame->save_libcall_adjustment = x_save_size;
-+
-+ offset += x_save_size;
-+ }
-+ frame->gp_sp_offset = offset - UNITS_PER_WORD;
-+ /* The hard frame pointer points above the callee-saved GPRs. */
-+ frame->hard_frame_pointer_offset = offset;
-+ /* Above the hard frame pointer is the callee-allocated varargs save area. */
-+ offset += LARCH_STACK_ALIGN (cfun->machine->varargs_size);
-+ /* Next is the callee-allocated area for pretend stack arguments. */
-+ offset += LARCH_STACK_ALIGN (crtl->args.pretend_args_size);
-+ /* Arg pointer must be below pretend args, but must be above alignment
-+ padding. */
-+ frame->arg_pointer_offset = offset - crtl->args.pretend_args_size;
-+ frame->total_size = offset;
-+ /* Next are the incoming stack pointer and any incoming arguments. */
-+
-+ /* Only use save/restore routines when the GPRs are atop the frame. */
-+ if (frame->hard_frame_pointer_offset != frame->total_size)
-+ frame->save_libcall_adjustment = 0;
-+}
-+
-+/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
-+ or argument pointer. TO is either the stack pointer or hard frame
-+ pointer. */
-+
-+HOST_WIDE_INT
-+loongarch_initial_elimination_offset (int from, int to)
-+{
-+ HOST_WIDE_INT src, dest;
-+
-+ loongarch_compute_frame_info ();
-+
-+ if (to == HARD_FRAME_POINTER_REGNUM)
-+ dest = cfun->machine->frame.hard_frame_pointer_offset;
-+ else if (to == STACK_POINTER_REGNUM)
-+ dest = 0; /* The stack pointer is the base of all offsets, hence 0.
*/ -+ else -+ gcc_unreachable (); -+ -+ if (from == FRAME_POINTER_REGNUM) -+ src = cfun->machine->frame.frame_pointer_offset; -+ else if (from == ARG_POINTER_REGNUM) -+ src = cfun->machine->frame.arg_pointer_offset; -+ else -+ gcc_unreachable (); -+ -+ return src - dest; -+} -+ -+/* A function to save or store a register. The first argument is the -+ register and the second is the stack slot. */ -+typedef void (*loongarch_save_restore_fn) (rtx, rtx); -+ -+/* Use FN to save or restore register REGNO. MODE is the register's -+ mode and OFFSET is the offset of its save slot from the current -+ stack pointer. */ -+ -+static void -+loongarch_save_restore_reg (machine_mode mode, int regno, -+ HOST_WIDE_INT offset, loongarch_save_restore_fn fn) -+{ -+ rtx mem; -+ -+ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset)); -+ fn (gen_rtx_REG (mode, regno), mem); -+} -+ -+/* Call FN for each register that is saved by the current function. -+ SP_OFFSET is the offset of the current stack pointer from the start -+ of the frame. */ -+ -+static void -+loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn fn) -+{ -+ HOST_WIDE_INT offset; -+ -+ /* Save the link register and s-registers. */ -+ offset = cfun->machine->frame.gp_sp_offset - sp_offset; -+ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -+ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) -+ { -+ loongarch_save_restore_reg (word_mode, regno, offset, fn); -+ offset -= UNITS_PER_WORD; -+ } -+ -+ /* This loop must iterate over the same space as its companion in -+ loongarch_compute_frame_info. */ -+ offset = cfun->machine->frame.fp_sp_offset - sp_offset; -+ for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) -+ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST)) -+ { -+ machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode; -+ -+ loongarch_save_restore_reg (mode, regno, offset, fn); -+ offset -= GET_MODE_SIZE (mode); -+ } -+} -+ -+/* Save register REG to MEM. Make the instruction frame-related. */ -+ -+static void -+loongarch_save_reg (rtx reg, rtx mem) -+{ -+ loongarch_emit_move (mem, reg); -+ loongarch_set_frame_expr (loongarch_frame_set (mem, reg)); -+} -+ -+/* Restore register REG from MEM. */ -+ -+static void -+loongarch_restore_reg (rtx reg, rtx mem) -+{ -+ rtx insn = loongarch_emit_move (reg, mem); -+ rtx dwarf = NULL_RTX; -+ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf); -+ REG_NOTES (insn) = dwarf; -+ -+ RTX_FRAME_RELATED_P (insn) = 1; -+} -+ -+/* Return the code to invoke the GPR save routine. */ -+ -+const char * -+loongarch_output_gpr_save (unsigned mask) -+{ -+ static char s[32]; -+ unsigned n = loongarch_save_libcall_count (mask); -+ -+ ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__loongarch_save_%u", n); -+ gcc_assert ((size_t) bytes < sizeof (s)); -+ -+ return s; -+} -+ -+#define IMM_BITS 12 -+ -+#define IMM_REACH (1LL << IMM_BITS) -+ -+/* For stack frames that can't be allocated with a single ADDI instruction, -+ compute the best value to initially allocate. It must at a minimum -+ allocate enough space to spill the callee-saved registers. If TARGET_RVC, -+ try to pick a value that will allow compression of the register saves -+ without adding extra instructions. 
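-+
-+ For example, a hypothetical 9216-byte frame does not fit in a signed
-+ 12-bit immediate, so the first step can cover 9216 % 4096 = 1024
-+ bytes (provided the callee-saved registers fit within them), leaving
-+ 8192 bytes for a single LU12I + ADD pair to handle.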
*/ -+ -+static HOST_WIDE_INT -+loongarch_first_stack_step (struct loongarch_frame_info *frame) -+{ -+ if (SMALL_OPERAND (frame->total_size)) -+ return frame->total_size; -+ -+ HOST_WIDE_INT min_first_step = -+ LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset); -+ HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8; -+ HOST_WIDE_INT min_second_step = frame->total_size - max_first_step; -+ gcc_assert (min_first_step <= max_first_step); -+ -+ /* As an optimization, use the least-significant bits of the total frame -+ size, so that the second adjustment step is just LUI + ADD. */ -+ if (!SMALL_OPERAND (min_second_step) -+ && frame->total_size % IMM_REACH < IMM_REACH / 2 -+ && frame->total_size % IMM_REACH >= min_first_step) -+ return frame->total_size % IMM_REACH; -+ -+ return max_first_step; -+} -+ -+static rtx -+loongarch_adjust_libcall_cfi_prologue () -+{ -+ rtx dwarf = NULL_RTX; -+ rtx adjust_sp_rtx, reg, mem, insn; -+ int saved_size = cfun->machine->frame.save_libcall_adjustment; -+ int offset; -+ -+ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -+ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) -+ { -+ /* The save order is ra, s0 to s8. */ -+ if (regno == RETURN_ADDR_REGNUM) -+ offset = saved_size - UNITS_PER_WORD; -+ else -+ offset = saved_size - ((regno - S0_REGNUM + 2) * UNITS_PER_WORD); -+ -+ reg = gen_rtx_REG (SImode, regno); -+ mem = gen_frame_mem (SImode, plus_constant (Pmode, -+ stack_pointer_rtx, -+ offset)); -+ -+ insn = gen_rtx_SET (mem, reg); -+ dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf); -+ } -+ -+ /* Debug info for adjust sp. */ -+ adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, GEN_INT (-saved_size)); -+ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, -+ dwarf); -+ return dwarf; -+} -+ -+static void -+loongarch_emit_stack_tie (void) -+{ -+ if (Pmode == SImode) -+ emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx)); -+ else -+ emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx)); -+} -+ -+/* Return nonzero if this function is known to have a null epilogue. -+ This allows the optimizer to omit jumps to jumps if no stack -+ was created. */ -+ -+bool -+loongarch_can_use_return_insn (void) -+{ -+ return reload_completed && cfun->machine->frame.total_size == 0; -+} -+ -+static rtx -+loongarch_adjust_libcall_cfi_epilogue () -+{ -+ rtx dwarf = NULL_RTX; -+ rtx adjust_sp_rtx, reg; -+ int saved_size = cfun->machine->frame.save_libcall_adjustment; -+ -+ /* Debug info for adjust sp. */ -+ adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, GEN_INT (saved_size)); -+ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, -+ dwarf); -+ -+ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -+ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) -+ { -+ reg = gen_rtx_REG (SImode, regno); -+ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf); -+ } -+ -+ return dwarf; -+} -+ -+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P -+ says which. */ -+ -+void -+loongarch_expand_epilogue (bool sibcall_p) -+{ -+ /* Split the frame into two. STEP1 is the amount of stack we should -+ deallocate before restoring the registers. STEP2 is the amount we -+ should deallocate afterwards. -+ -+ Start off by assuming that no registers need to be restored. 
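-+ When registers do need restoring, STEP2 comes from
-+ loongarch_first_stack_step, which keeps every save slot within a
-+ signed 12-bit offset of the stack pointer once STEP1 is deallocated.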
*/ -+ struct loongarch_frame_info *frame = &cfun->machine->frame; -+ unsigned mask = frame->mask; -+ HOST_WIDE_INT step1 = frame->total_size; -+ HOST_WIDE_INT step2 = 0; -+ bool use_restore_libcall = !sibcall_p && loongarch_use_save_libcall (frame); -+ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); -+ rtx insn; -+ -+ /* We need to add memory barrier to prevent read from deallocated stack. */ -+ bool need_barrier_p = (get_frame_size () -+ + cfun->machine->frame.arg_pointer_offset) != 0; -+ -+ if (!sibcall_p && loongarch_can_use_return_insn ()) -+ { -+ emit_jump_insn (gen_return ()); -+ return; -+ } -+ -+ /* Move past any dynamic stack allocations. */ -+ if (cfun->calls_alloca) -+ { -+ /* Emit a barrier to prevent loads from a deallocated stack. */ -+ loongarch_emit_stack_tie (); -+ need_barrier_p = false; -+ -+ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); -+ if (!SMALL_OPERAND (INTVAL (adjust))) -+ { -+ loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); -+ adjust = N_LARCH_PROLOGUE_TEMP (Pmode); -+ } -+ -+ insn = emit_insn ( -+ gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, -+ adjust)); -+ -+ rtx dwarf = NULL_RTX; -+ rtx cfa_adjust_value = gen_rtx_PLUS ( -+ Pmode, hard_frame_pointer_rtx, -+ GEN_INT (-frame->hard_frame_pointer_offset)); -+ rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value); -+ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ -+ REG_NOTES (insn) = dwarf; -+ } -+ -+ /* If we need to restore registers, deallocate as much stack as -+ possible in the second step without going out of range. */ -+ if ((frame->mask | frame->fmask) != 0) -+ { -+ step2 = loongarch_first_stack_step (frame); -+ step1 -= step2; -+ } -+ -+ /* Set TARGET to BASE + STEP1. */ -+ if (step1 > 0) -+ { -+ /* Emit a barrier to prevent loads from a deallocated stack. */ -+ loongarch_emit_stack_tie (); -+ need_barrier_p = false; -+ -+ /* Get an rtx for STEP1 that we can add to BASE. */ -+ rtx adjust = GEN_INT (step1); -+ if (!SMALL_OPERAND (step1)) -+ { -+ loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); -+ adjust = N_LARCH_PROLOGUE_TEMP (Pmode); -+ } -+ -+ insn = emit_insn ( -+ gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust)); -+ -+ rtx dwarf = NULL_RTX; -+ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, -+ GEN_INT (step2)); -+ -+ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ -+ REG_NOTES (insn) = dwarf; -+ } -+ -+ if (use_restore_libcall) -+ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ -+ -+ /* Restore the registers. */ -+ loongarch_for_each_saved_reg (frame->total_size - step2, loongarch_restore_reg); -+ -+ if (use_restore_libcall) -+ { -+ frame->mask = mask; /* Undo the above fib. */ -+ gcc_assert (step2 >= frame->save_libcall_adjustment); -+ step2 -= frame->save_libcall_adjustment; -+ } -+ -+ if (need_barrier_p) -+ loongarch_emit_stack_tie (); -+ -+ /* Deallocate the final bit of the frame. 
*/ -+ if (step2 > 0) -+ { -+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ GEN_INT (step2))); -+ -+ rtx dwarf = NULL_RTX; -+ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, -+ const0_rtx); -+ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ -+ REG_NOTES (insn) = dwarf; -+ } -+ -+ if (use_restore_libcall) -+ { -+ rtx dwarf = loongarch_adjust_libcall_cfi_epilogue (); -+ insn = emit_insn (gen_gpr_restore (GEN_INT (loongarch_save_libcall_count (mask)))); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ REG_NOTES (insn) = dwarf; -+ -+ emit_jump_insn (gen_gpr_restore_return (ra)); -+ return; -+ } -+ -+ /* Add in the __builtin_eh_return stack adjustment. */ -+ if (crtl->calls_eh_return) -+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ EH_RETURN_STACKADJ_RTX)); -+ -+ if (!sibcall_p) -+ emit_jump_insn (gen_simple_return_internal (ra)); -+} -+ -+ -+static rtx loongarch_find_pic_call_symbol (rtx_insn *, rtx, bool); -+static int loongarch_register_move_cost (machine_mode, reg_class_t, -+ reg_class_t); -+ -+/* Predicates to test for presence of "near"/"short_call" and "far"/"long_call" -+ attributes on the given TYPE. */ -+ -+static bool -+loongarch_near_type_p (const_tree type) -+{ -+ return (lookup_attribute ("short_call", TYPE_ATTRIBUTES (type)) != NULL -+ || lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL); -+} -+ -+static bool -+loongarch_far_type_p (const_tree type) -+{ -+ return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL -+ || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL); -+} -+ -+ -+/* Check if the interrupt attribute is set for a function. */ -+ -+static bool -+loongarch_interrupt_type_p (tree type) -+{ -+ return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL; -+} -+ -+/* Implement TARGET_COMP_TYPE_ATTRIBUTES. */ -+ -+static int -+loongarch_comp_type_attributes (const_tree type1, const_tree type2) -+{ -+ /* Disallow mixed near/far attributes. */ -+ if (loongarch_far_type_p (type1) && loongarch_near_type_p (type2)) -+ return 0; -+ if (loongarch_near_type_p (type1) && loongarch_far_type_p (type2)) -+ return 0; -+ return 1; -+} -+ -+/* Implement TARGET_INSERT_ATTRIBUTES. */ -+ -+static void -+loongarch_insert_attributes (tree decl, tree *attributes) -+{ -+} -+ -+/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */ -+ -+static tree -+loongarch_merge_decl_attributes (tree olddecl, tree newdecl) -+{ -+ return merge_attributes (DECL_ATTRIBUTES (olddecl), -+ DECL_ATTRIBUTES (newdecl)); -+} -+ -+/* Implement TARGET_CAN_INLINE_P. */ -+ -+static bool -+loongarch_can_inline_p (tree caller, tree callee) -+{ -+ return default_target_can_inline_p (caller, callee); -+} -+ -+/* Handle an "interrupt" attribute with an optional argument. */ -+ -+static tree -+loongarch_handle_interrupt_attr (tree *node ATTRIBUTE_UNUSED, tree name, tree args, -+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) -+{ -+ /* Check for an argument. 
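The invariant behind the step1/step2 split in this epilogue (and the matching prologue) is that every restore offset, measured from the post-step1 stack pointer, and the final ADDI both fit in signed 12 bits. A small self-checking sketch under assumed frame numbers:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static int small_operand (int64_t x) { return x >= -2048 && x < 2048; }

    static void check_epilogue_split (int64_t total, int64_t step2,
                                      int64_t gp_sp_offset, int nregs)
    {
      // restores run at offsets measured from the SP after the first
      // (large) adjustment, i.e. relative to total - step2
      for (int i = 0; i < nregs; i++)
        assert (small_operand (gp_sp_offset - (total - step2) - 8 * i));
      assert (small_operand (step2));   // the final addi.d sp, sp, step2
    }

    int main (void)
    {
      check_epilogue_split (66000, 2032, 65992, 10);
      puts ("ok");
      return 0;
    }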
*/ -+ if (is_attribute_p ("interrupt", name) && args != NULL) -+ { -+ tree cst; -+ -+ cst = TREE_VALUE (args); -+ if (TREE_CODE (cst) != STRING_CST) -+ { -+ warning (OPT_Wattributes, -+ "%qE attribute requires a string argument", -+ name); -+ *no_add_attrs = true; -+ } -+ else if (strcmp (TREE_STRING_POINTER (cst), "eic") != 0 -+ && strncmp (TREE_STRING_POINTER (cst), "vector=", 7) != 0) -+ { -+ warning (OPT_Wattributes, -+ "argument to %qE attribute is neither eic, nor " -+ "vector=", name); -+ *no_add_attrs = true; -+ } -+ else if (strncmp (TREE_STRING_POINTER (cst), "vector=", 7) == 0) -+ { -+ const char *arg = TREE_STRING_POINTER (cst) + 7; -+ -+ /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5. */ -+ if (strlen (arg) != 3 -+ || (arg[0] != 's' && arg[0] != 'h') -+ || arg[1] != 'w' -+ || (arg[0] == 's' && arg[2] != '0' && arg[2] != '1') -+ || (arg[0] == 'h' && (arg[2] < '0' || arg[2] > '5'))) -+ { -+ warning (OPT_Wattributes, -+ "interrupt vector to %qE attribute is not " -+ "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)", -+ name); -+ *no_add_attrs = true; -+ } -+ } -+ -+ return NULL_TREE; -+ } -+ -+ return NULL_TREE; -+} -+ -+/* Handle a "use_shadow_register_set" attribute with an optional argument. */ -+ -+static tree -+loongarch_handle_use_shadow_register_set_attr (tree *node ATTRIBUTE_UNUSED, -+ tree name, tree args, -+ int flags ATTRIBUTE_UNUSED, -+ bool *no_add_attrs) -+{ -+ /* Check for an argument. */ -+ if (is_attribute_p ("use_shadow_register_set", name) && args != NULL) -+ { -+ tree cst; -+ -+ cst = TREE_VALUE (args); -+ if (TREE_CODE (cst) != STRING_CST) -+ { -+ warning (OPT_Wattributes, -+ "%qE attribute requires a string argument", -+ name); -+ *no_add_attrs = true; -+ } -+ else if (strcmp (TREE_STRING_POINTER (cst), "intstack") != 0) -+ { -+ warning (OPT_Wattributes, -+ "argument to %qE attribute is not intstack", name); -+ *no_add_attrs = true; -+ } -+ -+ return NULL_TREE; -+ } -+ -+ return NULL_TREE; -+} -+ -+/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR -+ and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */ -+ -+static void -+loongarch_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr) -+{ -+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))) -+ { -+ *base_ptr = XEXP (x, 0); -+ *offset_ptr = INTVAL (XEXP (x, 1)); -+ } -+ else -+ { -+ *base_ptr = x; -+ *offset_ptr = 0; -+ } -+} -+ -+static unsigned int loongarch_build_integer (struct loongarch_integer_op *, -+ unsigned HOST_WIDE_INT); -+ -+/* Fill CODES with a sequence of rtl operations to load VALUE. -+ Return the number of operations needed. -+ Split interger in loongarch_output_move. 
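The string checks above, restated as a self-contained validator with the same accept/reject behaviour (the warning machinery is dropped):

    #include <stdio.h>
    #include <string.h>

    // The attribute argument must be "eic" or "vector=XYZ" with XYZ in
    // sw0, sw1, hw0..hw5.
    static int valid_interrupt_arg (const char *arg)
    {
      if (strcmp (arg, "eic") == 0)
        return 1;
      if (strncmp (arg, "vector=", 7) != 0)
        return 0;
      const char *v = arg + 7;
      if (strlen (v) != 3 || v[1] != 'w')
        return 0;
      if (v[0] == 's')
        return v[2] == '0' || v[2] == '1';
      if (v[0] == 'h')
        return v[2] >= '0' && v[2] <= '5';
      return 0;
    }

    int main (void)
    {
      printf ("%d %d %d\n",
              valid_interrupt_arg ("vector=hw3"),
              valid_interrupt_arg ("vector=sw2"),   // rejected: sw only 0/1
              valid_interrupt_arg ("eic"));
      return 0;
    }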
*/ -+ -+ static unsigned int -+ loongarch_build_integer (struct loongarch_integer_op *codes, -+ unsigned HOST_WIDE_INT value) -+ { -+ uint32_t hi32, lo32; -+ char all0_bit_vec, sign_bit_vec, allf_bit_vec, partial_is_sext_of_prev; -+ unsigned int cost = 0; -+ -+ lo32 = value & 0xffffffff; -+ hi32 = value >> 32; -+ -+ all0_bit_vec = (((hi32 & 0xfff00000) == 0) << 3) -+ | (((hi32 & 0x000fffff) == 0) << 2) -+ | (((lo32 & 0xfffff000) == 0) << 1) -+ | ((lo32 & 0x00000fff) == 0); -+ sign_bit_vec = (((hi32 & 0x80000000) != 0) << 3) -+ | (((hi32 & 0x00080000) != 0) << 2) -+ | (((lo32 & 0x80000000) != 0) << 1) -+ | ((lo32 & 0x00000800) != 0); -+ allf_bit_vec = (((hi32 & 0xfff00000) == 0xfff00000) << 3) -+ | (((hi32 & 0x000fffff) == 0x000fffff) << 2) -+ | (((lo32 & 0xfffff000) == 0xfffff000) << 1) -+ | ((lo32 & 0x00000fff) == 0x00000fff); -+ partial_is_sext_of_prev = (all0_bit_vec ^ allf_bit_vec) -+ & (all0_bit_vec ^ (sign_bit_vec << 1)); -+ -+ do -+ { -+ if (partial_is_sext_of_prev == 0x7) -+ { -+ codes[0].code = UNKNOWN; -+ codes[0].method = METHOD_LU52I; -+ codes[0].value = value & 0xfff0000000000000; -+ cost++; -+ break; -+ } -+ if ((all0_bit_vec & 0x3) == 0x2) -+ { -+ codes[cost].code = UNKNOWN; -+ codes[cost].method = METHOD_NORMAL; -+ codes[cost].value = value & 0xfff; -+ cost++; -+ } -+ else -+ { -+ switch (partial_is_sext_of_prev & 0x3) -+ { -+ case 0: -+ codes[cost].code = UNKNOWN; -+ codes[cost].method = METHOD_NORMAL; -+ codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000; -+ cost++; -+ codes[cost].code = IOR; -+ codes[cost].method = METHOD_NORMAL; -+ codes[cost].value = value & 0xfff; -+ cost++; -+ break; -+ case 1: -+ codes[cost].code = UNKNOWN; -+ codes[cost].method = METHOD_NORMAL; -+ codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000; -+ cost++; -+ break; -+ case 2: -+ codes[cost].code = UNKNOWN; -+ codes[cost].method = METHOD_NORMAL; -+ codes[cost].value = (HOST_WIDE_INT)value << 52 >> 52; -+ cost++; -+ break; -+ case 3: -+ codes[cost].code = UNKNOWN; -+ codes[cost].method = METHOD_NORMAL; -+ codes[cost].value = 0; -+ cost++; -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ } -+ -+ if (((value & 0xfffffffffffff800) ^ 0xfff00000fffff800) == 0) -+ { -+ codes[cost].method = METHOD_INSV; -+ cost++; -+ break; -+ } -+ -+ switch (partial_is_sext_of_prev >> 2) -+ { -+ case 0: -+ codes[cost].method = METHOD_LU32I; -+ codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000; -+ cost++; -+ /* Fall through. */ -+ case 1: -+ codes[cost].method = METHOD_LU52I; -+ codes[cost].value = value & 0xfff0000000000000; -+ cost++; -+ break; -+ case 2: -+ codes[cost].method = METHOD_LU32I; -+ codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000; -+ cost++; -+ break; -+ case 3: -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ } -+ while (0); -+ -+ return cost; -+} -+ -+/* Return the number of operations needed to load VALUE into a register, -+ i.e. the cost computed by loongarch_build_integer. Large integers are -+ split in loongarch_output_move. */ -+ -+static unsigned int -+loongarch_integer_cost (HOST_WIDE_INT value) -+{ -+ struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; -+ return loongarch_build_integer (codes, value); -+} -+ -+/* Implement TARGET_LEGITIMATE_CONSTANT_P. */ -+ -+static bool -+loongarch_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x) -+{ -+ return loongarch_const_insns (x) > 0; -+} -+ -+ -+/* Return true if X is a thread-local symbol. 
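The bit-vector classification above selects a minimal subset of the four-step LoongArch constant synthesis. Below is a simplified standalone decomposition along the same lines: it applies LU12I.W, ORI, LU32I.D and LU52I.D in order and skips steps already implied by sign extension. A sketch of the idea, not the patch's exact cost logic.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    // Canonical worst case:
    //   lu12i.w  r, bits 12..31   (sign-extended to 64 bits)
    //   ori      r, r, bits 0..11
    //   lu32i.d  r, bits 32..51   (sign-extends bit 51 upward)
    //   lu52i.d  r, r, bits 52..63
    static int build_integer (uint64_t v)
    {
      int n = 1;
      uint64_t acc = (uint64_t)(int64_t)(int32_t)(uint32_t)(v & 0xfffff000);
      if (v & 0xfff)                       // ORI: low 12 bits nonzero
        { acc |= v & 0xfff; n++; }
      if (((acc ^ v) >> 32) & 0xfffff)     // LU32I.D: bits 32..51 differ
        {
          uint64_t upper = (uint64_t)(((int64_t)(v << 12)) >> 12)
                           & ~0xffffffffULL;
          acc = (acc & 0xffffffffULL) | upper; n++;
        }
      if ((acc ^ v) >> 52)                 // LU52I.D: bits 52..63 differ
        { acc = (acc & ~0xfff0000000000000ULL)
                | (v & 0xfff0000000000000ULL); n++; }
      assert (acc == v);
      return n;
    }

    int main (void)
    {
      printf ("%d %d %d\n",
              build_integer (0x1234),                  // lu12i + ori
              build_integer (0xfffffffffffff000ULL),   // one lu12i.w suffices
              build_integer (0x123456789abcdef0ULL));  // needs all four
      return 0;
    }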
*/ -+ -+static bool -+loongarch_tls_symbol_p (rtx x) -+{ -+ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0; -+} -+ -+/* Return true if SYMBOL_REF X is associated with a global symbol -+ (in the STB_GLOBAL sense). */ -+ -+bool -+loongarch_global_symbol_p (const_rtx x) -+{ -+ if (GET_CODE (x) == LABEL_REF) -+ return false; -+ -+ const_tree decl = SYMBOL_REF_DECL (x); -+ -+ if (!decl) -+ return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); -+ -+ /* Weakref symbols are not TREE_PUBLIC, but their targets are global -+ or weak symbols. Relocations in the object file will be against -+ the target symbol, so it's that symbol's binding that matters here. */ -+ return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl)); -+} -+ -+bool -+loongarch_global_symbol_noweak_p (const_rtx x) -+{ -+ if (GET_CODE (x) == LABEL_REF) -+ return false; -+ -+ const_tree decl = SYMBOL_REF_DECL (x); -+ -+ if (!decl) -+ return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); -+ -+ /* Weakref symbols are not TREE_PUBLIC, but their targets are global -+ or weak symbols. Relocations in the object file will be against -+ the target symbol, so it's that symbol's binding that matters here. */ -+ return DECL_P (decl) && TREE_PUBLIC (decl); -+} -+ -+bool -+loongarch_weak_symbol_p (const_rtx x) -+{ -+ const_tree decl; -+ if (GET_CODE (x) == LABEL_REF || !(decl = SYMBOL_REF_DECL (x))) -+ return false; -+ return DECL_P (decl) && DECL_WEAK (decl); -+} -+ -+ -+/* Return true if SYMBOL_REF X binds locally. */ -+ -+bool -+loongarch_symbol_binds_local_p (const_rtx x) -+{ -+ if (GET_CODE (x) == LABEL_REF) -+ return false; -+ -+ return (SYMBOL_REF_DECL (x) -+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) -+ : SYMBOL_REF_LOCAL_P (x)); -+} -+ -+/* Return true if OP is a constant vector with the number of units in MODE, -+ and each unit has the same bit set. */ -+ -+bool -+loongarch_const_vector_bitimm_set_p (rtx op, machine_mode mode) -+{ -+ if (GET_CODE (op) == CONST_VECTOR && op != CONST0_RTX (mode)) -+ { -+ unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0)); -+ int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode))); -+ -+ if (vlog2 != -1) -+ { -+ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT); -+ gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1); -+ return loongarch_const_vector_same_val_p (op, mode); -+ } -+ } -+ -+ return false; -+} -+ -+/* Return true if OP is a constant vector with the number of units in MODE, -+ and each unit has the same bit clear. */ -+ -+bool -+loongarch_const_vector_bitimm_clr_p (rtx op, machine_mode mode) -+{ -+ if (GET_CODE (op) == CONST_VECTOR && op != CONSTM1_RTX (mode)) -+ { -+ unsigned HOST_WIDE_INT val = ~UINTVAL (CONST_VECTOR_ELT (op, 0)); -+ int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode))); -+ -+ if (vlog2 != -1) -+ { -+ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT); -+ gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1); -+ return loongarch_const_vector_same_val_p (op, mode); -+ } -+ } -+ -+ return false; -+} -+ -+/* Return true if OP is a constant vector with the number of units in MODE, -+ and each unit has the same value. 
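The scalar core of the bitimm tests above is an exact-log2 check: after masking to the element width, exactly one bit may be set (or, for the clear variant, exactly one bit clear in the complement). A portable sketch:

    #include <stdint.h>
    #include <stdio.h>

    // Returns log2(x) when x has exactly one bit set, -1 otherwise,
    // mirroring what exact_log2 does in the predicates above.
    static int exact_log2_u64 (uint64_t x)
    {
      if (x == 0 || (x & (x - 1)) != 0)
        return -1;
      int n = 0;
      while ((x >>= 1) != 0)
        n++;
      return n;
    }

    int main (void)
    {
      uint8_t elem = 0xbf;                        // all bits set except bit 6
      printf ("%d %d %d\n",
              exact_log2_u64 (0x40),              //  6: valid bit-set splat
              exact_log2_u64 (0x41),              // -1: two bits set
              exact_log2_u64 ((uint8_t) ~elem));  //  6: the bit-clear variant
      return 0;
    }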
*/ -+ -+bool -+loongarch_const_vector_same_val_p (rtx op, machine_mode mode) -+{ -+ int i, nunits = GET_MODE_NUNITS (mode); -+ rtx first; -+ -+ if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode) -+ return false; -+ -+ first = CONST_VECTOR_ELT (op, 0); -+ for (i = 1; i < nunits; i++) -+ if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i))) -+ return false; -+ -+ return true; -+} -+ -+/* Return true if OP is a constant vector with the number of units in MODE, -+ and each unit has the same value as well as replicated bytes in the value. -+*/ -+ -+bool -+loongarch_const_vector_same_bytes_p (rtx op, machine_mode mode) -+{ -+ int i, bytes; -+ HOST_WIDE_INT val, first_byte; -+ rtx first; -+ -+ if (!loongarch_const_vector_same_val_p (op, mode)) -+ return false; -+ -+ first = CONST_VECTOR_ELT (op, 0); -+ bytes = GET_MODE_UNIT_SIZE (mode); -+ val = INTVAL (first); -+ first_byte = val & 0xff; -+ for (i = 1; i < bytes; i++) -+ { -+ val >>= 8; -+ if ((val & 0xff) != first_byte) -+ return false; -+ } -+ -+ return true; -+} -+ -+/* Return true if OP is a constant vector with the number of units in MODE, -+ and each unit has the same integer value in the range [LOW, HIGH]. */ -+ -+bool -+loongarch_const_vector_same_int_p (rtx op, machine_mode mode, HOST_WIDE_INT low, -+ HOST_WIDE_INT high) -+{ -+ HOST_WIDE_INT value; -+ rtx elem0; -+ -+ if (!loongarch_const_vector_same_val_p (op, mode)) -+ return false; -+ -+ elem0 = CONST_VECTOR_ELT (op, 0); -+ if (!CONST_INT_P (elem0)) -+ return false; -+ -+ value = INTVAL (elem0); -+ return (value >= low && value <= high); -+} -+ -+/* Return true if OP is a constant vector with repeated 4-element sets -+ in mode MODE. */ -+ -+bool -+loongarch_const_vector_shuffle_set_p (rtx op, machine_mode mode) -+{ -+ int nunits = GET_MODE_NUNITS (mode); -+ int nsets = nunits / 4; -+ int set = 0; -+ int i, j; -+ -+ /* Check if we have the same 4-element sets. */ -+ for (j = 0; j < nsets; j++, set = 4 * j) -+ for (i = 0; i < 4; i++) -+ if ((INTVAL (XVECEXP (op, 0, i)) -+ != (INTVAL (XVECEXP (op, 0, set + i)) - set)) -+ || !IN_RANGE (INTVAL (XVECEXP (op, 0, set + i)), 0, set + 3)) -+ return false; -+ return true; -+} -+ -+/* Return true if rtx constants of mode MODE should be put into a small -+ data section. */ -+ -+static bool -+loongarch_rtx_constant_in_small_data_p (machine_mode mode) -+{ -+ return (GET_MODE_SIZE (mode) <= loongarch_small_data_threshold); -+} -+ -+/* Return the method that should be used to access SYMBOL_REF or -+ LABEL_REF X in context CONTEXT. */ -+ -+static enum loongarch_symbol_type -+loongarch_classify_symbol (const_rtx x, enum loongarch_symbol_context context) -+{ -+ if (TARGET_RTP_PIC) -+ return SYMBOL_GOT_DISP; -+ -+ if (GET_CODE (x) == LABEL_REF) -+ { -+ return SYMBOL_GOT_DISP; -+ } -+ -+ gcc_assert (GET_CODE (x) == SYMBOL_REF); -+ -+ if (SYMBOL_REF_TLS_MODEL (x)) -+ return SYMBOL_TLS; -+ -+ if (GET_CODE (x) == SYMBOL_REF) -+ return SYMBOL_GOT_DISP; -+} -+ -+/* Return true if X is a symbolic constant that can be used in context -+ CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. 
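The replicated-bytes test reduces to comparing every byte of the (already splatted) element with its lowest byte; a standalone version of that loop:

    #include <stdint.h>
    #include <stdio.h>

    static int same_bytes_p (uint64_t val, int bytes)
    {
      uint64_t first = val & 0xff;
      for (int i = 1; i < bytes; i++)
        {
          val >>= 8;
          if ((val & 0xff) != first)
            return 0;
        }
      return 1;
    }

    int main (void)
    {
      printf ("%d %d\n",
              same_bytes_p (0x7f7f7f7f, 4),   // 1: byte-splat element
              same_bytes_p (0x7f007f00, 4));  // 0: bytes differ
      return 0;
    }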
*/ -+ -+bool -+loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, -+ enum loongarch_symbol_type *symbol_type) -+{ -+ rtx offset; -+ -+ split_const (x, &x, &offset); -+ if (UNSPEC_ADDRESS_P (x)) -+ { -+ *symbol_type = UNSPEC_ADDRESS_TYPE (x); -+ x = UNSPEC_ADDRESS (x); -+ } -+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) -+ { -+ *symbol_type = loongarch_classify_symbol (x, context); -+ if (*symbol_type == SYMBOL_TLS) -+ return true; -+ } -+ else -+ return false; -+ -+ if (offset == const0_rtx) -+ return true; -+ -+ /* Check whether a nonzero offset is valid for the underlying -+ relocations. */ -+ switch (*symbol_type) -+ { -+ /* Fall through. */ -+ -+ case SYMBOL_GOT_DISP: -+ case SYMBOL_TLSGD: -+ case SYMBOL_TLSLDM: -+ case SYMBOL_TLS: -+ return false; -+ } -+ gcc_unreachable (); -+} -+ -+/* Like loongarch_symbol_insns We rely on the fact that, in the worst case. */ -+ -+static int -+loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) -+{ -+ if (loongarch_use_pcrel_pool_p[(int) type]) -+ { -+ /* The constant must be loaded and then dereferenced. */ -+ return 0; -+ } -+ -+ switch (type) -+ { -+ case SYMBOL_GOT_DISP: -+ /* The constant will have to be loaded from the GOT before it -+ is used in an address. */ -+ if (mode != MAX_MACHINE_MODE) -+ return 0; -+ -+ /* Fall through. */ -+ -+ return 3; -+ -+ case SYMBOL_TLSGD: -+ case SYMBOL_TLSLDM: -+ return 1; -+ -+ case SYMBOL_TLS: -+ /* We don't treat a bare TLS symbol as a constant. */ -+ return 0; -+ } -+ gcc_unreachable (); -+} -+ -+/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed -+ to load symbols of type TYPE into a register. Return 0 if the given -+ type of symbol cannot be used as an immediate operand. -+ -+ Otherwise, return the number of instructions needed to load or store -+ values of mode MODE to or from addresses of type TYPE. Return 0 if -+ the given type of symbol is not valid in addresses. -+ -+ In both cases, instruction counts are based off BASE_INSN_LENGTH. */ -+ -+static int -+loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) -+{ -+ /* LSX LD.* and ST.* cannot support loading symbols via an immediate -+ operand. */ -+ if (LSX_SUPPORTED_MODE_P (mode)) -+ return 0; -+ -+ if (LASX_SUPPORTED_MODE_P (mode)) -+ return 0; -+ -+ return loongarch_symbol_insns_1 (type, mode) * (1); -+} -+ -+/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */ -+ -+static bool -+loongarch_cannot_force_const_mem (machine_mode mode, rtx x) -+{ -+ enum loongarch_symbol_type type; -+ rtx base, offset; -+ -+ /* There is no assembler syntax for expressing an address-sized -+ high part. */ -+ if (GET_CODE (x) == HIGH) -+ return true; -+ -+ /* As an optimization, reject constants that loongarch_legitimize_move -+ can expand inline. -+ -+ Suppose we have a multi-instruction sequence that loads constant C -+ into register R. If R does not get allocated a hard register, and -+ R is used in an operand that allows both registers and memory -+ references, reload will consider forcing C into memory and using -+ one of the instruction's memory alternatives. Returning false -+ here will force it to use an input reload instead. */ -+ if (CONST_INT_P (x) && loongarch_legitimate_constant_p (mode, x)) -+ return true; -+ -+ split_const (x, &base, &offset); -+ if (loongarch_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)) -+ { -+ /* See whether we explicitly want these symbols in the pool. 
*/ -+ if (loongarch_use_pcrel_pool_p[(int) type]) -+ return false; -+ -+ /* The same optimization as for CONST_INT. */ -+ if (SMALL_INT (offset) && loongarch_symbol_insns (type, MAX_MACHINE_MODE) > 0) -+ return true; -+ -+ } -+ -+ /* TLS symbols must be computed by loongarch_legitimize_move. */ -+ if (tls_referenced_p (x)) -+ return true; -+ -+ return false; -+} -+ -+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for -+ constants when we're using a per-function constant pool. */ -+ -+static bool -+loongarch_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED, -+ const_rtx x ATTRIBUTE_UNUSED) -+{ -+ return 1; -+} -+ -+/* Return true if register REGNO is a valid base register for mode MODE. -+ STRICT_P is true if REG_OK_STRICT is in effect. */ -+ -+int -+loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, -+ bool strict_p) -+{ -+ if (!HARD_REGISTER_NUM_P (regno)) -+ { -+ if (!strict_p) -+ return true; -+ regno = reg_renumber[regno]; -+ } -+ -+ /* These fake registers will be eliminated to either the stack or -+ hard frame pointer, both of which are usually valid base registers. -+ Reload deals with the cases where the eliminated form isn't valid. */ -+ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM) -+ return true; -+ -+ -+ return GP_REG_P (regno); -+} -+ -+/* Return true if X is a valid base register for mode MODE. -+ STRICT_P is true if REG_OK_STRICT is in effect. */ -+ -+static bool -+loongarch_valid_base_register_p (rtx x, machine_mode mode, bool strict_p) -+{ -+ if (!strict_p && GET_CODE (x) == SUBREG) -+ x = SUBREG_REG (x); -+ -+ return (REG_P (x) -+ && loongarch_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p)); -+} -+ -+/* Return true if, for every base register BASE_REG, (plus BASE_REG X) -+ can address a value of mode MODE. */ -+ -+static bool -+loongarch_valid_offset_p (rtx x, machine_mode mode) -+{ -+ /* Check that X is a signed 12-bit number, -+ * or check that X is a signed 16-bit number -+ * and offset 4 byte aligned */ -+ if (!(const_arith_operand (x, Pmode) -+ || ((mode == E_SImode || mode == E_DImode) -+ && const_imm16_operand (x, Pmode) -+ && (loongarch_signed_immediate_p (INTVAL (x), 14, 2))))) -+ return false; -+ -+ /* We may need to split multiword moves, so make sure that every word -+ is accessible. */ -+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD -+ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) -+ return false; -+ -+ /* LSX LD.* and ST.* supports 10-bit signed offsets. */ -+ if (LSX_SUPPORTED_MODE_P (mode) -+ && !loongarch_signed_immediate_p (INTVAL (x), 10, -+ loongarch_ldst_scaled_shift (mode))) -+ return false; -+ -+ /* LASX XVLD.B and XVST.B supports 10-bit signed offsets without shift. */ -+ if (LASX_SUPPORTED_MODE_P (mode) -+ && !loongarch_signed_immediate_p (INTVAL (x), 10, 0)) -+ return false; -+ -+ return true; -+} -+ -+/* Return true if X is a valid address for machine mode MODE. If it is, -+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in -+ effect. 
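Restated as a self-contained predicate, the GPR offset rule above is: signed 12-bit, or, for SImode/DImode only, signed 16-bit and 4-byte aligned (the 14-bit-shifted-by-2 form):

    #include <stdint.h>
    #include <stdio.h>

    static int signed_imm_p (int64_t x, int bits, int shift)
    {
      if (x & ((1 << shift) - 1))
        return 0;                                // must be aligned
      int64_t reach = (int64_t) 1 << (bits + shift - 1);
      return x >= -reach && x < reach;
    }

    static int valid_gpr_offset_p (int64_t x, int si_or_di)
    {
      return signed_imm_p (x, 12, 0)             // plain ld.d/st.d reach
             || (si_or_di && signed_imm_p (x, 14, 2));
    }

    int main (void)
    {
      printf ("%d %d %d\n",
              valid_gpr_offset_p (2047, 0),      // 1: fits in 12 bits
              valid_gpr_offset_p (4096, 1),      // 1: via the 14-bit<<2 form
              valid_gpr_offset_p (4097, 1));     // 0: not 4-byte aligned
      return 0;
    }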
*/ -+ -+static bool -+loongarch_classify_address (struct loongarch_address_info *info, rtx x, -+ machine_mode mode, bool strict_p) -+{ -+ switch (GET_CODE (x)) -+ { -+ case REG: -+ case SUBREG: -+ info->type = ADDRESS_REG; -+ info->reg = x; -+ info->offset = const0_rtx; -+ return loongarch_valid_base_register_p (info->reg, mode, strict_p); -+ -+ case PLUS: -+ info->type = ADDRESS_REG; -+ info->reg = XEXP (x, 0); -+ info->offset = XEXP (x, 1); -+ return (loongarch_valid_base_register_p (info->reg, mode, strict_p) -+ && loongarch_valid_offset_p (info->offset, mode)); -+ #if 0 -+ case LABEL_REF: -+ case SYMBOL_REF: -+ info->type = ADDRESS_SYMBOLIC; -+ return (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM, -+ &info->symbol_type) -+ && loongarch_symbol_insns (info->symbol_type, mode) > 0 -+ && !loongarch_split_p[info->symbol_type]); -+ -+ #endif -+ default: -+ return false; -+ } -+} -+ -+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */ -+ -+static bool -+loongarch_legitimate_address_p (machine_mode mode, rtx x, bool strict_p) -+{ -+ struct loongarch_address_info addr; -+ -+ return loongarch_classify_address (&addr, x, mode, strict_p); -+} -+ -+/* Return true if X is a legitimate $sp-based address for mode MODE. */ -+ -+bool -+loongarch_stack_address_p (rtx x, machine_mode mode) -+{ -+ struct loongarch_address_info addr; -+ -+ return (loongarch_classify_address (&addr, x, mode, false) -+ && addr.type == ADDRESS_REG -+ && addr.reg == stack_pointer_rtx); -+} -+ -+/* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load -+ indexed address instruction. Note that such addresses are -+ not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P -+ sense, because their use is so restricted. */ -+ -+static bool -+loongarch_lx_address_p (rtx addr, machine_mode mode) -+{ -+ if (GET_CODE (addr) != PLUS -+ || !REG_P (XEXP (addr, 0)) -+ || !REG_P (XEXP (addr, 1))) -+ return false; -+ if (LSX_SUPPORTED_MODE_P (mode)) -+ return true; -+ return false; -+} -+ -+ -+/* Return the number of instructions needed to load or store a value -+ of mode MODE at address X, assuming that BASE_INSN_LENGTH is the -+ length of one instruction. Return 0 if X isn't valid for MODE. -+ Assume that multiword moves may need to be split into word moves -+ if MIGHT_SPLIT_P, otherwise assume that a single load or store is -+ enough. */ -+ -+int -+loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) -+{ -+ struct loongarch_address_info addr; -+ int factor; -+ bool lsx_p = (!might_split_p && (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))); -+ -+ if (!loongarch_classify_address (&addr, x, mode, false)) -+ return 0; -+ -+ /* BLKmode is used for single unaligned loads and stores and should -+ not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty -+ meaningless, so we have to single it out as a special case one way -+ or the other.) */ -+ if (mode != BLKmode && might_split_p) -+ factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; -+ else -+ factor = 1; -+ -+ if (loongarch_classify_address (&addr, x, mode, false)) -+ switch (addr.type) -+ { -+ case ADDRESS_REG: -+ if (lsx_p) -+ { -+ /* LSX LD.* and ST.* supports 10-bit signed offsets. */ -+ if (loongarch_signed_immediate_p (INTVAL (addr.offset), 10, -+ loongarch_ldst_scaled_shift (mode))) -+ return 1; -+ else -+ return 0; -+ } -+ return factor; -+ -+ case ADDRESS_CONST_INT: -+ return lsx_p ? 0 : factor; -+ -+ case ADDRESS_SYMBOLIC: -+ return lsx_p ? 
0 : factor * loongarch_symbol_insns (addr.symbol_type, mode); -+ } -+ return 0; -+} -+ -+/* Return true if X fits within an unsigned field of BITS bits that is -+ shifted left SHIFT bits before being used. */ -+ -+bool -+loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) -+{ -+ return (x & ((1 << shift) - 1)) == 0 && x < ((unsigned) 1 << (shift + bits)); -+} -+ -+/* Return true if X fits within a signed field of BITS bits that is -+ shifted left SHIFT bits before being used. */ -+ -+bool -+loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) -+{ -+ x += 1 << (bits + shift - 1); -+ return loongarch_unsigned_immediate_p (x, bits, shift); -+} -+ -+/* Return the scale shift that applied to LSX LD/ST address offset. */ -+ -+int -+loongarch_ldst_scaled_shift (machine_mode mode) -+{ -+ int shift = exact_log2 (GET_MODE_UNIT_SIZE (mode)); -+ -+ if (shift < 0 || shift > 8) -+ gcc_unreachable (); -+ -+ return shift; -+} -+ -+/* Return true if X is a legitimate address that conforms to the requirements -+ for a microLARCH LWSP or SWSP insn. */ -+ -+bool -+lwsp_swsp_address_p (rtx x, machine_mode mode) -+{ -+ struct loongarch_address_info addr; -+ -+ return (loongarch_classify_address (&addr, x, mode, false) -+ && addr.type == ADDRESS_REG -+ && REGNO (addr.reg) == STACK_POINTER_REGNUM -+ && uw5_operand (addr.offset, mode)); -+} -+ -+/* Return true if X is a legitimate address with a 12-bit offset. -+ MODE is the mode of the value being accessed. */ -+ -+bool -+loongarch_12bit_offset_address_p (rtx x, machine_mode mode) -+{ -+ struct loongarch_address_info addr; -+ -+ return (loongarch_classify_address (&addr, x, mode, false) -+ && addr.type == ADDRESS_REG -+ && CONST_INT_P (addr.offset) -+ && ULARCH_12BIT_OFFSET_P (INTVAL (addr.offset))); -+} -+ -+/* Return true if X is a legitimate address with a 9-bit offset. -+ MODE is the mode of the value being accessed. */ -+ -+bool -+loongarch_9bit_offset_address_p (rtx x, machine_mode mode) -+{ -+ struct loongarch_address_info addr; -+ -+ return (loongarch_classify_address (&addr, x, mode, false) -+ && addr.type == ADDRESS_REG -+ && CONST_INT_P (addr.offset) -+ && LARCH_9BIT_OFFSET_P (INTVAL (addr.offset))); -+} -+ -+/* Return true if X is a legitimate address with a 14-bit offset shifted 2. -+ MODE is the mode of the value being accessed. */ -+ -+bool -+loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) -+{ -+ struct loongarch_address_info addr; -+ -+ return (loongarch_classify_address (&addr, x, mode, false) -+ && addr.type == ADDRESS_REG -+ && CONST_INT_P (addr.offset) -+ && LISA_16BIT_OFFSET_P (INTVAL (addr.offset)) -+ && LISA_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); -+} -+ -+ -+/* Return the number of instructions needed to load constant X, -+ assuming that BASE_INSN_LENGTH is the length of one instruction. -+ Return 0 if X isn't a valid constant. */ -+ -+int -+loongarch_const_insns (rtx x) -+{ -+ struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; -+ enum loongarch_symbol_type symbol_type; -+ rtx offset; -+ -+ switch (GET_CODE (x)) -+ { -+ case CONST_INT: -+ return loongarch_build_integer (codes, INTVAL (x)); -+ -+ case CONST_VECTOR: -+ if ((ISA_HAS_LSX || ISA_HAS_LASX) -+ && loongarch_const_vector_same_int_p (x, GET_MODE (x), -512, 511)) -+ return 1; -+ /* Fall through. */ -+ case CONST_DOUBLE: -+ /* Allow zeros for normal mode, where we can use $0. */ -+ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0; -+ -+ case CONST: -+ /* See if we can refer to X directly. 
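As modelled here, LSX memory offsets are signed 10-bit values scaled by the element size, so the byte reach grows with the unit while the granularity coarsens. A standalone check, assuming a power-of-two unit size as the gcc_unreachable above enforces:

    #include <stdio.h>

    static int lsx_offset_ok (long off, int unit_size)
    {
      int shift = 0;
      while ((1 << shift) < unit_size)
        shift++;                       // exact_log2 of the unit size
      if (off & ((1L << shift) - 1))
        return 0;                      // must be unit-aligned
      long reach = 512L << shift;      // 2^9 scaled by the unit
      return off >= -reach && off < reach;
    }

    int main (void)
    {
      printf ("%d %d %d\n",
              lsx_offset_ok (2044, 4),    // 1: top of the V4SI range
              lsx_offset_ok (2048, 4),    // 0: out of reach
              lsx_offset_ok (-2048, 4));  // 1: bottom of the range
      return 0;
    }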
*/ -+ if (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type)) -+ return loongarch_symbol_insns (symbol_type, MAX_MACHINE_MODE); -+ -+ /* Otherwise try splitting the constant into a base and offset. -+ If the offset is a 16-bit value, we can load the base address -+ into a register and then use (D)ADDIU to add in the offset. -+ If the offset is larger, we can load the base and offset -+ into separate registers and add them together with (D)ADDU. -+ However, the latter is only possible before reload; during -+ and after reload, we must have the option of forcing the -+ constant into the pool instead. */ -+ split_const (x, &x, &offset); -+ if (offset != 0) -+ { -+ int n = loongarch_const_insns (x); -+ if (n != 0) -+ { -+ if (SMALL_INT (offset)) -+ return n + 1; -+ else if (!targetm.cannot_force_const_mem (GET_MODE (x), x)) -+ return n + 1 + loongarch_build_integer (codes, INTVAL (offset)); -+ } -+ } -+ return 0; -+ -+ case SYMBOL_REF: -+ case LABEL_REF: -+ return loongarch_symbol_insns (loongarch_classify_symbol (x, SYMBOL_CONTEXT_LEA), -+ MAX_MACHINE_MODE); -+ -+ default: -+ return 0; -+ } -+} -+ -+/* X is a doubleword constant that can be handled by splitting it into -+ two words and loading each word separately. Return the number of -+ instructions required to do this, assuming that BASE_INSN_LENGTH -+ is the length of one instruction. */ -+ -+int -+loongarch_split_const_insns (rtx x) -+{ -+ unsigned int low, high; -+ -+ low = loongarch_const_insns (loongarch_subword (x, false)); -+ high = loongarch_const_insns (loongarch_subword (x, true)); -+ gcc_assert (low > 0 && high > 0); -+ return low + high; -+} -+ -+/* Return one word of 128-bit value OP, taking into account the fixed -+ endianness of certain registers. BYTE selects from the byte address. */ -+ -+rtx -+loongarch_subword_at_byte (rtx op, unsigned int byte) -+{ -+ machine_mode mode; -+ -+ mode = GET_MODE (op); -+ if (mode == VOIDmode) -+ mode = TImode; -+ -+ gcc_assert (!FP_REG_RTX_P (op)); -+ -+ if (MEM_P (op)) -+ return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); -+ -+ return simplify_gen_subreg (word_mode, op, mode, byte); -+} -+ -+/* Return the number of instructions needed to implement INSN, -+ given that it loads from or stores to MEM. Assume that -+ BASE_INSN_LENGTH is the length of one instruction. */ -+ -+int -+loongarch_load_store_insns (rtx mem, rtx_insn *insn) -+{ -+ machine_mode mode; -+ bool might_split_p; -+ rtx set; -+ -+ gcc_assert (MEM_P (mem)); -+ mode = GET_MODE (mem); -+ -+ /* Try to prove that INSN does not need to be split. */ -+ might_split_p = GET_MODE_SIZE (mode) > UNITS_PER_WORD; -+ if (might_split_p) -+ { -+ set = single_set (insn); -+ if (set && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn)) -+ might_split_p = false; -+ } -+ -+ return loongarch_address_insns (XEXP (mem, 0), mode, might_split_p); -+} -+ -+/* Return the number of instructions needed for an integer division, -+ assuming that BASE_INSN_LENGTH is the length of one instruction. */ -+ -+int -+loongarch_idiv_insns (machine_mode mode) -+{ -+ int count; -+ -+ count = 1; -+ if (TARGET_CHECK_ZERO_DIV) -+ count += 2; -+ -+ return count; -+} -+ -+ -+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */ -+ -+void -+loongarch_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1) -+{ -+ emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (code, GET_MODE (target), -+ op0, op1))); -+} -+ -+/* Compute (CODE OP0 OP1) and store the result in a new register -+ of mode MODE. 
Return that new register. */ -+ -+static rtx -+loongarch_force_binary (machine_mode mode, enum rtx_code code, rtx op0, rtx op1) -+{ -+ rtx reg; -+ -+ reg = gen_reg_rtx (mode); -+ loongarch_emit_binary (code, reg, op0, op1); -+ return reg; -+} -+ -+/* Copy VALUE to a register and return that register. If new pseudos -+ are allowed, copy it into a new register, otherwise use DEST. */ -+ -+static rtx -+loongarch_force_temporary (rtx dest, rtx value) -+{ -+ if (can_create_pseudo_p ()) -+ return force_reg (Pmode, value); -+ else -+ { -+ loongarch_emit_move (dest, value); -+ return dest; -+ } -+} -+ -+ -+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE, -+ then add CONST_INT OFFSET to the result. */ -+ -+static rtx -+loongarch_unspec_address_offset (rtx base, rtx offset, -+ enum loongarch_symbol_type symbol_type) -+{ -+ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), -+ UNSPEC_ADDRESS_FIRST + symbol_type); -+ if (offset != const0_rtx) -+ base = gen_rtx_PLUS (Pmode, base, offset); -+ return gen_rtx_CONST (Pmode, base); -+} -+ -+/* Return an UNSPEC address with underlying address ADDRESS and symbol -+ type SYMBOL_TYPE. */ -+ -+rtx -+loongarch_unspec_address (rtx address, enum loongarch_symbol_type symbol_type) -+{ -+ rtx base, offset; -+ -+ split_const (address, &base, &offset); -+ return loongarch_unspec_address_offset (base, offset, symbol_type); -+} -+ -+/* If OP is an UNSPEC address, return the address to which it refers, -+ otherwise return OP itself. */ -+ -+rtx -+loongarch_strip_unspec_address (rtx op) -+{ -+ rtx base, offset; -+ -+ split_const (op, &base, &offset); -+ if (UNSPEC_ADDRESS_P (base)) -+ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset)); -+ return op; -+} -+ -+ -+/* Return a base register that holds pic_offset_table_rtx. -+ TEMP, if nonnull, is a scratch Pmode base register. */ -+ -+rtx -+loongarch_pic_base_register (rtx temp) -+{ -+ return pic_offset_table_rtx; -+ -+} -+ -+/* If SRC is the RHS of a load_call insn, return the underlying symbol -+ reference. Return NULL_RTX otherwise. */ -+ -+static rtx -+loongarch_strip_unspec_call (rtx src) -+{ -+ if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL) -+ return loongarch_strip_unspec_address (XVECEXP (src, 0, 1)); -+ return NULL_RTX; -+} -+ -+/* Return a legitimate address for REG + OFFSET. TEMP is as for -+ loongarch_force_temporary; it is only needed when OFFSET is not a -+ SMALL_OPERAND. */ -+ -+static rtx -+loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset) -+{ -+ if (!SMALL_OPERAND (offset)) -+ { -+ rtx high; -+ -+ /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. -+ The addition inside the macro CONST_HIGH_PART may cause an -+ overflow, so we need to force a sign-extension check. */ -+ high = gen_int_mode (CONST_HIGH_PART (offset), Pmode); -+ offset = CONST_LOW_PART (offset); -+ high = loongarch_force_temporary (temp, high); -+ reg = loongarch_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg)); -+ } -+ return plus_constant (Pmode, reg, offset); -+} -+ -+/* The __tls_get_attr symbol. */ -+static GTY(()) rtx loongarch_tls_symbol; -+ -+/* Load an entry from the GOT for a TLS GD access. */ -+ -+static rtx loongarch_got_load_tls_gd (rtx dest, rtx sym) -+{ -+ if (Pmode == DImode) -+ return gen_got_load_tls_gddi (dest, sym); -+ else -+ return gen_got_load_tls_gdsi (dest, sym); -+} -+ -+/* Load an entry from the GOT for a TLS LD access. 
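The high/low split in loongarch_add_offset is the usual round-to-nearest trick. Assuming CONST_HIGH_PART rounds to a 4 KiB multiple to match this port's signed 12-bit ADDI immediates (the "16-bit offset" in the comment reads like a MIPS leftover; the macro itself lives in loongarch.h, not shown here), the arithmetic is:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main (void)
    {
      int64_t offset = 0x12345678;
      // The +0x800 bias rounds to the nearest 4 KiB multiple, which is why
      // the sign-extension check mentioned above is needed: the high part
      // can exceed the original offset.
      int64_t high = (offset + 0x800) & ~(int64_t) 0xfff;
      int64_t low  = offset - high;
      assert (low >= -2048 && low < 2048);   // fits an ADDI immediate
      assert (high + low == offset);
      printf ("high=0x%llx low=%lld\n", (long long) high, (long long) low);
      return 0;
    }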
*/ -+ -+static rtx loongarch_got_load_tls_ld (rtx dest, rtx sym) -+{ -+ if (Pmode == DImode) -+ return gen_got_load_tls_lddi (dest, sym); -+ else -+ return gen_got_load_tls_ldsi (dest, sym); -+} -+ -+ -+/* Load an entry from the GOT for a TLS IE access. */ -+ -+static rtx loongarch_got_load_tls_ie (rtx dest, rtx sym) -+{ -+ if (Pmode == DImode) -+ return gen_got_load_tls_iedi (dest, sym); -+ else -+ return gen_got_load_tls_iesi (dest, sym); -+} -+ -+/* Add in the thread pointer for a TLS LE access. */ -+ -+static rtx loongarch_got_load_tls_le (rtx dest, rtx sym) -+{ -+ if (Pmode == DImode) -+ return gen_got_load_tls_ledi (dest, sym); -+ else -+ return gen_got_load_tls_lesi (dest, sym); -+} -+ -+/* Return an instruction sequence that calls __tls_get_addr. SYM is -+ the TLS symbol we are referencing and TYPE is the symbol type to use -+ (either global dynamic or local dynamic). V0 is an RTX for the -+ return value location. */ -+ -+static rtx_insn * -+loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) -+{ -+ rtx loc, a0; -+ rtx_insn *insn; -+ -+ a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST); -+ -+ if (!loongarch_tls_symbol) -+ loongarch_tls_symbol = init_one_libfunc ("__tls_get_addr"); -+ -+ loc = loongarch_unspec_address (sym, type); -+ -+ start_sequence (); -+ -+ if (type == SYMBOL_TLSLDM) -+ emit_insn (loongarch_got_load_tls_ld (a0, loc)); -+ else if (type == SYMBOL_TLSGD) -+ emit_insn (loongarch_got_load_tls_gd (a0, loc)); -+ else -+ gcc_unreachable (); -+ -+ insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, const0_rtx)); -+ RTL_CONST_CALL_P (insn) = 1; -+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); -+ insn = get_insns (); -+ -+ end_sequence (); -+ -+ return insn; -+} -+ -+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return -+ its address. The return value will be both a valid address and a valid -+ SET_SRC (either a REG or a LO_SUM). */ -+ -+static rtx -+loongarch_legitimize_tls_address (rtx loc) -+{ -+ rtx dest, tp, tmp; -+ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc); -+ rtx_insn *insn; -+ -+ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. 
*/ -+ #if 0 -+ if (!flag_pic) -+ model = TLS_MODEL_LOCAL_EXEC; -+ #endif -+ -+ switch (model) -+ { -+ case TLS_MODEL_LOCAL_DYNAMIC: -+ tmp = gen_rtx_REG (Pmode, GP_RETURN); -+ dest = gen_reg_rtx (Pmode); -+ insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSLDM, tmp); -+ emit_libcall_block (insn, dest, tmp, loc); -+ break; -+ -+ case TLS_MODEL_GLOBAL_DYNAMIC: -+ tmp = gen_rtx_REG (Pmode, GP_RETURN); -+ dest = gen_reg_rtx (Pmode); -+ insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSGD, tmp); -+ emit_libcall_block (insn, dest, tmp, loc); -+ break; -+ -+ case TLS_MODEL_INITIAL_EXEC: -+ /* la.tls.ie; tp-relative add */ -+ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); -+ tmp = gen_reg_rtx (Pmode); -+ emit_insn (loongarch_got_load_tls_ie (tmp, loc)); -+ dest = gen_reg_rtx (Pmode); -+ emit_insn (gen_add3_insn (dest, tmp, tp)); -+ break; -+ -+ case TLS_MODEL_LOCAL_EXEC: -+ /* la.tls.le; tp-relative add */ -+ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); -+ tmp = gen_reg_rtx (Pmode); -+ emit_insn (loongarch_got_load_tls_le (tmp, loc)); -+ dest = gen_reg_rtx (Pmode); -+ emit_insn (gen_add3_insn (dest, tmp, tp)); -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ return dest; -+} -+ -+rtx -+loongarch_legitimize_call_address (rtx addr) -+{ -+ if (!call_insn_operand (addr, VOIDmode)) -+ { -+ rtx reg = gen_reg_rtx (Pmode); -+ loongarch_emit_move (reg, addr); -+ return reg; -+ } -+ return addr; -+} -+ -+/* If X is not a valid address for mode MODE, force it into a register. */ -+ -+static rtx -+loongarch_force_address (rtx x, machine_mode mode) -+{ -+ if (!loongarch_legitimate_address_p (mode, x, false)) -+ x = force_reg (Pmode, x); -+ return x; -+} -+ -+/* This function is used to implement LEGITIMIZE_ADDRESS. If X can -+ be legitimized in a way that the generic machinery might not expect, -+ return a new address, otherwise return NULL. MODE is the mode of -+ the memory being accessed. */ -+ -+static rtx -+loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, -+ machine_mode mode) -+{ -+ rtx base, addr; -+ HOST_WIDE_INT offset; -+ -+ if (loongarch_tls_symbol_p (x)) -+ return loongarch_legitimize_tls_address (x); -+ -+ /* Handle BASE + OFFSET using loongarch_add_offset. */ -+ loongarch_split_plus (x, &base, &offset); -+ if (offset != 0) -+ { -+ if (!loongarch_valid_base_register_p (base, mode, false)) -+ base = copy_to_mode_reg (Pmode, base); -+ addr = loongarch_add_offset (NULL, base, offset); -+ return loongarch_force_address (addr, mode); -+ } -+ -+ return x; -+} -+ -+/* Load VALUE into DEST. TEMP is as for loongarch_force_temporary. */ -+ -+void -+loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value) -+{ -+ struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; -+ machine_mode mode; -+ unsigned int i, num_ops; -+ rtx x; -+ -+ mode = GET_MODE (dest); -+ num_ops = loongarch_build_integer (codes, value); -+ -+ /* Apply each binary operation to X. Invariant: X is a legitimate -+ source operand for a SET pattern. 
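The four cases above are the standard TLS dialects. For experimenting with which sequence this code emits, any thread-local access can be steered between them with the standard GCC -ftls-model flag; the resulting la.tls.gd/ld/ie/le sequences are the ones constructed above.

    // gcc -O2 -fPIC -ftls-model=global-dynamic tls.c   (GD, __tls_get_addr)
    // gcc -O2 -fPIC -ftls-model=local-dynamic  tls.c   (LD, __tls_get_addr)
    // gcc -O2 -ftls-model=initial-exec         tls.c   (IE, la.tls.ie + add)
    // gcc -O2 -ftls-model=local-exec           tls.c   (LE, la.tls.le + add)
    #include <stdio.h>

    static __thread int counter;   // every access goes through one of the
                                   // four sequences built above

    int bump (void) { return ++counter; }

    int main (void)
    {
      printf ("%d\n", bump ());
      return 0;
    }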
*/ -+ x = GEN_INT (codes[0].value); -+ for (i = 1; i < num_ops; i++) -+ { -+ if (!can_create_pseudo_p ()) -+ { -+ emit_insn (gen_rtx_SET (temp, x)); -+ x = temp; -+ } -+ else -+ x = force_reg (mode, x); -+ switch (codes[i].method) -+ { -+ case METHOD_NORMAL: -+ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value)); -+ break; -+ case METHOD_LU32I: -+ emit_insn (gen_rtx_SET (x, gen_rtx_IOR (DImode, -+ gen_rtx_ZERO_EXTEND (DImode, -+ gen_rtx_SUBREG (SImode, x, 0)), -+ GEN_INT (codes[i].value)))); -+ break; -+ case METHOD_LU52I: -+ emit_insn (gen_lu52i_d (x, x, -+ GEN_INT (0xfffffffffffff), -+ GEN_INT (codes[i].value))); -+ break; -+ case METHOD_INSV: -+ emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, -+ x, -+ GEN_INT (20), -+ GEN_INT (32)), -+ gen_rtx_REG (DImode, 0))); -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ } -+ -+ emit_insn (gen_rtx_SET (dest, x)); -+} -+ -+/* Subroutine of loongarch_legitimize_move. Move constant SRC into register -+ DEST given that SRC satisfies immediate_operand but doesn't satisfy -+ move_operand. */ -+ -+static void -+loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) -+{ -+ rtx base, offset; -+ -+ /* Split moves of big integers into smaller pieces. */ -+ if (splittable_const_int_operand (src, mode)) -+ { -+ loongarch_move_integer (dest, dest, INTVAL (src)); -+ return; -+ } -+ -+ /* Generate the appropriate access sequences for TLS symbols. */ -+ if (loongarch_tls_symbol_p (src)) -+ { -+ loongarch_emit_move (dest, loongarch_legitimize_tls_address (src)); -+ return; -+ } -+ -+ /* If we have (const (plus symbol offset)), and that expression cannot -+ be forced into memory, load the symbol first and add in the offset. -+ prefer to do this even if the constant _can_ be forced into memory, -+ as it usually produces better code. */ -+ split_const (src, &base, &offset); -+ if (offset != const0_rtx -+ && (targetm.cannot_force_const_mem (mode, src) -+ || (can_create_pseudo_p ()))) -+ { -+ base = loongarch_force_temporary (dest, base); -+ loongarch_emit_move (dest, loongarch_add_offset (NULL, base, INTVAL (offset))); -+ return; -+ } -+ -+ src = force_const_mem (mode, src); -+ -+ loongarch_emit_move (dest, src); -+} -+ -+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent -+ sequence that is valid. */ -+ -+bool -+loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) -+{ -+ -+ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode)) -+ { -+ loongarch_emit_move (dest, force_reg (mode, src)); -+ return true; -+ } -+ -+ /* Both src and dest are non-registers; one special case is supported where -+ the source is (const_int 0) and the store can source the zero register. -+ LSX and lasx are never able to source the zero register directly in -+ memory operations. */ -+ if (!register_operand (dest, mode) -+ && !register_operand (src, mode) -+ && (!const_0_operand (src, mode) -+ || LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))) -+ { -+ loongarch_emit_move (dest, force_reg (mode, src)); -+ return true; -+ } -+ -+ /* We need to deal with constants that would be legitimate -+ immediate_operands but aren't legitimate move_operands. 
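The METHOD_INSV path deserves a note: it matches constants of the form 0xfff00000fffffXXX (with bit 11 set), which are built as a sign-extended 32-bit constant followed by inserting 20 zero bits at position 32, roughly a bstrins.d from $zero. A self-checking model of the eligibility mask and its effect:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main (void)
    {
      uint64_t value = 0xfff00000fffffabcULL;
      // the eligibility test from loongarch_build_integer
      assert (((value & 0xfffffffffffff800ULL)
               ^ 0xfff00000fffff800ULL) == 0);
      // step 1: a 32-bit constant, sign-extended to 64 bits
      uint64_t acc = (uint64_t)(int64_t)(int32_t) 0xfffffabc;
      // step 2: insert 20 zero bits at position 32 (the zero_extract SET)
      acc &= ~(((uint64_t) 0xfffff) << 32);
      assert (acc == value);
      puts ("ok");
      return 0;
    }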
*/ -+ if (CONSTANT_P (src) && !move_operand (src, mode)) -+ { -+ loongarch_legitimize_const_move (mode, dest, src); -+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src)); -+ return true; -+ } -+ -+ if ((GET_CODE (src) == SYMBOL_REF || GET_CODE (src) == LABEL_REF) -+ && symbolic_operand (src, VOIDmode) -+ && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)) -+ { -+ rtx temp = gen_reg_rtx (GET_MODE (dest)); -+ rtx x = gen_rtx_UNSPEC_VOLATILE (GET_MODE (dest), gen_rtvec (1, src), UNSPECV_MOVE_EXTREME); -+ temp = gen_rtx_USE(VOIDmode, temp); -+ temp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec(2, gen_rtx_SET (dest, x), temp)); -+ emit_insn (temp); -+ return true; -+ } -+ -+ return false; -+} -+ -+/* Return true if OP refers to small data symbols directly, not through -+ a LO_SUM. CONTEXT is the context in which X appears. */ -+ -+static int -+loongarch_small_data_pattern_1 (rtx x, enum loongarch_symbol_context context) -+{ -+ subrtx_var_iterator::array_type array; -+ FOR_EACH_SUBRTX_VAR (iter, array, x, ALL) -+ { -+ rtx x = *iter; -+ -+ /* Ignore things like "g" constraints in asms. We make no particular -+ guarantee about which symbolic constants are acceptable as asm operands -+ versus which must be forced into a GPR. */ -+ if (GET_CODE (x) == ASM_OPERANDS) -+ iter.skip_subrtxes (); -+ else if (MEM_P (x)) -+ { -+ if (loongarch_small_data_pattern_1 (XEXP (x, 0), SYMBOL_CONTEXT_MEM)) -+ return true; -+ iter.skip_subrtxes (); -+ } -+ } -+ return false; -+} -+ -+/* Return true if OP refers to small data symbols directly, not through -+ a LO_SUM. */ -+ -+bool -+loongarch_small_data_pattern_p (rtx op) -+{ -+ return loongarch_small_data_pattern_1 (op, SYMBOL_CONTEXT_LEA); -+} -+ -+/* Rewrite *LOC so that it refers to small data using explicit -+ relocations. CONTEXT is the context in which *LOC appears. */ -+ -+static void -+loongarch_rewrite_small_data_1 (rtx *loc, enum loongarch_symbol_context context) -+{ -+ subrtx_ptr_iterator::array_type array; -+ FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL) -+ { -+ rtx *loc = *iter; -+ if (MEM_P (*loc)) -+ { -+ loongarch_rewrite_small_data_1 (&XEXP (*loc, 0), SYMBOL_CONTEXT_MEM); -+ iter.skip_subrtxes (); -+ } -+ } -+} -+ -+/* Rewrite instruction pattern PATTERN so that it refers to small data -+ using explicit relocations. */ -+ -+rtx -+loongarch_rewrite_small_data (rtx pattern) -+{ -+ pattern = copy_insn (pattern); -+ loongarch_rewrite_small_data_1 (&pattern, SYMBOL_CONTEXT_LEA); -+ return pattern; -+} -+ -+/* The cost of loading values from the constant pool. It should be -+ larger than the cost of any constant we want to synthesize inline. */ -+#define CONSTANT_POOL_COST COSTS_N_INSNS (8) -+ -+/* Return true if there is a instruction that implements CODE -+ and if that instruction accepts X as an immediate operand. */ -+ -+static int -+loongarch_immediate_operand_p (int code, HOST_WIDE_INT x) -+{ -+ switch (code) -+ { -+ case ASHIFT: -+ case ASHIFTRT: -+ case LSHIFTRT: -+ /* All shift counts are truncated to a valid constant. */ -+ return true; -+ -+ case ROTATE: -+ case ROTATERT: -+ /* Likewise rotates, if the target supports rotates at all. */ -+ return true; -+ -+ case AND: -+ case IOR: -+ case XOR: -+ /* These instructions take 12-bit unsigned immediates. */ -+ return SMALL_OPERAND_UNSIGNED (x); -+ -+ case PLUS: -+ case LT: -+ case LTU: -+ /* These instructions take 12-bit signed immediates. 
*/ -+ return SMALL_OPERAND (x); -+ -+ case EQ: -+ case NE: -+ case GT: -+ case GTU: -+ /* The "immediate" forms of these instructions are really -+ implemented as comparisons with register 0. */ -+ return x == 0; -+ -+ case GE: -+ case GEU: -+ /* Likewise, meaning that the only valid immediate operand is 1. */ -+ return x == 1; -+ -+ case LE: -+ /* We add 1 to the immediate and use SLT. */ -+ return SMALL_OPERAND (x + 1); -+ -+ case LEU: -+ /* Likewise SLTU, but reject the always-true case. */ -+ return SMALL_OPERAND (x + 1) && x + 1 != 0; -+ -+ case SIGN_EXTRACT: -+ case ZERO_EXTRACT: -+ /* The bit position and size are immediate operands. */ -+ return 1; -+ -+ default: -+ /* By default assume that $0 can be used for 0. */ -+ return x == 0; -+ } -+} -+ -+/* Return the cost of binary operation X, given that the instruction -+ sequence for a word-sized or smaller operation has cost SINGLE_COST -+ and that the sequence of a double-word operation has cost DOUBLE_COST. -+ If SPEED is true, optimize for speed otherwise optimize for size. */ -+ -+static int -+loongarch_binary_cost (rtx x, int single_cost, int double_cost, bool speed) -+{ -+ int cost; -+ -+ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2) -+ cost = double_cost; -+ else -+ cost = single_cost; -+ return (cost -+ + set_src_cost (XEXP (x, 0), GET_MODE (x), speed) -+ + rtx_cost (XEXP (x, 1), GET_MODE (x), GET_CODE (x), 1, speed)); -+} -+ -+/* Return the cost of floating-point multiplications of mode MODE. */ -+ -+static int -+loongarch_fp_mult_cost (machine_mode mode) -+{ -+ return mode == DFmode ? loongarch_cost->fp_mult_df : loongarch_cost->fp_mult_sf; -+} -+ -+/* Return the cost of floating-point divisions of mode MODE. */ -+ -+static int -+loongarch_fp_div_cost (machine_mode mode) -+{ -+ return mode == DFmode ? loongarch_cost->fp_div_df : loongarch_cost->fp_div_sf; -+} -+ -+/* Return the cost of sign-extending OP to mode MODE, not including the -+ cost of OP itself. */ -+ -+static int -+loongarch_sign_extend_cost (machine_mode mode, rtx op) -+{ -+ if (MEM_P (op)) -+ /* Extended loads are as cheap as unextended ones. */ -+ return 0; -+ -+ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) -+ /* A sign extension from SImode to DImode in 64-bit mode is free. */ -+ return 0; -+ -+ return COSTS_N_INSNS (1); -+} -+ -+/* Return the cost of zero-extending OP to mode MODE, not including the -+ cost of OP itself. */ -+ -+static int -+loongarch_zero_extend_cost (machine_mode mode, rtx op) -+{ -+ if (MEM_P (op)) -+ /* Extended loads are as cheap as unextended ones. */ -+ return 0; -+ -+ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) -+ /* We need a shift left by 32 bits and a shift right by 32 bits. */ -+ return COSTS_N_INSNS (2); -+ -+ /* We can use ANDI. */ -+ return COSTS_N_INSNS (1); -+} -+ -+/* Return the cost of moving between two registers of mode MODE, -+ assuming that the move will be in pieces of at most UNITS bytes. */ -+ -+static int -+loongarch_set_reg_reg_piece_cost (machine_mode mode, unsigned int units) -+{ -+ return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units); -+} -+ -+/* Return the cost of moving between two registers of mode MODE. 
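The LE and LEU rows above encode the usual trick: x <= C has no direct immediate form, so it becomes SLT(U) with C + 1, guarded so that C + 1 still fits the 12-bit immediate and, for LEU, does not wrap to the always-true 0. The signed case in miniature:

    #include <stdint.h>
    #include <stdio.h>

    // What slti rd, rs, C+1 computes for a compile-time constant C.
    static int le_via_slti (int64_t x, int64_t c)
    {
      return x < c + 1;
    }

    int main (void)
    {
      printf ("%d %d\n", le_via_slti (5, 5), le_via_slti (6, 5));  // 1 0
      return 0;
    }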
*/ -+ -+static int -+loongarch_set_reg_reg_cost (machine_mode mode) -+{ -+ switch (GET_MODE_CLASS (mode)) -+ { -+ case MODE_FCC: -+ return loongarch_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (FCCmode)); -+ -+ case MODE_FLOAT: -+ case MODE_COMPLEX_FLOAT: -+ case MODE_VECTOR_FLOAT: -+ if (TARGET_HARD_FLOAT) -+ return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE); -+ /* Fall through */ -+ -+ default: -+ return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_WORD); -+ } -+} -+ -+/* Implement TARGET_RTX_COSTS. */ -+ -+static bool -+loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, -+ int opno ATTRIBUTE_UNUSED, int *total, bool speed) -+{ -+ int code = GET_CODE (x); -+ bool float_mode_p = FLOAT_MODE_P (mode); -+ int cost; -+ rtx addr; -+ -+ /* The cost of a COMPARE is hard to define for LARCH. COMPAREs don't -+ appear in the instruction stream, and the cost of a comparison is -+ really the cost of the branch or scc condition. At the time of -+ writing, GCC only uses an explicit outer COMPARE code when optabs -+ is testing whether a constant is expensive enough to force into a -+ register. We want optabs to pass such constants through the LARCH -+ expanders instead, so make all constants very cheap here. */ -+ if (outer_code == COMPARE) -+ { -+ gcc_assert (CONSTANT_P (x)); -+ *total = 0; -+ return true; -+ } -+ -+ switch (code) -+ { -+ case CONST_INT: -+ /* Treat *clear_upper32-style ANDs as having zero cost in the -+ second operand. The cost is entirely in the first operand. -+ -+ ??? This is needed because we would otherwise try to CSE -+ the constant operand. Although that's the right thing for -+ instructions that continue to be a register operation throughout -+ compilation, it is disastrous for instructions that could -+ later be converted into a memory operation. */ -+ if (TARGET_64BIT -+ && outer_code == AND -+ && UINTVAL (x) == 0xffffffff) -+ { -+ *total = 0; -+ return true; -+ } -+ -+ /* When not optimizing for size, we care more about the cost -+ of hot code, and hot code is often in a loop. If a constant -+ operand needs to be forced into a register, we will often be -+ able to hoist the constant load out of the loop, so the load -+ should not contribute to the cost. */ -+ if (speed || loongarch_immediate_operand_p (outer_code, INTVAL (x))) -+ { -+ *total = 0; -+ return true; -+ } -+ /* Fall through. */ -+ -+ case CONST: -+ case SYMBOL_REF: -+ case LABEL_REF: -+ case CONST_DOUBLE: -+ if (force_to_mem_operand (x, VOIDmode)) -+ { -+ *total = COSTS_N_INSNS (1); -+ return true; -+ } -+ cost = loongarch_const_insns (x); -+ if (cost > 0) -+ { -+ /* If the constant is likely to be stored in a GPR, SETs of -+ single-insn constants are as cheap as register sets; we -+ never want to CSE them. -+ -+ Don't reduce the cost of storing a floating-point zero in -+ FPRs. If we have a zero in an FPR for other reasons, we -+ can get better cfg-cleanup and delayed-branch results by -+ using it consistently, rather than using $0 sometimes and -+ an FPR at other times. Also, moves between floating-point -+ registers are sometimes cheaper than MOVGR2FR.W/MOVGR2FR.D $0. */ -+ if (cost == 1 -+ && outer_code == SET -+ && !(float_mode_p && TARGET_HARD_FLOAT)) -+ cost = 0; -+ /* When code loads a constant N>1 times, we rarely -+ want to CSE the constant itself. It is usually better to -+ have N copies of the last operation in the sequence and one -+ shared copy of the other operations. 
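The piece-cost helper above is just a ceiling division: a mode of SIZE bytes moved in UNITS-byte pieces costs ceil(SIZE, UNITS) instructions.

    #include <stdio.h>

    static int piece_cost (int size, int units)
    {
      return (size + units - 1) / units;   // ceiling division
    }

    int main (void)
    {
      printf ("%d %d\n",
              piece_cost (16, 8),   // TImode via 8-byte GPR moves: 2 insns
              piece_cost (8, 8));   // DImode: 1 insn
      return 0;
    }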
-+ -+ Also, if we have a CONST_INT, we don't know whether it is -+ for a word or doubleword operation, so we cannot rely on -+ the result of loongarch_build_integer. */ -+ else if ((outer_code == SET || GET_MODE (x) == VOIDmode)) -+ cost = 1; -+ *total = COSTS_N_INSNS (cost); -+ return true; -+ } -+ /* The value will need to be fetched from the constant pool. */ -+ *total = CONSTANT_POOL_COST; -+ return true; -+ -+ case MEM: -+ /* If the address is legitimate, return the number of -+ instructions it needs. */ -+ addr = XEXP (x, 0); -+ cost = loongarch_address_insns (addr, mode, true); -+ if (cost > 0) -+ { -+ *total = COSTS_N_INSNS (cost + 1); -+ return true; -+ } -+ /* Check for a scaled indexed address. */ -+ if (loongarch_lx_address_p (addr, mode)) -+ { -+ *total = COSTS_N_INSNS (2); -+ return true; -+ } -+ /* Otherwise use the default handling. */ -+ return false; -+ -+ case FFS: -+ *total = COSTS_N_INSNS (6); -+ return false; -+ -+ case NOT: -+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1); -+ return false; -+ -+ case AND: -+ /* Check for a *clear_upper32 pattern and treat it like a zero -+ extension. See the pattern's comment for details. */ -+ if (TARGET_64BIT -+ && mode == DImode -+ && CONST_INT_P (XEXP (x, 1)) -+ && UINTVAL (XEXP (x, 1)) == 0xffffffff) -+ { -+ *total = (loongarch_zero_extend_cost (mode, XEXP (x, 0)) -+ + set_src_cost (XEXP (x, 0), mode, speed)); -+ return true; -+ } -+ /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in -+ a single instruction. */ -+ if (GET_CODE (XEXP (x, 0)) == NOT -+ && GET_CODE (XEXP (x, 1)) == NOT) -+ { -+ cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1; -+ *total = (COSTS_N_INSNS (cost) -+ + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed) -+ + set_src_cost (XEXP (XEXP (x, 1), 0), mode, speed)); -+ return true; -+ } -+ -+ /* Fall through. */ -+ -+ case IOR: -+ case XOR: -+ /* Double-word operations use two single-word operations. */ -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2), -+ speed); -+ return true; -+ -+ case ASHIFT: -+ case ASHIFTRT: -+ case LSHIFTRT: -+ case ROTATE: -+ case ROTATERT: -+ if (CONSTANT_P (XEXP (x, 1))) -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), -+ speed); -+ else -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12), -+ speed); -+ return true; -+ -+ case ABS: -+ if (float_mode_p) -+ *total = loongarch_cost->fp_add; -+ else -+ *total = COSTS_N_INSNS (4); -+ return false; -+ -+ case LT: -+ case LTU: -+ case LE: -+ case LEU: -+ case GT: -+ case GTU: -+ case GE: -+ case GEU: -+ case EQ: -+ case NE: -+ case UNORDERED: -+ case LTGT: -+ case UNGE: -+ case UNGT: -+ case UNLE: -+ case UNLT: -+ /* Branch comparisons have VOIDmode, so use the first operand's -+ mode instead. */ -+ mode = GET_MODE (XEXP (x, 0)); -+ if (FLOAT_MODE_P (mode)) -+ { -+ *total = loongarch_cost->fp_add; -+ return false; -+ } -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), -+ speed); -+ return true; -+ -+ case MINUS: -+ case PLUS: -+ if (float_mode_p) -+ { -+ *total = loongarch_cost->fp_add; -+ return false; -+ } -+ -+ /* If it's an add + mult (which is equivalent to shift left) and -+ it's immediate operand satisfies const_immlsa_operand predicate. 
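-+
-+     As a source-level illustration (not literal RTL, and assuming the
-+     usual shift-and-add semantics rd = (rj << sa) + rk for a small sa):
-+     an expression such as
-+
-+       long f (long base, long idx) { return base + idx * 4; }
-+
-+     needs a single instruction (sa == 2), so the whole PLUS is costed
-+     as one insn plus the cost of its two inner operands.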
*/ -+ if (((ISA_HAS_LSA && mode == SImode) -+ || (ISA_HAS_DLSA && mode == DImode)) -+ && GET_CODE (XEXP (x, 0)) == MULT) -+ { -+ rtx op2 = XEXP (XEXP (x, 0), 1); -+ if (const_immlsa_operand (op2, mode)) -+ { -+ *total = (COSTS_N_INSNS (1) -+ + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed) -+ + set_src_cost (XEXP (x, 1), mode, speed)); -+ return true; -+ } -+ } -+ -+ /* Double-word operations require three single-word operations and -+ an SLTU. */ -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), -+ COSTS_N_INSNS (4), -+ speed); -+ return true; -+ -+ case NEG: -+ if (float_mode_p) -+ *total = loongarch_cost->fp_add; -+ else -+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1); -+ return false; -+ -+ case FMA: -+ *total = loongarch_fp_mult_cost (mode); -+ return false; -+ -+ case MULT: -+ if (float_mode_p) -+ *total = loongarch_fp_mult_cost (mode); -+ else if (mode == DImode && !TARGET_64BIT) -+ /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions, -+ where the mulsidi3 always includes an MFHI and an MFLO. */ -+ // FIXED ME??? -+ *total = (speed -+ ? loongarch_cost->int_mult_si * 3 + 6 -+ : COSTS_N_INSNS (7)); -+ else if (!speed) -+ *total = COSTS_N_INSNS (1) + 1; -+ else if (mode == DImode) -+ *total = loongarch_cost->int_mult_di; -+ else -+ *total = loongarch_cost->int_mult_si; -+ return false; -+ -+ case DIV: -+ /* Check for a reciprocal. */ -+ if (float_mode_p -+ && ISA_HAS_FP_RECIP_RSQRT (mode) -+ && flag_unsafe_math_optimizations -+ && XEXP (x, 0) == CONST1_RTX (mode)) -+ { -+ if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT) -+ /* An rsqrta or rsqrtb pattern. Count the -+ division as being free. */ -+ *total = set_src_cost (XEXP (x, 1), mode, speed); -+ else -+ *total = (loongarch_fp_div_cost (mode) -+ + set_src_cost (XEXP (x, 1), mode, speed)); -+ return true; -+ } -+ /* Fall through. */ -+ -+ case SQRT: -+ case MOD: -+ if (float_mode_p) -+ { -+ *total = loongarch_fp_div_cost (mode); -+ return false; -+ } -+ /* Fall through. */ -+ -+ case UDIV: -+ case UMOD: -+ if (!speed) -+ { -+ *total = COSTS_N_INSNS (loongarch_idiv_insns (mode)); -+ } -+ else if (mode == DImode) -+ *total = loongarch_cost->int_div_di; -+ else -+ *total = loongarch_cost->int_div_si; -+ return false; -+ -+ case SIGN_EXTEND: -+ *total = loongarch_sign_extend_cost (mode, XEXP (x, 0)); -+ return false; -+ -+ case ZERO_EXTEND: -+ *total = loongarch_zero_extend_cost (mode, XEXP (x, 0)); -+ return false; -+ case TRUNCATE: -+ /* Costings for highpart multiplies. Matching patterns of the form: -+ -+ (lshiftrt:DI (mult:DI (sign_extend:DI (...) 
-+ (sign_extend:DI (...)) -+ (const_int 32) -+ */ -+ if ((GET_CODE (XEXP (x, 0)) == ASHIFTRT -+ || GET_CODE (XEXP (x, 0)) == LSHIFTRT) -+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)) -+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) == 32 -+ && GET_MODE (XEXP (x, 0)) == DImode) -+ || (ISA_HAS_DMUL -+ && INTVAL (XEXP (XEXP (x, 0), 1)) == 64 -+ && GET_MODE (XEXP (x, 0)) == TImode)) -+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT -+ && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND -+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND) -+ || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND -+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) -+ == ZERO_EXTEND)))) -+ { -+ if (!speed) -+ *total = COSTS_N_INSNS (1) + 1; -+ else if (mode == DImode) -+ *total = loongarch_cost->int_mult_di; -+ else -+ *total = loongarch_cost->int_mult_si; -+ -+ /* Sign extension is free, zero extension costs for DImode when -+ on a 64bit core / when DMUL is present. */ -+ for (int i = 0; i < 2; ++i) -+ { -+ rtx op = XEXP (XEXP (XEXP (x, 0), 0), i); -+ if (ISA_HAS_DMUL -+ && GET_CODE (op) == ZERO_EXTEND -+ && GET_MODE (op) == DImode) -+ *total += rtx_cost (op, DImode, MULT, i, speed); -+ else -+ *total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op), -+ 0, speed); -+ } -+ -+ return true; -+ } -+ return false; -+ -+ case FLOAT: -+ case UNSIGNED_FLOAT: -+ case FIX: -+ case FLOAT_EXTEND: -+ case FLOAT_TRUNCATE: -+ *total = loongarch_cost->fp_add; -+ return false; -+ -+ case SET: -+ if (register_operand (SET_DEST (x), VOIDmode) -+ && reg_or_0_operand (SET_SRC (x), VOIDmode)) -+ { -+ *total = loongarch_set_reg_reg_cost (GET_MODE (SET_DEST (x))); -+ return true; -+ } -+ return false; -+ -+ default: -+ return false; -+ } -+} -+ -+/* Vectorizer cost model implementation. */ -+ -+/* Implement targetm.vectorize.builtin_vectorization_cost. */ -+ -+static int -+loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, -+ tree vectype, -+ int misalign ATTRIBUTE_UNUSED) -+{ -+ unsigned elements; -+ -+ switch (type_of_cost) -+ { -+ case scalar_stmt: -+ case scalar_load: -+ case vector_stmt: -+ case vector_load: -+ case vec_to_scalar: -+ case scalar_to_vec: -+ case cond_branch_not_taken: -+ case vec_perm: -+ case vec_promote_demote: -+ case scalar_store: -+ case vector_store: -+ return 1; -+ -+ case unaligned_load: -+ case vector_gather_load: -+ return 2; -+ -+ case unaligned_store: -+ case vector_scatter_store: -+ return 10; -+ -+ case cond_branch_taken: -+ return 3; -+ -+ case vec_construct: -+ elements = TYPE_VECTOR_SUBPARTS (vectype); -+ return elements / 2 + 1; -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+ -+/* Implement TARGET_ADDRESS_COST. */ -+ -+static int -+loongarch_address_cost (rtx addr, machine_mode mode, -+ addr_space_t as ATTRIBUTE_UNUSED, -+ bool speed ATTRIBUTE_UNUSED) -+{ -+ return loongarch_address_insns (addr, mode, false); -+} -+ -+ -+/* Return one word of double-word value OP, taking into account the fixed -+ endianness of certain registers. HIGH_P is true to select the high part, -+ false to select the low part. */ -+ -+rtx -+loongarch_subword (rtx op, bool high_p) -+{ -+ unsigned int byte, offset; -+ machine_mode mode; -+ -+ mode = GET_MODE (op); -+ if (mode == VOIDmode) -+ mode = TARGET_64BIT ? TImode : DImode; -+ -+ if (high_p) -+ byte = UNITS_PER_WORD; -+ else -+ byte = 0; -+ -+ if (FP_REG_RTX_P (op)) -+ { -+ /* Paired FPRs are always ordered little-endian. */ -+ offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? 
high_p : byte != 0); -+ return gen_rtx_REG (word_mode, REGNO (op) + offset); -+ } -+ -+ if (MEM_P (op)) -+ return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); -+ -+ return simplify_gen_subreg (word_mode, op, mode, byte); -+} -+ -+/* Return true if a move from SRC to DEST should be split into two. -+ SPLIT_TYPE describes the split condition. */ -+ -+bool -+loongarch_split_move_p (rtx dest, rtx src, enum loongarch_split_type split_type) -+{ -+ /* FPR-to-FPR moves can be done in a single instruction, if they're -+ allowed at all. */ -+ unsigned int size = GET_MODE_SIZE (GET_MODE (dest)); -+ if (size == 8 && FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) -+ return false; -+ -+ /* Check for floating-point loads and stores. */ -+ if (size == 8) -+ { -+ if (FP_REG_RTX_P (dest) && MEM_P (src)) -+ return false; -+ if (FP_REG_RTX_P (src) && MEM_P (dest)) -+ return false; -+ } -+ -+ /* Check if LSX moves need splitting. */ -+ if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) -+ return loongarch_split_128bit_move_p (dest, src); -+ -+ /* Check if LASX moves need splitting. */ -+ if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) -+ return loongarch_split_256bit_move_p (dest, src); -+ -+ /* Otherwise split all multiword moves. */ -+ return size > UNITS_PER_WORD; -+} -+ -+/* Split a move from SRC to DEST, given that loongarch_split_move_p holds. -+ SPLIT_TYPE describes the split condition. */ -+ -+void -+loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, rtx insn_) -+{ -+ rtx low_dest; -+ -+ gcc_checking_assert (loongarch_split_move_p (dest, src, split_type)); -+ if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) -+ loongarch_split_128bit_move (dest, src); -+ else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) -+ loongarch_split_256bit_move (dest, src); -+ else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) -+ { -+ if (!TARGET_64BIT && GET_MODE (dest) == DImode) -+ emit_insn (gen_move_doubleword_fprdi (dest, src)); -+ else if (!TARGET_64BIT && GET_MODE (dest) == DFmode) -+ emit_insn (gen_move_doubleword_fprdf (dest, src)); -+ else if (TARGET_64BIT && GET_MODE (dest) == TFmode) -+ emit_insn (gen_move_doubleword_fprtf (dest, src)); -+ else -+ gcc_unreachable (); -+ } -+ else -+ { -+ /* The operation can be split into two normal moves. Decide in -+ which order to do them. */ -+ low_dest = loongarch_subword (dest, false); -+ if (REG_P (low_dest) -+ && reg_overlap_mentioned_p (low_dest, src)) -+ { -+ loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); -+ loongarch_emit_move (low_dest, loongarch_subword (src, false)); -+ } -+ else -+ { -+ loongarch_emit_move (low_dest, loongarch_subword (src, false)); -+ loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); -+ } -+ } -+ -+ /* This is a hack. See if the next insn uses DEST and if so, see if we -+ can forward SRC for DEST. This is most useful if the next insn is a -+ simple store. 
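-+
-+     As a source-level illustration: after the doubleword copy in
-+
-+       t = x;     /* split into two word moves above */
-+       *p = t;    /* next insn, a simple store */
-+
-+     the store can be rewritten as *p = x, so T's halves need not be
-+     reassembled first -- provided the forwarded source is still valid
-+     at the store, which the overlap check below verifies.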
 */
-+  rtx_insn *insn = (rtx_insn *)insn_;
-+  struct loongarch_address_info addr = {};
-+  if (insn)
-+    {
-+      rtx_insn *next = next_nonnote_nondebug_insn_bb (insn);
-+      if (next)
-+        {
-+          rtx set = single_set (next);
-+          if (set && SET_SRC (set) == dest)
-+            {
-+              if (MEM_P (src))
-+                {
-+                  rtx tmp = XEXP (src, 0);
-+                  loongarch_classify_address (&addr, tmp, GET_MODE (tmp), true);
-+                  if (addr.reg && !reg_overlap_mentioned_p (dest, addr.reg))
-+                    validate_change (next, &SET_SRC (set), src, false);
-+                }
-+              else
-+                validate_change (next, &SET_SRC (set), src, false);
-+            }
-+        }
-+    }
-+}
-+
-+/* Return the split type for instruction INSN.  */
-+
-+static enum loongarch_split_type
-+loongarch_insn_split_type (rtx insn)
-+{
-+  basic_block bb = BLOCK_FOR_INSN (insn);
-+  if (bb)
-+    {
-+      if (optimize_bb_for_speed_p (bb))
-+        return SPLIT_FOR_SPEED;
-+      else
-+        return SPLIT_FOR_SIZE;
-+    }
-+  /* Once CFG information has been removed, we should trust the optimization
-+     decisions made by previous passes and only split where necessary.  */
-+  return SPLIT_IF_NECESSARY;
-+}
-+
-+/* Return true if a 128-bit move from SRC to DEST should be split.  */
-+
-+bool
-+loongarch_split_128bit_move_p (rtx dest, rtx src)
-+{
-+  /* LSX-to-LSX moves can be done in a single instruction.  */
-+  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
-+    return false;
-+
-+  /* Check for LSX loads and stores.  */
-+  if (FP_REG_RTX_P (dest) && MEM_P (src))
-+    return false;
-+  if (FP_REG_RTX_P (src) && MEM_P (dest))
-+    return false;
-+
-+  /* Check for LSX set to an immediate const vector with valid replicated
-+     element.  */
-+  if (FP_REG_RTX_P (dest)
-+      && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
-+    return false;
-+
-+  /* Check for LSX load zero immediate.  */
-+  if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
-+    return false;
-+
-+  return true;
-+}
-+
-+/* Return true if a 256-bit move from SRC to DEST should be split.  */
-+
-+bool
-+loongarch_split_256bit_move_p (rtx dest, rtx src)
-+{
-+  /* LASX-to-LASX moves can be done in a single instruction.  */
-+  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
-+    return false;
-+
-+  /* Check for LASX loads and stores.  */
-+  if (FP_REG_RTX_P (dest) && MEM_P (src))
-+    return false;
-+  if (FP_REG_RTX_P (src) && MEM_P (dest))
-+    return false;
-+
-+  /* Check for LASX set to an immediate const vector with valid replicated
-+     element.  */
-+  if (FP_REG_RTX_P (dest)
-+      && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
-+    return false;
-+
-+  /* Check for LASX load zero immediate.  */
-+  if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
-+    return false;
-+
-+  return true;
-+}
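-+
-+/* A standalone sketch (hypothetical name, for exposition only) of what
-+   splitting a 128-bit move through 64-bit pieces amounts to; the
-+   implementation below does the same with vinsgr2vr/vpickve2gr element
-+   moves when one side is a vector register.  */
-+
-+static void ATTRIBUTE_UNUSED
-+sketch_split_128 (unsigned long long dest[2], const unsigned long long src[2])
-+{
-+  dest[0] = src[0];   /* low doubleword, element index 0 */
-+  dest[1] = src[1];   /* high doubleword, element index 1 */
-+}
-+
-+/* Split a 128-bit move from SRC to DEST.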
*/ -+ -+void -+loongarch_split_128bit_move (rtx dest, rtx src) -+{ -+ int byte, index; -+ rtx low_dest, low_src, d, s; -+ -+ if (FP_REG_RTX_P (dest)) -+ { -+ gcc_assert (!MEM_P (src)); -+ -+ rtx new_dest = dest; -+ if (!TARGET_64BIT) -+ { -+ if (GET_MODE (dest) != V4SImode) -+ new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); -+ } -+ else -+ { -+ if (GET_MODE (dest) != V2DImode) -+ new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0); -+ } -+ -+ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode); -+ byte += UNITS_PER_WORD, index++) -+ { -+ s = loongarch_subword_at_byte (src, byte); -+ if (!TARGET_64BIT) -+ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest, -+ GEN_INT (1 << index))); -+ else -+ emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest, -+ GEN_INT (1 << index))); -+ } -+ } -+ else if (FP_REG_RTX_P (src)) -+ { -+ gcc_assert (!MEM_P (dest)); -+ -+ rtx new_src = src; -+ if (!TARGET_64BIT) -+ { -+ if (GET_MODE (src) != V4SImode) -+ new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0); -+ } -+ else -+ { -+ if (GET_MODE (src) != V2DImode) -+ new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0); -+ } -+ -+ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode); -+ byte += UNITS_PER_WORD, index++) -+ { -+ d = loongarch_subword_at_byte (dest, byte); -+ if (!TARGET_64BIT) -+ emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index))); -+ else -+ emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index))); -+ } -+ } -+ else -+ { -+ low_dest = loongarch_subword_at_byte (dest, 0); -+ low_src = loongarch_subword_at_byte (src, 0); -+ gcc_assert (REG_P (low_dest) && REG_P (low_src)); -+ /* Make sure the source register is not written before reading. */ -+ if (REGNO (low_dest) <= REGNO (low_src)) -+ { -+ for (byte = 0; byte < GET_MODE_SIZE (TImode); -+ byte += UNITS_PER_WORD) -+ { -+ d = loongarch_subword_at_byte (dest, byte); -+ s = loongarch_subword_at_byte (src, byte); -+ loongarch_emit_move (d, s); -+ } -+ } -+ else -+ { -+ for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0; -+ byte -= UNITS_PER_WORD) -+ { -+ d = loongarch_subword_at_byte (dest, byte); -+ s = loongarch_subword_at_byte (src, byte); -+ loongarch_emit_move (d, s); -+ } -+ } -+ } -+} -+ -+/* Split a 256-bit move from SRC to DEST. 
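 */
-+
-+/* When the two register groups overlap, the word-by-word copy must pick a
-+   direction that never clobbers a source word before it is read -- the
-+   same rule memmove follows.  A standalone sketch (hypothetical name,
-+   for exposition only):  */
-+
-+static void ATTRIBUTE_UNUSED
-+sketch_copy_words (unsigned long long *dest,
-+                   const unsigned long long *src, int n)
-+{
-+  int i;
-+  if (dest <= src)
-+    for (i = 0; i < n; i++)        /* forward: dest group starts lower */
-+      dest[i] = src[i];
-+  else
-+    for (i = n - 1; i >= 0; i--)   /* backward: dest group starts higher */
-+      dest[i] = src[i];
-+}
-+
-+/* Implementation of the 256-bit split described above.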
*/ -+ -+void -+loongarch_split_256bit_move (rtx dest, rtx src) -+{ -+ int byte, index; -+ rtx low_dest, low_src, d, s; -+ -+ if (FP_REG_RTX_P (dest)) -+ { -+ gcc_assert (!MEM_P (src)); -+ -+ rtx new_dest = dest; -+ if (!TARGET_64BIT) -+ { -+ if (GET_MODE (dest) != V8SImode) -+ new_dest = simplify_gen_subreg (V8SImode, dest, GET_MODE (dest), 0); -+ } -+ else -+ { -+ if (GET_MODE (dest) != V4DImode) -+ new_dest = simplify_gen_subreg (V4DImode, dest, GET_MODE (dest), 0); -+ } -+ -+ for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (dest)); -+ byte += UNITS_PER_WORD, index++) -+ { -+ s = loongarch_subword_at_byte (src, byte); -+ if (!TARGET_64BIT) -+ emit_insn (gen_lasx_xvinsgr2vr_w (new_dest, s, new_dest, -+ GEN_INT (1 << index))); -+ else -+ emit_insn (gen_lasx_xvinsgr2vr_d (new_dest, s, new_dest, -+ GEN_INT (1 << index))); -+ } -+ } -+ else if (FP_REG_RTX_P (src)) -+ { -+ gcc_assert (!MEM_P (dest)); -+ -+ rtx new_src = src; -+ if (!TARGET_64BIT) -+ { -+ if (GET_MODE (src) != V8SImode) -+ new_src = simplify_gen_subreg (V8SImode, src, GET_MODE (src), 0); -+ } -+ else -+ { -+ if (GET_MODE (src) != V4DImode) -+ new_src = simplify_gen_subreg (V4DImode, src, GET_MODE (src), 0); -+ } -+ -+ for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (src)); -+ byte += UNITS_PER_WORD, index++) -+ { -+ d = loongarch_subword_at_byte (dest, byte); -+ if (!TARGET_64BIT) -+ emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index))); -+ else -+ emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index))); -+ } -+ } -+ else -+ { -+ low_dest = loongarch_subword_at_byte (dest, 0); -+ low_src = loongarch_subword_at_byte (src, 0); -+ gcc_assert (REG_P (low_dest) && REG_P (low_src)); -+ /* Make sure the source register is not written before reading. */ -+ if (REGNO (low_dest) <= REGNO (low_src)) -+ { -+ for (byte = 0; byte < GET_MODE_SIZE (TImode); -+ byte += UNITS_PER_WORD) -+ { -+ d = loongarch_subword_at_byte (dest, byte); -+ s = loongarch_subword_at_byte (src, byte); -+ loongarch_emit_move (d, s); -+ } -+ } -+ else -+ { -+ for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0; -+ byte -= UNITS_PER_WORD) -+ { -+ d = loongarch_subword_at_byte (dest, byte); -+ s = loongarch_subword_at_byte (src, byte); -+ loongarch_emit_move (d, s); -+ } -+ } -+ } -+} -+ -+ -+/* Split a COPY_S.D with operands DEST, SRC and INDEX. GEN is a function -+ used to generate subregs. */ -+ -+void -+loongarch_split_lsx_copy_d (rtx dest, rtx src, rtx index, -+ rtx (*gen_fn)(rtx, rtx, rtx)) -+{ -+ gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode) -+ || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode)); -+ -+ /* Note that low is always from the lower index, and high is always -+ from the higher index. */ -+ rtx low = loongarch_subword (dest, false); -+ rtx high = loongarch_subword (dest, true); -+ rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0); -+ -+ emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2))); -+ emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1))); -+} -+ -+/* Split a INSERT.D with operand DEST, SRC1.INDEX and SRC2. */ -+ -+void -+loongarch_split_lsx_insert_d (rtx dest, rtx src1, rtx index, rtx src2) -+{ -+ int i; -+ gcc_assert (GET_MODE (dest) == GET_MODE (src1)); -+ gcc_assert ((GET_MODE (dest) == V2DImode -+ && (GET_MODE (src2) == DImode || src2 == const0_rtx)) -+ || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode)); -+ -+ /* Note that low is always from the lower index, and high is always -+ from the higher index. 
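-+
-+     As an illustration (conceptual, little-endian element order): the
-+     64-bit insert at element index I becomes two 32-bit inserts,
-+
-+       w[2 * i]     = (unsigned int) val;          /* low half  */
-+       w[2 * i + 1] = (unsigned int) (val >> 32);  /* high half */
-+
-+     which is exactly the pair of vinsgr2vr.w instructions emitted below.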
*/ -+ rtx low = loongarch_subword (src2, false); -+ rtx high = loongarch_subword (src2, true); -+ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); -+ rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0); -+ i = exact_log2 (INTVAL (index)); -+ gcc_assert (i != -1); -+ -+ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, low, new_src1, -+ GEN_INT (1 << (i * 2)))); -+ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, -+ GEN_INT (1 << (i * 2 + 1)))); -+} -+ -+/* Split FILL.D. */ -+ -+void -+loongarch_split_lsx_fill_d (rtx dest, rtx src) -+{ -+ gcc_assert ((GET_MODE (dest) == V2DImode -+ && (GET_MODE (src) == DImode || src == const0_rtx)) -+ || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode)); -+ -+ /* Note that low is always from the lower index, and high is always -+ from the higher index. */ -+ rtx low, high; -+ if (src == const0_rtx) -+ { -+ low = src; -+ high = src; -+ } -+ else -+ { -+ low = loongarch_subword (src, false); -+ high = loongarch_subword (src, true); -+ } -+ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); -+ emit_insn (gen_lsx_vreplgr2vr_w (new_dest, low)); -+ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1))); -+ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3))); -+} -+ -+/* Return true if a move from SRC to DEST in INSN should be split. */ -+ -+bool -+loongarch_split_move_insn_p (rtx dest, rtx src, rtx insn) -+{ -+ return loongarch_split_move_p (dest, src, loongarch_insn_split_type (insn)); -+} -+ -+/* Split a move from SRC to DEST in INSN, given that loongarch_split_move_insn_p -+ holds. */ -+ -+void -+loongarch_split_move_insn (rtx dest, rtx src, rtx insn) -+{ -+ loongarch_split_move (dest, src, loongarch_insn_split_type (insn), insn); -+} -+ -+ -+/* Forward declaration. Used below */ -+static HOST_WIDE_INT -+loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align); -+ -+/* Return the appropriate instructions to move SRC into DEST. Assume -+ that SRC is operand 1 and DEST is operand 0. */ -+ -+const char * -+loongarch_output_move (rtx dest, rtx src) -+{ -+ enum rtx_code dest_code = GET_CODE (dest); -+ enum rtx_code src_code = GET_CODE (src); -+ machine_mode mode = GET_MODE (dest); -+ bool dbl_p = (GET_MODE_SIZE (mode) == 8); -+ bool lsx_p = LSX_SUPPORTED_MODE_P (mode); -+ bool lasx_p = LASX_SUPPORTED_MODE_P (mode); -+ enum loongarch_symbol_type symbol_type; -+ -+ if (loongarch_split_move_p (dest, src, SPLIT_IF_NECESSARY)) -+ return "#"; -+ -+ if ((lsx_p || lasx_p) -+ && dest_code == REG && FP_REG_P (REGNO (dest)) -+ && src_code == CONST_VECTOR -+ && CONST_INT_P (CONST_VECTOR_ELT (src, 0))) -+ { -+ gcc_assert (loongarch_const_vector_same_int_p (src, mode, -512, 511)); -+ if(lsx_p || lasx_p) -+ { -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 16: -+ return "vrepli.%v0\t%w0,%E1"; -+ case 32: -+ return "xvrepli.%v0\t%u0,%E1"; -+ default: gcc_unreachable (); -+ } -+ } -+ } -+ -+ if ((src_code == REG && GP_REG_P (REGNO (src))) -+ || (src == CONST0_RTX (mode))) -+ { -+ if (dest_code == REG) -+ { -+ if (GP_REG_P (REGNO (dest))) -+ return "or\t%0,%z1,$r0"; -+ -+ if (FP_REG_P (REGNO (dest))) -+ { -+ if (lsx_p || lasx_p) -+ { -+ gcc_assert (src == CONST0_RTX (GET_MODE (src))); -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 16: -+ return "vrepli.b\t%w0,0"; -+ case 32: -+ return "xvrepli.b\t%u0,0"; -+ default: gcc_unreachable (); -+ } -+ } -+ -+ return dbl_p ? 
"movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1"; -+ } -+ } -+ if (dest_code == MEM) -+ { -+ rtx offset = XEXP (dest, 0); -+ if (GET_CODE(offset) == PLUS) -+ offset = XEXP(offset, 1); -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 1: return "st.b\t%z1,%0"; -+ case 2: return "st.h\t%z1,%0"; -+ case 4: -+ if (const_arith_operand (offset, Pmode)) -+ return "st.w\t%z1,%0"; -+ else -+ return "stptr.w\t%z1,%0"; -+ case 8: -+ if (const_arith_operand (offset, Pmode)) -+ return "st.d\t%z1,%0"; -+ else -+ return "stptr.d\t%z1,%0"; -+ default: gcc_unreachable (); -+ } -+ } -+ } -+ if (dest_code == REG && GP_REG_P (REGNO (dest))) -+ { -+ if (src_code == REG) -+ { -+ if (FP_REG_P (REGNO (src))) -+ { -+ gcc_assert (!lsx_p); -+ return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; -+ } -+ } -+ -+ if (src_code == MEM) -+ { -+ rtx offset = XEXP (src, 0); -+ if (GET_CODE(offset) == PLUS) -+ offset = XEXP(offset, 1); -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 1: return "ld.bu\t%0,%1"; -+ case 2: return "ld.hu\t%0,%1"; -+ case 4: -+ if (const_arith_operand (offset, Pmode)) -+ return "ld.w\t%0,%1"; -+ else -+ return "ldptr.w\t%0,%1"; -+ case 8: -+ if (const_arith_operand (offset, Pmode)) -+ return "ld.d\t%0,%1"; -+ else -+ return "ldptr.d\t%0,%1"; -+ default: gcc_unreachable (); -+ } -+ } -+ -+ if (src_code == CONST_INT) -+ { -+ if (LUI_INT (src)) -+ return "lu12i.w\t%0,%1>>12\t\t\t# %X1"; -+ else if (SMALL_INT (src)) -+ return "addi.w\t%0,$r0,%1\t\t\t# %X1"; -+ else if (SMALL_INT_UNSIGNED (src)) -+ return "ori\t%0,$r0,%1\t\t\t# %X1"; -+ else if (LU52I_INT (src)) -+ return "lu52i.d\t%0,$r0,%X1>>52\t\t\t# %1"; -+ else -+ gcc_unreachable (); -+ } -+ -+ if (symbolic_operand (src, VOIDmode)) -+ { -+ -+ switch (loongarch_cmodel_var) -+ { -+ case LARCH_CMODEL_TINY: -+ do -+ { -+ if (loongarch_global_symbol_p (src) -+ && !loongarch_symbol_binds_local_p (src)) -+ break; -+ case LARCH_CMODEL_TINY_STATIC: -+ if (loongarch_weak_symbol_p (src)) -+ break; -+ -+ /* The symbol must be aligned to 4 byte. */ -+ unsigned int align; -+ -+ if (GET_CODE (src) == LABEL_REF) -+ align = 128 /* whatever */; -+ /* copy from aarch64 */ -+ else if (CONSTANT_POOL_ADDRESS_P (src)) -+ align = GET_MODE_ALIGNMENT (get_pool_mode (src)); -+ else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) -+ { -+ tree exp = SYMBOL_REF_DECL (src); -+ align = TYPE_ALIGN (TREE_TYPE (exp)); -+ align = loongarch_constant_alignment (exp, align); -+ } -+ else if (SYMBOL_REF_DECL (src)) -+ align = DECL_ALIGN (SYMBOL_REF_DECL (src)); -+ else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) -+ && SYMBOL_REF_BLOCK (src) != NULL) -+ align = SYMBOL_REF_BLOCK (src)->alignment; -+ else -+ align = BITS_PER_UNIT; -+ -+ if (align % (4 * 8) == 0) -+ return "pcaddi\t%0,%%pcrel(%1)>>2"; -+ } -+ while (0); -+ case LARCH_CMODEL_NORMAL: -+ case LARCH_CMODEL_LARGE: -+ if (!loongarch_global_symbol_p (src) -+ || loongarch_symbol_binds_local_p (src)) -+ return "la.local\t%0,%1"; -+ else -+ return "la.global\t%0,%1"; -+ case LARCH_CMODEL_EXTREME: -+ default: -+ gcc_unreachable (); -+ } -+ } -+ } -+ if (src_code == REG && FP_REG_P (REGNO (src))) -+ { -+ if (dest_code == REG && FP_REG_P (REGNO (dest))) -+ { -+ if (lsx_p || lasx_p) -+ { -+ -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 16: -+ return "vori.b\t%w0,%w1,0"; -+ case 32: -+ return "xvori.b\t%u0,%u1,0"; -+ default: gcc_unreachable (); -+ } -+ } -+ else -+ return dbl_p ? 
"fmov.d\t%0,%1" : "fmov.s\t%0,%1"; -+ } -+ -+ if (dest_code == MEM) -+ { -+ if (lsx_p || lasx_p) -+ { -+ -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 16: -+ return "vst\t%w1,%0"; -+ case 32: -+ return "xvst\t%u1,%0"; -+ default: gcc_unreachable (); -+ } -+ } -+ -+ return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0"; -+ } -+ } -+ if (dest_code == REG && FP_REG_P (REGNO (dest))) -+ { -+ if (src_code == MEM) -+ { -+ if (lsx_p || lasx_p) -+ { -+ switch (GET_MODE_SIZE (mode)) -+ { -+ case 16: -+ return "vld\t%w0,%1"; -+ case 32: -+ return "xvld\t%u0,%1"; -+ default: gcc_unreachable (); -+ } -+ } -+ return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1"; -+ } -+ } -+ gcc_unreachable (); -+} -+ -+/* Return true if CMP1 is a suitable second operand for integer ordering -+ test CODE. See also the *sCC patterns in loongarch.md. */ -+ -+static bool -+loongarch_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) -+{ -+ switch (code) -+ { -+ case GT: -+ case GTU: -+ return reg_or_0_operand (cmp1, VOIDmode); -+ -+ case GE: -+ case GEU: -+ return cmp1 == const1_rtx; -+ -+ case LT: -+ case LTU: -+ return arith_operand (cmp1, VOIDmode); -+ -+ case LE: -+ return sle_operand (cmp1, VOIDmode); -+ -+ case LEU: -+ return sleu_operand (cmp1, VOIDmode); -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+/* Return true if *CMP1 (of mode MODE) is a valid second operand for -+ integer ordering test *CODE, or if an equivalent combination can -+ be formed by adjusting *CODE and *CMP1. When returning true, update -+ *CODE and *CMP1 with the chosen code and operand, otherwise leave -+ them alone. */ -+ -+static bool -+loongarch_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, -+ machine_mode mode) -+{ -+ HOST_WIDE_INT plus_one; -+ -+ if (loongarch_int_order_operand_ok_p (*code, *cmp1)) -+ return true; -+ -+ if (CONST_INT_P (*cmp1)) -+ switch (*code) -+ { -+ case LE: -+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode); -+ if (INTVAL (*cmp1) < plus_one) -+ { -+ *code = LT; -+ *cmp1 = force_reg (mode, GEN_INT (plus_one)); -+ return true; -+ } -+ break; -+ -+ case LEU: -+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode); -+ if (plus_one != 0) -+ { -+ *code = LTU; -+ *cmp1 = force_reg (mode, GEN_INT (plus_one)); -+ return true; -+ } -+ break; -+ -+ default: -+ break; -+ } -+ return false; -+} -+ -+/* Compare CMP0 and CMP1 using ordering test CODE and store the result -+ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR -+ is nonnull, it's OK to set TARGET to the inverse of the result and -+ flip *INVERT_PTR instead. */ -+ -+static void -+loongarch_emit_int_order_test (enum rtx_code code, bool *invert_ptr, -+ rtx target, rtx cmp0, rtx cmp1) -+{ -+ machine_mode mode; -+ -+ /* First see if there is a LARCH instruction that can do this operation. -+ If not, try doing the same for the inverse operation. If that also -+ fails, force CMP1 into a register and try again. 
*/ -+ mode = GET_MODE (cmp0); -+ if (loongarch_canonicalize_int_order_test (&code, &cmp1, mode)) -+ loongarch_emit_binary (code, target, cmp0, cmp1); -+ else -+ { -+ enum rtx_code inv_code = reverse_condition (code); -+ if (!loongarch_canonicalize_int_order_test (&inv_code, &cmp1, mode)) -+ { -+ cmp1 = force_reg (mode, cmp1); -+ loongarch_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1); -+ } -+ else if (invert_ptr == 0) -+ { -+ rtx inv_target; -+ -+ inv_target = loongarch_force_binary (GET_MODE (target), -+ inv_code, cmp0, cmp1); -+ loongarch_emit_binary (XOR, target, inv_target, const1_rtx); -+ } -+ else -+ { -+ *invert_ptr = !*invert_ptr; -+ loongarch_emit_binary (inv_code, target, cmp0, cmp1); -+ } -+ } -+} -+ -+/* Return a register that is zero if CMP0 and CMP1 are equal. -+ The register will have the same mode as CMP0. */ -+ -+static rtx -+loongarch_zero_if_equal (rtx cmp0, rtx cmp1) -+{ -+ if (cmp1 == const0_rtx) -+ return cmp0; -+ -+ if (uns_arith_operand (cmp1, VOIDmode)) -+ return expand_binop (GET_MODE (cmp0), xor_optab, -+ cmp0, cmp1, 0, 0, OPTAB_DIRECT); -+ -+ return expand_binop (GET_MODE (cmp0), sub_optab, -+ cmp0, cmp1, 0, 0, OPTAB_DIRECT); -+} -+ -+/* Allocate a floating-point condition-code register of mode MODE. -+ -+ These condition code registers are used for certain kinds -+ of compound operation, such as compare and branches, vconds, -+ and built-in functions. At expand time, their use is entirely -+ controlled by LARCH-specific code and is entirely internal -+ to these compound operations. -+ -+ We could (and did in the past) expose condition-code values -+ as pseudo registers and leave the register allocator to pick -+ appropriate registers. The problem is that it is not practically -+ possible for the rtl optimizers to guarantee that no spills will -+ be needed, even when AVOID_CCMODE_COPIES is defined. We would -+ therefore need spill and reload sequences to handle the worst case. -+ -+ Although such sequences do exist, they are very expensive and are -+ not something we'd want to use. -+ -+ The main benefit of having more than one condition-code register -+ is to allow the pipelining of operations, especially those involving -+ comparisons and conditional moves. We don't really expect the -+ registers to be live for long periods, and certainly never want -+ them to be live across calls. -+ -+ Also, there should be no penalty attached to using all the available -+ registers. They are simply bits in the same underlying FPU control -+ register. -+ -+ We therefore expose the hardware registers from the outset and use -+ a simple round-robin allocation scheme. */ -+ -+static rtx -+loongarch_allocate_fcc (machine_mode mode) -+{ -+ unsigned int regno, count; -+ -+ gcc_assert (TARGET_HARD_FLOAT); -+ -+ if (mode == FCCmode) -+ count = 1; -+ else -+ gcc_unreachable (); -+ -+ cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1); -+ if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST) -+ cfun->machine->next_fcc = 0; -+ -+ regno = ST_REG_FIRST + cfun->machine->next_fcc; -+ cfun->machine->next_fcc += count; -+ return gen_rtx_REG (mode, regno); -+} -+ -+ -+/* Sign- or zero-extend OP0 and OP1 for integer comparisons. */ -+ -+static void -+loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) -+{ -+ /* Comparisons consider all XLEN bits, so extend sub-XLEN values. */ -+ if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0))) -+ { -+ /* TODO: checkout It is more profitable to zero-extend QImode values. 
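-+
-+     (This is safe for an unsigned condition because, e.g.,
-+        (unsigned char) a < (unsigned char) b
-+     holds exactly when both sides are first zero-extended to a full
-+     word; the QImode mask is a single ANDI, and a constant right-hand
-+     side can simply be truncated to its low 8 bits, as done below.)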
*/ -+ if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode) -+ { -+ *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0); -+ if (CONST_INT_P (*op1)) -+ *op1 = GEN_INT ((uint8_t) INTVAL (*op1)); -+ else -+ *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1); -+ } -+ else -+ { -+ *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0); -+ if (*op1 != const0_rtx) -+ *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1); -+ } -+ } -+} -+ -+/* Convert a comparison into something that can be used in a branch. On -+ entry, *OP0 and *OP1 are the values being compared and *CODE is the code -+ used to compare them. Update them to describe the final comparison. */ -+ -+static void -+loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) -+{ -+ if (splittable_const_int_operand (*op1, VOIDmode)) -+ { -+ HOST_WIDE_INT rhs = INTVAL (*op1); -+ -+ if (*code == EQ || *code == NE) -+ { -+ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */ -+ if (SMALL_OPERAND (-rhs)) -+ { -+ *op0 = loongarch_force_binary (GET_MODE (*op0), PLUS, *op0, -+ GEN_INT (-rhs)); -+ *op1 = const0_rtx; -+ } -+ } -+ else -+ { -+ static const enum rtx_code mag_comparisons[][2] = { -+ {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE} -+ }; -+ -+ /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */ -+ for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++) -+ { -+ HOST_WIDE_INT new_rhs; -+ bool increment = *code == mag_comparisons[i][0]; -+ bool decrement = *code == mag_comparisons[i][1]; -+ if (!increment && !decrement) -+ continue; -+ -+ new_rhs = rhs + (increment ? 1 : -1); -+ if (loongarch_integer_cost (new_rhs) -+ < loongarch_integer_cost (rhs) -+ && (rhs < 0) == (new_rhs < 0)) -+ { -+ *op1 = GEN_INT (new_rhs); -+ *code = mag_comparisons[i][increment]; -+ } -+ break; -+ } -+ } -+ } -+ -+ -+ *op0 = force_reg (GET_MODE (*op0), *op0); -+ if (*op1 != const0_rtx) -+ *op1 = force_reg (GET_MODE (*op0), *op1); -+} -+ -+/* Like riscv_emit_int_compare, but for floating-point comparisons. */ -+ -+static void -+loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) -+{ -+ rtx cmp_op0 = *op0; -+ rtx cmp_op1 = *op1; -+ -+ /* Floating-point tests use a separate FCMP.cond.fmt -+ comparison to set a register. The branch or conditional move will -+ then compare that register against zero. -+ -+ Set CMP_CODE to the code of the comparison instruction and -+ *CODE to the code that the branch or move should use. */ -+ enum rtx_code cmp_code = *code; -+ /* Three FP conditions cannot be implemented by reversing the -+ operands for FCMP.cond.fmt, instead a reversed condition code is -+ required and a test for false. */ -+ *code = NE; -+ *op0 = loongarch_allocate_fcc (FCCmode); -+ -+ *op1 = const0_rtx; -+ loongarch_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1); -+} -+ -+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2] -+ and OPERAND[3]. Store the result in OPERANDS[0]. -+ -+ On 64-bit targets, the mode of the comparison and target will always be -+ SImode, thus possibly narrower than that of the comparison's operands. 
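 */
-+
-+/* A standalone sketch of the EQ case (hypothetical name, for exposition
-+   only): the result is materialized as 0 or 1 by reducing to a "zero if
-+   equal" value and testing it against zero, e.g. with
-+   sltui target, zie, 1.  */
-+
-+static long ATTRIBUTE_UNUSED
-+sketch_scc_eq (long op0, long op1)
-+{
-+  long zie = op0 ^ op1;   /* zero iff the operands are equal */
-+  return zie == 0;        /* 1 when equal, 0 otherwise */
-+}
-+
-+/* Expand the scc comparison described above.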
*/ -+ -+void -+loongarch_expand_scc (rtx operands[]) -+{ -+ rtx target = operands[0]; -+ enum rtx_code code = GET_CODE (operands[1]); -+ rtx op0 = operands[2]; -+ rtx op1 = operands[3]; -+ -+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT); -+ -+ if (code == EQ || code == NE) -+ { -+ { -+ rtx zie = loongarch_zero_if_equal (op0, op1); -+ loongarch_emit_binary (code, target, zie, const0_rtx); -+ } -+ } -+ else -+ loongarch_emit_int_order_test (code, 0, target, op0, op1); -+} -+ -+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code -+ CODE and jump to OPERANDS[3] if the condition holds. */ -+ -+void -+loongarch_expand_conditional_branch (rtx *operands) -+{ -+ enum rtx_code code = GET_CODE (operands[0]); -+ rtx op0 = operands[1]; -+ rtx op1 = operands[2]; -+ rtx condition; -+ -+ if (FLOAT_MODE_P (GET_MODE (op1))) -+ loongarch_emit_float_compare (&code, &op0, &op1); -+ else -+ loongarch_emit_int_compare (&code, &op0, &op1); -+ -+ condition = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); -+ emit_jump_insn (gen_condjump (condition, operands[3])); -+} -+ -+/* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] -+ if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */ -+ -+void -+loongarch_expand_conditional_move (rtx *operands) -+{ -+ enum rtx_code code = GET_CODE (operands[1]); -+ rtx op0 = XEXP (operands[1], 0); -+ rtx op1 = XEXP (operands[1], 1); -+ -+ if (FLOAT_MODE_P (GET_MODE (op1))) -+ loongarch_emit_float_compare (&code, &op0, &op1); -+ else -+ { -+ if (code == EQ || code == NE) /*see test-mask-1.c && test-mask-5.c*/ -+ { -+ op0 = loongarch_zero_if_equal(op0, op1); -+ op1 = const0_rtx; -+ } -+ else /*see test-mask-2.c*/ -+ { -+ /* The comparison needs a separate scc instruction. Store the -+ result of the scc in *OP0 and compare it against zero. */ -+ bool invert = false; -+ rtx target = gen_reg_rtx (GET_MODE (op0)); -+ loongarch_emit_int_order_test (code, &invert, target, op0, op1); -+ code = invert ? EQ: NE; -+ op0 = target; -+ op1 = const0_rtx; -+ } -+ } -+ -+ rtx cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); -+ /* There is no direct support for general conditional GP move involving -+ two registers using SEL. see test-mask-3.c */ -+ if (INTEGRAL_MODE_P (GET_MODE (operands[2])) -+ && register_operand (operands[2], VOIDmode) -+ && register_operand (operands[3], VOIDmode)) -+ { -+ machine_mode mode = GET_MODE (operands[0]); -+ rtx temp = gen_reg_rtx (mode); -+ rtx temp2 = gen_reg_rtx (mode); -+ -+ emit_insn (gen_rtx_SET (temp, -+ gen_rtx_IF_THEN_ELSE (mode, cond, -+ operands[2], const0_rtx))); -+ -+ /* Flip the test for the second operand. */ -+ cond = gen_rtx_fmt_ee ((code == EQ) ? NE : EQ, GET_MODE (op0), op0, op1); -+ -+ emit_insn (gen_rtx_SET (temp2, -+ gen_rtx_IF_THEN_ELSE (mode, cond, -+ operands[3], const0_rtx))); -+ -+ /* Merge the two results, at least one is guaranteed to be zero. */ -+ emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2))); -+ } -+ else -+ emit_insn (gen_rtx_SET (operands[0], -+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond, -+ operands[2], operands[3]))); -+} -+ -+ -+/* Initialize *CUM for a call to a function of type FNTYPE. */ -+ -+void -+loongarch_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype) -+{ -+ memset (cum, 0, sizeof (*cum)); -+ cum->prototype = (fntype && prototype_p (fntype)); -+ cum->gp_reg_found = (cum->prototype && stdarg_p (fntype)); -+} -+ -+ -+ -+/* Implement TARGET_EXPAND_BUILTIN_VA_START. 
*/ -+ -+static void -+loongarch_va_start (tree valist, rtx nextarg) -+{ -+ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size); -+ std_expand_builtin_va_start (valist, nextarg); -+} -+ -+ -+/* Start a definition of function NAME. */ -+ -+static void -+loongarch_start_function_definition (const char *name) -+{ -+ ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function"); -+ -+ /* Start the definition proper. */ -+ assemble_name (asm_out_file, name); -+ fputs (":\n", asm_out_file); -+} -+ -+/* End a function definition started by loongarch_start_function_definition. */ -+ -+static void -+loongarch_end_function_definition (const char *name) -+{ -+} -+ -+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */ -+ -+static bool -+loongarch_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) -+{ -+ if (!TARGET_SIBCALLS) -+ return false; -+ -+ /* Interrupt handlers need special epilogue code and therefore can't -+ use sibcalls. */ -+ if (loongarch_interrupt_type_p (TREE_TYPE (current_function_decl))) -+ return false; -+ -+ /* Otherwise OK. */ -+ return true; -+} -+ -+/* Implement a handler for STORE_BY_PIECES operations -+ for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */ -+ -+bool -+loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align) -+{ -+ /* Storing by pieces involves moving constants into registers -+ of size MIN (ALIGN, BITS_PER_WORD), then storing them. -+ We need to decide whether it is cheaper to load the address of -+ constant data into a register and use a block move instead. */ -+ -+ /* If the data is only byte aligned, then: -+ -+ (a1) A block move of less than 4 bytes would involve three 3 LD.Bs and -+ 3 ST.Bs. We might as well use 3 single-instruction LIs and 3 SD.Bs -+ instead. -+ -+ (a2) A block move of 4 bytes from aligned source data can use an -+ LD.W/ST.W sequence. This is often better than the 4 LIs and -+ 4 SD.Bs that we would generate when storing by pieces. */ -+ if (align <= BITS_PER_UNIT) -+ return size < 4; -+ -+ /* If the data is 2-byte aligned, then: -+ -+ (b1) A block move of less than 4 bytes would use a combination of LD.Bs, -+ LD.Hs, SD.Bs and SD.Hs. We get better code by using single-instruction -+ LIs, SD.Bs and SD.Hs instead. -+ -+ (b2) A block move of 4 bytes from aligned source data would again use -+ an LD.W/ST.W sequence. In most cases, loading the address of -+ the source data would require at least one extra instruction. -+ It is often more efficient to use 2 single-instruction LIs and -+ 2 SHs instead. -+ -+ (b3) A block move of up to 3 additional bytes would be like (b1). -+ -+ (b4) A block move of 8 bytes from aligned source data can use two -+ LD.W/ST.W sequences. Both sequences are better than the 4 LIs -+ and 4 ST.Hs that we'd generate when storing by pieces. -+ -+ The reasoning for higher alignments is similar: -+ -+ (c1) A block move of less than 4 bytes would be the same as (b1). -+ -+ (c2) A block move of 4 bytes would use an LD.W/ST.W sequence. Again, -+ loading the address of the source data would typically require -+ at least one extra instruction. It is generally better to use -+ LUI/ORI/SW instead. -+ -+ (c3) A block move of up to 3 additional bytes would be like (b1). -+ -+ (c4) A block move of 8 bytes can use two LD.W/ST.W sequences or a single -+ LD.D/ST.D sequence, and in these cases we've traditionally preferred -+ the memory copy over the more bulky constant moves. */ -+ return size < 8; -+} -+ -+/* Emit straight-line code to move LENGTH bytes from SRC to DEST. 
-+ Assume that the areas do not overlap. */ -+ -+static void -+loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) -+{ -+ HOST_WIDE_INT offset, delta; -+ unsigned HOST_WIDE_INT bits; -+ int i; -+ machine_mode mode; -+ rtx *regs; -+ -+ /* Work out how many bits to move at a time. If both operands have -+ half-word alignment, it is usually better to move in half words. -+ For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr -+ and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr. -+ Otherwise move word-sized chunks. -+ -+ For ISA_HAS_LWL_LWR we rely on the lwl/lwr & swl/swr load. Otherwise -+ picking the minimum of alignment or BITS_PER_WORD gets us the -+ desired size for bits. */ -+ -+ bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))); -+ -+ if (TARGET_LASX) -+ { -+ bits = BITS_PER_WORD * 4; -+ mode = V4DImode; -+ delta = bits / BITS_PER_UNIT; -+ } -+ else -+ { -+ mode = int_mode_for_size (bits, 0).require (); -+ delta = bits / BITS_PER_UNIT; -+ } -+ -+ /* Allocate a buffer for the temporary registers. */ -+ regs = XALLOCAVEC (rtx, length / delta); -+ -+ /* Load as many BITS-sized chunks as possible. Use a normal load if -+ the source has enough alignment, otherwise use left/right pairs. */ -+ if (TARGET_LASX) -+ { -+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) -+ { -+ regs[i] = gen_reg_rtx (mode); -+ loongarch_emit_move (regs[i], adjust_address (src, mode, offset)); -+ } -+ } -+ else -+ { -+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) -+ { -+ regs[i] = gen_reg_rtx (mode); -+ loongarch_emit_move (regs[i], adjust_address (src, mode, offset)); -+ } -+ } -+ -+ /* Copy the chunks to the destination. */ -+ if (TARGET_LASX) -+ { -+ -+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) -+ { -+ loongarch_emit_move (adjust_address (dest, mode, offset), regs[i]); -+ } -+ } -+ else -+ { -+ -+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) -+ loongarch_emit_move (adjust_address (dest, mode, offset), regs[i]); -+ } -+ -+ /* Mop up any left-over bytes. 
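-+
-+     The tail uses a greedy power-of-two decomposition: a 13-byte
-+     remainder, say, is copied as 8 + 4 + 1, so at most one move of each
-+     size 16/8/4/2/1 is emitted and any remainder (always < 32 on the
-+     LASX path) is covered exactly.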
*/ -+ if (offset < length) -+ { -+ if (TARGET_LASX) -+ { -+ if(length - offset >= 16) -+ { -+ rtx *regs_tmp = XALLOCAVEC (rtx, 1); -+ regs_tmp[0] = gen_reg_rtx (V2DImode); -+ loongarch_emit_move (regs_tmp[0], adjust_address (src, V2DImode, offset)); -+ loongarch_emit_move (adjust_address (dest, V2DImode, offset), regs_tmp[0]); -+ offset += 16; -+ } -+ if(length - offset >= 8) -+ { -+ rtx *regs_tmp = XALLOCAVEC (rtx, 1); -+ regs_tmp[0] = gen_reg_rtx (DImode); -+ loongarch_emit_move (regs_tmp[0], adjust_address (src, DImode, offset)); -+ loongarch_emit_move (adjust_address (dest, DImode, offset), regs_tmp[0]); -+ offset += 8; -+ } -+ if(length - offset >= 4) -+ { -+ rtx *regs_tmp = XALLOCAVEC (rtx, 1); -+ regs_tmp[0] = gen_reg_rtx (SImode); -+ loongarch_emit_move (regs_tmp[0], adjust_address (src, SImode, offset)); -+ loongarch_emit_move (adjust_address (dest, SImode, offset), regs_tmp[0]); -+ offset += 4; -+ } -+ if(length - offset >= 2) -+ { -+ rtx *regs_tmp = XALLOCAVEC (rtx, 1); -+ regs_tmp[0] = gen_reg_rtx (HImode); -+ loongarch_emit_move (regs_tmp[0], adjust_address (src, HImode, offset)); -+ loongarch_emit_move (adjust_address (dest, HImode, offset), regs_tmp[0]); -+ offset += 2; -+ } -+ if(length - offset >= 1) -+ { -+ rtx *regs_tmp = XALLOCAVEC (rtx, 1); -+ regs_tmp[0] = gen_reg_rtx (QImode); -+ loongarch_emit_move (regs_tmp[0], adjust_address (src, QImode, offset)); -+ loongarch_emit_move (adjust_address (dest, QImode, offset), regs_tmp[0]); -+ offset += 1; -+ } -+ -+ if(length - offset != 0) -+ gcc_unreachable (); -+ } -+ else -+ { -+ src = adjust_address (src, BLKmode, offset); -+ dest = adjust_address (dest, BLKmode, offset); -+ move_by_pieces (dest, src, length - offset, -+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0); -+ -+ } -+ } -+} -+ -+/* Helper function for doing a loop-based block operation on memory -+ reference MEM. Each iteration of the loop will operate on LENGTH -+ bytes of MEM. -+ -+ Create a new base register for use within the loop and point it to -+ the start of MEM. Create a new memory reference that uses this -+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */ -+ -+static void -+loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, -+ rtx *loop_reg, rtx *loop_mem) -+{ -+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0)); -+ -+ /* Although the new mem does not refer to a known location, -+ it does keep up to LENGTH bytes of alignment. */ -+ *loop_mem = change_address (mem, BLKmode, *loop_reg); -+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT)); -+} -+ -+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER -+ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that -+ the memory regions do not overlap. */ -+ -+static void -+loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, -+ HOST_WIDE_INT bytes_per_iter) -+{ -+ rtx_code_label *label; -+ rtx src_reg, dest_reg, final_src, test; -+ HOST_WIDE_INT leftover; -+ -+ leftover = length % bytes_per_iter; -+ length -= leftover; -+ -+ /* Create registers and memory references for use within the loop. */ -+ loongarch_adjust_block_mem (src, bytes_per_iter, &src_reg, &src); -+ loongarch_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest); -+ -+ /* Calculate the value that SRC_REG should have after the last iteration -+ of the loop. */ -+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), -+ 0, 0, OPTAB_WIDEN); -+ -+ /* Emit the start of the loop. 
*/ -+ label = gen_label_rtx (); -+ emit_label (label); -+ -+ /* Emit the loop body. */ -+ loongarch_block_move_straight (dest, src, bytes_per_iter); -+ -+ /* Move on to the next block. */ -+ loongarch_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter)); -+ loongarch_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter)); -+ -+ /* Emit the loop condition. */ -+ test = gen_rtx_NE (VOIDmode, src_reg, final_src); -+ if (Pmode == DImode) -+ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label)); -+ else -+ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label)); -+ -+ /* Mop up any left-over bytes. */ -+ if (leftover) -+ loongarch_block_move_straight (dest, src, leftover); -+ else -+ /* Temporary fix for PR79150. */ -+ emit_insn (gen_nop ()); -+} -+ -+/* Expand a movmemsi instruction, which copies LENGTH bytes from -+ memory reference SRC to memory reference DEST. */ -+ -+bool -+loongarch_expand_block_move (rtx dest, rtx src, rtx length) -+{ -+ -+ int max_move_bytes = (TARGET_LASX ? \ -+ LARCH_MAX_MOVE_BYTES_STRAIGHT * 8 \ -+ : LARCH_MAX_MOVE_BYTES_STRAIGHT); -+ -+ if (CONST_INT_P (length) && INTVAL (length) <= loongarch_max_inline_memcpy_size) -+ { -+ if (INTVAL (length) <= max_move_bytes) -+ { -+ loongarch_block_move_straight (dest, src, INTVAL (length)); -+ return true; -+ } -+ else if (optimize) -+ { -+ loongarch_block_move_loop (dest, src, INTVAL (length), -+ LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); -+ return true; -+ } -+ } -+ return false; -+} -+ -+ -+/* Expand a QI or HI mode atomic memory operation. -+ -+ GENERATOR contains a pointer to the gen_* function that generates -+ the SI mode underlying atomic operation using masks that we -+ calculate. -+ -+ RESULT is the return register for the operation. Its value is NULL -+ if unused. -+ -+ MEM is the location of the atomic access. -+ -+ OLDVAL is the first operand for the operation. -+ -+ NEWVAL is the optional second operand for the operation. Its value -+ is NULL if unused. */ -+ -+void -+loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, -+ rtx result, rtx mem, rtx oldval, -+ rtx newval, rtx model) -+{ -+ rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask; -+ rtx unshifted_mask_reg, mask, inverted_mask, si_op; -+ rtx res = NULL; -+ rtx tmp = NULL; -+ machine_mode mode; -+ -+ mode = GET_MODE (mem); -+ -+ /* Compute the address of the containing SImode value. */ -+ orig_addr = force_reg (Pmode, XEXP (mem, 0)); -+ memsi_addr = loongarch_force_binary (Pmode, AND, orig_addr, -+ force_reg (Pmode, GEN_INT (-4))); -+ -+ /* Create a memory reference for it. */ -+ memsi = gen_rtx_MEM (SImode, memsi_addr); -+ set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER); -+ MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem); -+ -+ /* Work out the byte offset of the QImode or HImode value, -+ counting from the least significant byte. */ -+ shift = loongarch_force_binary (Pmode, AND, orig_addr, GEN_INT (3)); -+ -+ /* Multiply by eight to convert the shift value from bytes to bits. */ -+ loongarch_emit_binary (ASHIFT, shift, shift, GEN_INT (3)); -+ -+ /* Make the final shift an SImode value, so that it can be used in -+ SImode operations. */ -+ shiftsi = force_reg (SImode, gen_lowpart (SImode, shift)); -+ -+ /* Set MASK to an inclusive mask of the QImode or HImode value. 
*/ -+ unshifted_mask = GEN_INT (GET_MODE_MASK (mode)); -+ unshifted_mask_reg = force_reg (SImode, unshifted_mask); -+ mask = loongarch_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi); -+ -+ /* Compute the equivalent exclusive mask. */ -+ inverted_mask = gen_reg_rtx (SImode); -+ emit_insn (gen_rtx_SET (inverted_mask, gen_rtx_NOT (SImode, mask))); -+ -+ /* Shift the old value into place. */ -+ if (oldval != const0_rtx) -+ { -+ oldval = convert_modes (SImode, mode, oldval, true); -+ oldval = force_reg (SImode, oldval); -+ oldval = loongarch_force_binary (SImode, ASHIFT, oldval, shiftsi); -+ } -+ -+ /* Do the same for the new value. */ -+ if (newval && newval != const0_rtx) -+ { -+ newval = convert_modes (SImode, mode, newval, true); -+ newval = force_reg (SImode, newval); -+ newval = loongarch_force_binary (SImode, ASHIFT, newval, shiftsi); -+ } -+ -+ /* Do the SImode atomic access. */ -+ if (result) -+ res = gen_reg_rtx (SImode); -+ -+ if (newval) -+ si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, model); -+ else if (result) -+ si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, model); -+ else -+ si_op = generator.fn_5 (memsi, mask, inverted_mask, oldval, model); -+ -+ //si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, model); -+ -+ emit_insn (si_op); -+ -+ if (result) -+ { -+ /* Shift and convert the result. */ -+ loongarch_emit_binary (AND, res, res, mask); -+ loongarch_emit_binary (LSHIFTRT, res, res, shiftsi); -+ loongarch_emit_move (result, gen_lowpart (GET_MODE (result), res)); -+ } -+} -+ -+/* Return true if X is a MEM with the same size as MODE. */ -+ -+bool -+loongarch_mem_fits_mode_p (machine_mode mode, rtx x) -+{ -+ return (MEM_P (x) -+ && MEM_SIZE_KNOWN_P (x) -+ && MEM_SIZE (x) == GET_MODE_SIZE (mode)); -+} -+ -+/* Return true if (zero_extract OP WIDTH BITPOS) can be used as the -+ source of an "ext" instruction or the destination of an "ins" -+ instruction. OP must be a register operand and the following -+ conditions must hold: -+ -+ 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op)) -+ 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) -+ 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) -+ -+ Also reject lengths equal to a word as they are better handled -+ by the move patterns. */ -+ -+bool -+loongarch_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos) -+{ -+ if (!register_operand (op, VOIDmode) -+ || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD) -+ return false; -+ -+ if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1)) -+ return false; -+ -+ if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op))) -+ return false; -+ -+ return true; -+} -+ -+ -+/* Return true iff OP1 and OP2 are valid operands together for the -+ *and3 patterns. For the cases to consider, -+ see the table in the comment before the pattern. */ -+ -+bool -+and_operands_ok (machine_mode mode, rtx op1, rtx op2) -+{ -+ -+ if (memory_operand (op1, mode)) -+ { -+ return and_load_operand (op2, mode); -+ } -+ else -+ return and_reg_operand (op2, mode); -+} -+ -+/* Print the text for PRINT_OPERAND punctation character CH to FILE. -+ The punctuation characters are: -+ -+ '.' Print the name of the register with a hard-wired zero (zero or $r0). -+ '$' Print the name of the stack pointer register (sp or $r3). -+ ':' Print "c" to use the compact version if the delay slot is a nop. -+ '!' Print "s" to use the short version if the delay slot contains a -+ 16-bit instruction. 
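-+
-+   For example, an insn template such as "or\t%0,%1,%." prints the
-+   hard-wired zero register for its final operand, giving e.g.
-+   "or $r4,$r5,$r0".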
-+ -+ See also loongarch_init_print_operand_punct. */ -+ -+static void -+loongarch_print_operand_punctuation (FILE *file, int ch) -+{ -+ switch (ch) -+ { -+ case '.': -+ fputs (reg_names[GP_REG_FIRST + 0], file); -+ break; -+ -+ case '$': -+ fputs (reg_names[STACK_POINTER_REGNUM], file); -+ break; -+ -+ case ':': -+ /* When final_sequence is 0, the delay slot will be a nop. We can -+ use the compact version where available. The %: formatter will -+ only be present if a compact form of the branch is available. */ -+ if (final_sequence == 0) -+ putc ('c', file); -+ break; -+ -+ default: -+ gcc_unreachable (); -+ break; -+ } -+} -+ -+/* Initialize loongarch_print_operand_punct. */ -+ -+static void -+loongarch_init_print_operand_punct (void) -+{ -+ const char *p; -+ -+ for (p = ".$:"; *p; p++) -+ loongarch_print_operand_punct[(unsigned char) *p] = true; -+} -+ -+/* PRINT_OPERAND prefix LETTER refers to the integer branch instruction -+ associated with condition CODE. Print the condition part of the -+ opcode to FILE. */ -+ -+static void -+loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter) -+{ -+ switch (code) -+ { -+ case EQ: -+ case NE: -+ case GT: -+ case GE: -+ case LT: -+ case LE: -+ case GTU: -+ case GEU: -+ case LTU: -+ case LEU: -+ /* Conveniently, the LARCH names for these conditions are the same -+ as their RTL equivalents. */ -+ fputs (GET_RTX_NAME (code), file); -+ break; -+ -+ default: -+ output_operand_lossage ("'%%%c' is not a valid operand prefix", letter); -+ break; -+ } -+} -+ -+/* Likewise floating-point branches. */ -+ -+static void -+loongarch_print_float_branch_condition (FILE *file, enum rtx_code code, int letter) -+{ -+ switch (code) -+ { -+ case EQ: -+ fputs ("ceqz", file); -+ break; -+ -+ case NE: -+ fputs ("cnez", file); -+ break; -+ -+ default: -+ output_operand_lossage ("'%%%c' is not a valid operand prefix", letter); -+ break; -+ } -+} -+ -+/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */ -+ -+static bool -+loongarch_print_operand_punct_valid_p (unsigned char code) -+{ -+ return loongarch_print_operand_punct[code]; -+} -+ -+/* Return true if a FENCE should be emitted to before a memory access to -+ implement the release portion of memory model MODEL. */ -+ -+static bool -+loongarch_memmodel_needs_rel_and_acq_fence (enum memmodel model) -+{ -+ switch (model) -+ { -+ case MEMMODEL_ACQ_REL: -+ case MEMMODEL_SEQ_CST: -+ case MEMMODEL_SYNC_SEQ_CST: -+ case MEMMODEL_RELEASE: -+ case MEMMODEL_SYNC_RELEASE: -+ case MEMMODEL_ACQUIRE: -+ case MEMMODEL_CONSUME: -+ case MEMMODEL_SYNC_ACQUIRE: -+ return true; -+ -+ case MEMMODEL_RELAXED: -+ return false; -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+/* Return true if a FENCE should be emitted to before a memory access to -+ implement the release portion of memory model MODEL. */ -+ -+static bool -+loongarch_memmodel_needs_release_fence (enum memmodel model) -+{ -+ switch (model) -+ { -+ case MEMMODEL_ACQ_REL: -+ case MEMMODEL_SEQ_CST: -+ case MEMMODEL_SYNC_SEQ_CST: -+ case MEMMODEL_RELEASE: -+ case MEMMODEL_SYNC_RELEASE: -+ return true; -+ -+ case MEMMODEL_ACQUIRE: -+ case MEMMODEL_CONSUME: -+ case MEMMODEL_SYNC_ACQUIRE: -+ case MEMMODEL_RELAXED: -+ return false; -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+/* Implement TARGET_PRINT_OPERAND. The LARCH-specific operand codes are: -+ -+ 'E' Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal. -+ 'X' Print CONST_INT OP in hexadecimal format. -+ 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format. 
-+ 'd' Print CONST_INT OP in decimal. -+ 'B' Print CONST_INT OP element 0 of a replicated CONST_VECTOR -+ as an unsigned byte [0..255]. -+ 'm' Print one less than CONST_INT OP in decimal. -+ 'y' Print exact log2 of CONST_INT OP in decimal. -+ 'h' Print the high-part relocation associated with OP, after stripping -+ any outermost HIGH. -+ 'R' Print the low-part relocation associated with OP. -+ 'C' Print the integer branch condition for comparison OP. -+ 'N' Print the inverse of the integer branch condition for comparison OP. -+ 'F' Print the FPU branch condition for comparison OP. -+ 'W' Print the inverse of the FPU branch condition for comparison OP. -+ 'w' Print a LSX register. -+ 'u' Print a LASX register. -+ 'T' Print 'f' for (eq:FCC ...), 't' for (ne:FCC ...), -+ 'z' for (eq:?I ...), 'n' for (ne:?I ...). -+ 't' Like 'T', but with the EQ/NE cases reversed -+ 'Y' Print loongarch_fp_conditions[INTVAL (OP)] -+ 'Z' Print OP and a comma for 8CC, otherwise print nothing. -+ 'D' Print the second part of a double-word register or memory operand. -+ 'L' Print the low-order register in a double-word register operand. -+ 'M' Print high-order register in a double-word register operand. -+ 'z' Print $0 if OP is zero, otherwise print OP normally. -+ 'b' Print the address of a memory operand, without offset. -+ 'v' Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI, -+ V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively. -+ 'V' Print exact log2 of CONST_INT OP element 0 of a replicated -+ CONST_VECTOR in decimal. -+ 'A' Print a _DB suffix if the memory model requires a release. -+ 'G' Print a DBAR insn if the memory model requires a release. -+ 'i' Print i if the operand is not a register. */ -+ -+static void -+loongarch_print_operand (FILE *file, rtx op, int letter) -+{ -+ enum rtx_code code; -+ -+ if (loongarch_print_operand_punct_valid_p (letter)) -+ { -+ loongarch_print_operand_punctuation (file, letter); -+ return; -+ } -+ -+ gcc_assert (op); -+ code = GET_CODE (op); -+ -+ switch (letter) -+ { -+ case 'E': -+ if (GET_CODE (op) == CONST_VECTOR) -+ { -+ gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op))); -+ op = CONST_VECTOR_ELT (op, 0); -+ gcc_assert (CONST_INT_P (op)); -+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); -+ } -+ else -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ break; -+ -+ case 'X': -+ if (CONST_INT_P (op)) -+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op)); -+ else -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ break; -+ -+ case 'x': -+ if (CONST_INT_P (op)) -+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff); -+ else -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ break; -+ -+ case 'd': -+ if (CONST_INT_P (op)) -+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); -+ else -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ break; -+ -+ case 'B': -+ if (GET_CODE (op) == CONST_VECTOR) -+ { -+ gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op))); -+ op = CONST_VECTOR_ELT (op, 0); -+ gcc_assert (CONST_INT_P (op)); -+ unsigned HOST_WIDE_INT val8 = UINTVAL (op) & GET_MODE_MASK (QImode); -+ fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, val8); -+ } -+ else -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ break; -+ -+ case 'm': -+ if (CONST_INT_P (op)) -+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1); -+ else -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ break; -+ -+ case 'y': -+ if (CONST_INT_P 
(op))
-+        {
-+          int val = exact_log2 (INTVAL (op));
-+          if (val != -1)
-+            fprintf (file, "%d", val);
-+          else
-+            output_operand_lossage ("invalid use of '%%%c'", letter);
-+        }
-+      else
-+        output_operand_lossage ("invalid use of '%%%c'", letter);
-+      break;
-+
-+    case 'V':
-+      if (GET_CODE (op) == CONST_VECTOR)
-+        {
-+          machine_mode mode = GET_MODE_INNER (GET_MODE (op));
-+          unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
-+          int vlog2 = exact_log2 (val & GET_MODE_MASK (mode));
-+          if (vlog2 != -1)
-+            fprintf (file, "%d", vlog2);
-+          else
-+            output_operand_lossage ("invalid use of '%%%c'", letter);
-+        }
-+      else
-+        output_operand_lossage ("invalid use of '%%%c'", letter);
-+      break;
-+
-+    case 'C':
-+      loongarch_print_int_branch_condition (file, code, letter);
-+      break;
-+
-+    case 'N':
-+      loongarch_print_int_branch_condition (file, reverse_condition (code), letter);
-+      break;
-+
-+    case 'F':
-+      loongarch_print_float_branch_condition (file, code, letter);
-+      break;
-+
-+    case 'W':
-+      loongarch_print_float_branch_condition (file, reverse_condition (code),
-+                                              letter);
-+      break;
-+
-+    case 'T':
-+    case 't':
-+      {
-+        int truth = (code == NE) == (letter == 'T');
-+        fputc ("zfnt"[truth * 2 + ST_REG_P (REGNO (XEXP (op, 0)))], file);
-+      }
-+      break;
-+
-+    case 'Y':
-+      if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (loongarch_fp_conditions))
-+        fputs (loongarch_fp_conditions[UINTVAL (op)], file);
-+      else
-+        output_operand_lossage ("'%%%c' is not a valid operand prefix",
-+                                letter);
-+      break;
-+
-+    case 'Z':
-+      loongarch_print_operand (file, op, 0);
-+      fputc (',', file);
-+      break;
-+
-+    case 'w':
-+      if (code == REG && LSX_REG_P (REGNO (op)))
-+        fprintf (file, "$vr%s", &reg_names[REGNO (op)][2]);
-+      else
-+        output_operand_lossage ("invalid use of '%%%c'", letter);
-+      break;
-+
-+    case 'u':
-+      if (code == REG && LASX_REG_P (REGNO (op)))
-+        fprintf (file, "$xr%s", &reg_names[REGNO (op)][2]);
-+      else
-+        output_operand_lossage ("invalid use of '%%%c'", letter);
-+      break;
-+
-+    case 'v':
-+      switch (GET_MODE (op))
-+        {
-+        case E_V16QImode:
-+        case E_V32QImode:
-+          fprintf (file, "b");
-+          break;
-+        case E_V8HImode:
-+        case E_V16HImode:
-+          fprintf (file, "h");
-+          break;
-+        case E_V4SImode:
-+        case E_V4SFmode:
-+        case E_V8SImode:
-+        case E_V8SFmode:
-+          fprintf (file, "w");
-+          break;
-+        case E_V2DImode:
-+        case E_V2DFmode:
-+        case E_V4DImode:
-+        case E_V4DFmode:
-+          fprintf (file, "d");
-+          break;
-+        default:
-+          output_operand_lossage ("invalid use of '%%%c'", letter);
-+        }
-+      break;
-+
-+    case 'A':
-+      if (loongarch_memmodel_needs_rel_and_acq_fence ((enum memmodel) INTVAL (op)))
-+        fputs ("_db", file);
-+      break;
-+
-+    case 'G':
-+      if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
-+        fputs ("dbar\t0", file);
-+      break;
-+
-+    case 'i':
-+      if (code != REG)
-+        fputs ("i", file);
-+      break;
-+
-+    default:
-+      switch (code)
-+        {
-+        case REG:
-+          {
-+            unsigned int regno = REGNO (op);
-+            if (letter == 'M' || letter == 'D')
-+              regno++;
-+            else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
-+              output_operand_lossage ("invalid use of '%%%c'", letter);
-+            fprintf (file, "%s", reg_names[regno]);
-+          }
-+          break;
-+
-+        case MEM:
-+          if (letter == 'D')
-+            output_address (GET_MODE (op), plus_constant (Pmode,
-+                                                          XEXP (op, 0), 4));
-+          else if (letter == 'b')
-+            {
-+              gcc_assert (REG_P (XEXP (op, 0)));
-+              loongarch_print_operand (file, XEXP (op, 0), 0);
-+            }
-+          else if (letter && letter != 'z')
-+            output_operand_lossage ("invalid use of '%%%c'", letter);
-+          else
-+            output_address (GET_MODE (op),
XEXP (op, 0)); -+ break; -+ -+ default: -+ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op))) -+ fputs (reg_names[GP_REG_FIRST], file); -+ else if (letter && letter != 'z') -+ output_operand_lossage ("invalid use of '%%%c'", letter); -+ else -+ output_addr_const (file, loongarch_strip_unspec_address (op)); -+ break; -+ } -+ } -+} -+ -+/* Implement TARGET_PRINT_OPERAND_ADDRESS. */ -+ -+static void -+loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x) -+{ -+ struct loongarch_address_info addr; -+ -+ if (loongarch_classify_address (&addr, x, word_mode, true)) -+ switch (addr.type) -+ { -+ case ADDRESS_REG: -+ fprintf (file, "%s,", reg_names[REGNO (addr.reg)]); -+ loongarch_print_operand (file, addr.offset, 0); -+ return; -+ -+ case ADDRESS_CONST_INT: -+ fprintf (file, "%s,", reg_names[GP_REG_FIRST]); -+ output_addr_const (file, x); -+ return; -+ -+ case ADDRESS_SYMBOLIC: -+ output_addr_const (file, loongarch_strip_unspec_address (x)); -+ return; -+ } -+ if (GET_CODE (x) == CONST_INT) -+ output_addr_const (file, x); -+ else -+ gcc_unreachable (); -+} -+ -+ -+/* Implement TARGET_ENCODE_SECTION_INFO. */ -+ -+static void -+loongarch_encode_section_info (tree decl, rtx rtl, int first) -+{ -+ default_encode_section_info (decl, rtl, first); -+ -+ if (TREE_CODE (decl) == FUNCTION_DECL) -+ { -+ rtx symbol = XEXP (rtl, 0); -+ tree type = TREE_TYPE (decl); -+ -+ /* Encode whether the symbol is short or long. */ -+ if ((TARGET_LONG_CALLS && !loongarch_near_type_p (type)) -+ || loongarch_far_type_p (type)) -+ SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL; -+ } -+} -+ -+/* Implement TARGET_SELECT_RTX_SECTION. */ -+ -+static section * -+loongarch_select_rtx_section (machine_mode mode, rtx x, -+ unsigned HOST_WIDE_INT align) -+{ -+ /* ??? Consider using mergeable small data sections. */ -+ if (loongarch_rtx_constant_in_small_data_p (mode)) -+ return get_named_section (NULL, ".sdata", 0); -+ -+ return default_elf_select_rtx_section (mode, x, align); -+} -+ -+/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION. -+ -+ The complication here is that, with the combination -+ !TARGET_ABSOLUTE_ABICALLS , jump tables will use -+ absolute addresses, and should therefore not be included in the -+ read-only part of a DSO. Handle such cases by selecting a normal -+ data section instead of a read-only one. The logic apes that in -+ default_function_rodata_section. */ -+ -+static section * -+loongarch_function_rodata_section (tree decl) -+{ -+ return default_function_rodata_section (decl); -+} -+ -+/* Implement TARGET_IN_SMALL_DATA_P. */ -+ -+static bool -+loongarch_in_small_data_p (const_tree decl) -+{ -+ unsigned HOST_WIDE_INT size; -+ -+ if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL) -+ return false; -+ -+ /* We don't yet generate small-data references for -+ VxWorks RTP code. See the related -G handling in -+ loongarch_option_override. */ -+ if (TARGET_VXWORKS_RTP) -+ return false; -+ -+ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0) -+ { -+ const char *name; -+ -+ /* Reject anything that isn't in a known small-data section. */ -+ name = DECL_SECTION_NAME (decl); -+ if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0) -+ return false; -+ -+ /* If a symbol is defined externally, the assembler will use the -+ usual -G rules when deciding how to implement macros. */ -+ if (!DECL_EXTERNAL (decl)) -+ return true; -+ } -+ -+ /* We have traditionally not treated zero-sized objects as small data, -+ so this is now effectively part of the ABI. 
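-+
-+   For illustration, the test below reduces roughly to the following
-+   stand-alone predicate (a minimal sketch; g_limit stands in for the
-+   -G threshold loongarch_small_data_threshold):
-+
-+     static int
-+     small_data_p (const char *section, long size, long g_limit)
-+     {
-+       if (section
-+           && strcmp (section, ".sdata") != 0
-+           && strcmp (section, ".sbss") != 0)
-+         return 0;                            // placed elsewhere
-+       return size > 0 && size <= g_limit;    // zero-sized never small
-+     }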
*/
-+  size = int_size_in_bytes (TREE_TYPE (decl));
-+  return size > 0 && size <= loongarch_small_data_threshold;
-+}
-+
-+/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P.  We don't want to use
-+   anchors for small data: the GP register acts as an anchor in that
-+   case.  We also don't want to use them for PC-relative accesses,
-+   where the PC acts as an anchor.  */
-+
-+static bool
-+loongarch_use_anchors_for_symbol_p (const_rtx symbol)
-+{
-+  return default_use_anchors_for_symbol_p (symbol);
-+}
-+
-+/* The LARCH debug format wants all automatic variables and arguments
-+   to be in terms of the virtual frame pointer (stack pointer before
-+   any adjustment in the function), while the LARCH 3.0 linker wants
-+   the frame pointer to be the stack pointer after the initial
-+   adjustment.  So, we do the adjustment here.  The arg pointer (which
-+   is eliminated) points to the virtual frame pointer, while the frame
-+   pointer (which may be eliminated) points to the stack pointer after
-+   the initial adjustments.  */
-+
-+HOST_WIDE_INT
-+loongarch_debugger_offset (rtx addr, HOST_WIDE_INT offset)
-+{
-+  rtx offset2 = const0_rtx;
-+  rtx reg = eliminate_constant_term (addr, &offset2);
-+
-+  if (offset == 0)
-+    offset = INTVAL (offset2);
-+
-+  if (reg == stack_pointer_rtx
-+      || reg == frame_pointer_rtx
-+      || reg == hard_frame_pointer_rtx)
-+    {
-+      offset -= cfun->machine->frame.total_size;
-+      if (reg == hard_frame_pointer_rtx)
-+        offset += cfun->machine->frame.hard_frame_pointer_offset;
-+    }
-+
-+  return offset;
-+}
-+
-+/* Implement ASM_OUTPUT_EXTERNAL.  */
-+
-+void
-+loongarch_output_external (FILE *file, tree decl, const char *name)
-+{
-+  default_elf_asm_output_external (file, decl, name);
-+
-+  /* We output the name if and only if TREE_SYMBOL_REFERENCED is
-+     set in order to avoid putting out names that are never really
-+     used.  */
-+  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
-+    {
-+      if (loongarch_in_small_data_p (decl))
-+        {
-+          /* When using assembler macros, emit .extern directives for
-+             all small-data externs so that the assembler knows how
-+             big they are.
-+
-+             In most cases it would be safe (though pointless) to emit
-+             .externs for other symbols too.  One exception is when an
-+             object is within the -G limit but declared by the user to
-+             be in a section other than .sbss or .sdata.  */
-+          fputs ("\t.extern\t", file);
-+          assemble_name (file, name);
-+          fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
-+                   int_size_in_bytes (TREE_TYPE (decl)));
-+        }
-+    }
-+}
-+
-+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
-+
-+static void ATTRIBUTE_UNUSED
-+loongarch_output_dwarf_dtprel (FILE *file, int size, rtx x)
-+{
-+  switch (size)
-+    {
-+    case 4:
-+      fputs ("\t.dtprelword\t", file);
-+      break;
-+
-+    case 8:
-+      fputs ("\t.dtpreldword\t", file);
-+      break;
-+
-+    default:
-+      gcc_unreachable ();
-+    }
-+  output_addr_const (file, x);
-+  fputs ("+0x8000", file);
-+}
-+
-+/* Implement TARGET_DWARF_REGISTER_SPAN.  The port never describes a
-+   register as spanning multiple DWARF registers, so simply return
-+   NULL_RTX.  */
-+
-+static rtx
-+loongarch_dwarf_register_span (rtx reg ATTRIBUTE_UNUSED)
-+{
-+  return NULL_RTX;
-+}
-+
-+/* Implement TARGET_DWARF_FRAME_REG_MODE.  */
-+
-+static machine_mode
-+loongarch_dwarf_frame_reg_mode (int regno)
-+{
-+  machine_mode mode = default_dwarf_frame_reg_mode (regno);
-+
-+  if (FP_REG_P (regno) && loongarch_abi == ABILP32 && TARGET_FLOAT64)
-+    mode = SImode;
-+
-+  return mode;
-+}
-+
-+/* Implement ASM_OUTPUT_ASCII.
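-+
-+   For example, for the bytes 'a', '\n' and '"' the emitter below
-+   produces
-+
-+     .ascii "a\012\""
-+
-+   escaping backslash and double quote, octal-escaping non-printing
-+   bytes, and starting a fresh .ascii directive once the output column
-+   passes 72.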
*/ -+ -+void -+loongarch_output_ascii (FILE *stream, const char *string, size_t len) -+{ -+ size_t i; -+ int cur_pos; -+ -+ cur_pos = 17; -+ fprintf (stream, "\t.ascii\t\""); -+ for (i = 0; i < len; i++) -+ { -+ int c; -+ -+ c = (unsigned char) string[i]; -+ if (ISPRINT (c)) -+ { -+ if (c == '\\' || c == '\"') -+ { -+ putc ('\\', stream); -+ cur_pos++; -+ } -+ putc (c, stream); -+ cur_pos++; -+ } -+ else -+ { -+ fprintf (stream, "\\%03o", c); -+ cur_pos += 4; -+ } -+ -+ if (cur_pos > 72 && i+1 < len) -+ { -+ cur_pos = 17; -+ fprintf (stream, "\"\n\t.ascii\t\""); -+ } -+ } -+ fprintf (stream, "\"\n"); -+} -+ -+/* Emit either a label, .comm, or .lcomm directive. When using assembler -+ macros, mark the symbol as written so that loongarch_asm_output_external -+ won't emit an .extern for it. STREAM is the output file, NAME is the -+ name of the symbol, INIT_STRING is the string that should be written -+ before the symbol and FINAL_STRING is the string that should be -+ written after it. FINAL_STRING is a printf format that consumes the -+ remaining arguments. */ -+ -+void -+loongarch_declare_object (FILE *stream, const char *name, const char *init_string, -+ const char *final_string, ...) -+{ -+ va_list ap; -+ -+ fputs (init_string, stream); -+ assemble_name (stream, name); -+ va_start (ap, final_string); -+ vfprintf (stream, final_string, ap); -+ va_end (ap); -+ -+ tree name_tree = get_identifier (name); -+ TREE_ASM_WRITTEN (name_tree) = 1; -+} -+ -+/* Declare a common object of SIZE bytes using asm directive INIT_STRING. -+ NAME is the name of the object and ALIGN is the required alignment -+ in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third -+ alignment argument. */ -+ -+void -+loongarch_declare_common_object (FILE *stream, const char *name, -+ const char *init_string, -+ unsigned HOST_WIDE_INT size, -+ unsigned int align, bool takes_alignment_p) -+{ -+ if (!takes_alignment_p) -+ { -+ size += (align / BITS_PER_UNIT) - 1; -+ size -= size % (align / BITS_PER_UNIT); -+ loongarch_declare_object (stream, name, init_string, -+ "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size); -+ } -+ else -+ loongarch_declare_object (stream, name, init_string, -+ "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", -+ size, align / BITS_PER_UNIT); -+} -+ -+/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the -+ elfos.h version, but we also need to handle -muninit-const-in-rodata. */ -+ -+void -+loongarch_output_aligned_decl_common (FILE *stream, tree decl, const char *name, -+ unsigned HOST_WIDE_INT size, -+ unsigned int align) -+{ -+ loongarch_declare_common_object (stream, name, "\n\t.comm\t", -+ size, align, true); -+} -+ -+#ifdef ASM_OUTPUT_SIZE_DIRECTIVE -+extern int size_directive_output; -+ -+/* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF -+ definitions except that it uses loongarch_declare_object to emit the label. */ -+ -+void -+loongarch_declare_object_name (FILE *stream, const char *name, -+ tree decl ATTRIBUTE_UNUSED) -+{ -+#ifdef ASM_OUTPUT_TYPE_DIRECTIVE -+#ifdef USE_GNU_UNIQUE_OBJECT -+ /* As in elfos.h. 
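-+
-+   (An aside on loongarch_declare_common_object above: when the
-+   directive cannot take an alignment argument, the alignment is baked
-+   into the size by rounding up, i.e., as a minimal sketch,
-+
-+     size += align_bytes - 1;
-+     size -= size % align_bytes;   // round up to a multiple of align
-+
-+   so a 13-byte object with 8-byte alignment is emitted as 16 bytes.)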
*/ -+ if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (decl) -+ && (!DECL_ARTIFICIAL (decl) || !TREE_READONLY (decl))) -+ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "gnu_unique_object"); -+ else -+#endif -+ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); -+#endif -+ -+ size_directive_output = 0; -+ if (!flag_inhibit_size_directive && DECL_SIZE (decl)) -+ { -+ HOST_WIDE_INT size; -+ -+ size_directive_output = 1; -+ size = int_size_in_bytes (TREE_TYPE (decl)); -+ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); -+ } -+ -+ loongarch_declare_object (stream, name, "", ":\n"); -+} -+ -+/* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */ -+ -+void -+loongarch_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end) -+{ -+ const char *name; -+ -+ name = XSTR (XEXP (DECL_RTL (decl), 0), 0); -+ if (!flag_inhibit_size_directive -+ && DECL_SIZE (decl) != 0 -+ && !at_end -+ && top_level -+ && DECL_INITIAL (decl) == error_mark_node -+ && !size_directive_output) -+ { -+ HOST_WIDE_INT size; -+ -+ size_directive_output = 1; -+ size = int_size_in_bytes (TREE_TYPE (decl)); -+ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); -+ } -+} -+#endif -+ -+/* Mark text contents as code or data, mainly for the purpose of correct -+ disassembly. Emit a local symbol and set its type appropriately for -+ that purpose. Also emit `.insn' if marking contents as code so that -+ the ISA mode is recorded and any padding that follows is disassembled -+ as correct instructions. */ -+ -+void -+loongarch_set_text_contents_type (FILE *file ATTRIBUTE_UNUSED, -+ const char *prefix ATTRIBUTE_UNUSED, -+ unsigned long num ATTRIBUTE_UNUSED, -+ bool function_p ATTRIBUTE_UNUSED) -+{ -+#ifdef ASM_OUTPUT_TYPE_DIRECTIVE -+ char buf[(sizeof (num) * 10) / 4 + 2]; -+ const char *fnname; -+ char *sname; -+ rtx symbol; -+ -+ sprintf (buf, "%lu", num); -+ symbol = XEXP (DECL_RTL (current_function_decl), 0); -+ fnname = targetm.strip_name_encoding (XSTR (symbol, 0)); -+ sname = ACONCAT ((prefix, fnname, "_", buf, NULL)); -+ -+ ASM_OUTPUT_TYPE_DIRECTIVE (file, sname, function_p ? "function" : "object"); -+ assemble_name (file, sname); -+ fputs (":\n", file); -+// if (function_p) -+// fputs ("\t.insn\n", file); -+#endif -+} -+ -+ -+/* Implement TARGET_ASM_FILE_START. */ -+ -+static void -+loongarch_file_start (void) -+{ -+ default_file_start (); -+ -+ /* Generate a special section to describe the ABI switches used to -+ produce the resultant binary. */ -+} -+ -+ -+/* Return true if REGNO is a register that is ordinarily call-clobbered -+ but must nevertheless be preserved by an interrupt handler. */ -+ -+static bool -+loongarch_interrupt_extra_call_saved_reg_p (unsigned int regno) -+{ -+ if (GP_REG_P (regno) -+ && cfun->machine->use_shadow_register_set == SHADOW_SET_NO) -+ { -+ /* $0 is hard-wired. */ -+ if (regno == GP_REG_FIRST) -+ return false; -+ -+ /* The function will return the stack pointer to its original value -+ anyway. */ -+ if (regno == STACK_POINTER_REGNUM) -+ return false; -+ -+ /* Otherwise, return true for registers that aren't ordinarily -+ call-clobbered. */ -+ return call_used_regs[regno]; -+ } -+ -+ return false; -+} -+ -+/* Implement TARGET_FRAME_POINTER_REQUIRED. */ -+ -+static bool -+loongarch_frame_pointer_required (void) -+{ -+ /* If the function contains dynamic stack allocations, we need to -+ use the frame pointer to access the static parts of the frame. 
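-+
-+   For example (an illustration only):
-+
-+     void f (int n)
-+     {
-+       int fixed[16];                   // static part of the frame
-+       char *p = __builtin_alloca (n);  // moves the stack pointer
-+       ...
-+     }
-+
-+   After the alloca the stack pointer no longer sits at a compile-time
-+   offset from fixed[], so the static slots must be addressed off the
-+   frame pointer instead.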
*/ -+ if (cfun->calls_alloca) -+ return true; -+ -+ return false; -+} -+ -+/* Make sure that we're not trying to eliminate to the wrong hard frame -+ pointer. */ -+ -+static bool -+loongarch_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) -+{ -+ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM); -+} -+ -+ -+ -+/* Implement RETURN_ADDR_RTX. We do not support moving back to a -+ previous frame. */ -+ -+rtx -+loongarch_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) -+{ -+ if (count != 0) -+ return const0_rtx; -+ -+ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); -+} -+ -+/* Emit code to change the current function's return address to -+ ADDRESS. SCRATCH is available as a scratch register, if needed. -+ ADDRESS and SCRATCH are both word-mode GPRs. */ -+ -+void -+loongarch_set_return_address (rtx address, rtx scratch) -+{ -+ rtx slot_address; -+ -+ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM)); -+ if (frame_pointer_needed) -+ slot_address = loongarch_add_offset (scratch, hard_frame_pointer_rtx, -+ -UNITS_PER_WORD); -+ else -+ slot_address = loongarch_add_offset (scratch, stack_pointer_rtx, -+ cfun->machine->frame.gp_sp_offset); -+ loongarch_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address); -+} -+ -+ -+/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the -+ cprestore slot. LOAD_P is true if the caller wants to load from -+ the cprestore slot; it is false if the caller wants to store to -+ the slot. */ -+ -+static void -+loongarch_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset, -+ bool load_p) -+{ -+ const struct loongarch_frame_info *frame; -+ -+ frame = &cfun->machine->frame; -+ /* .cprestore always uses the stack pointer instead of the frame pointer. -+ We have a free choice for direct stores, -+ Using the stack pointer would sometimes give more -+ (early) scheduling freedom, but using the frame pointer would -+ sometimes give more (late) scheduling freedom. It's hard to -+ predict which applies to a given function, so let's keep things -+ simple. -+ -+ Loads must always use the frame pointer in functions that call -+ alloca, and there's little benefit to using the stack pointer -+ otherwise. */ -+ if (frame_pointer_needed) -+ { -+ *base = hard_frame_pointer_rtx; -+ *offset = frame->args_size - frame->hard_frame_pointer_offset; -+ } -+ else -+ { -+ *base = stack_pointer_rtx; -+ *offset = frame->args_size; -+ } -+} -+ -+/* Return true if X is the load or store address of the cprestore slot; -+ LOAD_P says which. */ -+ -+bool -+loongarch_cprestore_address_p (rtx x, bool load_p) -+{ -+ rtx given_base, required_base; -+ HOST_WIDE_INT given_offset, required_offset; -+ -+ loongarch_split_plus (x, &given_base, &given_offset); -+ loongarch_get_cprestore_base_and_offset (&required_base, &required_offset, load_p); -+ return given_base == required_base && given_offset == required_offset; -+} -+ -+ -+/* A function to save or store a register. The first argument is the -+ register and the second is the stack slot. 
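-+
-+   A minimal stand-alone model of how such a callback is driven over the
-+   frame's register mask (an illustration, not the port's
-+   loongarch_for_each_saved_reg itself):
-+
-+     typedef void (*save_restore_fn) (int regno, long sp_offset);
-+
-+     static void
-+     for_each_saved_gpr (unsigned mask, long offset, save_restore_fn fn)
-+     {
-+       for (int regno = 0; regno < 32; regno++)
-+         if (mask & (1u << regno))    // register recorded in the mask
-+           {
-+             fn (regno, offset);      // save or restore this register
-+             offset -= 8;             // next slot, assuming 64-bit regs
-+           }
-+     }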
*/
-+typedef void (*loongarch_save_restore_fn) (rtx, rtx);
-+
-+/* Loongson LA464: emit the insn pattern for a 128-bit paired load
-+   (the gslq-style two-register load).  */
-+
-+void
-+loongarch_la464_emit_128bit_load (rtx operands[])
-+{
-+  rtx op0, op1, op2, op3;
-+
-+  op0 = gen_rtx_REG (GET_MODE (operands[0]), REGNO (operands[0]));
-+  op1 = operands[1];
-+  op2 = gen_rtx_REG (GET_MODE (operands[2]), REGNO (operands[2]));
-+  op3 = operands[3];
-+  emit (gen_rtx_PARALLEL (VOIDmode,
-+                          gen_rtvec (2,
-+                                     gen_rtx_SET (op0, op1),
-+                                     gen_rtx_SET (op2, op3))));
-+}
-+
-+/* Likewise the gssq-style 128-bit paired store.  */
-+
-+void
-+loongarch_la464_emit_128bit_store (rtx operands[])
-+{
-+  rtx op0, op1, op2, op3;
-+
-+  op0 = operands[0];
-+  op1 = gen_rtx_REG (GET_MODE (operands[1]), REGNO (operands[1]));
-+  op2 = operands[2];
-+  op3 = gen_rtx_REG (GET_MODE (operands[3]), REGNO (operands[3]));
-+  emit (gen_rtx_PARALLEL (VOIDmode,
-+                          gen_rtvec (2,
-+                                     gen_rtx_SET (op0, op1),
-+                                     gen_rtx_SET (op2, op3))));
-+}
-+
-+/* Implement ASM_DECLARE_FUNCTION_NAME.  */
-+
-+void
-+loongarch_declare_function_name (FILE *stream ATTRIBUTE_UNUSED,
-+                                 const char *name, tree fndecl ATTRIBUTE_UNUSED)
-+{
-+  loongarch_start_function_definition (name);
-+}
-+
-+/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE.  */
-+
-+static void
-+loongarch_output_function_prologue (FILE *file ATTRIBUTE_UNUSED)
-+{
-+}
-+
-+/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE.  */
-+
-+static void
-+loongarch_output_function_epilogue (FILE *)
-+{
-+  const char *fnname;
-+
-+  /* Get the function name the same way that toplev.c does before calling
-+     assemble_start_function.  This is needed so that the name used here
-+     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
-+  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
-+  loongarch_end_function_definition (fnname);
-+}
-+
-+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
-+
-+#if PROBE_INTERVAL > 16384
-+#error Cannot use indexed addressing mode for stack probing
-+#endif
-+
-+/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
-+   inclusive.  These are offsets from the current stack pointer.  */
-+
-+static void
-+loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
-+{
-+  /* See if we have a constant small number of probes to generate.  If so,
-+     that's the easy case.  */
-+  if ((TARGET_64BIT && (first + size <= 8 * PROBE_INTERVAL))
-+      || (!TARGET_64BIT && (first + size <= 2048)))
-+    {
-+      HOST_WIDE_INT i;
-+
-+      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
-+         it exceeds SIZE.  If only one probe is needed, this will not
-+         generate any code.  Then probe at FIRST + SIZE.  */
-+      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
-+        emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
-+                                         -(first + i)));
-+
-+      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
-+                                       -(first + size)));
-+    }
-+
-+  /* Otherwise, do the same as above, but in a loop.
Note that we must be -+ extra careful with variables wrapping around because we might be at -+ the very top (or the very bottom) of the address space and we have -+ to be able to handle this case properly; in particular, we use an -+ equality test for the loop condition. */ -+ else -+ { -+ HOST_WIDE_INT rounded_size; -+ rtx r13 = LARCH_PROLOGUE_TEMP (Pmode); -+ rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); -+ rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); -+ -+ /* Sanity check for the addressing mode we're going to use. */ -+ gcc_assert (first <= 16384); -+ -+ -+ /* Step 1: round SIZE to the previous multiple of the interval. */ -+ -+ rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); -+ /* TEST_ADDR = SP + FIRST */ -+ if (first != 0) -+ { -+ emit_move_insn (r14, GEN_INT (first)); -+ emit_insn (gen_rtx_SET (r13, gen_rtx_MINUS (Pmode, stack_pointer_rtx, r14))); -+ } -+ else -+ emit_move_insn (r13, stack_pointer_rtx); -+ -+ /* Step 2: compute initial and final value of the loop counter. */ -+ -+ emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); -+ if (rounded_size == 0) -+ emit_move_insn (r12, r13); -+ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ -+ else -+ { -+ emit_move_insn (r12, GEN_INT (rounded_size)); -+ emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r13, r12))); -+ /* Step 3: the loop -+ -+ do -+ { -+ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL -+ probe at TEST_ADDR -+ } -+ while (TEST_ADDR != LAST_ADDR) -+ -+ probes at FIRST + N * PROBE_INTERVAL for values of N from 1 -+ until it is equal to ROUNDED_SIZE. */ -+ -+ emit_insn (PMODE_INSN (gen_probe_stack_range, (r13, r13, r12, r14))); -+ } -+ -+ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time -+ that SIZE is equal to ROUNDED_SIZE. */ -+ -+ if (size != rounded_size) -+ { -+ if (TARGET_64BIT) -+ emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size)); -+ else -+ { -+ HOST_WIDE_INT i; -+ for (i = 2048; i < (size - rounded_size); i += 2048 ) -+ { -+ emit_stack_probe (plus_constant (Pmode, r12, -i)); -+ emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, r12, -2048))); -+ } -+ emit_stack_probe (plus_constant (Pmode, r12, -(size - rounded_size - i + 2048))); -+ } -+ } -+ } -+ -+ /* Make sure nothing is scheduled before we are done. */ -+ emit_insn (gen_blockage ()); -+} -+ -+/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are -+ absolute addresses. */ -+ -+const char * -+loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) -+{ -+ static int labelno = 0; -+ char loop_lab[32], tmp[64]; -+ rtx xops[3]; -+ -+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++); -+ -+ /* Loop. */ -+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab); -+ -+ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ -+ xops[0] = reg1; -+ xops[1] = GEN_INT (-PROBE_INTERVAL); -+ xops[2] = reg3; -+ if (TARGET_64BIT) -+ output_asm_insn ("sub.d\t%0,%0,%2", xops); -+ else -+ output_asm_insn ("sub.w\t%0,%0,%2", xops); -+ -+ /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */ -+ xops[1] = reg2; -+ strcpy (tmp, "bne\t%0,%1,"); -+ if (TARGET_64BIT) -+ output_asm_insn ("st.d\t$r0,%0,0", xops); -+ else -+ output_asm_insn ("st.w\t$r0,%0,0", xops); -+ output_asm_insn (strcat (tmp, &loop_lab[1]), xops); -+ -+ return ""; -+} -+ -+/* Expand the "prologue" pattern. 
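-+
-+   (As an illustration of loongarch_emit_probe_stack_range above, with
-+   an assumed PROBE_INTERVAL of 4096: for first = 1024 and size = 8192
-+   the small constant case emits
-+
-+     probe at sp - 5120      // first + 1 * PROBE_INTERVAL
-+     probe at sp - 9216      // first + size
-+
-+   while larger sizes round SIZE down to a multiple of PROBE_INTERVAL
-+   and walk TEST_ADDR down one interval per iteration, probing each
-+   step until it reaches LAST_ADDR.)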
*/ -+ -+void -+loongarch_expand_prologue (void) -+{ -+ struct loongarch_frame_info *frame = &cfun->machine->frame; -+ HOST_WIDE_INT size = frame->total_size; -+ unsigned mask = frame->mask; -+ rtx insn; -+ -+ if (flag_stack_usage_info) -+ current_function_static_stack_size = size; -+ -+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK -+ || flag_stack_clash_protection) -+ { -+ if (crtl->is_leaf && !cfun->calls_alloca) -+ { -+ if (size > PROBE_INTERVAL && size > get_stack_check_protect ()) -+ loongarch_emit_probe_stack_range (get_stack_check_protect (), -+ size - get_stack_check_protect ()); -+ } -+ else if (size > 0) -+ loongarch_emit_probe_stack_range (get_stack_check_protect (), size); -+ } -+ -+ /* When optimizing for size, call a subroutine to save the registers. */ -+ if (loongarch_use_save_libcall (frame)) -+ { -+ rtx dwarf = NULL_RTX; -+ dwarf = loongarch_adjust_libcall_cfi_prologue (); -+ -+ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ -+ size -= frame->save_libcall_adjustment; -+ insn = emit_insn (gen_gpr_save (GEN_INT (mask))); -+ -+ RTX_FRAME_RELATED_P (insn) = 1; -+ REG_NOTES (insn) = dwarf; -+ } -+ -+ /* Save the registers. */ -+ if ((frame->mask | frame->fmask) != 0) -+ { -+ HOST_WIDE_INT step1 = MIN (size, loongarch_first_stack_step (frame)); -+ -+ insn = gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, -+ GEN_INT (-step1)); -+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -+ size -= step1; -+ loongarch_for_each_saved_reg (size, loongarch_save_reg); -+ } -+ -+ frame->mask = mask; /* Undo the above fib. */ -+ -+ /* Set up the frame pointer, if we're using one. */ -+ if (frame_pointer_needed) -+ { -+ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, -+ GEN_INT (frame->hard_frame_pointer_offset - size)); -+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -+ -+ loongarch_emit_stack_tie (); -+ } -+ -+ /* Allocate the rest of the frame. */ -+ if (size > 0) -+ { -+ if (SMALL_OPERAND (-size)) -+ { -+ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ GEN_INT (-size)); -+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -+ } -+ else -+ { -+ loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); -+ emit_insn (gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, -+ N_LARCH_PROLOGUE_TEMP (Pmode))); -+ -+ /* Describe the effect of the previous instructions. */ -+ insn = plus_constant (Pmode, stack_pointer_rtx, -size); -+ insn = gen_rtx_SET (stack_pointer_rtx, insn); -+ loongarch_set_frame_expr (insn); -+ } -+ } -+} -+ -+ -+/* Return true if register REGNO can store a value of mode MODE. -+ The result of this function is cached in loongarch_hard_regno_mode_ok. */ -+ -+static bool -+loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) -+{ -+ unsigned int size; -+ enum mode_class mclass; -+ -+ if (mode == FCCmode) -+ return ST_REG_P (regno); -+ -+ size = GET_MODE_SIZE (mode); -+ mclass = GET_MODE_CLASS (mode); -+ -+ if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) && !LASX_SUPPORTED_MODE_P (mode)) -+ return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; -+ -+ /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ -+ if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) -+ return true; -+ -+ /* For LASX, allow TImode and 256-bit vector modes in all FPR. 
FIXME: */ -+ if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) -+ return true; -+ -+ if (FP_REG_P (regno) -+ && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0 -+ || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG))) -+ { -+ if (mclass == MODE_FLOAT -+ || mclass == MODE_COMPLEX_FLOAT -+ || mclass == MODE_VECTOR_FLOAT) -+ return size <= UNITS_PER_FPVALUE; -+ -+ /* Allow integer modes that fit into a single register. We need -+ to put integers into FPRs when using instructions like CVT -+ and TRUNC. There's no point allowing sizes smaller than a word, -+ because the FPU has no appropriate load/store instructions. */ -+ if (mclass == MODE_INT) -+ return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; -+ } -+ -+ return false; -+} -+ -+/* Implement TARGET_HARD_REGNO_MODE_OK. */ -+ -+static bool -+loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) -+{ -+ return loongarch_hard_regno_mode_ok_p[mode][regno]; -+} -+ -+/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */ -+ -+bool -+loongarch_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED, -+ unsigned int new_reg) -+{ -+ /* Interrupt functions can only use registers that have already been -+ saved by the prologue, even if they would normally be call-clobbered. */ -+ if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (new_reg)) -+ return false; -+ -+ return true; -+} -+ -+/* Return nonzero if register REGNO can be used as a scratch register -+ in peephole2. */ -+ -+bool -+loongarch_hard_regno_scratch_ok (unsigned int regno) -+{ -+ /* See loongarch_hard_regno_rename_ok. */ -+ if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (regno)) -+ return false; -+ -+ return true; -+} -+ -+static bool -+loongarch_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode) -+{ -+ if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) -+ return true; -+ -+ return false; -+} -+ -+/* Implement TARGET_HARD_REGNO_NREGS. */ -+ -+static unsigned int -+loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) -+{ -+ if (ST_REG_P (regno)) -+ /* The size of FP status registers is always 4, because they only hold -+ FCCmode values, and FCCmode is always considered to be 4 bytes wide. */ -+ return (GET_MODE_SIZE (mode) + 3) / 4; -+ -+ if (FP_REG_P (regno)) -+ { -+ if (LSX_SUPPORTED_MODE_P (mode)) -+ return 1; -+ -+ if (LASX_SUPPORTED_MODE_P (mode)) -+ return 1; -+ -+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; -+ } -+ -+ /* All other registers are word-sized. */ -+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; -+} -+ -+/* Implement CLASS_MAX_NREGS, taking the maximum of the cases -+ in loongarch_hard_regno_nregs. 
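-+
-+   The per-class computation is a ceiling division by the smallest
-+   register size present in the class, i.e. (a minimal sketch)
-+
-+     static int
-+     max_nregs (int mode_size, int reg_size)
-+     {
-+       return (mode_size + reg_size - 1) / reg_size;
-+     }
-+
-+   so a 16-byte mode needs two 8-byte GPRs but only one 16-byte LSX
-+   register.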
*/ -+ -+int -+loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) -+{ -+ int size; -+ HARD_REG_SET left; -+ -+ size = 0x8000; -+ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); -+ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS])) -+ { -+ if (loongarch_hard_regno_mode_ok (ST_REG_FIRST, mode)) -+ size = MIN (size, 4); -+ -+ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]); -+ } -+ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS])) -+ { -+ if (loongarch_hard_regno_mode_ok (FP_REG_FIRST, mode)) -+ { -+ if (LASX_SUPPORTED_MODE_P (mode)) //Fix me -+ size = MIN (size, UNITS_PER_LASX_REG); -+ else if (LSX_SUPPORTED_MODE_P (mode)) -+ size = MIN (size, UNITS_PER_LSX_REG); -+ else -+ size = MIN (size, UNITS_PER_FPREG); -+ } -+ -+ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]); -+ } -+ if (!hard_reg_set_empty_p (left)) -+ size = MIN (size, UNITS_PER_WORD); -+ return (GET_MODE_SIZE (mode) + size - 1) / size; -+} -+ -+/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */ -+ -+static bool -+loongarch_can_change_mode_class (machine_mode from, -+ machine_mode to, reg_class_t rclass) -+{ -+ /* Allow conversions between different Loongson integer vectors, -+ and between those vectors and DImode. */ -+ if (GET_MODE_SIZE (from) == 8 && GET_MODE_SIZE (to) == 8 -+ && INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to)) -+ return true; -+ -+ /* Allow conversions between different LSX/LASX vector modes. */ -+ if (LASX_SUPPORTED_MODE_P (from) && LASX_SUPPORTED_MODE_P (to)) -+ return true; -+ -+ /* Allow conversions between different LSX vector modes. */ -+ if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) -+ return true; -+ -+ /* Otherwise, there are several problems with changing the modes of -+ values in floating-point registers: -+ -+ - When a multi-word value is stored in paired floating-point -+ registers, the first register always holds the low word. We -+ therefore can't allow FPRs to change between single-word and -+ multi-word modes on big-endian targets. -+ -+ - GCC assumes that each word of a multiword register can be -+ accessed individually using SUBREGs. This is not true for -+ floating-point registers if they are bigger than a word. -+ -+ - Loading a 32-bit value into a 64-bit floating-point register -+ will not sign-extend the value, despite what LOAD_EXTEND_OP -+ says. We can't allow FPRs to change from SImode to a wider -+ mode on 64-bit targets. -+ -+ - If the FPU has already interpreted a value in one format, we -+ must not ask it to treat the value as having a different -+ format. -+ -+ We therefore disallow all mode changes involving FPRs. */ -+ -+ return !reg_classes_intersect_p (FP_REGS, rclass); -+} -+ -+/* Implement target hook small_register_classes_for_mode_p. */ -+ -+static bool -+loongarch_small_register_classes_for_mode_p (machine_mode mode -+ ATTRIBUTE_UNUSED) -+{ -+ return 0; -+} -+ -+/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction, -+ or use the LSX's move.v instruction. */ -+ -+static bool -+loongarch_mode_ok_for_mov_fmt_p (machine_mode mode) -+{ -+ switch (mode) -+ { -+ case E_SFmode: -+ return TARGET_HARD_FLOAT; -+ -+ case E_DFmode: -+ return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT; -+ -+ case E_V2SFmode: -+ return 0; -+ -+ default: -+ return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode) : LSX_SUPPORTED_MODE_P (mode); -+ } -+} -+ -+/* Implement TARGET_MODES_TIEABLE_P. 
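-+
-+   As a sketch of the predicate below, assuming hard float is enabled:
-+
-+     tieable (DFmode, DFmode) -> true    // identical modes
-+     tieable (SImode, DImode) -> true    // neither prefers the FPRs
-+     tieable (DFmode, DImode) -> false   // DFmode prefers the FPRs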
*/ -+ -+static bool -+loongarch_modes_tieable_p (machine_mode mode1, machine_mode mode2) -+{ -+ /* FPRs allow no mode punning, so it's not worth tying modes if we'd -+ prefer to put one of them in FPRs. */ -+ return (mode1 == mode2 -+ || (!loongarch_mode_ok_for_mov_fmt_p (mode1) -+ && !loongarch_mode_ok_for_mov_fmt_p (mode2))); -+} -+ -+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */ -+ -+static reg_class_t -+loongarch_preferred_reload_class (rtx x, reg_class_t rclass) -+{ -+ if (reg_class_subset_p (FP_REGS, rclass) -+ && loongarch_mode_ok_for_mov_fmt_p (GET_MODE (x))) -+ return FP_REGS; -+ -+ if (reg_class_subset_p (GR_REGS, rclass)) -+ rclass = GR_REGS; -+ -+ return rclass; -+} -+ -+/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation. -+ Return a "canonical" class to represent it in later calculations. */ -+ -+static reg_class_t -+loongarch_canonicalize_move_class (reg_class_t rclass) -+{ -+ if (reg_class_subset_p (rclass, GENERAL_REGS)) -+ rclass = GENERAL_REGS; -+ -+ return rclass; -+} -+ -+/* Return the cost of moving a value from a register of class FROM to a GPR. -+ Return 0 for classes that are unions of other classes handled by this -+ function. */ -+ -+static int -+loongarch_move_to_gpr_cost (reg_class_t from) -+{ -+ switch (from) -+ { -+ case GENERAL_REGS: -+ /* MOVE macro. */ -+ return 2; -+ -+ case FP_REGS: -+ /* MFC1, etc. */ -+ return 4; -+ -+ default: -+ return 0; -+ } -+} -+ -+/* Return the cost of moving a value from a GPR to a register of class TO. -+ Return 0 for classes that are unions of other classes handled by this -+ function. */ -+ -+static int -+loongarch_move_from_gpr_cost (reg_class_t to) -+{ -+ switch (to) -+ { -+ case GENERAL_REGS: -+ /*MOVE macro. */ -+ return 2; -+ -+ case FP_REGS: -+ /* MTC1, etc. */ -+ return 4; -+ -+ default: -+ return 0; -+ } -+} -+ -+/* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the -+ maximum of the move costs for subclasses; regclass will work out -+ the maximum for us. */ -+ -+static int -+loongarch_register_move_cost (machine_mode mode, -+ reg_class_t from, reg_class_t to) -+{ -+ reg_class_t dregs; -+ int cost1, cost2; -+ -+ from = loongarch_canonicalize_move_class (from); -+ to = loongarch_canonicalize_move_class (to); -+ -+ /* Handle moves that can be done without using general-purpose registers. */ -+ if (from == FP_REGS) -+ { -+ if (to == FP_REGS && loongarch_mode_ok_for_mov_fmt_p (mode)) -+ /* MOV.FMT. */ -+ return 4; -+ } -+ -+ /* Handle cases in which only one class deviates from the ideal. */ -+ dregs = GENERAL_REGS; -+ if (from == dregs) -+ return loongarch_move_from_gpr_cost (to); -+ if (to == dregs) -+ return loongarch_move_to_gpr_cost (from); -+ -+ /* Handles cases that require a GPR temporary. */ -+ cost1 = loongarch_move_to_gpr_cost (from); -+ if (cost1 != 0) -+ { -+ cost2 = loongarch_move_from_gpr_cost (to); -+ if (cost2 != 0) -+ return cost1 + cost2; -+ } -+ -+ return 0; -+} -+ -+/* Implement TARGET_MEMORY_MOVE_COST. */ -+ -+static int -+loongarch_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in) -+{ -+ return (loongarch_cost->memory_latency -+ + memory_move_secondary_cost (mode, rclass, in)); -+} -+ -+/* Implement TARGET_SECONDARY_MEMORY_NEEDED. -+ -+ When targeting the o32 FPXX ABI, all moves with a length of doubleword -+ or greater must be performed by FR-mode-aware instructions. -+ This can be achieved using MOVFRH2GR.S/MOVGR2FRH.W when these instructions are -+ available but otherwise moves must go via memory. 
-+ Using MOVGR2FR/MOVFR2GR to access the lower-half of these registers would require -+ a forbidden single-precision access. We require all double-word moves to use -+ memory because adding even and odd floating-point registers classes -+ would have a significant impact on the backend. */ -+ -+static bool -+loongarch_secondary_memory_needed (machine_mode mode, reg_class_t class1, -+ reg_class_t class2) -+{ -+ /* Ignore spilled pseudos. */ -+ if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS)) -+ return false; -+ -+ return false; -+} -+ -+/* Return the register class required for a secondary register when -+ copying between one of the registers in RCLASS and value X, which -+ has mode MODE. X is the source of the move if IN_P, otherwise it -+ is the destination. Return NO_REGS if no secondary register is -+ needed. */ -+ -+enum reg_class -+loongarch_secondary_reload_class (enum reg_class rclass, -+ machine_mode mode, rtx x, bool) -+{ -+ int regno; -+ -+ regno = true_regnum (x); -+ -+ /* Copying from accumulator registers to anywhere other than a general -+ register requires a temporary general register. */ -+// if (reg_class_subset_p (rclass, ACC_REGS)) ?????? -+// return GP_REG_P (regno) ? NO_REGS : GR_REGS; -+ if (reg_class_subset_p (rclass, FP_REGS)) -+ { -+ if (regno < 0 -+ || (MEM_P (x) -+ && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))) -+ /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use -+ pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */ -+ return NO_REGS; -+ -+ if (MEM_P (x) && LSX_SUPPORTED_MODE_P (mode)) -+ /* In this case we can use LSX LD.* and ST.*. */ -+ return NO_REGS; -+ -+ if (GP_REG_P (regno) || x == CONST0_RTX (mode)) -+ /* In this case we can use movgr2fr.s, movfr2gr.s, movgr2fr.d or movfr2gr.d. */ -+ return NO_REGS; -+ -+ if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x)) -+ /* We can force the constant to memory and use lwc1 -+ and ldc1. As above, we will use pairs of lwc1s if -+ ldc1 is not supported. */ -+ return NO_REGS; -+ -+ if (FP_REG_P (regno) && loongarch_mode_ok_for_mov_fmt_p (mode)) -+ /* In this case we can use mov.fmt. */ -+ return NO_REGS; -+ -+ /* Otherwise, we need to reload through an integer register. */ -+ return GR_REGS; -+ } -+ if (FP_REG_P (regno)) -+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS; -+ -+ return NO_REGS; -+} -+ -+ -+/* Implement TARGET_VALID_POINTER_MODE. */ -+ -+static bool -+loongarch_valid_pointer_mode (scalar_int_mode mode) -+{ -+ return mode == SImode || (TARGET_64BIT && mode == DImode); -+} -+ -+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */ -+ -+static bool -+loongarch_vector_mode_supported_p (machine_mode mode) -+{ -+ return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode) : LSX_SUPPORTED_MODE_P (mode); -+} -+ -+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */ -+ -+static bool -+loongarch_scalar_mode_supported_p (scalar_mode mode) -+{ -+ if (ALL_FIXED_POINT_MODE_P (mode) -+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD) -+ return true; -+ -+ return default_scalar_mode_supported_p (mode); -+} -+ -+/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */ -+ -+static machine_mode -+loongarch_preferred_simd_mode (scalar_mode mode) -+{ -+ if (!ISA_HAS_LSX) -+ return word_mode; -+ -+ switch (mode) -+ { -+ case E_QImode: -+ return ISA_HAS_LASX ? E_V32QImode : E_V16QImode; -+ case E_HImode: -+ return ISA_HAS_LASX ? E_V16HImode : E_V8HImode; -+ case E_SImode: -+ return ISA_HAS_LASX ? E_V8SImode : E_V4SImode; -+ case E_DImode: -+ return ISA_HAS_LASX ? 
E_V4DImode : E_V2DImode; -+ -+ case E_SFmode: -+ return ISA_HAS_LASX ? E_V8SFmode : E_V4SFmode; -+ -+ case E_DFmode: -+ return ISA_HAS_LASX ? E_V4DFmode : E_V2DFmode; -+ -+ default: -+ break; -+ } -+ return word_mode; -+} -+ -+/* Implement TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES. */ -+ -+static void -+loongarch_autovectorize_vector_sizes (vector_sizes *sizes) -+{ -+ if (ISA_HAS_LASX) -+ { -+ sizes->safe_push (32); -+ sizes->safe_push (16); -+ } -+ else if (ISA_HAS_LSX) -+ sizes->safe_push (16); -+} -+ -+/* Return the length of INSN. LENGTH is the initial length computed by -+ attributes in the machine-description file. */ -+ -+int -+loongarch_adjust_insn_length (rtx_insn *insn, int length) -+{ -+ /* loongarch.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length -+ of a PIC long-branch sequence. Substitute the correct value. */ -+ if (length == MAX_PIC_BRANCH_LENGTH -+ && JUMP_P (insn) -+ && INSN_CODE (insn) >= 0 -+ && get_attr_type (insn) == TYPE_BRANCH) -+ { -+ /* Add the branch-over instruction and its delay slot, if this -+ is a conditional branch. */ -+ length = simplejump_p (insn) ? 0 : 8; -+ -+ /* Add the length of an indirect jump, ignoring the delay slot. */ -+ length += 4; -+ } -+ -+ /* A unconditional jump has an unfilled delay slot if it is not part -+ of a sequence. A conditional jump normally has a delay slot. */ -+ if (CALL_P (insn) || (JUMP_P (insn))) -+ length += 4; -+ -+ /* See how many nops might be needed to avoid hardware hazards. */ -+ if (!cfun->machine->ignore_hazard_length_p -+ && INSN_P (insn) -+ && INSN_CODE (insn) >= 0) -+ switch (get_attr_hazard (insn)) -+ { -+ case HAZARD_NONE: -+ break; -+ -+ case HAZARD_DELAY: -+ case HAZARD_FORBIDDEN_SLOT: -+ length += NOP_INSN_LENGTH; -+ break; -+ } -+ -+ return length; -+} -+ -+/* Return the assembly code for INSN, which has the operands given by -+ OPERANDS, and which branches to OPERANDS[0] if some condition is true. -+ BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0] -+ is in range of a direct branch. BRANCH_IF_FALSE is an inverted -+ version of BRANCH_IF_TRUE. */ -+ -+const char * -+loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, -+ const char *branch_if_true, -+ const char *branch_if_false) -+{ -+ unsigned int length; -+ rtx taken; -+ -+ gcc_assert (LABEL_P (operands[0])); -+ -+ length = get_attr_length (insn); -+ if (length <= 12) -+ { -+ return branch_if_true; -+ } -+ -+ /* Generate a reversed branch around a direct jump. This fallback does -+ not use branch-likely instructions. */ -+ rtx_code_label *not_taken = gen_label_rtx (); -+ taken = operands[0]; -+ -+ /* Generate the reversed branch to NOT_TAKEN. */ -+ operands[0] = not_taken; -+ output_asm_insn (branch_if_false, operands); -+ -+ /* If INSN has a delay slot, we must provide delay slots for both the -+ branch to NOT_TAKEN and the conditional jump. We must also ensure -+ that INSN's delay slot is executed in the appropriate cases. */ -+ if (final_sequence) -+ { -+ /* This first delay slot will always be executed, so use INSN's -+ delay slot if is not annulled. */ -+ if (!INSN_ANNULLED_BRANCH_P (insn)) -+ { -+ final_scan_insn (final_sequence->insn (1), -+ asm_out_file, optimize, 1, NULL); -+ final_sequence->insn (1)->set_deleted (); -+ } -+ fprintf (asm_out_file, "\n"); -+ } -+ -+ output_asm_insn (LARCH_ABSOLUTE_JUMP ("b\t%0"), &taken); -+ -+ /* Now deal with its delay slot; see above. */ -+ if (final_sequence) -+ { -+ /* This delay slot will only be executed if the branch is taken. 
-+ Use INSN's delay slot if is annulled. */ -+ if (INSN_ANNULLED_BRANCH_P (insn)) -+ { -+ final_scan_insn (final_sequence->insn (1), -+ asm_out_file, optimize, 1, NULL); -+ final_sequence->insn (1)->set_deleted (); -+ } -+ fprintf (asm_out_file, "\n"); -+ } -+ -+ /* Output NOT_TAKEN. */ -+ targetm.asm_out.internal_label (asm_out_file, "L", -+ CODE_LABEL_NUMBER (not_taken)); -+ return ""; -+} -+ -+/* Return the assembly code for INSN, which branches to OPERANDS[0] -+ if some equality condition is true. The condition is given by -+ OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of -+ OPERANDS[1]. OPERANDS[2] is the comparison's first operand; -+ OPERANDS[3] is the second operand and may be zero or a register. */ -+ -+const char * -+loongarch_output_equal_conditional_branch (rtx_insn* insn, rtx *operands, -+ bool inverted_p) -+{ -+ const char *branch[2]; -+ if (operands[3] == const0_rtx) -+ { -+ branch[!inverted_p] = LARCH_BRANCH ("b%C1z", "%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("b%N1z", "%2,%0"); -+ } else -+ { -+ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%z3,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%z3,%0"); -+ } -+ -+ return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); -+} -+ -+/* Return the assembly code for INSN, which branches to OPERANDS[0] -+ if some ordering condition is true. The condition is given by -+ OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of -+ OPERANDS[1]. OPERANDS[2] is the comparison's first operand; -+ OPERANDS[3] is the second operand and may be zero or a register. */ -+ -+const char * -+loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, -+ bool inverted_p) -+{ -+ const char *branch[2]; -+ -+ /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true. -+ Make BRANCH[0] branch on the inverse condition. */ -+ if (operands[3] != const0_rtx) -+ { -+ /* Handle degenerate cases that should not, but do, occur. */ -+ if (REGNO (operands[2]) == REGNO (operands[3])) -+ { -+ switch (GET_CODE (operands[1])) -+ { -+ case LT: -+ case LTU: -+ case GT: -+ case GTU: -+ inverted_p = !inverted_p; -+ /* Fall through. */ -+ case LE: -+ case LEU: -+ case GE: -+ case GEU: -+ branch[!inverted_p] = LARCH_BRANCH ("b", "%0"); -+ branch[inverted_p] = "\t# branch never"; -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ } -+ else -+ { -+ switch (GET_CODE (operands[1])) -+ { -+ case LE: -+ branch[!inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); -+ break; -+ case LEU: -+ branch[!inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); -+ break; -+ case GT: -+ branch[!inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); -+ break; -+ case GTU: -+ branch[!inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); -+ break; -+ case LT: -+ case LTU: -+ case GE: -+ case GEU: -+ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%3,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%3,%0"); -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ } -+ } -+ else -+ { -+ switch (GET_CODE (operands[1])) -+ { -+ /* These cases are equivalent to comparisons against zero. */ -+ case LEU: -+ inverted_p = !inverted_p; -+ /* Fall through. 
*/ -+ case GTU: -+ branch[!inverted_p] = LARCH_BRANCH ("bne", "%2,%.,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("beq", "%2,%.,%0"); -+ break; -+ -+ /* These cases are always true or always false. */ -+ case LTU: -+ inverted_p = !inverted_p; -+ /* Fall through. */ -+ case GEU: -+ branch[!inverted_p] = LARCH_BRANCH ("beq", "%.,%.,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("bne", "%.,%.,%0"); -+ break; -+ -+ case LE: -+ branch[!inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); -+ break; -+ case GT: -+ branch[!inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); -+ break; -+ case LT: -+ case GE: -+ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,$r0,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,$r0,%0"); -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ } -+ return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); -+} -+ -+/* Return the assembly code for DIV or DDIV instruction DIVISION, which has -+ the operands given by OPERANDS. Add in a divide-by-zero check if needed. -+ -+ When working around R4000 and R4400 errata, we need to make sure that -+ the division is not immediately followed by a shift[1][2]. We also -+ need to stop the division from being put into a branch delay slot[3]. -+ The easiest way to avoid both problems is to add a nop after the -+ division. When a divide-by-zero check is needed, this nop can be -+ used to fill the branch delay slot. -+ -+ [1] If a double-word or a variable shift executes immediately -+ after starting an integer division, the shift may give an -+ incorrect result. See quotations of errata #16 and #28 from -+ "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" -+ in loongarch.md for details. -+ -+ [2] A similar bug to [1] exists for all revisions of the -+ R4000 and the R4400 when run in an MC configuration. -+ From "LARCH R4000MC Errata, Processor Revision 2.2 and 3.0": -+ -+ "19. In this following sequence: -+ -+ ddiv (or ddivu or div or divu) -+ dsll32 (or dsrl32, dsra32) -+ -+ if an MPT stall occurs, while the divide is slipping the cpu -+ pipeline, then the following double shift would end up with an -+ incorrect result. -+ -+ Workaround: The compiler needs to avoid generating any -+ sequence with divide followed by extended double shift." -+ -+ This erratum is also present in "LARCH R4400MC Errata, Processor -+ Revision 1.0" and "LARCH R4400MC Errata, Processor Revision 2.0 -+ & 3.0" as errata #10 and #4, respectively. -+ -+ [3] From "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" -+ (also valid for LARCH R4000MC processors): -+ -+ "52. R4000SC: This bug does not apply for the R4000PC. -+ -+ There are two flavors of this bug: -+ -+ 1) If the instruction just after divide takes an RF exception -+ (tlb-refill, tlb-invalid) and gets an instruction cache -+ miss (both primary and secondary) and the line which is -+ currently in secondary cache at this index had the first -+ data word, where the bits 5..2 are set, then R4000 would -+ get a wrong result for the div. -+ -+ ##1 -+ nop -+ div r8, r9 -+ ------------------- # end-of page. -tlb-refill -+ nop -+ ##2 -+ nop -+ div r8, r9 -+ ------------------- # end-of page. 
-tlb-invalid -+ nop -+ -+ 2) If the divide is in the taken branch delay slot, where the -+ target takes RF exception and gets an I-cache miss for the -+ exception vector or where I-cache miss occurs for the -+ target address, under the above mentioned scenarios, the -+ div would get wrong results. -+ -+ ##1 -+ j r2 # to next page mapped or unmapped -+ div r8,r9 # this bug would be there as long -+ # as there is an ICache miss and -+ nop # the "data pattern" is present -+ -+ ##2 -+ beq r0, r0, NextPage # to Next page -+ div r8,r9 -+ nop -+ -+ This bug is present for div, divu, ddiv, and ddivu -+ instructions. -+ -+ Workaround: For item 1), OS could make sure that the next page -+ after the divide instruction is also mapped. For item 2), the -+ compiler could make sure that the divide instruction is not in -+ the branch delay slot." -+ -+ These processors have PRId values of 0x00004220 and 0x00004300 for -+ the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */ -+ -+const char * -+loongarch_output_division (const char *division, rtx *operands) -+{ -+ const char *s; -+ -+ s = division; -+ if (TARGET_CHECK_ZERO_DIV) -+ { -+ output_asm_insn (s, operands); -+ s = "bne\t%2,%.,1f\n\tbreak\t7\n1:"; -+ } -+ return s; -+} -+ -+/* Return the assembly code for LSX DIV_{S,U}.DF or MOD_{S,U}.DF instructions, -+ which has the operands given by OPERANDS. Add in a divide-by-zero check -+ if needed. */ -+ -+const char * -+loongarch_lsx_output_division (const char *division, rtx *operands) -+{ -+ const char *s; -+ machine_mode mode = GET_MODE (*operands); -+ -+ s = division; -+ if (TARGET_CHECK_ZERO_DIV) -+ { -+ if(ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) -+ { -+ output_asm_insn ("xvsetallnez.%v0\t$fcc7,%u2",operands); -+ output_asm_insn (s, operands); -+ output_asm_insn ("bcnez\t$fcc7,1f", operands); -+ } -+ else if(ISA_HAS_LSX) -+ { -+ output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands); -+ output_asm_insn (s, operands); -+ output_asm_insn ("bcnez\t$fcc7,1f", operands); -+ } -+ s = "break\t7\n1:"; -+ } -+ return s; -+} -+ -+/* Return true if destination of IN_INSN is used as add source in -+ OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example: -+ madd.s dst, x, y, z -+ madd.s a, dst, b, c */ -+ -+bool -+loongarch_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn) -+{ -+ int dst_reg, src_reg; -+ -+ gcc_assert (get_attr_type (in_insn) == TYPE_FMADD); -+ gcc_assert (get_attr_type (out_insn) == TYPE_FMADD); -+ -+ extract_insn (in_insn); -+ dst_reg = REG_P (recog_data.operand[0]); -+ -+ extract_insn (out_insn); -+ src_reg = REG_P (recog_data.operand[1]); -+ -+ if (dst_reg == src_reg) -+ return true; -+ -+ return false; -+} -+ -+/* Return true if IN_INSN is a multiply-add or multiply-subtract -+ instruction and if OUT_INSN assigns to the accumulator operand. */ -+ -+bool -+loongarch_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn) -+{ -+ enum attr_accum_in accum_in; -+ int accum_in_opnum; -+ rtx accum_in_op; -+ -+ if (recog_memoized (in_insn) < 0) -+ return false; -+ -+ accum_in = get_attr_accum_in (in_insn); -+ if (accum_in == ACCUM_IN_NONE) -+ return false; -+ -+ accum_in_opnum = accum_in - ACCUM_IN_0; -+ -+ extract_insn (in_insn); -+ gcc_assert (accum_in_opnum < recog_data.n_operands); -+ accum_in_op = recog_data.operand[accum_in_opnum]; -+ -+ return reg_set_p (accum_in_op, out_insn); -+} -+ -+/* True if the dependency between OUT_INSN and IN_INSN is on the store -+ data rather than the address. 
We need this because the cprestore -+ pattern is type "store", but is defined using an UNSPEC_VOLATILE, -+ which causes the default routine to abort. We just return false -+ for that case. */ -+ -+bool -+loongarch_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn) -+{ -+ if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE) -+ return false; -+ -+ return store_data_bypass_p (out_insn, in_insn); -+} -+ -+ -+/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output -+ dependencies have no cost, except on the 20Kc where output-dependence -+ is treated like input-dependence. */ -+ -+static int -+loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned int) -+{ -+ if (dep_type != 0 && (dep_type != REG_DEP_OUTPUT)) -+ return 0; -+ return cost; -+} -+ -+/* Return the number of instructions that can be issued per cycle. */ -+ -+static int -+loongarch_issue_rate (void) -+{ -+ switch (loongarch_tune) -+ { -+ case PROCESSOR_LOONGARCH64: -+ case PROCESSOR_LA464: -+ return 4; -+ -+ default: -+ return 1; -+ } -+} -+ -+/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should -+ be as wide as the scheduling freedom in the DFA. */ -+ -+static int -+loongarch_multipass_dfa_lookahead (void) -+{ -+ if (TUNE_LOONGARCH64 || TUNE_LA464) -+ return 4; -+ -+ return 0; -+} -+ -+ -+static void -+loongarch_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -+ int max_ready ATTRIBUTE_UNUSED) -+{ -+} -+ -+/* Implement TARGET_SCHED_REORDER. */ -+ -+static int -+loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -+ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED) -+{ -+ return loongarch_issue_rate (); -+} -+ -+/* Implement TARGET_SCHED_REORDER2. */ -+ -+static int -+loongarch_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -+ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED) -+{ -+ return cached_can_issue_more; -+} -+ -+/* Implement TARGET_SCHED_VARIABLE_ISSUE. */ -+ -+static int -+loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -+ rtx_insn *insn, int more) -+{ -+ /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */ -+ if (USEFUL_INSN_P (insn)) -+ { -+ if (get_attr_type (insn) != TYPE_GHOST) -+ more--; -+ } -+ -+ /* Instructions of type 'multi' should all be split before -+ the second scheduling pass. */ -+ gcc_assert (!reload_completed -+ || recog_memoized (insn) < 0 -+ || get_attr_type (insn) != TYPE_MULTI); -+ -+ cached_can_issue_more = more; -+ return more; -+} -+ -+/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY), -+ return the first operand of the associated PREF or PREFX insn. */ -+ -+rtx -+loongarch_prefetch_cookie (rtx write, rtx locality) -+{ -+ /* store_streamed / load_streamed. */ -+ if (INTVAL (locality) <= 0) -+ return GEN_INT (INTVAL (write) + 4); -+ -+ /* store / load. */ -+ if (INTVAL (locality) <= 2) -+ return write; -+ -+ /* store_retained / load_retained. */ -+ return GEN_INT (INTVAL (write) + 6); -+} -+ -+ -+/* Return whether CFG is used in loongarch_reorg. */ -+ -+static bool -+loongarch_cfg_in_reorg (void) -+{ -+ return (TARGET_RELAX_PIC_CALLS); -+} -+ -+/* If INSN is a call, return the underlying CALL expr. Return NULL_RTX -+ otherwise. If INSN has two call rtx, then store the second one in -+ SECOND_CALL. 
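-+
-+   For illustration (hypothetical RTL, not taken from this patch): a call
-+   returning _Complex float expands to
-+
-+     (parallel [(set (reg:SF $f0) (call (mem:SI ...) (const_int 16)))
-+                (set (reg:SF $f1) (call (mem:SI ...) (const_int 16)))])
-+
-+   so both elements of the PARALLEL wrap a CALL, and the second one is
-+   reported through SECOND_CALL.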
*/ -+ -+static rtx -+loongarch_call_expr_from_insn (rtx_insn *insn, rtx *second_call) -+{ -+ rtx x; -+ rtx x2; -+ -+ if (!CALL_P (insn)) -+ return NULL_RTX; -+ -+ x = PATTERN (insn); -+ if (GET_CODE (x) == PARALLEL) -+ { -+ /* Calls returning complex values have two CALL rtx. Look for the second -+ one here, and return it via the SECOND_CALL arg. */ -+ x2 = XVECEXP (x, 0, 1); -+ if (GET_CODE (x2) == SET) -+ x2 = XEXP (x2, 1); -+ if (GET_CODE (x2) == CALL) -+ *second_call = x2; -+ -+ x = XVECEXP (x, 0, 0); -+ } -+ if (GET_CODE (x) == SET) -+ x = XEXP (x, 1); -+ gcc_assert (GET_CODE (x) == CALL); -+ -+ return x; -+} -+ -+/* REG is set in DEF. See if the definition is one of the ways we load a -+ register with a symbol address for a loongarch_use_pic_fn_addr_reg_p call. -+ If it is, return the symbol reference of the function, otherwise return -+ NULL_RTX. -+ -+ If RECURSE_P is true, use loongarch_find_pic_call_symbol to interpret -+ the values of source registers, otherwise treat such registers as -+ having an unknown value. */ -+ -+static rtx -+loongarch_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p) -+{ -+ rtx_insn *def_insn; -+ rtx set; -+ -+ if (DF_REF_IS_ARTIFICIAL (def)) -+ return NULL_RTX; -+ -+ def_insn = DF_REF_INSN (def); -+ set = single_set (def_insn); -+ if (set && rtx_equal_p (SET_DEST (set), reg)) -+ { -+ rtx note, src, symbol; -+ -+ /* First see whether the source is a plain symbol. This is used -+ when calling symbols that are not lazily bound. */ -+ src = SET_SRC (set); -+ if (GET_CODE (src) == SYMBOL_REF) -+ return src; -+ -+ /* Handle %call16 references. */ -+ symbol = loongarch_strip_unspec_call (src); -+ if (symbol) -+ { -+ gcc_assert (GET_CODE (symbol) == SYMBOL_REF); -+ return symbol; -+ } -+ -+ /* If we have something more complicated, look for a -+ REG_EQUAL or REG_EQUIV note. */ -+ note = find_reg_equal_equiv_note (def_insn); -+ if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF) -+ return XEXP (note, 0); -+ -+ /* Follow at most one simple register copy. Such copies are -+ interesting in cases like: -+ -+ for (...) -+ { -+ locally_binding_fn (...); -+ } -+ -+ and: -+ -+ locally_binding_fn (...); -+ ... -+ locally_binding_fn (...); -+ -+ where the load of locally_binding_fn can legitimately be -+ hoisted or shared. However, we do not expect to see complex -+ chains of copies, so a full worklist solution to the problem -+ would probably be overkill. */ -+ if (recurse_p && REG_P (src)) -+ return loongarch_find_pic_call_symbol (def_insn, src, false); -+ } -+ -+ return NULL_RTX; -+} -+ -+/* Find the definition of the use of REG in INSN. See if the definition -+ is one of the ways we load a register with a symbol address for a -+ loongarch_use_pic_fn_addr_reg_p call. If it is return the symbol reference -+ of the function, otherwise return NULL_RTX. RECURSE_P is as for -+ loongarch_pic_call_symbol_from_set. */ -+ -+static rtx -+loongarch_find_pic_call_symbol (rtx_insn *insn, rtx reg, bool recurse_p) -+{ -+ df_ref use; -+ struct df_link *defs; -+ rtx symbol; -+ -+ use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]); -+ if (!use) -+ return NULL_RTX; -+ defs = DF_REF_CHAIN (use); -+ if (!defs) -+ return NULL_RTX; -+ symbol = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); -+ if (!symbol) -+ return NULL_RTX; -+ -+ /* If we have more than one definition, they need to be identical. 
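-+
-+   For example (illustrative assembly, hedged): if both arms of a diamond
-+   load the same symbol into the call register,
-+
-+     la.global  $t0, locally_binding_fn   # definition 1
-+     ...
-+     la.global  $t0, locally_binding_fn   # definition 2
-+     jirl       $ra, $t0, 0               # use reached by both defs
-+
-+   the two definitions agree and the annotation remains safe.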
*/ -+ for (defs = defs->next; defs; defs = defs->next) -+ { -+ rtx other; -+ -+ other = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); -+ if (!rtx_equal_p (symbol, other)) -+ return NULL_RTX; -+ } -+ -+ return symbol; -+} -+ -+/* Replace the args_size operand of the call expression CALL with the -+ call-attribute UNSPEC and fill in SYMBOL as the function symbol. */ -+ -+static void -+loongarch_annotate_pic_call_expr (rtx call, rtx symbol) -+{ -+ rtx args_size; -+ -+ args_size = XEXP (call, 1); -+ XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size), -+ gen_rtvec (2, args_size, symbol), -+ UNSPEC_CALL_ATTR); -+} -+ -+/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See -+ if instead of the arg_size argument it contains the call attributes. If -+ yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function -+ symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is -+ -1. */ -+ -+bool -+loongarch_get_pic_call_symbol (rtx *operands, int args_size_opno) -+{ -+ rtx args_size, symbol; -+ -+ if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1) -+ return false; -+ -+ args_size = operands[args_size_opno]; -+ if (GET_CODE (args_size) != UNSPEC) -+ return false; -+ gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR); -+ -+ symbol = XVECEXP (args_size, 0, 1); -+ gcc_assert (GET_CODE (symbol) == SYMBOL_REF); -+ -+ operands[args_size_opno] = symbol; -+ return true; -+} -+ -+/* Use DF to annotate PIC indirect calls with the function symbol they -+ dispatch to. */ -+ -+static void -+loongarch_annotate_pic_calls (void) -+{ -+ basic_block bb; -+ rtx_insn *insn; -+ -+ FOR_EACH_BB_FN (bb, cfun) -+ FOR_BB_INSNS (bb, insn) -+ { -+ rtx call, reg, symbol, second_call; -+ -+ second_call = 0; -+ call = loongarch_call_expr_from_insn (insn, &second_call); -+ if (!call) -+ continue; -+ gcc_assert (MEM_P (XEXP (call, 0))); -+ reg = XEXP (XEXP (call, 0), 0); -+ if (!REG_P (reg)) -+ continue; -+ -+ symbol = loongarch_find_pic_call_symbol (insn, reg, true); -+ if (symbol) -+ { -+ loongarch_annotate_pic_call_expr (call, symbol); -+ if (second_call) -+ loongarch_annotate_pic_call_expr (second_call, symbol); -+ } -+ } -+} -+ -+ -+/* A structure representing the state of the processor pipeline. -+ Used by the loongarch_sim_* family of functions. */ -+struct loongarch_sim { -+ /* The maximum number of instructions that can be issued in a cycle. -+ (Caches loongarch_issue_rate.) */ -+ unsigned int issue_rate; -+ -+ /* The current simulation time. */ -+ unsigned int time; -+ -+ /* How many more instructions can be issued in the current cycle. */ -+ unsigned int insns_left; -+ -+ /* LAST_SET[X].INSN is the last instruction to set register X. -+ LAST_SET[X].TIME is the time at which that instruction was issued. -+ INSN is null if no instruction has yet set register X. */ -+ struct { -+ rtx_insn *insn; -+ unsigned int time; -+ } last_set[FIRST_PSEUDO_REGISTER]; -+ -+ /* The pipeline's current DFA state. */ -+ state_t dfa_state; -+}; -+ -+/* Reset STATE to the initial simulation state. */ -+ -+static void -+loongarch_sim_reset (struct loongarch_sim *state) -+{ -+ curr_state = state->dfa_state; -+ -+ state->time = 0; -+ state->insns_left = state->issue_rate; -+ memset (&state->last_set, 0, sizeof (state->last_set)); -+ state_reset (curr_state); -+ -+ targetm.sched.init (0, false, 0); -+ advance_state (curr_state); -+} -+ -+/* Initialize STATE before its first use. DFA_STATE points to an -+ allocated but uninitialized DFA state. 
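-+
-+   A minimal usage sketch, mirroring what loongarch_set_tuning_info does
-+   later in this file:
-+
-+     struct loongarch_sim state;
-+     loongarch_sim_init (&state, alloca (state_size ()));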
*/ -+ -+static void -+loongarch_sim_init (struct loongarch_sim *state, state_t dfa_state) -+{ -+ if (targetm.sched.init_dfa_pre_cycle_insn) -+ targetm.sched.init_dfa_pre_cycle_insn (); -+ -+ if (targetm.sched.init_dfa_post_cycle_insn) -+ targetm.sched.init_dfa_post_cycle_insn (); -+ -+ state->issue_rate = loongarch_issue_rate (); -+ state->dfa_state = dfa_state; -+ loongarch_sim_reset (state); -+} -+ -+ -+ -+/* Set up costs based on the current architecture and tuning settings. */ -+ -+static void -+loongarch_set_tuning_info (void) -+{ -+ -+ loongarch_tuning_info.arch = loongarch_arch; -+ loongarch_tuning_info.tune = loongarch_tune; -+ loongarch_tuning_info.initialized_p = true; -+ -+ dfa_start (); -+ -+ struct loongarch_sim state; -+ loongarch_sim_init (&state, alloca (state_size ())); -+ -+ dfa_finish (); -+} -+ -+/* Implement TARGET_EXPAND_TO_RTL_HOOK. */ -+ -+static void -+loongarch_expand_to_rtl_hook (void) -+{ -+ /* We need to call this at a point where we can safely create sequences -+ of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also -+ need to call it at a point where the DFA infrastructure is not -+ already in use, so we can't just call it lazily on demand. -+ -+ At present, loongarch_tuning_info is only needed during post-expand -+ RTL passes such as split_insns, so this hook should be early enough. -+ We may need to move the call elsewhere if loongarch_tuning_info starts -+ to be used for other things (such as rtx_costs, or expanders that -+ could be called during gimple optimization). */ -+ loongarch_set_tuning_info (); -+} -+ -+/* This structure records that the current function has a LO_SUM -+ involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is -+ the largest offset applied to BASE by all such LO_SUMs. */ -+struct loongarch_lo_sum_offset { -+ rtx base; -+ HOST_WIDE_INT offset; -+}; -+ -+/* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */ -+ -+static hashval_t -+loongarch_hash_base (rtx base) -+{ -+ int do_not_record_p; -+ -+ return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false); -+} -+ -+/* Hashtable helpers. */ -+ -+struct loongarch_lo_sum_offset_hasher : free_ptr_hash -+{ -+ typedef rtx_def *compare_type; -+ static inline hashval_t hash (const loongarch_lo_sum_offset *); -+ static inline bool equal (const loongarch_lo_sum_offset *, const rtx_def *); -+}; -+ -+/* Hash-table callbacks for loongarch_lo_sum_offsets. */ -+ -+inline hashval_t -+loongarch_lo_sum_offset_hasher::hash (const loongarch_lo_sum_offset *entry) -+{ -+ return loongarch_hash_base (entry->base); -+} -+ -+inline bool -+loongarch_lo_sum_offset_hasher::equal (const loongarch_lo_sum_offset *entry, -+ const rtx_def *value) -+{ -+ return rtx_equal_p (entry->base, value); -+} -+ -+typedef hash_table loongarch_offset_table; -+ -+ -+/* Subroutine of loongarch_reorg to manage passes that require DF. */ -+ -+static void -+loongarch_df_reorg (void) -+{ -+ /* Create def-use chains. */ -+ df_set_flags (DF_EQ_NOTES); -+ df_chain_add_problem (DF_UD_CHAIN); -+ df_analyze (); -+ -+ if (TARGET_RELAX_PIC_CALLS) -+ loongarch_annotate_pic_calls (); -+ -+ df_finish_pass (false); -+} -+ -+ -+/* Implement TARGET_MACHINE_DEPENDENT_REORG. */ -+ -+static void -+loongarch_reorg (void) -+{ -+ /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.DF insn info is only kept up -+ to date if the CFG is available. 
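-+
-+   The intended pairing in loongarch_reorg below is (sketch):
-+
-+     compute_bb_for_insn ();   // restore BLOCK_FOR_INSN pointers
-+     loongarch_df_reorg ();    // DF-based work, e.g. PIC call annotation
-+     free_bb_for_insn ();      // release the CFG information again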
*/ -+ if (loongarch_cfg_in_reorg ()) -+ compute_bb_for_insn (); -+ if (loongarch_cfg_in_reorg ()) -+ { -+ loongarch_df_reorg (); -+ free_bb_for_insn (); -+ } -+} -+ -+/* We use a machine specific pass to do a second machine dependent reorg -+ pass after delay branch scheduling. */ -+ -+static unsigned int -+loongarch_machine_reorg2 (void) -+{ -+// loongarch_insert_insn_pseudos (); -+ return 0; -+} -+ -+namespace { -+ -+const pass_data pass_data_loongarch_machine_reorg2 = -+{ -+ RTL_PASS, /* type */ -+ "mach2", /* name */ -+ OPTGROUP_NONE, /* optinfo_flags */ -+ TV_MACH_DEP, /* tv_id */ -+ 0, /* properties_required */ -+ 0, /* properties_provided */ -+ 0, /* properties_destroyed */ -+ 0, /* todo_flags_start */ -+ 0, /* todo_flags_finish */ -+}; -+ -+class pass_loongarch_machine_reorg2 : public rtl_opt_pass -+{ -+public: -+ pass_loongarch_machine_reorg2(gcc::context *ctxt) -+ : rtl_opt_pass(pass_data_loongarch_machine_reorg2, ctxt) -+ {} -+ -+ /* opt_pass methods: */ -+ virtual unsigned int execute (function *) { return loongarch_machine_reorg2 (); } -+ -+}; // class pass_loongarch_machine_reorg2 -+ -+} // anon namespace -+ -+rtl_opt_pass * -+make_pass_loongarch_machine_reorg2 (gcc::context *ctxt) -+{ -+ return new pass_loongarch_machine_reorg2 (ctxt); -+} -+ -+ -+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text -+ in order to avoid duplicating too much logic from elsewhere. */ -+ -+static void -+loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, -+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, -+ tree function) -+{ -+ rtx this_rtx, temp1, temp2, fnaddr; -+ rtx_insn *insn; -+ bool use_sibcall_p; -+ -+ /* Pretend to be a post-reload pass while generating rtl. */ -+ reload_completed = 1; -+ -+ /* Mark the end of the (empty) prologue. */ -+ emit_note (NOTE_INSN_PROLOGUE_END); -+ -+ /* Determine if we can use a sibcall to call FUNCTION directly. */ -+ fnaddr = XEXP (DECL_RTL (function), 0); -+ use_sibcall_p = (loongarch_function_ok_for_sibcall (function, NULL) -+ && const_call_insn_operand (fnaddr, Pmode)); -+ -+// /* Determine if we need to load FNADDR from the GOT. */ -+// if (!use_sibcall_p -+// && (loongarch_got_symbol_type_p -+// (loongarch_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA)))) -+// { -+// /* Pick a global pointer. Use a call-clobbered register if -+// TARGET_CALL_SAVED_GP. */ -+// cfun->machine->global_pointer -+// = GLOBAL_POINTER_REGNUM; -+// cfun->machine->must_initialize_gp_p = true; -+// SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer); -+// -+// /* Set up the global pointer for n32 or n64 abicalls. */ -+// loongarch_emit_loadgp (); -+// } -+ -+ /* We need two temporary registers in some cases. */ -+ temp1 = gen_rtx_REG (Pmode, 12); -+ temp2 = gen_rtx_REG (Pmode, 13); -+ -+ /* Find out which register contains the "this" pointer. */ -+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) -+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); -+ else -+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); -+ -+ /* Add DELTA to THIS_RTX. */ -+ if (delta != 0) -+ { -+ rtx offset = GEN_INT (delta); -+ if (!SMALL_OPERAND (delta)) -+ { -+ loongarch_emit_move (temp1, offset); -+ offset = temp1; -+ } -+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); -+ } -+ -+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */ -+ if (vcall_offset != 0) -+ { -+ rtx addr; -+ -+ /* Set TEMP1 to *THIS_RTX. 
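-+
-+   Net effect of the surrounding block (illustrative):
-+
-+     this_rtx += *(*this_rtx + vcall_offset)
-+
-+   i.e. the vtable slot at VCALL_OFFSET supplies one further delta to
-+   add to the "this" pointer.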
*/ -+ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); -+ -+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */ -+ addr = loongarch_add_offset (temp2, temp1, vcall_offset); -+ -+ /* Load the offset and add it to THIS_RTX. */ -+ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); -+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); -+ } -+ -+ /* Jump to the target function. Use a sibcall if direct jumps are -+ allowed, otherwise load the address into a register first. */ -+ if (use_sibcall_p) -+ { -+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); -+ SIBLING_CALL_P (insn) = 1; -+ } -+ else -+ { -+ loongarch_emit_move (temp1, fnaddr); -+ emit_jump_insn (gen_indirect_jump (temp1)); -+ } -+ -+ /* Run just enough of rest_of_compilation. This sequence was -+ "borrowed" from alpha.c. */ -+ insn = get_insns (); -+ split_all_insns_noflow (); -+ shorten_branches (insn); -+ final_start_function (insn, file, 1); -+ final (insn, file, 1); -+ final_end_function (); -+ -+ /* Clean up the vars set above. Note that final_end_function resets -+ the global pointer for us. */ -+ reload_completed = 0; -+} -+ -+ -+/* Allocate a chunk of memory for per-function machine-dependent data. */ -+ -+static struct machine_function * -+loongarch_init_machine_status (void) -+{ -+ return ggc_cleared_alloc (); -+} -+ -+/* Return the processor associated with the given ISA level, or null -+ if the ISA isn't valid. */ -+ -+static const struct loongarch_cpu_info * -+loongarch_cpu_info_from_isa (int isa) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) -+ if (loongarch_cpu_info_table[i].isa == isa) -+ return loongarch_cpu_info_table + i; -+ -+ return NULL; -+} -+ -+/* Return a loongarch_cpu_info entry determined by an option valued -+ OPT. */ -+ -+static const struct loongarch_cpu_info * -+loongarch_cpu_info_from_opt (int opt) -+{ -+ switch (opt) -+ { -+ case LARCH_ARCH_OPTION_NATIVE: -+ gcc_unreachable (); -+ -+ default: -+ return &loongarch_cpu_info_table[opt]; -+ } -+} -+ -+/* Return a default loongarch_cpu_info entry, given that no -march= option -+ was explicitly specified. */ -+ -+static const struct loongarch_cpu_info * -+loongarch_default_arch (void) -+{ -+#if defined (LARCH_CPU_STRING_DEFAULT) -+ unsigned int i; -+ for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) -+ if (strcmp (loongarch_cpu_info_table[i].name, LARCH_CPU_STRING_DEFAULT) == 0) -+ return loongarch_cpu_info_table + i; -+ gcc_unreachable (); -+#elif defined (LARCH_ISA_DEFAULT) -+ return loongarch_cpu_info_from_isa (LARCH_ISA_DEFAULT); -+#else -+ gcc_unreachable (); -+#endif -+} -+ -+/* Set up globals to generate code for the ISA or processor -+ described by INFO. */ -+ -+static void -+loongarch_set_architecture (const struct loongarch_cpu_info *info) -+{ -+ if (info != 0) -+ { -+ loongarch_arch_info = info; -+ loongarch_arch = info->cpu; -+ loongarch_isa = info->isa; -+ if (loongarch_isa < 32) -+ loongarch_isa_rev = 0; -+ else -+ loongarch_isa_rev = (loongarch_isa & 31) + 1; -+ } -+} -+ -+/* Likewise for tuning. */ -+ -+static void -+loongarch_set_tune (const struct loongarch_cpu_info *info) -+{ -+ if (info != 0) -+ { -+ loongarch_tune_info = info; -+ loongarch_tune = info->cpu; -+ } -+} -+ -+/* Implement TARGET_OPTION_OVERRIDE. 
*/ -+ -+static void -+loongarch_option_override (void) -+{ -+ int i, start, regno, mode; -+ -+#ifdef SUBTARGET_OVERRIDE_OPTIONS -+ SUBTARGET_OVERRIDE_OPTIONS; -+#endif -+ -+ -+ /* -mno-float overrides -mhard-float and -msoft-float. */ -+ if (TARGET_NO_FLOAT) -+ { -+ target_flags |= MASK_SOFT_FLOAT_ABI; -+ target_flags_explicit |= MASK_SOFT_FLOAT_ABI; -+ } -+ -+ -+ /* Set the small data limit. */ -+ loongarch_small_data_threshold = (global_options_set.x_g_switch_value -+ ? g_switch_value -+ : LARCH_DEFAULT_GVALUE); -+ -+ /* The following code determines the architecture and register size. -+ Similar code was added to GAS 2.14 (see tc-loongarch.c:md_after_parse_args()). -+ The GAS and GCC code should be kept in sync as much as possible. */ -+ -+ if (global_options_set.x_loongarch_arch_option) -+ loongarch_set_architecture (loongarch_cpu_info_from_opt (loongarch_arch_option)); -+ -+ if (loongarch_arch_info == 0) -+ loongarch_set_architecture (loongarch_default_arch ()); -+ -+ /* Optimize for loongarch_arch, unless -mtune selects a different processor. */ -+ if (global_options_set.x_loongarch_tune_option) -+ loongarch_set_tune (loongarch_cpu_info_from_opt (loongarch_tune_option)); -+ -+ if (loongarch_tune_info == 0) -+ loongarch_set_tune (loongarch_arch_info); -+ -+ if ((target_flags_explicit & MASK_64BIT) == 0) -+ { -+ /* Infer the integer register size from the ABI and processor. -+ Restrict ourselves to 32-bit registers if that's all the -+ processor has, or if the ABI cannot handle 64-bit registers. */ -+ if (loongarch_abi == ABILP32) -+ target_flags &= ~MASK_64BIT; -+ else -+ target_flags |= MASK_64BIT; -+ } -+ -+ if ((target_flags_explicit & MASK_FLOAT64) != 0) -+ { -+ if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64) -+ error ("unsupported combination: %s", "-mfp64 -msingle-float"); -+ } -+ else -+ { -+ /* -msingle-float selects 32-bit float registers. On r6 and later, -+ -mdouble-float selects 64-bit float registers, since the old paired -+ register model is not supported. In other cases the float registers -+ should be the same size as the integer ones. */ -+ if (TARGET_64BIT && TARGET_DOUBLE_FLOAT) -+ target_flags |= MASK_FLOAT64; -+ else if (loongarch_abi == ABILP32 && ISA_HAS_LSX) -+ target_flags |= MASK_FLOAT64; -+ else -+ target_flags &= ~MASK_FLOAT64; -+ } -+ -+ /* End of code shared with GAS. */ -+ -+ if (!TARGET_OLDABI) -+ flag_pcc_struct_return = 0; -+ -+ /* Decide which rtx_costs structure to use. */ -+ if (optimize_size) -+ loongarch_cost = &loongarch_rtx_cost_optimize_size; -+ else -+ loongarch_cost = &loongarch_rtx_cost_data[loongarch_tune]; -+ -+ /* If the user hasn't specified a branch cost, use the processor's -+ default. */ -+ if (loongarch_branch_cost == 0) -+ loongarch_branch_cost = loongarch_cost->branch_cost; -+ -+ /* Prefer a call to memcpy over inline code when optimizing for size, -+ though see MOVE_RATIO in loongarch.h. */ -+ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0) -+ target_flags |= MASK_MEMCPY; -+ -+ /* If we have a nonzero small-data limit, check that the -mgpopt -+ setting is consistent with the other target flags. */ -+ if (loongarch_small_data_threshold > 0) -+ { -+ if (TARGET_VXWORKS_RTP) -+ warning (0, "cannot use small-data accesses for %qs", "-mrtp"); -+ } -+ -+ /* Make sure that when ISA_HAS_LSX is true, TARGET_FLOAT64 and -+ TARGET_HARD_FLOAT_ABI and both true. 
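-+
-+   (E.g. an illustrative "-mlsx -msingle-float" command line would leave
-+   TARGET_FLOAT64 clear and so fail this check, producing the error
-+   emitted below.)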
*/ -+ if (ISA_HAS_LSX && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI)) -+ error ("%<-mlsx%> must be used with %<-mfp64%> and %<-mhard-float%>"); -+ -+ /* If TARGET_LASX, enable TARGET_LSX. */ -+ if (TARGET_LASX) -+ target_flags |= MASK_LSX; -+ -+ /* .cfi_* directives generate a read-only section, so fall back on -+ manual .eh_frame creation if we need the section to be writable. */ -+ if (TARGET_WRITABLE_EH_FRAME) -+ flag_dwarf2_cfi_asm = 0; -+ -+ loongarch_init_print_operand_punct (); -+ -+ /* Set up array to map GCC register number to debug register number. -+ Ignore the special purpose register numbers. */ -+ -+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) -+ { -+ loongarch_dbx_regno[i] = IGNORED_DWARF_REGNUM; -+ if (GP_REG_P (i) || FP_REG_P (i)) -+ loongarch_dwarf_regno[i] = i; -+ else -+ loongarch_dwarf_regno[i] = INVALID_REGNUM; -+ } -+ -+ start = GP_DBX_FIRST - GP_REG_FIRST; -+ for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++) -+ loongarch_dbx_regno[i] = i + start; -+ -+ start = FP_DBX_FIRST - FP_REG_FIRST; -+ for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++) -+ loongarch_dbx_regno[i] = i + start; -+ -+ /* Set up loongarch_hard_regno_mode_ok. */ -+ for (mode = 0; mode < MAX_MACHINE_MODE; mode++) -+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) -+ loongarch_hard_regno_mode_ok_p[mode][regno] -+ = loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode); -+ -+ /* Function to allocate machine-dependent function status. */ -+ init_machine_status = &loongarch_init_machine_status; -+ target_flags &= ~MASK_RELAX_PIC_CALLS; -+ -+ /* We register a second machine specific reorg pass after delay slot -+ filling. Registering the pass must be done at start up. It's -+ convenient to do it here. */ -+ opt_pass *new_pass = make_pass_loongarch_machine_reorg2 (g); -+ struct register_pass_info insert_pass_loongarch_machine_reorg2 = -+ { -+ new_pass, /* pass */ -+ "dbr", /* reference_pass_name */ -+ 1, /* ref_pass_instance_number */ -+ PASS_POS_INSERT_AFTER /* po_op */ -+ }; -+ register_pass (&insert_pass_loongarch_machine_reorg2); -+ -+ loongarch_register_frame_header_opt (); -+} -+ -+ -+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ -+ -+static void -+loongarch_conditional_register_usage (void) -+{ -+ if (!TARGET_HARD_FLOAT) -+ { -+ AND_COMPL_HARD_REG_SET (accessible_reg_set, -+ reg_class_contents[(int) FP_REGS]); -+ AND_COMPL_HARD_REG_SET (accessible_reg_set, -+ reg_class_contents[(int) ST_REGS]); -+ } -+} -+ -+/* Implement EH_USES. */ -+ -+bool -+loongarch_eh_uses (unsigned int regno) -+{ -+ return false; -+} -+ -+/* Implement EPILOGUE_USES. */ -+ -+bool -+loongarch_epilogue_uses (unsigned int regno) -+{ -+ /* Say that the epilogue uses the return address register. Note that -+ in the case of sibcalls, the values "used by the epilogue" are -+ considered live at the start of the called function. */ -+ if (regno == RETURN_ADDR_REGNUM) -+ return true; -+ -+ /* An interrupt handler must preserve some registers that are -+ ordinarily call-clobbered. */ -+ if (cfun->machine->interrupt_handler_p -+ && loongarch_interrupt_extra_call_saved_reg_p (regno)) -+ return true; -+ -+ return false; -+} -+ -+/* Return true if MEM1 and MEM2 use the same base register, and the -+ offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the -+ register into (from) which the contents of MEM1 will be loaded -+ (stored), depending on the value of LOAD_P. -+ SWAP_P is true when the 1st and 2nd instructions are swapped. 
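-+
-+   An illustrative bondable pair (same base register, and
-+   offset2 == offset1 + 4):
-+
-+     ld.w  $r5, $r4, 0     # mem1
-+     ld.w  $r6, $r4, 4     # mem2
-+
-+   whereas base-clobbering loads such as "ld.w $r5, $r5, 0" are rejected
-+   by the checks below.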
*/ -+ -+static bool -+loongarch_load_store_pair_p_1 (bool load_p, bool swap_p, -+ rtx first_reg, rtx mem1, rtx mem2) -+{ -+ rtx base1, base2; -+ HOST_WIDE_INT offset1, offset2; -+ -+ if (!MEM_P (mem1) || !MEM_P (mem2)) -+ return false; -+ -+ loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); -+ loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); -+ -+ if (!REG_P (base1) || !rtx_equal_p (base1, base2)) -+ return false; -+ -+ /* Avoid invalid load pair instructions. */ -+ if (load_p && REGNO (first_reg) == REGNO (base1)) -+ return false; -+ -+ /* We must avoid this case for anti-dependence. -+ Ex: lw $3, 4($3) -+ lw $2, 0($3) -+ first_reg is $2, but the base is $3. */ -+ if (load_p -+ && swap_p -+ && REGNO (first_reg) + 1 == REGNO (base1)) -+ return false; -+ -+ if (offset2 != offset1 + 4) -+ return false; -+ -+ if (!ULARCH_12BIT_OFFSET_P (offset1)) -+ return false; -+ -+ return true; -+} -+ -+bool -+loongarch_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p) -+{ -+ rtx reg1, reg2, mem1, mem2, base1, base2; -+ enum reg_class rc1, rc2; -+ HOST_WIDE_INT offset1, offset2; -+ -+ if (load_p) -+ { -+ reg1 = operands[0]; -+ reg2 = operands[2]; -+ mem1 = operands[1]; -+ mem2 = operands[3]; -+ } -+ else -+ { -+ reg1 = operands[1]; -+ reg2 = operands[3]; -+ mem1 = operands[0]; -+ mem2 = operands[2]; -+ } -+ -+ if (loongarch_address_insns (XEXP (mem1, 0), mode, false) == 0 -+ || loongarch_address_insns (XEXP (mem2, 0), mode, false) == 0) -+ return false; -+ -+ loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); -+ loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); -+ -+ /* Base regs do not match. */ -+ if (!REG_P (base1) || !rtx_equal_p (base1, base2)) -+ return false; -+ -+ /* Either of the loads is clobbering base register. It is legitimate to bond -+ loads if second load clobbers base register. However, hardware does not -+ support such bonding. */ -+ if (load_p -+ && (REGNO (reg1) == REGNO (base1) -+ || (REGNO (reg2) == REGNO (base1)))) -+ return false; -+ -+ /* Loading in same registers. */ -+ if (load_p -+ && REGNO (reg1) == REGNO (reg2)) -+ return false; -+ -+ /* The loads/stores are not of same type. */ -+ rc1 = REGNO_REG_CLASS (REGNO (reg1)); -+ rc2 = REGNO_REG_CLASS (REGNO (reg2)); -+ if (rc1 != rc2 -+ && !reg_class_subset_p (rc1, rc2) -+ && !reg_class_subset_p (rc2, rc1)) -+ return false; -+ -+ if (abs (offset1 - offset2) != GET_MODE_SIZE (mode)) -+ return false; -+ -+ return true; -+} -+ -+/* OPERANDS describes the operands to a pair of SETs, in the order -+ dest1, src1, dest2, src2. Return true if the operands can be used -+ in an LWP or SWP instruction; LOAD_P says which. */ -+ -+bool -+loongarch_load_store_pair_p (bool load_p, rtx *operands) -+{ -+ rtx reg1, reg2, mem1, mem2; -+ -+ if (load_p) -+ { -+ reg1 = operands[0]; -+ reg2 = operands[2]; -+ mem1 = operands[1]; -+ mem2 = operands[3]; -+ } -+ else -+ { -+ reg1 = operands[1]; -+ reg2 = operands[3]; -+ mem1 = operands[0]; -+ mem2 = operands[2]; -+ } -+ -+ if (REGNO (reg2) == REGNO (reg1) + 1) -+ return loongarch_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2); -+ -+ if (REGNO (reg1) == REGNO (reg2) + 1) -+ return loongarch_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1); -+ -+ return false; -+} -+ -+/* Return true if REG1 and REG2 match the criteria for a movep insn. 
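-+
-+   The matching below is a bitmask trick (sketch): for regno1 == 4 and
-+   regno2 == 21, pair == (1 << 4) | (1 << 21) == 0x00200010, which is
-+   one of the entries in the MATCH table.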
*/ -+ -+bool -+loongarch_movep_target_p (rtx reg1, rtx reg2) -+{ -+ int regno1, regno2, pair; -+ unsigned int i; -+ static const int match[8] = { -+ 0x00000060, /* 5, 6 */ -+ 0x000000a0, /* 5, 7 */ -+ 0x000000c0, /* 6, 7 */ -+ 0x00200010, /* 4, 21 */ -+ 0x00400010, /* 4, 22 */ -+ 0x00000030, /* 4, 5 */ -+ 0x00000050, /* 4, 6 */ -+ 0x00000090 /* 4, 7 */ -+ }; -+ -+ if (!REG_P (reg1) || !REG_P (reg2)) -+ return false; -+ -+ regno1 = REGNO (reg1); -+ regno2 = REGNO (reg2); -+ -+ if (!GP_REG_P (regno1) || !GP_REG_P (regno2)) -+ return false; -+ -+ pair = (1 << regno1) | (1 << regno2); -+ -+ for (i = 0; i < ARRAY_SIZE (match); i++) -+ if (pair == match[i]) -+ return true; -+ -+ return false; -+} -+ -+/* Return the size in bytes of the trampoline code, padded to -+ TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target -+ function address immediately follow. */ -+ -+int -+loongarch_trampoline_code_size (void) -+{ -+ return 4 * 4; -+} -+ -+/* Implement TARGET_TRAMPOLINE_INIT. */ -+ -+static void -+loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) -+{ -+ rtx addr, end_addr, high, low, opcode, mem; -+ rtx trampoline[8]; -+ unsigned int i, j; -+ HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset; -+ -+ /* Work out the offsets of the pointers from the start of the -+ trampoline code. */ -+ end_addr_offset = loongarch_trampoline_code_size (); -+ static_chain_offset = end_addr_offset; -+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); -+ -+ /* Get pointers to the beginning and end of the code block. */ -+ addr = force_reg (Pmode, XEXP (m_tramp, 0)); -+ end_addr = loongarch_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset)); -+ -+#define OP(X) gen_int_mode (X, SImode) -+ -+ /* Build up the code in TRAMPOLINE. */ -+ i = 0; -+ /* -+ pcaddi $static_chain,0 -+ ld.[dw] $tmp,$static_chain,target_function_offset -+ ld.[dw] $static_chain,$static_chain,static_chain_offset -+ jirl $r0,$tmp,0 -+ */ -+ trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST)); -+ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) -+ | 19 /* $t7 */ -+ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) -+ | ((target_function_offset & 0xfff) << 10)); -+ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) -+ | (STATIC_CHAIN_REGNUM - GP_REG_FIRST) -+ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) -+ | ((static_chain_offset & 0xfff) << 10)); -+ trampoline[i++] = OP (0x4c000000 | (19 << 5)); -+#undef OP -+ -+ for (j = 0; j < i; j++) -+ { -+ mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); -+ loongarch_emit_move (mem, trampoline[j]); -+ } -+ -+ /* Set up the static chain pointer field. */ -+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); -+ loongarch_emit_move (mem, chain_value); -+ -+ /* Set up the target function field. */ -+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset); -+ loongarch_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); -+ -+ /* Flush the code part of the trampoline. */ -+ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); -+ emit_insn (gen_clear_cache (addr, end_addr)); -+} -+ -+ -+/* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default -+ behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even -+ when TARGET_LOONGSON_MMI is true. 
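-+
-+   E.g. for SImode the mask is 31, so a shift written as "x << (n & 31)"
-+   already matches the hardware behaviour and the explicit AND can be
-+   dropped.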
*/ -+ -+static unsigned HOST_WIDE_INT -+loongarch_shift_truncation_mask (machine_mode mode) -+{ -+ return GET_MODE_BITSIZE (mode) - 1; -+} -+ -+ -+/* Generate or test for an insn that supports a constant permutation. */ -+ -+#define MAX_VECT_LEN 32 -+ -+struct expand_vec_perm_d -+{ -+ rtx target, op0, op1; -+ unsigned char perm[MAX_VECT_LEN]; -+ machine_mode vmode; -+ unsigned char nelt; -+ bool one_vector_p; -+ bool testing_p; -+}; -+ -+/* Construct (set target (vec_select op0 (parallel perm))) and -+ return true if that's a valid instruction in the active ISA. */ -+ -+static bool -+loongarch_expand_vselect (rtx target, rtx op0, -+ const unsigned char *perm, unsigned nelt) -+{ -+ rtx rperm[MAX_VECT_LEN], x; -+ rtx_insn *insn; -+ unsigned i; -+ -+ for (i = 0; i < nelt; ++i) -+ rperm[i] = GEN_INT (perm[i]); -+ -+ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm)); -+ x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x); -+ x = gen_rtx_SET (target, x); -+ -+ insn = emit_insn (x); -+ if (recog_memoized (insn) < 0) -+ { -+ remove_insn (insn); -+ return false; -+ } -+ return true; -+} -+ -+/* Similar, but generate a vec_concat from op0 and op1 as well. */ -+ -+static bool -+loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1, -+ const unsigned char *perm, unsigned nelt) -+{ -+ machine_mode v2mode; -+ rtx x; -+ -+ if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode)) -+ return false; -+ x = gen_rtx_VEC_CONCAT (v2mode, op0, op1); -+ return loongarch_expand_vselect (target, x, perm, nelt); -+} -+ -+/* Construct (set target (vec_select op0 (parallel selector))) and -+ return true if that's a valid instruction in the active ISA. */ -+ -+static bool -+loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) -+{ -+ rtx x, elts[MAX_VECT_LEN]; -+ rtvec v; -+ rtx_insn *insn; -+ unsigned i; -+ -+ if (!ISA_HAS_LSX && !ISA_HAS_LASX) -+ return false; -+ -+ for (i = 0; i < d->nelt; i++) -+ elts[i] = GEN_INT (d->perm[i]); -+ -+ v = gen_rtvec_v (d->nelt, elts); -+ x = gen_rtx_PARALLEL (VOIDmode, v); -+ -+ if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) -+ return false; -+ -+ x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); -+ x = gen_rtx_SET (d->target, x); -+ -+ insn = emit_insn (x); -+ if (recog_memoized (insn) < 0) -+ { -+ remove_insn (insn); -+ return false; -+ } -+ return true; -+} -+ -+static bool -+loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) -+{ -+ unsigned int i, nelt = d->nelt; -+ unsigned char perm2[MAX_VECT_LEN]; -+ -+ if (d->one_vector_p) -+ { -+ /* Try interleave with alternating operands. */ -+ memcpy (perm2, d->perm, sizeof(perm2)); -+ for (i = 1; i < nelt; i += 2) -+ perm2[i] += nelt; -+ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt)) -+ return true; -+ } -+ else -+ { -+ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, -+ d->perm, nelt)) -+ return true; -+ -+ /* Try again with swapped operands. */ -+ for (i = 0; i < nelt; ++i) -+ perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); -+ if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt)) -+ return true; -+ } -+ -+ if (loongarch_expand_lsx_shuffle (d)) -+ return true; -+ return false; -+} -+ -+/* Implement TARGET_VECTORIZE_VEC_PERM_CONST. 
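-+
-+   Illustrative selectors for V4SI: sel = {0, 4, 1, 5} reads both inputs
-+   ("which" == 3) and interleaves the low halves of OP0 and OP1, while
-+   sel = {1, 1, 3, 3} reads only OP0 ("which" == 1) and is folded into a
-+   one-vector permutation below.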
*/ -+ -+static bool -+loongarch_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, -+ rtx op1, const vec_perm_indices &sel) -+{ -+ struct expand_vec_perm_d d; -+ int i, nelt, which; -+ unsigned char orig_perm[MAX_VECT_LEN]; -+ bool ok; -+ -+ d.target = target; -+ d.op0 = op0; -+ d.op1 = op1; -+ -+ d.vmode = vmode; -+ gcc_assert (VECTOR_MODE_P (vmode)); -+ d.nelt = nelt = GET_MODE_NUNITS (vmode); -+ d.testing_p = !target; -+ -+ /* This is overly conservative, but ensures we don't get an -+ uninitialized warning on ORIG_PERM. */ -+ memset (orig_perm, 0, MAX_VECT_LEN); -+ for (i = which = 0; i < nelt; ++i) -+ { -+ int ei = sel[i] & (2 * nelt - 1); -+ which |= (ei < nelt ? 1 : 2); -+ orig_perm[i] = ei; -+ } -+ memcpy (d.perm, orig_perm, MAX_VECT_LEN); -+ -+ switch (which) -+ { -+ default: -+ gcc_unreachable(); -+ -+ case 3: -+ d.one_vector_p = false; -+ if (d.testing_p || !rtx_equal_p (d.op0, d.op1)) -+ break; -+ /* FALLTHRU */ -+ -+ case 2: -+ for (i = 0; i < nelt; ++i) -+ d.perm[i] &= nelt - 1; -+ d.op0 = d.op1; -+ d.one_vector_p = true; -+ break; -+ -+ case 1: -+ d.op1 = d.op0; -+ d.one_vector_p = true; -+ break; -+ } -+ -+ if (d.testing_p) -+ { -+ d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1); -+ d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2); -+ if (!d.one_vector_p) -+ d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); -+ -+ start_sequence (); -+ ok = loongarch_expand_vec_perm_const_1 (&d); -+ end_sequence (); -+ return ok; -+ } -+ -+ ok = loongarch_expand_vec_perm_const_1 (&d); -+ -+ /* If we were given a two-vector permutation which just happened to -+ have both input vectors equal, we folded this into a one-vector -+ permutation. There are several loongson patterns that are matched -+ via direct vec_select+vec_concat expansion, but we do not have -+ support in loongarch_expand_vec_perm_const_1 to guess the adjustment -+ that should be made for a single operand. Just try again with -+ the original permutation. */ -+ if (!ok && which == 3) -+ { -+ d.op0 = op0; -+ d.op1 = op1; -+ d.one_vector_p = false; -+ memcpy (d.perm, orig_perm, MAX_VECT_LEN); -+ ok = loongarch_expand_vec_perm_const_1 (&d); -+ } -+ -+ return ok; -+} -+ -+/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. */ -+ -+static int -+loongarch_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED, -+ machine_mode mode) -+{ -+ switch (loongarch_tune) -+ { -+ case PROCESSOR_LOONGARCH64: -+ case PROCESSOR_LA464: -+ /* Vector part. */ -+ if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)) -+ { -+ /* Integer vector instructions execute in FP unit. -+ The width of integer/float-point vector instructions is 3. */ -+ return 3; -+ } -+ -+ /* Scalar part. */ -+ else if (INTEGRAL_MODE_P (mode)) -+ return 1; -+ else if (FLOAT_MODE_P (mode)) -+ return 4; -+ break; -+ default: -+ break; -+ } -+ return 1; -+} -+ -+/* Expand an integral vector unpack operation. 
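-+
-+   Sketch of the signed low-half case on V8HI: a vslt.h compare against
-+   zero builds a per-element sign mask, and vilvl.h interleaves the
-+   source with that mask, so each 16-bit lane becomes a sign-extended
-+   32-bit lane; the unsigned case interleaves with zero instead.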
*/ -+ -+void -+loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) -+{ -+ machine_mode imode = GET_MODE (operands[1]); -+ rtx (*unpack) (rtx, rtx, rtx); -+ rtx (*extend) (rtx, rtx); -+ rtx (*cmpFunc) (rtx, rtx, rtx); -+ rtx (*swap_hi_lo) (rtx, rtx, rtx, rtx); -+ rtx tmp, dest, zero; -+ machine_mode halfmode = BLKmode; -+ -+ if (ISA_HAS_LASX && GET_MODE_SIZE (imode) == 32) -+ { -+ switch (imode) -+ { -+ -+ case E_V8SImode: -+ if (unsigned_p) -+ extend = gen_lasx_vext2xv_du_wu; -+ else -+ extend = gen_lasx_vext2xv_d_w; -+ swap_hi_lo = gen_lasx_xvpermi_q_v8si; -+ break; -+ -+ case E_V16HImode: -+ if (unsigned_p) -+ extend = gen_lasx_vext2xv_wu_hu; -+ else -+ extend = gen_lasx_vext2xv_w_h; -+ swap_hi_lo = gen_lasx_xvpermi_q_v16hi; -+ break; -+ -+ case E_V32QImode: -+ if (unsigned_p) -+ extend = gen_lasx_vext2xv_hu_bu; -+ else -+ extend = gen_lasx_vext2xv_h_b; -+ swap_hi_lo = gen_lasx_xvpermi_q_v32qi; -+ break; -+ -+ default: -+ gcc_unreachable (); -+ break; -+ } -+ -+ if (high_p) -+ { -+ tmp = gen_reg_rtx (imode); -+ emit_insn (swap_hi_lo (tmp, tmp, operands[1], const1_rtx)); -+ emit_insn(extend (operands[0], tmp)); -+ return; -+ } -+ -+ emit_insn(extend (operands[0], operands[1])); -+ return; -+ -+ } -+ else if (ISA_HAS_LSX) -+ { -+ switch (imode) -+ { -+ case E_V4SImode: -+ if (high_p != 0) -+ unpack = gen_lsx_vilvh_w; -+ else -+ unpack = gen_lsx_vilvl_w; -+ -+ cmpFunc = gen_lsx_vslt_w; -+ break; -+ -+ case E_V8HImode: -+ if (high_p != 0) -+ unpack = gen_lsx_vilvh_h; -+ else -+ unpack = gen_lsx_vilvl_h; -+ -+ cmpFunc = gen_lsx_vslt_h; -+ break; -+ -+ case E_V16QImode: -+ if (high_p != 0) -+ unpack = gen_lsx_vilvh_b; -+ else -+ unpack = gen_lsx_vilvl_b; -+ -+ cmpFunc = gen_lsx_vslt_b; -+ break; -+ -+ default: -+ gcc_unreachable (); -+ break; -+ } -+ -+ if (!unsigned_p) -+ { -+ /* Extract sign extention for each element comparing each element -+ with immediate zero. */ -+ tmp = gen_reg_rtx (imode); -+ emit_insn (cmpFunc (tmp, operands[1], CONST0_RTX (imode))); -+ } -+ else -+ tmp = force_reg (imode, CONST0_RTX (imode)); -+ -+ dest = gen_reg_rtx (imode); -+ -+ emit_insn (unpack (dest, operands[1], tmp)); -+ emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest)); -+ return; -+ } -+ gcc_unreachable (); -+} -+ -+/* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE) -+ or LOW (high_p == FALSE) half of a vector for mode MODE. */ -+ -+rtx -+loongarch_lsx_vec_parallel_const_half (machine_mode mode, bool high_p) -+{ -+ int nunits = GET_MODE_NUNITS (mode); -+ rtvec v = rtvec_alloc (nunits / 2); -+ int base; -+ int i; -+ -+ base = high_p ? nunits / 2 : 0; -+ -+ for (i = 0; i < nunits / 2; i++) -+ RTVEC_ELT (v, i) = GEN_INT (base + i); -+ -+ return gen_rtx_PARALLEL (VOIDmode, v); -+} -+ -+/* A subroutine of loongarch_expand_vec_init, match constant vector elements. */ -+ -+static inline bool -+loongarch_constant_elt_p (rtx x) -+{ -+ return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE; -+} -+ -+rtx -+loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val) -+{ -+ int nunits = GET_MODE_NUNITS (mode); -+ int nsets = nunits / 4; -+ rtx elts[MAX_VECT_LEN]; -+ int set = 0; -+ int i, j; -+ -+ /* Generate a const_int vector replicating the same 4-element set -+ from an immediate. */ -+ for (j = 0; j < nsets; j++, set = 4 * j) -+ for (i = 0; i < 4; i++) -+ elts[set + i] = GEN_INT (set + ((val >> (2 * i)) & 0x3)); -+ -+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, elts)); -+} -+ -+ -+/* Expand a vector initialization. 
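-+
-+   E.g. (illustrative) a V4SI initializer { x, x, x, x } with variable x
-+   takes the all_same path and becomes a single broadcast
-+   (VEC_DUPLICATE), while { 5, 5, 5, 5 } passes the signed 10-bit
-+   immediate test and is emitted directly as a CONST_VECTOR move.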
*/ -+ -+void -+loongarch_expand_vector_init (rtx target, rtx vals) -+{ -+ machine_mode vmode = GET_MODE (target); -+ machine_mode imode = GET_MODE_INNER (vmode); -+ unsigned i, nelt = GET_MODE_NUNITS (vmode); -+ unsigned nvar = 0, one_var = -1u; -+ bool all_same = true; -+ rtx x; -+ -+ for (i = 0; i < nelt; ++i) -+ { -+ x = XVECEXP (vals, 0, i); -+ if (!loongarch_constant_elt_p (x)) -+ nvar++, one_var = i; -+ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) -+ all_same = false; -+ } -+ -+ if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) -+ { -+ if (all_same) -+ { -+ rtx same = XVECEXP (vals, 0, 0); -+ rtx temp, temp2; -+ -+ if (CONST_INT_P (same) && nvar == 0 -+ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) -+ { -+ switch (vmode) -+ { -+ case E_V32QImode: -+ case E_V16HImode: -+ case E_V8SImode: -+ case E_V4DImode: -+ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); -+ emit_move_insn (target, temp); -+ return; -+ -+ default: -+ gcc_unreachable (); -+ } -+ } -+ -+ temp = gen_reg_rtx (imode); -+ if (imode == GET_MODE (same)) -+ temp2 = same; -+ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) -+ { -+ if(GET_CODE (same) == MEM) -+ { -+ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); -+ loongarch_emit_move (reg_tmp, same); -+ temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); -+ } -+ else -+ temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); -+ } -+ else -+ { -+ if(GET_CODE (same) == MEM) -+ { -+ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); -+ loongarch_emit_move (reg_tmp, same); -+ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); -+ } -+ else -+ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); -+ } -+ emit_move_insn (temp, temp2); -+ -+ switch (vmode) -+ { -+ case E_V32QImode: -+ case E_V16HImode: -+ case E_V8SImode: -+ case E_V4DImode: -+ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); -+ break; -+ -+ case E_V8SFmode: -+ emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); -+ break; -+ -+ case E_V4DFmode: -+ emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ } -+ else -+ { -+ rtvec vec = shallow_copy_rtvec (XVEC (vals, 0)); -+ -+ for (i = 0; i < nelt; ++i) -+ RTVEC_ELT (vec, i) = CONST0_RTX (imode); -+ -+ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec)); -+ -+ machine_mode half_mode = VOIDmode; -+ rtx target_hi, target_lo; -+ -+ switch (vmode) -+ { -+ case E_V32QImode: -+ half_mode=E_V16QImode; -+ target_hi = gen_reg_rtx (half_mode); -+ target_lo = gen_reg_rtx (half_mode); -+ for (i = 0; i < nelt/2; ++i) -+ { -+ rtx temp_hi = gen_reg_rtx (imode); -+ rtx temp_lo = gen_reg_rtx (imode); -+ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); -+ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); -+ emit_insn (gen_vec_setv16qi (target_hi, temp_hi, GEN_INT (i))); -+ emit_insn (gen_vec_setv16qi (target_lo, temp_lo, GEN_INT (i))); -+ } -+ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); -+ break; -+ -+ case E_V16HImode: -+ half_mode=E_V8HImode; -+ target_hi = gen_reg_rtx (half_mode); -+ target_lo = gen_reg_rtx (half_mode); -+ for (i = 0; i < nelt/2; ++i) -+ { -+ rtx temp_hi = gen_reg_rtx (imode); -+ rtx temp_lo = gen_reg_rtx (imode); -+ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); -+ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); -+ emit_insn (gen_vec_setv8hi (target_hi, temp_hi, GEN_INT (i))); -+ emit_insn (gen_vec_setv8hi (target_lo, temp_lo, GEN_INT (i))); -+ } -+ emit_insn (gen_rtx_SET 
(target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); -+ break; -+ -+ case E_V8SImode: -+ half_mode=V4SImode; -+ target_hi = gen_reg_rtx (half_mode); -+ target_lo = gen_reg_rtx (half_mode); -+ for (i = 0; i < nelt/2; ++i) -+ { -+ rtx temp_hi = gen_reg_rtx (imode); -+ rtx temp_lo = gen_reg_rtx (imode); -+ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); -+ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); -+ emit_insn (gen_vec_setv4si (target_hi, temp_hi, GEN_INT (i))); -+ emit_insn (gen_vec_setv4si (target_lo, temp_lo, GEN_INT (i))); -+ } -+ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); -+ break; -+ -+ case E_V4DImode: -+ half_mode=E_V2DImode; -+ target_hi = gen_reg_rtx (half_mode); -+ target_lo = gen_reg_rtx (half_mode); -+ for (i = 0; i < nelt/2; ++i) -+ { -+ rtx temp_hi = gen_reg_rtx (imode); -+ rtx temp_lo = gen_reg_rtx (imode); -+ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); -+ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); -+ emit_insn (gen_vec_setv2di (target_hi, temp_hi, GEN_INT (i))); -+ emit_insn (gen_vec_setv2di (target_lo, temp_lo, GEN_INT (i))); -+ } -+ /* PUT_MODE(target_hi, GET_MODE (target)); */ -+ /* PUT_MODE(target_lo, GET_MODE (target)); */ -+ /* emit_insn ( gen_lasx_shufi_q_v4di (target_hi, target_lo, GEN_INT(1))); */ -+ /* emit_move_insn (target, target_hi); */ -+ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); -+ break; -+ -+ case E_V8SFmode: -+ half_mode=E_V4SFmode; -+ target_hi = gen_reg_rtx (half_mode); -+ target_lo = gen_reg_rtx (half_mode); -+ for (i = 0; i < nelt/2; ++i) -+ { -+ rtx temp_hi = gen_reg_rtx (imode); -+ rtx temp_lo = gen_reg_rtx (imode); -+ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); -+ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); -+ emit_insn (gen_vec_setv4sf (target_hi, temp_hi, GEN_INT (i))); -+ emit_insn (gen_vec_setv4sf (target_lo, temp_lo, GEN_INT (i))); -+ } -+ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); -+ break; -+ -+ case E_V4DFmode: -+ half_mode=E_V2DFmode; -+ target_hi = gen_reg_rtx (half_mode); -+ target_lo = gen_reg_rtx (half_mode); -+ for (i = 0; i < nelt/2; ++i) -+ { -+ rtx temp_hi = gen_reg_rtx (imode); -+ rtx temp_lo = gen_reg_rtx (imode); -+ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); -+ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); -+ emit_insn (gen_vec_setv2df (target_hi, temp_hi, GEN_INT (i))); -+ emit_insn (gen_vec_setv2df (target_lo, temp_lo, GEN_INT (i))); -+ } -+ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); -+ break; -+ -+ default: -+ gcc_unreachable(); -+ } -+ -+ } -+ return; -+ } -+ -+ if (ISA_HAS_LSX) -+ { -+ if (all_same) -+ { -+ rtx same = XVECEXP (vals, 0, 0); -+ rtx temp, temp2; -+ -+ if (CONST_INT_P (same) && nvar == 0 -+ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) -+ { -+ switch (vmode) -+ { -+ case E_V16QImode: -+ case E_V8HImode: -+ case E_V4SImode: -+ case E_V2DImode: -+ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); -+ emit_move_insn (target, temp); -+ return; -+ -+ default: -+ gcc_unreachable (); -+ } -+ } -+ temp = gen_reg_rtx (imode); -+ if (imode == GET_MODE (same)) -+ temp2 = same; -+ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) -+ { -+ if(GET_CODE (same) == MEM) -+ { -+ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); -+ loongarch_emit_move (reg_tmp, same); -+ temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); -+ } -+ else -+ temp2 = simplify_gen_subreg (imode, same, 
GET_MODE (same), 0); -+ } -+ else -+ { -+ if(GET_CODE (same) == MEM) -+ { -+ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); -+ loongarch_emit_move (reg_tmp, same); -+ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); -+ } -+ else -+ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); -+ } -+ emit_move_insn (temp, temp2); -+ -+ switch (vmode) -+ { -+ case E_V16QImode: -+ case E_V8HImode: -+ case E_V4SImode: -+ case E_V2DImode: -+ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); -+ break; -+ -+ case E_V4SFmode: -+ emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); -+ break; -+ -+ case E_V2DFmode: -+ emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ } -+ else -+ { -+ emit_move_insn (target, CONST0_RTX (vmode)); -+ -+ for (i = 0; i < nelt; ++i) -+ { -+ rtx temp = gen_reg_rtx (imode); -+ emit_move_insn (temp, XVECEXP (vals, 0, i)); -+ switch (vmode) -+ { -+ case E_V16QImode: -+ emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i))); -+ break; -+ -+ case E_V8HImode: -+ emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i))); -+ break; -+ -+ case E_V4SImode: -+ emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i))); -+ break; -+ -+ case E_V2DImode: -+ emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i))); -+ break; -+ -+ case E_V4SFmode: -+ emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i))); -+ break; -+ -+ case E_V2DFmode: -+ emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i))); -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ } -+ } -+ return; -+ } -+ -+ /* Load constants from the pool, or whatever's handy. */ -+ if (nvar == 0) -+ { -+ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0))); -+ return; -+ } -+ -+ /* For two-part initialization, always use CONCAT. */ -+ if (nelt == 2) -+ { -+ rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0)); -+ rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1)); -+ x = gen_rtx_VEC_CONCAT (vmode, op0, op1); -+ emit_insn (gen_rtx_SET (target, x)); -+ return; -+ } -+ -+ /* Loongson is the only cpu with vectors with more elements. */ -+ gcc_assert (0); -+} -+ -+/* Implement HARD_REGNO_CALLER_SAVE_MODE. */ -+ -+machine_mode -+loongarch_hard_regno_caller_save_mode (unsigned int regno, -+ unsigned int nregs, -+ machine_mode mode) -+{ -+ /* For performance, avoid saving/restoring upper parts of a register -+ by returning MODE as save mode when the mode is known. */ -+ if (mode == VOIDmode) -+ return choose_hard_reg_mode (regno, nregs, false); -+ else -+ return mode; -+} -+ -+/* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and -+ store the result -1 or 0 in DEST. 
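-+
-+   Sketch of the integer canonicalisation below: GT/GE/GTU/GEU are
-+   handled by swapping the operands together with the condition
-+   (a GT b == b LT a), and NE is reversed to EQ with the result negated
-+   afterwards, so only EQ/LT/LE-style compares need native patterns.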
*/ -+ -+static void -+loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1) -+{ -+ machine_mode cmp_mode = GET_MODE (op0); -+ int unspec = -1; -+ bool negate = false; -+ -+ switch (cmp_mode) -+ { -+ case E_V16QImode: -+ case E_V32QImode: -+ case E_V8HImode: -+ case E_V16HImode: -+ case E_V4SImode: -+ case E_V8SImode: -+ case E_V2DImode: -+ case E_V4DImode: -+ switch (cond) -+ { -+ case NE: -+ cond = reverse_condition (cond); -+ negate = true; -+ break; -+ case EQ: -+ case LT: -+ case LE: -+ case LTU: -+ case LEU: -+ break; -+ case GE: -+ case GT: -+ case GEU: -+ case GTU: -+ std::swap (op0, op1); -+ cond = swap_condition (cond); -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ loongarch_emit_binary (cond, dest, op0, op1); -+ if (negate) -+ emit_move_insn (dest, gen_rtx_NOT (GET_MODE (dest), dest)); -+ break; -+ -+ case E_V4SFmode: -+ case E_V2DFmode: -+ switch (cond) -+ { -+ case UNORDERED: -+ case ORDERED: -+ case EQ: -+ case NE: -+ case UNEQ: -+ case UNLE: -+ case UNLT: -+ break; -+ case LTGT: cond = NE; break; -+ case UNGE: cond = UNLE; std::swap (op0, op1); break; -+ case UNGT: cond = UNLT; std::swap (op0, op1); break; -+ case LE: unspec = UNSPEC_LSX_VFCMP_SLE; break; -+ case LT: unspec = UNSPEC_LSX_VFCMP_SLT; break; -+ case GE: unspec = UNSPEC_LSX_VFCMP_SLE; std::swap (op0, op1); break; -+ case GT: unspec = UNSPEC_LSX_VFCMP_SLT; std::swap (op0, op1); break; -+ default: -+ gcc_unreachable (); -+ } -+ if (unspec < 0) -+ loongarch_emit_binary (cond, dest, op0, op1); -+ else -+ { -+ rtx x = gen_rtx_UNSPEC (GET_MODE (dest), -+ gen_rtvec (2, op0, op1), unspec); -+ emit_insn (gen_rtx_SET (dest, x)); -+ } -+ break; -+ -+ case E_V8SFmode: -+ case E_V4DFmode: -+ switch (cond) -+ { -+ case UNORDERED: -+ case ORDERED: -+ case EQ: -+ case NE: -+ case UNEQ: -+ case UNLE: -+ case UNLT: -+ break; -+ case LTGT: cond = NE; break; -+ case UNGE: cond = UNLE; std::swap (op0, op1); break; -+ case UNGT: cond = UNLT; std::swap (op0, op1); break; -+ case LE: unspec = UNSPEC_LASX_XVFCMP_SLE; break; -+ case LT: unspec = UNSPEC_LASX_XVFCMP_SLT; break; -+ case GE: unspec = UNSPEC_LASX_XVFCMP_SLE; std::swap (op0, op1); break; -+ case GT: unspec = UNSPEC_LASX_XVFCMP_SLT; std::swap (op0, op1); break; -+ default: -+ gcc_unreachable (); -+ } -+ if (unspec < 0) -+ loongarch_emit_binary (cond, dest, op0, op1); -+ else -+ { -+ rtx x = gen_rtx_UNSPEC (GET_MODE (dest), -+ gen_rtvec (2, op0, op1), unspec); -+ emit_insn (gen_rtx_SET (dest, x)); -+ } -+ break; -+ -+ default: -+ gcc_unreachable (); -+ break; -+ } -+} -+ -+/* Expand VEC_COND_EXPR, where: -+ MODE is mode of the result -+ VIMODE equivalent integer mode -+ OPERANDS operands of VEC_COND_EXPR. */ -+ -+void -+loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, -+ rtx *operands) -+{ -+ rtx cond = operands[3]; -+ rtx cmp_op0 = operands[4]; -+ rtx cmp_op1 = operands[5]; -+ rtx cmp_res = gen_reg_rtx (vimode); -+ -+ loongarch_expand_lsx_cmp (cmp_res, GET_CODE (cond), cmp_op0, cmp_op1); -+ -+ /* We handle the following cases: -+ 1) r = a CMP b ? -1 : 0 -+ 2) r = a CMP b ? -1 : v -+ 3) r = a CMP b ? v : 0 -+ 4) r = a CMP b ? v1 : v2 */ -+ -+ /* Case (1) above. We only move the results. */ -+ if (operands[1] == CONSTM1_RTX (vimode) -+ && operands[2] == CONST0_RTX (vimode)) -+ emit_move_insn (operands[0], cmp_res); -+ else -+ { -+ rtx src1 = gen_reg_rtx (vimode); -+ rtx src2 = gen_reg_rtx (vimode); -+ rtx mask = gen_reg_rtx (vimode); -+ rtx bsel; -+ -+ /* Move the vector result to use it as a mask. 
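Only EQ/LT/LE (and their unsigned forms) need native compare patterns in the integer case above: the switch rewrites NE as a negated EQ, and turns GT/GE/GTU/GEU into LT/LE/LTU/LEU with the operands swapped. A standalone sketch of that canonicalization on a reduced condition set (the enum and function names are invented for the example; the real code manipulates rtx_code values):

#include <stdio.h>

enum cond { EQ, NE, LT, LE, GT, GE };

/* Canonicalize an integer vector comparison the way the expander above
   does: NE becomes EQ plus a bitwise NOT of the result, and GT/GE become
   LT/LE with the operands swapped, so only EQ/LT/LE need native patterns.  */
static enum cond
canonicalize (enum cond c, int *swap_ops, int *negate)
{
  *swap_ops = 0;
  *negate = 0;
  switch (c)
    {
    case NE: *negate = 1; return EQ;
    case GT: *swap_ops = 1; return LT;
    case GE: *swap_ops = 1; return LE;
    default: return c;
    }
}

int
main (void)
{
  int s, n;
  enum cond c = canonicalize (GE, &s, &n);
  printf ("cond=%d swap=%d negate=%d\n", (int) c, s, n); /* LE, 1, 0 */
  return 0;
}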
*/ -+ emit_move_insn (mask, cmp_res); -+ -+ if (register_operand (operands[1], mode)) -+ { -+ rtx xop1 = operands[1]; -+ if (mode != vimode) -+ { -+ xop1 = gen_reg_rtx (vimode); -+ emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); -+ } -+ emit_move_insn (src1, xop1); -+ } -+ else -+ { -+ gcc_assert (operands[1] == CONSTM1_RTX (vimode)); -+ /* Case (2) if the below doesn't move the mask to src2. */ -+ emit_move_insn (src1, mask); -+ } -+ -+ if (register_operand (operands[2], mode)) -+ { -+ rtx xop2 = operands[2]; -+ if (mode != vimode) -+ { -+ xop2 = gen_reg_rtx (vimode); -+ emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); -+ } -+ emit_move_insn (src2, xop2); -+ } -+ else -+ { -+ gcc_assert (operands[2] == CONST0_RTX (mode)); -+ /* Case (3) if the above didn't move the mask to src1. */ -+ emit_move_insn (src2, mask); -+ } -+ -+ /* We deal with case (4) if the mask wasn't moved to either src1 or src2. -+ In any case, we eventually do vector mask-based copy. */ -+ bsel = gen_rtx_IOR (vimode, -+ gen_rtx_AND (vimode, -+ gen_rtx_NOT (vimode, mask), src2), -+ gen_rtx_AND (vimode, mask, src1)); -+ /* The result is placed back to a register with the mask. */ -+ emit_insn (gen_rtx_SET (mask, bsel)); -+ emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); -+ } -+} -+ -+/* Expand integer vector comparison */ -+bool -+loongarch_expand_int_vec_cmp(rtx operands[]) -+{ -+ -+ rtx_code code = GET_CODE (operands[1]); -+ loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); -+ return true; -+} -+ -+/* Expand integer vector comparison */ -+bool -+loongarch_expand_fp_vec_cmp(rtx operands[]) -+{ -+ rtx_code code = GET_CODE (operands[1]); -+ loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); -+ return true; -+} -+ -+ -+/* Implement TARGET_CASE_VALUES_THRESHOLD. */ -+ -+unsigned int -+loongarch_case_values_threshold (void) -+{ -+ return default_case_values_threshold (); -+} -+ -+ -+/* Implement TARGET_SPILL_CLASS. */ -+ -+static reg_class_t -+loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED, -+ machine_mode mode ATTRIBUTE_UNUSED) -+{ -+ return NO_REGS; -+} -+ -+/* Implement TARGET_LRA_P. */ -+ -+static bool -+loongarch_lra_p (void) -+{ -+ return loongarch_lra_flag; -+} -+ -+/* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS. */ -+ -+static reg_class_t -+loongarch_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class, -+ reg_class_t best_class ATTRIBUTE_UNUSED) -+{ -+ /* LRA will allocate an FPR for an integer mode pseudo instead of spilling -+ to memory if an FPR is present in the allocno class. It is rare that -+ we actually need to place an integer mode value in an FPR so where -+ possible limit the allocation to GR_REGS. This will slightly pessimize -+ code that involves integer to/from float conversions as these will have -+ to reload into FPRs in LRA. Such reloads are sometimes eliminated and -+ sometimes only partially eliminated. We choose to take this penalty -+ in order to eliminate usage of FPRs in code that does not use floating -+ point data. -+ -+ This change has a similar effect to increasing the cost of FPR->GPR -+ register moves for integer modes so that they are higher than the cost -+ of memory but changing the allocno class is more reliable. -+ -+ This is also similar to forbidding integer mode values in FPRs entirely -+ but this would lead to an inconsistency in the integer to/from float -+ instructions that say integer mode values must be placed in FPRs. 
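The "vector mask-based copy" built above is the classic bit-select identity: lanes where the mask is all ones take src1, lanes where it is all zeros take src2. A plain-integer demonstration of the identity (mask & src1) | (~mask & src2):

#include <stdio.h>
#include <stdint.h>

/* Lanes where MASK is all ones take SRC1, lanes where it is all zeros
   take SRC2.  Per-"word" masks of 0xFFFFFFFF or 0 behave exactly like
   the per-lane vector select above.  */
static uint32_t
bit_select (uint32_t mask, uint32_t src1, uint32_t src2)
{
  return (mask & src1) | (~mask & src2);
}

int
main (void)
{
  printf ("%#x\n", bit_select (0xffffffffu, 0x1111u, 0x2222u)); /* 0x1111 */
  printf ("%#x\n", bit_select (0x00000000u, 0x1111u, 0x2222u)); /* 0x2222 */
  return 0;
}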
*/ -+ if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno)) && allocno_class == ALL_REGS) -+ return GR_REGS; -+ return allocno_class; -+} -+ -+/* Implement TARGET_PROMOTE_FUNCTION_MODE */ -+ -+/* This function is equivalent to default_promote_function_mode_always_promote -+ except that it returns a promoted mode even if type is NULL_TREE. This is -+ needed by libcalls which have no type (only a mode) such as fixed conversion -+ routines that take a signed or unsigned char/short argument and convert it -+ to a fixed type. */ -+ -+static machine_mode -+loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, -+ machine_mode mode, -+ int *punsignedp ATTRIBUTE_UNUSED, -+ const_tree fntype ATTRIBUTE_UNUSED, -+ int for_return ATTRIBUTE_UNUSED) -+{ -+ int unsignedp; -+ -+ if (type != NULL_TREE) -+ return promote_mode (type, mode, punsignedp); -+ -+ unsignedp = *punsignedp; -+ PROMOTE_MODE (mode, unsignedp, type); -+ *punsignedp = unsignedp; -+ return mode; -+} -+ -+/* Implement TARGET_TRULY_NOOP_TRUNCATION. */ -+ -+static bool -+loongarch_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec) -+{ -+ return !TARGET_64BIT || inprec <= 32 || outprec > 32; -+} -+ -+/* Implement TARGET_CONSTANT_ALIGNMENT. */ -+ -+static HOST_WIDE_INT -+loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align) -+{ -+ if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR) -+ return MAX (align, BITS_PER_WORD); -+ return align; -+} -+ -+/* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info -+ for details about the frame layout. */ -+ -+static HOST_WIDE_INT -+loongarch_starting_frame_offset (void) -+{ -+ if (FRAME_GROWS_DOWNWARD) -+ return 0; -+ return crtl->outgoing_args_size; -+} -+ -+/* Loongson ext test for LA464 128 bit offset mem is legaly. */ -+ -+bool -+loongarch_la464_128_store_p (rtx operands[]) -+{ -+ int offset0; -+ int offset1; -+ rtx dst0 = operands[0]; -+ rtx dst1 = operands[2]; -+ rtx src0 = operands[1]; -+ rtx src1 = operands[3]; -+ int base_reg0; -+ int base_reg1; -+ -+ if (GET_CODE (XEXP (dst0, 0)) == PLUS) -+ { -+ offset0 = XINT (XEXP (XEXP (dst0, 0), 1), 0); -+ base_reg0 = REGNO (XEXP (XEXP (dst0, 0), 0)); -+ } -+ else if (GET_CODE (XEXP (dst0, 0)) == MINUS) -+ { -+ offset0 = XINT (XEXP (XEXP (dst0, 0), 1), 0); -+ base_reg0 = REGNO (XEXP (XEXP (dst0, 0), 0)); -+ } -+ else -+ { -+ offset0 = 0; -+ base_reg0 = REGNO (XEXP (dst0, 0)); -+ } -+ -+ if (GET_CODE (XEXP (dst1, 0)) == PLUS) -+ { -+ offset1= XINT (XEXP (XEXP (dst1, 0), 1), 0); -+ base_reg1 = REGNO (XEXP (XEXP (dst1, 0), 0)); -+ } -+ else if (GET_CODE (XEXP (dst1, 0)) == MINUS) -+ { -+ offset1= XINT (XEXP (XEXP (dst1, 0), 1), 0); -+ base_reg1 = REGNO (XEXP (XEXP (dst1, 0), 0)); -+ } -+ else -+ { -+ offset1 = 0; -+ base_reg1 = REGNO (XEXP (dst1, 0)); -+ } -+ -+ if (base_reg0 != base_reg1) -+ return false; -+ -+ if (offset1 % 16 !=0) -+ { -+ /* store offset is not align! */ -+ return false; -+ } -+ -+ if ( offset0 - offset1 !=8) -+ { -+ /* store offset diff is not 8! */ -+ return false; -+ } -+ -+ if ( offset1>4095 || offset1<-4096) -+ { -+ /* load offset out of range! 
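Stripped of the RTL address walk, the pairing predicate above accepts two 64-bit stores only when they share a base register, the lower offset is 16-byte aligned and within the signed 12-bit displacement range, and the two offsets are exactly 8 bytes apart. A standalone restatement of just the offset checks (the function name is invented; the base-register comparison is omitted):

#include <stdbool.h>
#include <stdio.h>

static bool
can_pair_128bit (long off0, long off1)
{
  if (off1 % 16 != 0)
    return false;               /* lower slot not 16-byte aligned */
  if (off0 - off1 != 8)
    return false;               /* not two adjacent 8-byte slots */
  if (off1 > 4095 || off1 < -4096)
    return false;               /* displacement out of range */
  return true;
}

int
main (void)
{
  printf ("%d %d %d\n",
          can_pair_128bit (24, 16),       /* 1: aligned, adjacent */
          can_pair_128bit (16, 24),       /* 0: wrong order */
          can_pair_128bit (4104, 4096));  /* 0: out of range */
  return 0;
}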
*/ -+ return false; -+ } -+ -+ return true; -+} -+ -+bool -+loongarch_la464_128_load_p (rtx operands[]) -+{ -+ int offset0; -+ int offset1; -+ rtx dst0 = operands[0]; -+ rtx dst1 = operands[2]; -+ rtx src0 = operands[1]; -+ rtx src1 = operands[3]; -+ int base_reg0; -+ int base_reg1; -+ int dst_reg0; -+ -+ dst_reg0 = REGNO (dst0); -+ -+ if (GET_CODE (XEXP (src0, 0)) == PLUS) -+ { -+ offset0 = XINT (XEXP (XEXP (src0, 0), 1), 0); -+ base_reg0 = REGNO (XEXP (XEXP (src0, 0), 0)); -+ } -+ else if (GET_CODE (XEXP (src0, 0)) == MINUS) -+ { -+ offset0 = XINT (XEXP (XEXP (src0, 0), 1), 0); -+ base_reg0 = REGNO (XEXP (XEXP (src0, 0), 0)); -+ } -+ else -+ { -+ offset0 = 0; -+ base_reg0 = REGNO (XEXP (src0, 0)); -+ } -+ -+ if (GET_CODE (XEXP (src1, 0)) == PLUS) -+ { -+ offset1= XINT (XEXP (XEXP (src1, 0), 1), 0); -+ base_reg1 = REGNO (XEXP (XEXP (src1, 0), 0)); -+ } -+ else if (GET_CODE (XEXP (src1, 0)) == MINUS) -+ { -+ offset1= XINT (XEXP (XEXP (src1, 0), 1), 0); -+ base_reg1 = REGNO (XEXP (XEXP (src1, 0), 0)); -+ } -+ else -+ { -+ offset1 =0; -+ base_reg1 = REGNO (XEXP (src1, 0)); -+ } -+ -+ if (base_reg0 != base_reg1) -+ return false; -+ -+ /* Skip read dead reg. */ -+ if (base_reg0 == dst_reg0) -+ return false; -+ -+ if (offset1 % 16 !=0) -+ { -+ /* load offset is not align! */ -+ return false; -+ } -+ -+ if ( offset0 - offset1 !=8) -+ { -+ /* load offset diff is not 8! */ -+ return false; -+ } -+ -+ if ( offset1>4095 || offset1<-4096) -+ { -+ /* load offset out of range! */ -+ return false; -+ } -+ -+ return true; -+} -+ -+/* A subroutine of loongarch_build_signbit_mask. If VECT is true, -+ then replicate the value for all elements of the vector -+ register. */ -+ -+rtx -+loongarch_build_const_vector (machine_mode mode, bool vect, rtx value) -+{ -+ int i, n_elt; -+ rtvec v; -+ machine_mode scalar_mode; -+ -+ switch (mode) -+ { -+ case E_V64QImode: -+ case E_V32QImode: -+ case E_V16QImode: -+ case E_V32HImode: -+ case E_V16HImode: -+ case E_V8HImode: -+ case E_V16SImode: -+ case E_V8SImode: -+ case E_V4SImode: -+ case E_V8DImode: -+ case E_V4DImode: -+ case E_V2DImode: -+ gcc_assert (vect); -+ /* FALLTHRU */ -+ case E_V16SFmode: -+ case E_V8SFmode: -+ case E_V4SFmode: -+ case E_V8DFmode: -+ case E_V4DFmode: -+ case E_V2DFmode: -+ n_elt = GET_MODE_NUNITS (mode); -+ v = rtvec_alloc (n_elt); -+ scalar_mode = GET_MODE_INNER (mode); -+ -+ RTVEC_ELT (v, 0) = value; -+ -+ for (i = 1; i < n_elt; ++i) -+ RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode); -+ -+ return gen_rtx_CONST_VECTOR (mode, v); -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+/* Create a mask for the sign bit in MODE -+ for an SSE register. If VECT is true, then replicate the mask for -+ all elements of the vector register. If INVERT is true, then create -+ a mask excluding the sign bit. 
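Per element, the mask described above is simply a constant with only the sign bit set, or its complement when INVERT is true; such masks implement vector negation and absolute value via XOR and AND. A plain-integer sketch of the computation (the helper name is invented):

#include <stdio.h>
#include <stdint.h>

static uint64_t
signbit_mask (int bits, int invert)
{
  uint64_t width_mask = bits == 64 ? ~0ULL : ((uint64_t) 1 << bits) - 1;
  uint64_t m = (uint64_t) 1 << (bits - 1);   /* only the sign bit set */
  return (invert ? ~m : m) & width_mask;
}

int
main (void)
{
  printf ("%#llx\n", (unsigned long long) signbit_mask (32, 0)); /* 0x80000000 */
  printf ("%#llx\n", (unsigned long long) signbit_mask (32, 1)); /* 0x7fffffff */
  printf ("%#llx\n", (unsigned long long) signbit_mask (64, 0)); /* 0x8000000000000000 */
  return 0;
}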
*/ -+ -+rtx -+loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) -+{ -+ machine_mode vec_mode, imode; -+ wide_int w; -+ rtx mask, v; -+ -+ switch (mode) -+ { -+ case E_V16SImode: -+ case E_V16SFmode: -+ case E_V8SImode: -+ case E_V4SImode: -+ case E_V8SFmode: -+ case E_V4SFmode: -+ vec_mode = mode; -+ imode = SImode; -+ break; -+ -+ case E_V8DImode: -+ case E_V4DImode: -+ case E_V2DImode: -+ case E_V8DFmode: -+ case E_V4DFmode: -+ case E_V2DFmode: -+ vec_mode = mode; -+ imode = DImode; -+ break; -+ -+ case E_TImode: -+ case E_TFmode: -+ vec_mode = VOIDmode; -+ imode = TImode; -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ -+ machine_mode inner_mode = GET_MODE_INNER (mode); -+ w = wi::set_bit_in_zero (GET_MODE_BITSIZE (inner_mode) - 1, -+ GET_MODE_BITSIZE (inner_mode)); -+ if (invert) -+ w = wi::bit_not (w); -+ -+ /* Force this value into the low part of a fp vector constant. */ -+ mask = immed_wide_int_const (w, imode); -+ mask = gen_lowpart (inner_mode, mask); -+ -+ if (vec_mode == VOIDmode) -+ return force_reg (inner_mode, mask); -+ -+ v = loongarch_build_const_vector (vec_mode, vect, mask); -+ return force_reg (vec_mode, v); -+} -+ -+ -+ -+/* Initialize the GCC target structure. */ -+#undef TARGET_ASM_ALIGNED_HI_OP -+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" -+#undef TARGET_ASM_ALIGNED_SI_OP -+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" -+#undef TARGET_ASM_ALIGNED_DI_OP -+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t" -+ -+#undef TARGET_OPTION_OVERRIDE -+#define TARGET_OPTION_OVERRIDE loongarch_option_override -+ -+#undef TARGET_LEGITIMIZE_ADDRESS -+#define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address -+ -+#undef TARGET_ASM_FUNCTION_PROLOGUE -+#define TARGET_ASM_FUNCTION_PROLOGUE loongarch_output_function_prologue -+#undef TARGET_ASM_FUNCTION_EPILOGUE -+#define TARGET_ASM_FUNCTION_EPILOGUE loongarch_output_function_epilogue -+#undef TARGET_ASM_SELECT_RTX_SECTION -+#define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section -+#undef TARGET_ASM_FUNCTION_RODATA_SECTION -+#define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section -+ -+#undef TARGET_SCHED_INIT -+#define TARGET_SCHED_INIT loongarch_sched_init -+#undef TARGET_SCHED_REORDER -+#define TARGET_SCHED_REORDER loongarch_sched_reorder -+#undef TARGET_SCHED_REORDER2 -+#define TARGET_SCHED_REORDER2 loongarch_sched_reorder2 -+#undef TARGET_SCHED_VARIABLE_ISSUE -+#define TARGET_SCHED_VARIABLE_ISSUE loongarch_variable_issue -+#undef TARGET_SCHED_ADJUST_COST -+#define TARGET_SCHED_ADJUST_COST loongarch_adjust_cost -+#undef TARGET_SCHED_ISSUE_RATE -+#define TARGET_SCHED_ISSUE_RATE loongarch_issue_rate -+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD -+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ -+ loongarch_multipass_dfa_lookahead -+#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P -+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \ -+ loongarch_small_register_classes_for_mode_p -+ -+#undef TARGET_FUNCTION_OK_FOR_SIBCALL -+#define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall -+ -+#undef TARGET_INSERT_ATTRIBUTES -+#define TARGET_INSERT_ATTRIBUTES loongarch_insert_attributes -+#undef TARGET_MERGE_DECL_ATTRIBUTES -+#define TARGET_MERGE_DECL_ATTRIBUTES loongarch_merge_decl_attributes -+#undef TARGET_CAN_INLINE_P -+#define TARGET_CAN_INLINE_P loongarch_can_inline_p -+ -+#undef TARGET_VALID_POINTER_MODE -+#define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode -+#undef TARGET_REGISTER_MOVE_COST -+#define TARGET_REGISTER_MOVE_COST 
loongarch_register_move_cost -+#undef TARGET_MEMORY_MOVE_COST -+#define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost -+#undef TARGET_RTX_COSTS -+#define TARGET_RTX_COSTS loongarch_rtx_costs -+#undef TARGET_ADDRESS_COST -+#define TARGET_ADDRESS_COST loongarch_address_cost -+#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST -+#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ -+ loongarch_builtin_vectorization_cost -+ -+ -+#undef TARGET_IN_SMALL_DATA_P -+#define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p -+ -+#undef TARGET_MACHINE_DEPENDENT_REORG -+#define TARGET_MACHINE_DEPENDENT_REORG loongarch_reorg -+ -+#undef TARGET_PREFERRED_RELOAD_CLASS -+#define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class -+ -+#undef TARGET_EXPAND_TO_RTL_HOOK -+#define TARGET_EXPAND_TO_RTL_HOOK loongarch_expand_to_rtl_hook -+#undef TARGET_ASM_FILE_START -+#define TARGET_ASM_FILE_START loongarch_file_start -+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE -+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true -+ -+#undef TARGET_EXPAND_BUILTIN_VA_START -+#define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start -+ -+#undef TARGET_PROMOTE_FUNCTION_MODE -+#define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode -+#undef TARGET_RETURN_IN_MEMORY -+#define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory -+ -+#undef TARGET_ASM_OUTPUT_MI_THUNK -+#define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk -+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK -+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true -+ -+#undef TARGET_PRINT_OPERAND -+#define TARGET_PRINT_OPERAND loongarch_print_operand -+#undef TARGET_PRINT_OPERAND_ADDRESS -+#define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address -+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P -+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P loongarch_print_operand_punct_valid_p -+ -+#undef TARGET_SETUP_INCOMING_VARARGS -+#define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs -+#undef TARGET_STRICT_ARGUMENT_NAMING -+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true -+#undef TARGET_MUST_PASS_IN_STACK -+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size -+#undef TARGET_PASS_BY_REFERENCE -+#define TARGET_PASS_BY_REFERENCE loongarch_pass_by_reference -+#undef TARGET_ARG_PARTIAL_BYTES -+#define TARGET_ARG_PARTIAL_BYTES loongarch_arg_partial_bytes -+#undef TARGET_FUNCTION_ARG -+#define TARGET_FUNCTION_ARG loongarch_function_arg -+#undef TARGET_FUNCTION_ARG_ADVANCE -+#define TARGET_FUNCTION_ARG_ADVANCE loongarch_function_arg_advance -+#undef TARGET_FUNCTION_ARG_BOUNDARY -+#define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary -+ -+#undef TARGET_VECTOR_MODE_SUPPORTED_P -+#define TARGET_VECTOR_MODE_SUPPORTED_P loongarch_vector_mode_supported_p -+ -+#undef TARGET_SCALAR_MODE_SUPPORTED_P -+#define TARGET_SCALAR_MODE_SUPPORTED_P loongarch_scalar_mode_supported_p -+ -+#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE -+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE loongarch_preferred_simd_mode -+ -+#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES -+#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \ -+ loongarch_autovectorize_vector_sizes -+ -+#undef TARGET_INIT_BUILTINS -+#define TARGET_INIT_BUILTINS loongarch_init_builtins -+#undef TARGET_BUILTIN_DECL -+#define TARGET_BUILTIN_DECL loongarch_builtin_decl -+#undef TARGET_EXPAND_BUILTIN -+#define TARGET_EXPAND_BUILTIN loongarch_expand_builtin -+ -+#undef TARGET_HAVE_TLS -+#define TARGET_HAVE_TLS HAVE_AS_TLS -+ -+#undef 
TARGET_CANNOT_FORCE_CONST_MEM -+#define TARGET_CANNOT_FORCE_CONST_MEM loongarch_cannot_force_const_mem -+ -+#undef TARGET_LEGITIMATE_CONSTANT_P -+#define TARGET_LEGITIMATE_CONSTANT_P loongarch_legitimate_constant_p -+ -+#undef TARGET_ENCODE_SECTION_INFO -+#define TARGET_ENCODE_SECTION_INFO loongarch_encode_section_info -+ -+#undef TARGET_ATTRIBUTE_TABLE -+#define TARGET_ATTRIBUTE_TABLE loongarch_attribute_table -+/* All our function attributes are related to how out-of-line copies should -+ be compiled or called. They don't in themselves prevent inlining. */ -+#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P -+#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true -+ -+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P -+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P loongarch_use_blocks_for_constant_p -+#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P -+#define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p -+ -+#undef TARGET_COMP_TYPE_ATTRIBUTES -+#define TARGET_COMP_TYPE_ATTRIBUTES loongarch_comp_type_attributes -+ -+#ifdef HAVE_AS_DTPRELWORD -+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL -+#define TARGET_ASM_OUTPUT_DWARF_DTPREL loongarch_output_dwarf_dtprel -+#endif -+#undef TARGET_DWARF_REGISTER_SPAN -+#define TARGET_DWARF_REGISTER_SPAN loongarch_dwarf_register_span -+#undef TARGET_DWARF_FRAME_REG_MODE -+#define TARGET_DWARF_FRAME_REG_MODE loongarch_dwarf_frame_reg_mode -+ -+#undef TARGET_LEGITIMATE_ADDRESS_P -+#define TARGET_LEGITIMATE_ADDRESS_P loongarch_legitimate_address_p -+ -+#undef TARGET_FRAME_POINTER_REQUIRED -+#define TARGET_FRAME_POINTER_REQUIRED loongarch_frame_pointer_required -+ -+#undef TARGET_CAN_ELIMINATE -+#define TARGET_CAN_ELIMINATE loongarch_can_eliminate -+ -+#undef TARGET_CONDITIONAL_REGISTER_USAGE -+#define TARGET_CONDITIONAL_REGISTER_USAGE loongarch_conditional_register_usage -+ -+#undef TARGET_TRAMPOLINE_INIT -+#define TARGET_TRAMPOLINE_INIT loongarch_trampoline_init -+ -+#undef TARGET_SHIFT_TRUNCATION_MASK -+#define TARGET_SHIFT_TRUNCATION_MASK loongarch_shift_truncation_mask -+ -+#undef TARGET_VECTORIZE_VEC_PERM_CONST -+#define TARGET_VECTORIZE_VEC_PERM_CONST loongarch_vectorize_vec_perm_const -+ -+#undef TARGET_SCHED_REASSOCIATION_WIDTH -+#define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width -+ -+#undef TARGET_CASE_VALUES_THRESHOLD -+#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold -+ -+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV -+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv -+ -+#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS -+#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true -+ -+#undef TARGET_SPILL_CLASS -+#define TARGET_SPILL_CLASS loongarch_spill_class -+#undef TARGET_LRA_P -+#define TARGET_LRA_P loongarch_lra_p -+#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS -+#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS loongarch_ira_change_pseudo_allocno_class -+ -+#undef TARGET_HARD_REGNO_SCRATCH_OK -+#define TARGET_HARD_REGNO_SCRATCH_OK loongarch_hard_regno_scratch_ok -+ -+#undef TARGET_HARD_REGNO_NREGS -+#define TARGET_HARD_REGNO_NREGS loongarch_hard_regno_nregs -+#undef TARGET_HARD_REGNO_MODE_OK -+#define TARGET_HARD_REGNO_MODE_OK loongarch_hard_regno_mode_ok -+ -+#undef TARGET_MODES_TIEABLE_P -+#define TARGET_MODES_TIEABLE_P loongarch_modes_tieable_p -+ -+#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED -+#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \ -+ loongarch_hard_regno_call_part_clobbered -+ -+#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS -+#define 
TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2 -+ -+#undef TARGET_SECONDARY_MEMORY_NEEDED -+#define TARGET_SECONDARY_MEMORY_NEEDED loongarch_secondary_memory_needed -+ -+#undef TARGET_CAN_CHANGE_MODE_CLASS -+#define TARGET_CAN_CHANGE_MODE_CLASS loongarch_can_change_mode_class -+ -+#undef TARGET_TRULY_NOOP_TRUNCATION -+#define TARGET_TRULY_NOOP_TRUNCATION loongarch_truly_noop_truncation -+ -+#undef TARGET_CONSTANT_ALIGNMENT -+#define TARGET_CONSTANT_ALIGNMENT loongarch_constant_alignment -+ -+#undef TARGET_STARTING_FRAME_OFFSET -+#define TARGET_STARTING_FRAME_OFFSET loongarch_starting_frame_offset -+ -+struct gcc_target targetm = TARGET_INITIALIZER; -+ -+#include "gt-loongarch.h" -diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h -new file mode 100644 -index 000000000..18d17afb8 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch.h -@@ -0,0 +1,2145 @@ -+/* Definitions of target machine for GNU compiler. LARCH version. -+ Copyright (C) 1989-2018 Free Software Foundation, Inc. -+ Contributed by A. Lichnewsky (lich@inria.inria.fr). -+ Changed by Michael Meissner (meissner@osf.org). -+ 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and -+ Brendan Eich (brendan@microunity.com). -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+ -+#include "config/vxworks-dummy.h" -+ -+#ifdef GENERATOR_FILE -+/* This is used in some insn conditions, so needs to be declared, but -+ does not need to be defined. */ -+extern int target_flags_explicit; -+#endif -+ -+/* LARCH external variables defined in loongarch.c. */ -+ -+/* Which ABI to use. ABILP32 (original 32, or o32), ABILPX32 (n32), -+ ABILP64 (n64) are all defined by SGI. */ -+ -+#define ABILP32 0 -+#define ABILPX32 1 -+#define ABILP64 2 -+ -+/* Information about one recognized processor. Defined here for the -+ benefit of TARGET_CPU_CPP_BUILTINS. */ -+struct loongarch_cpu_info { -+ /* The 'canonical' name of the processor as far as GCC is concerned. -+ It's typically a manufacturer's prefix followed by a numerical -+ designation. It should be lowercase. */ -+ const char *name; -+ -+ /* The internal processor number that most closely matches this -+ entry. Several processors can have the same value, if there's no -+ difference between them from GCC's point of view. */ -+ enum processor cpu; -+ -+ /* The ISA level that the processor implements. */ -+ int isa; -+ -+ /* A mask of PTF_* values. */ -+ unsigned int tune_flags; -+}; -+ -+#include "config/loongarch/loongarch-opts.h" -+ -+/* Macros to silence warnings about numbers being signed in traditional -+ C and unsigned in ISO C when compiled on 32-bit hosts. */ -+ -+#define BITMASK_HIGH (((unsigned long)1) << 31) /* 0x80000000 */ -+#define BITMASK_UPPER16 ((unsigned long)0xffff << 16) /* 0xffff0000 */ -+#define BITMASK_LOWER16 ((unsigned long)0xffff) /* 0x0000ffff */ -+ -+ -+/* Run-time compilation parameters selecting different hardware subsets. 
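A table of loongarch_cpu_info records like the one described above is typically searched linearly by canonical name. A minimal standalone sketch of that lookup, with the struct reduced to plain ints and the table entries invented for the example:

#include <stdio.h>
#include <string.h>

/* Field layout mirrors the loongarch_cpu_info comment above; the enum
   and flag values here are placeholders, not the port's real ones.  */
struct cpu_info { const char *name; int cpu; int isa; unsigned tune_flags; };

static const struct cpu_info cpu_table[] = {
  { "loongarch64", 0, 64, 0 },
  { "la464",       1, 64, 0 },
};

static const struct cpu_info *
find_cpu (const char *name)
{
  for (size_t i = 0; i < sizeof cpu_table / sizeof cpu_table[0]; i++)
    if (strcmp (cpu_table[i].name, name) == 0)
      return &cpu_table[i];
  return NULL;
}

int
main (void)
{
  const struct cpu_info *c = find_cpu ("la464");
  printf ("%s: isa=%d\n", c ? c->name : "?", c ? c->isa : -1);
  return 0;
}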
*/ -+ -+/* True if we are generating position-independent VxWorks RTP code. */ -+#define TARGET_RTP_PIC (TARGET_VXWORKS_RTP && flag_pic) -+ -+/* True if we can optimize sibling calls. For simplicity, we only -+ handle cases in which call_insn_operand will reject invalid -+ sibcall addresses. There are two cases in which this isn't true: -+ -+ - TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS. call_insn_operand -+ accepts global constants, but all sibcalls must be indirect. */ -+#define TARGET_SIBCALLS (1) -+ -+/* True if we can use the J and JAL instructions. */ -+#define TARGET_ABSOLUTE_JUMPS (!flag_pic) -+ -+/* True if the output must have a writable .eh_frame. -+ See ASM_PREFERRED_EH_DATA_FORMAT for details. */ -+#ifdef HAVE_LD_PERSONALITY_RELAXATION -+#define TARGET_WRITABLE_EH_FRAME 0 -+#else -+#define TARGET_WRITABLE_EH_FRAME (flag_pic && TARGET_SHARED) -+#endif -+ -+ -+/* ISA has LSA available. */ -+#define ISA_HAS_LSA (1) -+ -+/* ISA has DLSA available. */ -+#define ISA_HAS_DLSA (TARGET_64BIT) -+ -+/* Architecture target defines. */ -+#define TARGET_LOONGARCH64 (loongarch_arch == PROCESSOR_LOONGARCH64) -+#define TUNE_LOONGARCH64 (loongarch_tune == PROCESSOR_LOONGARCH64) -+#define TARGET_LA464 (loongarch_arch == PROCESSOR_LA464) -+#define TUNE_LA464 (loongarch_tune == PROCESSOR_LA464) -+/* True if the pre-reload scheduler should try to create chains of -+ multiply-add or multiply-subtract instructions. For example, -+ suppose we have: -+ -+ t1 = a * b -+ t2 = t1 + c * d -+ t3 = e * f -+ t4 = t3 - g * h -+ -+ t1 will have a higher priority than t2 and t3 will have a higher -+ priority than t4. However, before reload, there is no dependence -+ between t1 and t3, and they can often have similar priorities. -+ The scheduler will then tend to prefer: -+ -+ t1 = a * b -+ t3 = e * f -+ t2 = t1 + c * d -+ t4 = t3 - g * h -+ -+ which stops us from making full use of macc/madd-style instructions. -+ This sort of situation occurs frequently in Fourier transforms and -+ in unrolled loops. -+ -+ To counter this, the TUNE_MACC_CHAINS code will reorder the ready -+ queue so that chained multiply-add and multiply-subtract instructions -+ appear ahead of any other instruction that is likely to clobber lo. -+ In the example above, if t2 and t3 become ready at the same time, -+ the code ensures that t2 is scheduled first. -+ -+ Multiply-accumulate instructions are a bigger win for some targets -+ than others, so this macro is defined on an opt-in basis. */ -+#define TUNE_MACC_CHAINS 0 -+ -+#define TARGET_OLDABI (loongarch_abi == ABILP32) -+#define TARGET_NEWABI (loongarch_abi == ABILPX32 || loongarch_abi == ABILP64) -+ -+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is -+ directly accessible, while the command-line options select -+ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI -+ in use. */ -+#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_ABI) -+#define TARGET_SOFT_FLOAT (TARGET_SOFT_FLOAT_ABI) -+ -+/* False if SC acts as a memory barrier with respect to itself, -+ otherwise a SYNC will be emitted after SC for atomic operations -+ that require ordering between the SC and following loads and -+ stores. It does not tell anything about ordering of loads and -+ stores prior to and following the SC, only about the SC itself and -+ those loads and stores follow it. */ -+#define TARGET_SYNC_AFTER_SC (1) -+ -+/* Define preprocessor macros for the -march and -mtune options. -+ PREFIX is either _LARCH_ARCH or _LARCH_TUNE, INFO is the selected -+ processor. 
If INFO's canonical name is "foo", define PREFIX to -+ be "foo", and define an additional macro PREFIX_FOO. */ -+#define LARCH_CPP_SET_PROCESSOR(PREFIX, INFO) \ -+ do \ -+ { \ -+ char *macro, *p; \ -+ \ -+ macro = concat ((PREFIX), "_", (INFO)->name, NULL); \ -+ for (p = macro; *p != 0; p++) \ -+ if (*p == '+') \ -+ *p = 'P'; \ -+ else \ -+ *p = TOUPPER (*p); \ -+ \ -+ builtin_define (macro); \ -+ builtin_define_with_value ((PREFIX), (INFO)->name, 1); \ -+ free (macro); \ -+ } \ -+ while (0) -+ -+/* Target CPU builtins. */ -+#define TARGET_CPU_CPP_BUILTINS() loongarch_cpu_cpp_builtins (pfile) -+ -+/* Target CPU versions for D. */ -+#define TARGET_D_CPU_VERSIONS loongarch_d_target_versions -+ -+/* Default target_flags if no switches are specified */ -+ -+#ifndef TARGET_DEFAULT -+#define TARGET_DEFAULT 0 -+#endif -+ -+#ifndef TARGET_CPU_DEFAULT -+#define TARGET_CPU_DEFAULT 0 -+#endif -+ -+#ifdef IN_LIBGCC2 -+#undef TARGET_64BIT -+/* Make this compile time constant for libgcc2 */ -+#ifdef __loongarch64 -+#define TARGET_64BIT 1 -+#else -+#define TARGET_64BIT 0 -+#endif -+#endif /* IN_LIBGCC2 */ -+ -+#define TARGET_LIBGCC_SDATA_SECTION ".sdata" -+ -+#ifndef MULTILIB_ISA_DEFAULT -+#if LARCH_ISA_DEFAULT == 0 -+#define MULTILIB_ISA_DEFAULT "loongarch64" -+#endif -+#endif -+ -+#ifndef LARCH_ABI_DEFAULT -+#define LARCH_ABI_DEFAULT ABILP32 -+#endif -+ -+/* Use the most portable ABI flag for the ASM specs. */ -+ -+#if LARCH_ABI_DEFAULT == ABILP32 -+#define MULTILIB_ABI_DEFAULT "mabi=lp32" -+#elif LARCH_ABI_DEFAULT == ABILP64 -+#define MULTILIB_ABI_DEFAULT "mabi=lp64" -+#endif -+ -+#ifndef MULTILIB_DEFAULTS -+#define MULTILIB_DEFAULTS \ -+ {MULTILIB_ISA_DEFAULT, MULTILIB_ABI_DEFAULT } -+#endif -+ -+/* A spec condition that matches all -loongarch arguments. */ -+ -+#define LARCH_ISA_LEVEL_OPTION_SPEC \ -+ "loongarch" -+ -+/* A spec condition that matches all architecture arguments. */ -+ -+#define LARCH_ARCH_OPTION_SPEC \ -+ LARCH_ISA_LEVEL_OPTION_SPEC "|march=*" -+ -+/* A spec that infers a -loongarch argument from an -march argument. */ -+ -+#define LARCH_ISA_LEVEL_SPEC \ -+ "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;:}" -+ -+/* A spec that injects the default multilib ISA if no architecture is -+ specified. */ -+ -+#define LARCH_DEFAULT_ISA_LEVEL_SPEC \ -+ "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;: \ -+ %{!march=*: -" MULTILIB_ISA_DEFAULT "}}" -+ -+/* A spec that infers a -mhard-float or -msoft-float setting from an -+ -march argument. Note that soft-float and hard-float code are not -+ link-compatible. */ -+ -+#define LARCH_ARCH_FLOAT_SPEC \ -+ "%{mhard-float|msoft-float|mno-float|march=loongarch*:; \ -+ march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \ -+ |march=34kc|march=34kn|march=74kc|march=1004kc|march=5kc \ -+ |march=m14k*|march=m5101|march=octeon|march=xlr: -msoft-float; \ -+ march=*: -mhard-float}" -+ -+/* A spec condition that matches 32-bit options. It only works if -+ LARCH_ISA_LEVEL_SPEC has been applied. */ -+ -+#define LARCH_32BIT_OPTION_SPEC \ -+ "loongarch1|loongarch2|loongarch32*|mgp32" -+ -+#if (LARCH_ABI_DEFAULT == ABILPX32 \ -+ || LARCH_ABI_DEFAULT == ABILP64) -+#define OPT_ARCH64 "mabi=32|mgp32:;" -+#define OPT_ARCH32 "mabi=32|mgp32" -+#else -+#define OPT_ARCH64 "mabi=o64|mabi=n32|mabi=64|mgp64" -+#define OPT_ARCH32 "mabi=o64|mabi=n32|mabi=64|mgp64:;" -+#endif -+ -+/* Support for a compile-time default CPU, et cetera. The rules are: -+ --with-arch is ignored if -march is specified or a -loongarch is specified -+ ; likewise --with-arch-32 and --with-arch-64. 
-+ --with-tune is ignored if -mtune is specified; likewise -+ --with-tune-32 and --with-tune-64. -+ --with-abi is ignored if -mabi is specified. -+ --with-float is ignored if -mhard-float or -msoft-float are -+ specified. -+ --with-fpu is ignored if -msoft-float, -msingle-float or -mdouble-float are -+ specified. -+ --with-fp-32 is ignored if -msoft-float, -msingle-float, -mlsx or -mfp are -+ specified. -+ --with-divide is ignored if -mdivide-traps or -mdivide-breaks are -+ specified. */ -+#define OPTION_DEFAULT_SPECS \ -+ {"arch", "%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \ -+ {"arch_32", "%{" OPT_ARCH32 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ -+ {"arch_64", "%{" OPT_ARCH64 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ -+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ -+ {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ -+ {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ -+ {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \ -+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \ -+ {"fpu", "%{!msoft-float:%{!msingle-float:%{!mdouble-float:-m%(VALUE)-float}}}" }, \ -+ {"fp_32", "%{" OPT_ARCH32 \ -+ ":%{!msoft-float:%{!msingle-float:%{!mfp*:%{!mlsx:%{!mloongson-asx:-mfp%(VALUE)}}}}}" }, \ -+ {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" } -+ -+/* A spec that infers the: -+ -mlsx setting from a -march=la464 argument. -+ -mlasx setting from a -march=la464 argument. */ -+#define BASE_DRIVER_SELF_SPECS \ -+ LARCH_ASE_LSX_SPEC \ -+ LARCH_ASE_LASX_SPEC -+ -+#define LARCH_ASE_LSX_SPEC \ -+ "%{!mno-lsx: \ -+ %{march=la464: -mlsx}}" -+ -+#define LARCH_ASE_LASX_SPEC \ -+ "%{!mno-lasx: \ -+ %{march=la464: -mlasx}}" -+ -+#define DRIVER_SELF_SPECS \ -+ BASE_DRIVER_SELF_SPECS -+ -+/* from N_LARCH */ -+#define ABI_SPEC \ -+ "%{mabi=lp32:32}" \ -+ "%{mabi=lp64:64}" \ -+ -+#define STARTFILE_PREFIX_SPEC \ -+ "/lib" ABI_SPEC "/ " \ -+ "/usr/lib" ABI_SPEC "/ " \ -+ "/lib/ " \ -+ "/usr/lib/ " -+ -+/* This definition replaces the formerly used 'm' constraint with a -+ different constraint letter in order to avoid changing semantics of -+ the 'm' constraint when accepting new address formats in -+ TARGET_LEGITIMATE_ADDRESS_P. The constraint letter defined here -+ must not be used in insn definitions or inline assemblies. */ -+#define TARGET_MEM_CONSTRAINT 'w' -+ -+/* True if the file format uses 64-bit symbols. At present, this is -+ only true for n64, which uses 64-bit ELF. */ -+#define FILE_HAS_64BIT_SYMBOLS (loongarch_abi == ABILP64) -+ -+/* True if symbols are 64 bits wide. This is usually determined by -+ the ABI's file format, but it can be overridden by -msym32. Note that -+ overriding the size with -msym32 changes the ABI of relocatable objects, -+ although it doesn't change the ABI of a fully-linked object. */ -+#define ABI_HAS_64BIT_SYMBOLS (FILE_HAS_64BIT_SYMBOLS \ -+ && Pmode == DImode) -+ -+/* ISA supports instructions DMUL, DMULU, DMUH, DMUHU. */ -+#define ISA_HAS_DMUL (TARGET_64BIT) -+ -+/* ISA has floating-point RECIP.fmt and RSQRT.fmt instructions. The -+ LARCH64 rev. 1 ISA says that RECIP.D and RSQRT.D are unpredictable when -+ doubles are stored in pairs of FPRs, so for safety's sake, we apply -+ this restriction to the LARCH IV ISA too. */ -+#define ISA_HAS_FP_RECIP_RSQRT(MODE) \ -+ ((MODE) == SFmode \ -+ || (TARGET_FLOAT64 \ -+ && (MODE) == DFmode)) -+ -+/* The LSX ASE is available. */ -+#define ISA_HAS_LSX (TARGET_LSX) -+ -+/* The LASX ASE is available. 
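The LARCH_ASE_LSX_SPEC / LARCH_ASE_LASX_SPEC strings above make -march=la464 imply -mlsx and -mlasx unless the user passed an explicit -mno-lsx / -mno-lasx. Restated as plain logic (sketch only; the real mechanism is the driver's spec language, and the function name here is invented):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* -march=la464 enables the vector extension unless explicitly refused.  */
static bool
ase_enabled (const char *march, bool user_said_no)
{
  if (user_said_no)
    return false;
  return march != NULL && strcmp (march, "la464") == 0;
}

int
main (void)
{
  printf ("%d %d %d\n",
          ase_enabled ("la464", false),        /* 1 */
          ase_enabled ("la464", true),         /* 0 */
          ase_enabled ("loongarch64", false)); /* 0 */
  return 0;
}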
*/ -+#define ISA_HAS_LASX (TARGET_LASX) -+ -+/* Tell collect what flags to pass to nm. */ -+#ifndef NM_FLAGS -+#define NM_FLAGS "-Bn" -+#endif -+ -+ -+/* SUBTARGET_ASM_DEBUGGING_SPEC handles passing debugging options to -+ the assembler. It may be overridden by subtargets. -+ -+ Beginning with gas 2.13, -mdebug must be passed to correctly handle -+ COFF debugging info. */ -+ -+#ifndef SUBTARGET_ASM_DEBUGGING_SPEC -+#define SUBTARGET_ASM_DEBUGGING_SPEC "\ -+%{g} %{g0} %{g1} %{g2} %{g3} \ -+%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \ -+%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \ -+%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3}" -+#endif -+ -+/* FP_ASM_SPEC represents the floating-point options that must be passed -+ to the assembler when FPXX support exists. Prior to that point the -+ assembler could accept the options but were not required for -+ correctness. We only add the options when absolutely necessary -+ because passing -msoft-float to the assembler will cause it to reject -+ all hard-float instructions which may require some user code to be -+ updated. */ -+ -+#ifdef HAVE_AS_DOT_MODULE -+#define FP_ASM_SPEC "\ -+%{mhard-float} %{msoft-float} \ -+%{msingle-float} %{mdouble-float}" -+#else -+#define FP_ASM_SPEC -+#endif -+ -+/* SUBTARGET_ASM_SPEC is always passed to the assembler. It may be -+ overridden by subtargets. */ -+ -+#ifndef SUBTARGET_ASM_SPEC -+#define SUBTARGET_ASM_SPEC "" -+#endif -+ -+#undef ASM_SPEC -+#define ASM_SPEC "\ -+%{mabi=*} %{!mabi=*: %(asm_abi_default_spec)} \ -+" -+/* Extra switches sometimes passed to the linker. */ -+ -+#ifndef LINK_SPEC -+#define LINK_SPEC "" -+#endif /* LINK_SPEC defined */ -+ -+ -+/* Specs for the compiler proper */ -+ -+/* SUBTARGET_CC1_SPEC is passed to the compiler proper. It may be -+ overridden by subtargets. */ -+#ifndef SUBTARGET_CC1_SPEC -+#define SUBTARGET_CC1_SPEC "" -+#endif -+ -+/* CC1_SPEC is the set of arguments to pass to the compiler proper. */ -+ -+#undef CC1_SPEC -+#define CC1_SPEC "\ -+%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}} \ -+%(subtarget_cc1_spec)" -+ -+/* Preprocessor specs. */ -+ -+/* SUBTARGET_CPP_SPEC is passed to the preprocessor. It may be -+ overridden by subtargets. */ -+#ifndef SUBTARGET_CPP_SPEC -+#define SUBTARGET_CPP_SPEC "" -+#endif -+ -+#define CPP_SPEC "%(subtarget_cpp_spec)" -+ -+/* This macro defines names of additional specifications to put in the specs -+ that can be used in various specifications like CC1_SPEC. Its definition -+ is an initializer with a subgrouping for each command option. -+ -+ Each subgrouping contains a string constant, that defines the -+ specification name, and a string constant that used by the GCC driver -+ program. -+ -+ Do not define this macro if it does not need to do anything. 
*/ -+ -+#define EXTRA_SPECS \ -+ { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC }, \ -+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \ -+ { "subtarget_asm_debugging_spec", SUBTARGET_ASM_DEBUGGING_SPEC }, \ -+ { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \ -+ { "asm_abi_default_spec", "-" MULTILIB_ABI_DEFAULT }, \ -+ SUBTARGET_EXTRA_SPECS -+ -+#ifndef SUBTARGET_EXTRA_SPECS -+#define SUBTARGET_EXTRA_SPECS -+#endif -+ -+#define DBX_DEBUGGING_INFO 1 /* generate stabs (OSF/rose) */ -+#define DWARF2_DEBUGGING_INFO 1 /* dwarf2 debugging info */ -+ -+#ifndef PREFERRED_DEBUGGING_TYPE -+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG -+#endif -+ -+/* The size of DWARF addresses should be the same as the size of symbols -+ in the target file format. They shouldn't depend on things like -msym32, -+ because many DWARF consumers do not allow the mixture of address sizes -+ that one would then get from linking -msym32 code with -msym64 code. -+*/ -+#define DWARF2_ADDR_SIZE (FILE_HAS_64BIT_SYMBOLS ? 8 : 4) -+ -+/* By default, turn on GDB extensions. */ -+#define DEFAULT_GDB_EXTENSIONS 1 -+ -+/* Registers may have a prefix which can be ignored when matching -+ user asm and register definitions. */ -+#ifndef REGISTER_PREFIX -+#define REGISTER_PREFIX "$" -+#endif -+ -+/* Local compiler-generated symbols must have a prefix that the assembler -+ understands. By default, this is $, although some targets (e.g., -+ NetBSD-ELF) need to override this. */ -+ -+#ifndef LOCAL_LABEL_PREFIX -+#define LOCAL_LABEL_PREFIX "$" -+#endif -+ -+/* By default on the loongarch, external symbols do not have an underscore -+ prepended, but some targets (e.g., NetBSD) require this. */ -+ -+#ifndef USER_LABEL_PREFIX -+#define USER_LABEL_PREFIX "" -+#endif -+ -+/* On Sun 4, this limit is 2048. We use 1500 to be safe, -+ since the length can run past this up to a continuation point. */ -+#undef DBX_CONTIN_LENGTH -+#define DBX_CONTIN_LENGTH 1500 -+ -+/* How to renumber registers for dbx and gdb. */ -+#define DBX_REGISTER_NUMBER(REGNO) loongarch_dbx_regno[REGNO] -+ -+/* The mapping from gcc register number to DWARF 2 CFA column number. */ -+#define DWARF_FRAME_REGNUM(REGNO) loongarch_dwarf_regno[REGNO] -+ -+/* The DWARF 2 CFA column which tracks the return address. */ -+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM -+ -+/* Before the prologue, RA lives in r1. */ -+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM) -+ -+/* Describe how we implement __builtin_eh_return. */ -+#define EH_RETURN_DATA_REGNO(N) \ -+ ((N) < (4) ? (N) + GP_ARG_FIRST : INVALID_REGNUM) -+ -+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) -+ -+#define EH_USES(N) loongarch_eh_uses (N) -+ -+/* Offsets recorded in opcodes are a multiple of this alignment factor. -+ The default for this in 64-bit mode is 8, which causes problems with -+ SFmode register saves. */ -+#define DWARF_CIE_DATA_ALIGNMENT -4 -+ -+/* Correct the offset of automatic variables and arguments. Note that -+ the LARCH debug format wants all automatic variables and arguments -+ to be in terms of the virtual frame pointer (stack pointer before -+ any adjustment in the function), while the LARCH 3.0 linker wants -+ the frame pointer to be the stack pointer after the initial -+ adjustment. 
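The EH_RETURN_DATA_REGNO mapping defined above places the first four exception-handling data values in consecutive argument registers and rejects anything else. A standalone restatement, assuming GP_ARG_FIRST is 4, the first argument GPR (it is defined elsewhere in this header):

#include <stdio.h>

#define GP_ARG_FIRST 4          /* assumed value of the port's macro */
#define INVALID_REGNUM (~0U)

static unsigned
eh_return_data_regno (unsigned n)
{
  return n < 4 ? n + GP_ARG_FIRST : INVALID_REGNUM;
}

int
main (void)
{
  for (unsigned n = 0; n < 5; n++)
    printf ("EH data %u -> regno %u\n", n, eh_return_data_regno (n));
  return 0;
}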
*/ -+ -+#define DEBUGGER_AUTO_OFFSET(X) \ -+ loongarch_debugger_offset (X, (HOST_WIDE_INT) 0) -+#define DEBUGGER_ARG_OFFSET(OFFSET, X) \ -+ loongarch_debugger_offset (X, (HOST_WIDE_INT) OFFSET) -+ -+/* Target machine storage layout */ -+ -+#define BITS_BIG_ENDIAN 0 -+#define BYTES_BIG_ENDIAN 0 -+#define WORDS_BIG_ENDIAN 0 -+ -+#define MAX_BITS_PER_WORD 64 -+ -+/* Width of a word, in units (bytes). */ -+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4) -+#ifndef IN_LIBGCC2 -+#define MIN_UNITS_PER_WORD 4 -+#endif -+ -+/* Width of a LSX vector register in bytes. */ -+#define UNITS_PER_LSX_REG 16 -+/* Width of a LSX vector register in bits. */ -+#define BITS_PER_LSX_REG (UNITS_PER_LSX_REG * BITS_PER_UNIT) -+ -+/* Width of a LASX vector register in bytes. */ -+#define UNITS_PER_LASX_REG 32 -+/* Width of a LASX vector register in bits. */ -+#define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT) -+ -+/* For LARCH, width of a floating point register. */ -+#define UNITS_PER_FPREG (TARGET_FLOAT64 ? 8 : 4) -+ -+/* The number of consecutive floating-point registers needed to store the -+ largest format supported by the FPU. */ -+#define MAX_FPRS_PER_FMT (TARGET_FLOAT64 || TARGET_SINGLE_FLOAT ? 1 : 2) -+ -+/* The number of consecutive floating-point registers needed to store the -+ smallest format supported by the FPU. */ -+#define MIN_FPRS_PER_FMT 1 -+ -+/* The largest size of value that can be held in floating-point -+ registers and moved with a single instruction. */ -+#define UNITS_PER_HWFPVALUE \ -+ (TARGET_SOFT_FLOAT_ABI ? 0 : MAX_FPRS_PER_FMT * UNITS_PER_FPREG) -+ -+/* The largest size of value that can be held in floating-point -+ registers. */ -+#define UNITS_PER_FPVALUE \ -+ (TARGET_SOFT_FLOAT_ABI ? 0 \ -+ : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \ -+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) -+ -+/* The number of bytes in a double. */ -+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT) -+ -+/* Set the sizes of the core types. */ -+#define SHORT_TYPE_SIZE 16 -+#define INT_TYPE_SIZE 32 -+#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32) -+#define LONG_LONG_TYPE_SIZE 64 -+ -+#define FLOAT_TYPE_SIZE 32 -+#define DOUBLE_TYPE_SIZE 64 -+#define LONG_DOUBLE_TYPE_SIZE (TARGET_NEWABI ? 128 : 64) -+ -+/* Define the sizes of fixed-point types. */ -+#define SHORT_FRACT_TYPE_SIZE 8 -+#define FRACT_TYPE_SIZE 16 -+#define LONG_FRACT_TYPE_SIZE 32 -+#define LONG_LONG_FRACT_TYPE_SIZE 64 -+ -+#define SHORT_ACCUM_TYPE_SIZE 16 -+#define ACCUM_TYPE_SIZE 32 -+#define LONG_ACCUM_TYPE_SIZE 64 -+/* FIXME. LONG_LONG_ACCUM_TYPE_SIZE should be 128 bits, but GCC -+ doesn't support 128-bit integers for LARCH32 currently. */ -+#define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64) -+ -+/* long double is not a fixed mode, but the idea is that, if we -+ support long double, we also want a 128-bit integer type. */ -+#define MAX_FIXED_MODE_SIZE LONG_DOUBLE_TYPE_SIZE -+ -+/* Width in bits of a pointer. */ -+#ifndef POINTER_SIZE -+#define POINTER_SIZE ((TARGET_64BIT) ? 64 : 32) -+#endif -+ -+/* Allocation boundary (in *bits*) for storing arguments in argument list. */ -+#define PARM_BOUNDARY BITS_PER_WORD -+ -+/* Allocation boundary (in *bits*) for the code of a function. */ -+#define FUNCTION_BOUNDARY 32 -+ -+/* Alignment of field after `int : 0' in a structure. */ -+#define EMPTY_FIELD_BOUNDARY 32 -+ -+/* Every structure's size must be a multiple of this. */ -+/* 8 is observed right on a DECstation and on riscos 4.02. 
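The type-size macros above describe an LP64 model for the 64-bit ABI: int stays 32 bits while long and pointers widen to 64. Compiling this check on any LP64 target shows the same layout:

#include <stdio.h>

int
main (void)
{
  printf ("short=%zu int=%zu long=%zu long long=%zu ptr=%zu\n",
          sizeof (short), sizeof (int), sizeof (long),
          sizeof (long long), sizeof (void *));
  /* Expected on LP64: short=2 int=4 long=8 long long=8 ptr=8 */
  return 0;
}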
*/ -+#define STRUCTURE_SIZE_BOUNDARY 8 -+ -+/* There is no point aligning anything to a rounder boundary than -+ LONG_DOUBLE_TYPE_SIZE, unless under LSX/LASX the bigggest alignment is -+ BITS_PER_LSX_REG/BITS_PER_LASX_REG/.. */ -+#define BIGGEST_ALIGNMENT \ -+ (ISA_HAS_LASX? BITS_PER_LASX_REG : (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE)) -+ -+/* All accesses must be aligned. */ -+#define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN) -+ -+/* Define this if you wish to imitate the way many other C compilers -+ handle alignment of bitfields and the structures that contain -+ them. -+ -+ The behavior is that the type written for a bit-field (`int', -+ `short', or other integer type) imposes an alignment for the -+ entire structure, as if the structure really did contain an -+ ordinary field of that type. In addition, the bit-field is placed -+ within the structure so that it would fit within such a field, -+ not crossing a boundary for it. -+ -+ Thus, on most machines, a bit-field whose type is written as `int' -+ would not cross a four-byte boundary, and would force four-byte -+ alignment for the whole structure. (The alignment used may not -+ be four bytes; it is controlled by the other alignment -+ parameters.) -+ -+ If the macro is defined, its definition should be a C expression; -+ a nonzero value for the expression enables this behavior. */ -+ -+#define PCC_BITFIELD_TYPE_MATTERS 1 -+ -+/* If defined, a C expression to compute the alignment for a static -+ variable. TYPE is the data type, and ALIGN is the alignment that -+ the object would ordinarily have. The value of this macro is used -+ instead of that alignment to align the object. -+ -+ If this macro is not defined, then ALIGN is used. -+ -+ One use of this macro is to increase alignment of medium-size -+ data to make it all fit in fewer cache lines. Another is to -+ cause character arrays to be word-aligned so that `strcpy' calls -+ that copy constants to character arrays can be done inline. */ -+ -+#undef DATA_ALIGNMENT -+#define DATA_ALIGNMENT(TYPE, ALIGN) \ -+ ((((ALIGN) < BITS_PER_WORD) \ -+ && (TREE_CODE (TYPE) == ARRAY_TYPE \ -+ || TREE_CODE (TYPE) == UNION_TYPE \ -+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN)) -+ -+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause -+ character arrays to be word-aligned so that `strcpy' calls that copy -+ constants to character arrays can be done inline, and 'strcmp' can be -+ optimised to use word loads. */ -+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ -+ DATA_ALIGNMENT (TYPE, ALIGN) -+ -+#define PAD_VARARGS_DOWN \ -+ (targetm.calls.function_arg_padding (TYPE_MODE (type), type) == PAD_DOWNWARD) -+ -+/* Define if operations between registers always perform the operation -+ on the full register even if a narrower mode is specified. */ -+#define WORD_REGISTER_OPERATIONS 1 -+ -+/* When in 64-bit mode, move insns will sign extend SImode and CCmode -+ moves. All other references are zero extended. */ -+#define LOAD_EXTEND_OP(MODE) \ -+ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \ -+ ? SIGN_EXTEND : ZERO_EXTEND) -+ -+/* Define this macro if it is advisable to hold scalars in registers -+ in a wider mode than that declared by the program. In such cases, -+ the value is constrained to be within the bounds of the declared -+ type, but kept valid in the wider mode. The signedness of the -+ extension may differ from that of the type. 
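LOAD_EXTEND_OP above says SImode loads are kept sign-extended in 64-bit registers, so the high word always replicates bit 31. The same widening in plain C:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  int32_t narrow = (int32_t) 0x80000000;  /* sign bit set */
  int64_t widened = narrow;               /* sign-extends, as SImode loads do */
  printf ("%#llx\n", (unsigned long long) (uint64_t) widened);
  /* prints 0xffffffff80000000 */
  return 0;
}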
*/ -+ -+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ -+ if (GET_MODE_CLASS (MODE) == MODE_INT \ -+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ -+ { \ -+ if ((MODE) == SImode) \ -+ (UNSIGNEDP) = 0; \ -+ (MODE) = Pmode; \ -+ } -+ -+/* Pmode is always the same as ptr_mode, but not always the same as word_mode. -+ Extensions of pointers to word_mode must be signed. */ -+#define POINTERS_EXTEND_UNSIGNED false -+ -+/* Define if loading short immediate values into registers sign extends. */ -+#define SHORT_IMMEDIATES_SIGN_EXTEND 1 -+ -+/* The [d]clz instructions have the natural values at 0. */ -+ -+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ -+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2) -+ -+/* Standard register usage. */ -+ -+/* Number of hardware registers. We have: -+ -+ - 32 integer registers -+ - 32 floating point registers -+ - 8 condition code registers -+ - 2 fake registers: -+ - ARG_POINTER_REGNUM -+ - FRAME_POINTER_REGNUM -+*/ -+ -+#define FIRST_PSEUDO_REGISTER 74 -+ -+/* By default, fix the kernel registers ($26 and $27), the global -+ pointer ($28) and the stack pointer ($29). This can change -+ depending on the command-line options. -+ -+ Regarding coprocessor registers: without evidence to the contrary, -+ it's best to assume that each coprocessor register has a unique -+ use. This can be overridden, in, e.g., loongarch_option_override or -+ TARGET_CONDITIONAL_REGISTER_USAGE should the assumption be -+ inappropriate for a particular target. */ -+ -+#define FIXED_REGISTERS \ -+{ \ -+ 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1} -+ -+ -+/* Set up this array for o32 by default. -+ -+ Note that we don't mark $31 as a call-clobbered register. The idea is -+ that it's really the call instructions themselves which clobber $31. -+ We don't care what the called function does with it afterwards. -+ -+ This approach makes it easier to implement sibcalls. Unlike normal -+ calls, sibcalls don't clobber $31, so the register reaches the -+ called function in tact. EPILOGUE_USES says that $31 is useful -+ to the called function. */ -+ -+#define CALL_USED_REGISTERS \ -+{ \ -+ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ -+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ -+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} -+ -+/* Internal macros to classify a register number as to whether it's a -+ general purpose register, a floating point register, a -+ multiply/divide register, or a status register. */ -+ -+#define GP_REG_FIRST 0 -+#define GP_REG_LAST 31 -+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1) -+#define GP_DBX_FIRST 0 -+ -+#define FP_REG_FIRST 32 -+#define FP_REG_LAST 63 -+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) -+#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32) -+ -+#define LSX_REG_FIRST FP_REG_FIRST -+#define LSX_REG_LAST FP_REG_LAST -+#define LSX_REG_NUM FP_REG_NUM -+ -+#define LASX_REG_FIRST FP_REG_FIRST -+#define LASX_REG_LAST FP_REG_LAST -+#define LASX_REG_NUM FP_REG_NUM -+ -+/* The DWARF 2 CFA column which tracks the return address from a -+ signal handler context. This means that to maintain backwards -+ compatibility, no hard register can be assigned this column if it -+ would need to be handled by the DWARF unwinder. 
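The register predicates just below (GP_REG_P, FP_REG_P, and friends) all use the same idiom: a single unsigned comparison covers both range bounds, because subtracting the first register number makes any out-of-range value wrap around to a huge unsigned one. A standalone demonstration with the FP range from above (registers 32 through 63):

#include <stdio.h>

#define FP_REG_FIRST 32
#define FP_REG_NUM   32

/* One "< NUM" test checks both the lower and the upper bound.  */
#define IN_RANGE_P(r, first, num) \
  ((unsigned int) ((int) (r) - (first)) < (unsigned int) (num))

int
main (void)
{
  printf ("%d %d %d\n",
          IN_RANGE_P (31, FP_REG_FIRST, FP_REG_NUM),   /* 0: a GPR */
          IN_RANGE_P (32, FP_REG_FIRST, FP_REG_NUM),   /* 1: $f0 */
          IN_RANGE_P (63, FP_REG_FIRST, FP_REG_NUM));  /* 1: $f31 */
  return 0;
}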
*/ -+#define DWARF_ALT_FRAME_RETURN_COLUMN 72 -+ -+#define ST_REG_FIRST 64 -+#define ST_REG_LAST 71 -+#define ST_REG_NUM (ST_REG_LAST - ST_REG_FIRST + 1) -+ -+#define GP_REG_P(REGNO) \ -+ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM) -+#define M16_REG_P(REGNO) \ -+ (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 16 || (REGNO) == 17) -+#define M16STORE_REG_P(REGNO) \ -+ (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 0 || (REGNO) == 17) -+#define FP_REG_P(REGNO) \ -+ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM) -+#define ST_REG_P(REGNO) \ -+ ((unsigned int) ((int) (REGNO) - ST_REG_FIRST) < ST_REG_NUM) -+#define LSX_REG_P(REGNO) \ -+ ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM) -+#define LASX_REG_P(REGNO) \ -+ ((unsigned int) ((int) (REGNO) - LASX_REG_FIRST) < LASX_REG_NUM) -+ -+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X))) -+#define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X))) -+#define LASX_REG_RTX_P(X) (REG_P (X) && LASX_REG_P (REGNO (X))) -+ -+ -+#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \ -+ loongarch_hard_regno_rename_ok (OLD_REG, NEW_REG) -+ -+/* Select a register mode required for caller save of hard regno REGNO. */ -+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ -+ loongarch_hard_regno_caller_save_mode (REGNO, NREGS, MODE) -+ -+/* Register to use for pushing function arguments. */ -+#define STACK_POINTER_REGNUM (GP_REG_FIRST + 3) -+ -+/* These two registers don't really exist: they get eliminated to either -+ the stack or hard frame pointer. */ -+#define ARG_POINTER_REGNUM 72 -+#define FRAME_POINTER_REGNUM 73 -+ -+#define HARD_FRAME_POINTER_REGNUM \ -+ (GP_REG_FIRST + 22) -+ -+/* FIXME: */ -+/* #define HARD_FRAME_POINTER_IS_FRAME_POINTER (HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM) */ -+/* #define HARD_FRAME_POINTER_IS_ARG_POINTER (HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM) */ -+ -+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0 -+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0 -+ -+/* FIXME: */ -+/* Register in which static-chain is passed to a function. */ -+#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 20) /* $t8 */ -+ -+#define LARCH_PROLOGUE_TEMP_REGNUM \ -+ (GP_REG_FIRST + 13) -+#define LARCH_PROLOGUE_TEMP2_REGNUM \ -+ (GP_REG_FIRST + 12) -+#define LARCH_PROLOGUE_TEMP3_REGNUM \ -+ (GP_REG_FIRST + 14) -+#define LARCH_EPILOGUE_TEMP_REGNUM \ -+ (GP_REG_FIRST + (12)) -+ -+#define LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP_REGNUM) -+#define LARCH_PROLOGUE_TEMP2(MODE) \ -+ gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP2_REGNUM) -+#define LARCH_PROLOGUE_TEMP3(MODE) \ -+ gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP3_REGNUM) -+#define LARCH_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_EPILOGUE_TEMP_REGNUM) -+ -+/* Define this macro if it is as good or better to call a constant -+ function address than to call an address kept in a register. */ -+#define NO_FUNCTION_CSE 1 -+ -+#define THREAD_POINTER_REGNUM (GP_REG_FIRST + 2) -+ -+ -+/* Define the classes of registers for register constraints in the -+ machine description. Also define ranges of constants. -+ -+ One of the classes must always be named ALL_REGS and include all hard regs. -+ If there is more than one class, another class must be named NO_REGS -+ and contain no registers. -+ -+ The name GENERAL_REGS must be the name of a class (or an alias for -+ another name such as ALL_REGS). This is the class of registers -+ that is allowed by "g" or "r" in a register constraint. 
-+ Also, registers outside this class are allocated only when
-+ instructions express preferences for them.
-+
-+ The classes must be numbered in nondecreasing order; that is,
-+ a larger-numbered class must never be contained completely
-+ in a smaller-numbered class.
-+
-+ For any two classes, it is very desirable that there be another
-+ class that represents their union. */
-+
-+enum reg_class
-+{
-+ NO_REGS, /* no registers in set */
-+ SIBCALL_REGS, /* registers that may hold a sibcall target */
-+ JALR_REGS, /* registers that may hold an indirect call target */
-+ GR_REGS, /* integer registers */
-+ CSR_REGS, /* integer registers except $r0 and $r1, for CSR instructions */
-+ FP_REGS, /* floating point registers */
-+ ST_REGS, /* status registers (fp status) */
-+ FRAME_REGS, /* arg pointer and frame pointer */
-+ ALL_REGS, /* all registers */
-+ LIM_REG_CLASSES /* max value + 1 */
-+};
-+
-+#define N_REG_CLASSES (int) LIM_REG_CLASSES
-+
-+#define GENERAL_REGS GR_REGS
-+
-+/* An initializer containing the names of the register classes as C
-+ string constants. These names are used in writing some of the
-+ debugging dumps. */
-+
-+#define REG_CLASS_NAMES \
-+{ \
-+ "NO_REGS", \
-+ "SIBCALL_REGS", \
-+ "JALR_REGS", \
-+ "GR_REGS", \
-+ "CSR_REGS", \
-+ "FP_REGS", \
-+ "ST_REGS", \
-+ "FRAME_REGS", \
-+ "ALL_REGS" \
-+}
-+
-+/* An initializer containing the contents of the register classes,
-+ as integers which are bit masks. The Nth integer specifies the
-+ contents of class N. The way the integer MASK is interpreted is
-+ that register R is in the class if `MASK & (1 << R)' is 1.
-+
-+ When the machine has more than 32 registers, an integer does not
-+ suffice. Then the integers are replaced by sub-initializers,
-+ braced groupings containing several integers. Each
-+ sub-initializer must be suitable as an initializer for the type
-+ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
-+
-+#define REG_CLASS_CONTENTS \
-+{ \
-+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
-+ { 0x001ff000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \
-+ { 0xff9ffff0, 0x00000000, 0x00000000 }, /* JALR_REGS */ \
-+ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
-+ { 0xfffffffc, 0x00000000, 0x00000000 }, /* CSR_REGS */ \
-+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
-+ { 0x00000000, 0x00000000, 0x000000ff }, /* ST_REGS */ \
-+ { 0x00000000, 0x00000000, 0x00000300 }, /* FRAME_REGS */ \
-+ { 0xffffffff, 0xffffffff, 0x000003ff } /* ALL_REGS */ \
-+}
-+
-+
-+/* A C expression whose value is a register class containing hard
-+ register REGNO. In general there is more than one such class;
-+ choose a class which is "minimal", meaning that no smaller class
-+ also contains the register. */
-+
-+#define REGNO_REG_CLASS(REGNO) loongarch_regno_to_class[ (REGNO) ]
-+
-+/* A macro whose definition is the name of the class to which a
-+ valid base register must belong. A base register is one used in
-+ an address which is the register value plus a displacement. */
-+
-+#define BASE_REG_CLASS (GR_REGS)
-+
-+/* A macro whose definition is the name of the class to which a
-+ valid index register must belong. An index register is one used
-+ in an address where its value is either multiplied by a scale
-+ factor or added to another register (as well as added to a
-+ displacement). */
-+
-+#define INDEX_REG_CLASS NO_REGS
-+
-+/* We generally want to put call-clobbered registers ahead of
-+ call-saved ones. (IRA expects this.) */
-+
-+#define REG_ALLOC_ORDER \
-+{ /* Call-clobbered GPRs. */ \
-+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 4, 5, 6, 7, 8, 9, 10, 11, 1, \
-+ /* The global pointer. This is call-clobbered for o32 and o64 \
-+ abicalls, call-saved for n32 and n64 abicalls, and a program \
-+ invariant otherwise. Putting it between the call-clobbered \
-+ and call-saved registers should cope with all eventualities. */ \
-+ /* Call-saved GPRs. */ \
-+ 23, 24, 25, 26, 27, 28, 29, 30, 31, \
-+ /* GPRs that can never be exposed to the register allocator. */ \
-+ 0, 2, 3, 21, 22, \
-+ /* Call-clobbered FPRs. */ \
-+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
-+ 48, 49, 50, 51, 52, 53, 54, 55, \
-+ /* FPRs that are usually call-saved. The odd ones are actually \
-+ call-clobbered for n32, but listing them ahead of the even \
-+ registers might encourage the register allocator to fragment \
-+ the available FPR pairs. We need paired FPRs to store long \
-+ doubles, so it isn't clear that using a different order \
-+ for n32 would be a win. */ \
-+ 56, 57, 58, 59, 60, 61, 62, 63, \
-+ /* None of the remaining classes have defined call-saved \
-+ registers. */ \
-+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73}
-+
-+/* True if VALUE is an unsigned 6-bit number. */
-+
-+#define UIMM6_OPERAND(VALUE) \
-+ (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0)
-+
-+/* True if VALUE is a signed 10-bit number. */
-+
-+#define IMM10_OPERAND(VALUE) \
-+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400)
-+
-+/* True if VALUE is a signed 12-bit number. */
-+
-+#define IMM12_OPERAND(VALUE) \
-+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000)
-+
-+/* True if VALUE is a signed 13-bit number. */
-+
-+#define IMM13_OPERAND(VALUE) \
-+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x1000 < 0x2000)
-+
-+/* True if VALUE is a signed 16-bit number. */
-+
-+#define IMM16_OPERAND(VALUE) \
-+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x8000 < 0x10000)
-+
-+
-+/* True if VALUE is a signed 12-bit number. */
-+
-+#define SMALL_OPERAND(VALUE) \
-+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000)
-+
-+/* True if VALUE is an unsigned 12-bit number. */
-+
-+#define SMALL_OPERAND_UNSIGNED(VALUE) \
-+ (((VALUE) & ~(unsigned HOST_WIDE_INT) 0xfff) == 0)
-+
-+/* True if VALUE can be loaded into a register using LUI. */
-+
-+#define LUI_OPERAND(VALUE) \
-+ (((VALUE) | 0x7ffff000) == 0x7ffff000 \
-+ || ((VALUE) | 0x7ffff000) + 0x1000 == 0)
-+
-+/* True if VALUE can be loaded into a register using LU32I. */
-+
-+#define LU32I_OPERAND(VALUE) \
-+ ((((VALUE) | 0x7ffff00000000) == 0x7ffff00000000) \
-+ || ((VALUE) | 0x7ffff00000000) + 0x100000000 == 0)
-+
-+/* True if VALUE can be loaded into a register using LU52I. */
-+
-+#define LU52I_OPERAND(VALUE) \
-+ ((((VALUE) | 0xfff0000000000000) == 0xfff0000000000000))
-+
-+/* Return a value X with the low 12 bits clear, and such that
-+ VALUE - X is a signed 12-bit value.
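-+
-+ As a worked example: for VALUE = 0x12345abc, CONST_HIGH_PART is
-+ (0x12345abc + 0x800) & ~0xfff = 0x12346000, and CONST_LOW_PART is
-+ then 0x12345abc - 0x12346000 = -0x544, which is a valid signed
-+ 12-bit value.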
*/ -+ -+#define CONST_HIGH_PART(VALUE) \ -+ (((VALUE) + 0x800) & ~(unsigned HOST_WIDE_INT) 0xfff) -+ -+#define CONST_LOW_PART(VALUE) \ -+ ((VALUE) - CONST_HIGH_PART (VALUE)) -+ -+#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X)) -+#define SMALL_INT_UNSIGNED(X) SMALL_OPERAND_UNSIGNED (INTVAL (X)) -+#define LUI_INT(X) LUI_OPERAND (INTVAL (X)) -+#define LU32I_INT(X) LU32I_OPERAND (INTVAL (X)) -+#define LU52I_INT(X) LU52I_OPERAND (INTVAL (X)) -+#define ULARCH_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) -+#define LARCH_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255)) -+#define LISA_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) -+#define LISA_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) -+ -+/* The HI and LO registers can only be reloaded via the general -+ registers. Condition code registers can only be loaded to the -+ general registers, and from the floating point registers. */ -+ -+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \ -+ loongarch_secondary_reload_class (CLASS, MODE, X, true) -+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \ -+ loongarch_secondary_reload_class (CLASS, MODE, X, false) -+ -+/* Return the maximum number of consecutive registers -+ needed to represent mode MODE in a register of class CLASS. */ -+ -+#define CLASS_MAX_NREGS(CLASS, MODE) loongarch_class_max_nregs (CLASS, MODE) -+ -+/* Stack layout; function entry, exit and calling. */ -+ -+#define STACK_GROWS_DOWNWARD 1 -+ -+#define FRAME_GROWS_DOWNWARD 1 -+ -+#define RETURN_ADDR_RTX loongarch_return_addr -+ -+/* Similarly, don't use the least-significant bit to tell pointers to -+ code from vtable index. */ -+ -+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta -+ -+#define ELIMINABLE_REGS \ -+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ -+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ -+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ -+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM},} -+ -+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ -+ (OFFSET) = loongarch_initial_elimination_offset ((FROM), (TO)) -+ -+/* Allocate stack space for arguments at the beginning of each function. */ -+#define ACCUMULATE_OUTGOING_ARGS 1 -+ -+/* The argument pointer always points to the first argument. */ -+#define FIRST_PARM_OFFSET(FNDECL) 0 -+ -+/* o32 and o64 reserve stack space for all argument registers. */ -+#define REG_PARM_STACK_SPACE(FNDECL) \ -+ (TARGET_OLDABI \ -+ ? (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD) \ -+ : 0) -+ -+/* Define this if it is the responsibility of the caller to -+ allocate the area reserved for arguments passed in registers. -+ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect -+ of this macro is to determine whether the space is included in -+ `crtl->outgoing_args_size'. */ -+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1 -+ -+#define STACK_BOUNDARY (TARGET_NEWABI ? 128 : 64) -+ -+/* Symbolic macros for the registers used to return integer and floating -+ point values. */ -+ -+#define GP_RETURN (GP_REG_FIRST + 4) -+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : (FP_REG_FIRST + 0)) -+ -+#define MAX_ARGS_IN_REGISTERS (TARGET_OLDABI ? 4 : 8) -+ -+/* Symbolic macros for the first/last argument registers. */ -+ -+#define GP_ARG_FIRST (GP_REG_FIRST + 4) -+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) -+#define FP_ARG_FIRST (FP_REG_FIRST + 0) -+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) -+ -+/* True if MODE is vector and supported in a LSX vector register. 
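-+ For example, assuming 128-bit LSX registers (i.e. UNITS_PER_LSX_REG
-+ is 16), this accepts V16QI, V8HI, V4SI, V2DI, V4SF and V2DF.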
*/ -+#define LSX_SUPPORTED_MODE_P(MODE) \ -+ (ISA_HAS_LSX \ -+ && (MODE >= 0 && MODE < NUM_MACHINE_MODES) \ -+ && GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG \ -+ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ -+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) -+ -+#define LASX_SUPPORTED_MODE_P(MODE) \ -+ (ISA_HAS_LASX \ -+ && (MODE >= 0 && MODE < NUM_MACHINE_MODES) \ -+ && (GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG \ -+ ||GET_MODE_SIZE (MODE) == UNITS_PER_LASX_REG) \ -+ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ -+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) -+ -+/* 1 if N is a possible register number for function argument passing. -+ We have no FP argument registers when soft-float. */ -+ -+/* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI. */ -+#define FUNCTION_ARG_REGNO_P(N) \ -+ (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \ -+ || (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST))) -+ -+ -+/* This structure has to cope with two different argument allocation -+ schemes. Most LARCH ABIs view the arguments as a structure, of which -+ the first N words go in registers and the rest go on the stack. If I -+ < N, the Ith word might go in Ith integer argument register or in a -+ floating-point register. For these ABIs, we only need to remember -+ the offset of the current argument into the structure. -+ -+ So for the standard ABIs, the first N words are allocated to integer -+ registers, and loongarch_function_arg decides on an argument-by-argument -+ basis whether that argument should really go in an integer register, -+ or in a floating-point one. */ -+ -+typedef struct loongarch_args { -+ /* Always true for varargs functions. Otherwise true if at least -+ one argument has been passed in an integer register. */ -+ int gp_reg_found; -+ -+ /* The number of arguments seen so far. */ -+ unsigned int arg_number; -+ -+ /* The number of integer registers used so far. This is the number -+ of words that have been added to the argument structure, limited -+ to MAX_ARGS_IN_REGISTERS. */ -+ unsigned int num_gprs; -+ -+ unsigned int num_fprs; -+ -+ /* The number of words passed on the stack. */ -+ unsigned int stack_words; -+ -+ /* On the loongarch16, we need to keep track of which floating point -+ arguments were passed in general registers, but would have been -+ passed in the FP regs if this were a 32-bit function, so that we -+ can move them to the FP regs if we wind up calling a 32-bit -+ function. We record this information in fp_code, encoded in base -+ four. A zero digit means no floating point argument, a one digit -+ means an SFmode argument, and a two digit means a DFmode argument, -+ and a three digit is not used. The low order digit is the first -+ argument. Thus 6 == 1 * 4 + 2 means a DFmode argument followed by -+ an SFmode argument. ??? A more sophisticated approach will be -+ needed if LARCH_ABI != ABILP32. */ -+ int fp_code; -+ -+ /* True if the function has a prototype. */ -+ int prototype; -+} CUMULATIVE_ARGS; -+ -+/* Initialize a variable CUM of type CUMULATIVE_ARGS -+ for a call to a function whose data type is FNTYPE. -+ For a library call, FNTYPE is 0. */ -+ -+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ -+ memset (&(CUM), 0, sizeof (CUM)) -+ -+ -+#define EPILOGUE_USES(REGNO) loongarch_epilogue_uses (REGNO) -+ -+/* Treat LOC as a byte offset from the stack pointer and round it up -+ to the next fully-aligned offset. */ -+#define LARCH_STACK_ALIGN(LOC) \ -+ (TARGET_NEWABI ? 
ROUND_UP ((LOC), 16) : ROUND_UP ((LOC), 8)) -+ -+ -+/* Output assembler code to FILE to increment profiler label # LABELNO -+ for profiling a function entry. */ -+ -+#define MCOUNT_NAME "_mcount" -+ -+/* Emit rtl for profiling. Output assembler code to FILE -+ to call "_mcount" for profiling a function entry. */ -+#define PROFILE_HOOK(LABEL) \ -+ { \ -+ rtx fun, ra; \ -+ ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \ -+ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ -+ emit_library_call (fun, LCT_NORMAL, VOIDmode, ra, Pmode); \ -+ } -+ -+/* All the work done in PROFILE_HOOK, but still required. */ -+#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0) -+ -+ -+/* The profiler preserves all interesting registers, including $31. */ -+#define LARCH_SAVE_REG_FOR_PROFILING_P(REGNO) false -+ -+/* No loongarch port has ever used the profiler counter word, so don't emit it -+ or the label for it. */ -+ -+#define NO_PROFILE_COUNTERS 1 -+ -+/* Define this macro if the code for function profiling should come -+ before the function prologue. Normally, the profiling code comes -+ after. */ -+ -+/* #define PROFILE_BEFORE_PROLOGUE */ -+ -+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, -+ the stack pointer does not matter. The value is tested only in -+ functions that have frame pointers. -+ No definition is equivalent to always zero. */ -+ -+#define EXIT_IGNORE_STACK 1 -+ -+ -+/* Trampolines are a block of code followed by two pointers. */ -+ -+#define TRAMPOLINE_SIZE \ -+ (loongarch_trampoline_code_size () + GET_MODE_SIZE (ptr_mode) * 2) -+ -+/* Forcing a 64-bit alignment for 32-bit targets allows us to load two -+ pointers from a single LUI base. */ -+ -+#define TRAMPOLINE_ALIGNMENT 64 -+ -+/* loongarch_trampoline_init calls this library function to flush -+ program and data caches. */ -+ -+#ifndef CACHE_FLUSH_FUNC -+#define CACHE_FLUSH_FUNC "_flush_cache" -+#endif -+ -+#define LARCH_ICACHE_SYNC(ADDR, SIZE) \ -+ /* Flush both caches. We need to flush the data cache in case \ -+ the system has a write-back cache. */ \ -+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, loongarch_cache_flush_func), \ -+ LCT_NORMAL, VOIDmode, ADDR, Pmode, SIZE, Pmode, \ -+ GEN_INT (3), TYPE_MODE (integer_type_node)) -+ -+ -+/* Addressing modes, and classification of registers for them. */ -+ -+#define REGNO_OK_FOR_INDEX_P(REGNO) 0 -+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ -+ loongarch_regno_mode_ok_for_base_p (REGNO, MODE, 1) -+ -+/* Maximum number of registers that can appear in a valid memory address. */ -+ -+#define MAX_REGS_PER_ADDRESS 1 -+ -+/* Check for constness inline but use loongarch_legitimate_address_p -+ to check whether a constant really is an address. */ -+ -+#define CONSTANT_ADDRESS_P(X) \ -+ (CONSTANT_P (X) && memory_address_p (SImode, X)) -+ -+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means -+ 'the start of the function that this code is output in'. */ -+ -+#define ASM_OUTPUT_LABELREF(FILE,NAME) \ -+ do { \ -+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \ -+ asm_fprintf ((FILE), "%U%s", \ -+ XSTR (XEXP (DECL_RTL (current_function_decl), \ -+ 0), 0)); \ -+ else \ -+ asm_fprintf ((FILE), "%U%s", (NAME)); \ -+ } while (0) -+ -+/* Flag to mark a function decl symbol that requires a long call. */ -+#define SYMBOL_FLAG_LONG_CALL (SYMBOL_FLAG_MACH_DEP << 0) -+#define SYMBOL_REF_LONG_CALL_P(X) \ -+ ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_LONG_CALL) != 0) -+ -+/* This flag marks functions that cannot be lazily bound. 
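-+ Like SYMBOL_FLAG_LONG_CALL above, it is carved out of the
-+ target-specific SYMBOL_FLAG_MACH_DEP bits and is tested with the
-+ corresponding *_P accessor below.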
*/ -+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1) -+#define SYMBOL_REF_BIND_NOW_P(RTX) \ -+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0) -+ -+/* True if we're generating a form of LARCH16 code in which jump tables -+ are stored in the text section and encoded as 16-bit PC-relative -+ offsets. This is only possible when general text loads are allowed, -+ since the table access itself will be an "lh" instruction. If the -+ PC-relative offsets grow too large, 32-bit offsets are used instead. */ -+ -+ -+#define CASE_VECTOR_MODE (ptr_mode) -+ -+/* Only use short offsets if their range will not overflow. */ -+#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) \ -+ (ptr_mode ? HImode : SImode) -+ -+ -+/* Define this as 1 if `char' should by default be signed; else as 0. */ -+#ifndef DEFAULT_SIGNED_CHAR -+#define DEFAULT_SIGNED_CHAR 1 -+#endif -+ -+/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets, -+ we generally don't want to use them for copying arbitrary data. -+ A single N-word move is usually the same cost as N single-word moves. */ -+#define MOVE_MAX UNITS_PER_WORD -+/* We don't modify it for LSX as it is only used by the classic reload. */ -+#define MAX_MOVE_MAX 8 -+ -+/* Define this macro as a C expression which is nonzero if -+ accessing less than a word of memory (i.e. a `char' or a -+ `short') is no faster than accessing a word of memory, i.e., if -+ such access require more than one instruction or if there is no -+ difference in cost between byte and (aligned) word loads. -+ -+ On RISC machines, it tends to generate better code to define -+ this as 1, since it avoids making a QI or HI mode register. -+ -+*/ -+#define SLOW_BYTE_ACCESS (1) -+ -+/* Standard LARCH integer shifts truncate the shift amount to the -+ width of the shifted operand. However, Loongson MMI shifts -+ do not truncate the shift amount at all. */ -+#define SHIFT_COUNT_TRUNCATED (1) -+ -+ -+/* Specify the machine mode that pointers have. -+ After generation of rtl, the compiler makes no further distinction -+ between pointers and any other objects of this machine mode. */ -+ -+#ifndef Pmode -+#define Pmode (TARGET_64BIT ? DImode : SImode) -+#endif -+ -+/* Give call MEMs SImode since it is the "most permissive" mode -+ for both 32-bit and 64-bit targets. */ -+ -+#define FUNCTION_MODE SImode -+ -+ -+/* We allocate $fcc registers by hand and can't cope with moves of -+ CCmode registers to and from pseudos (or memory). */ -+#define AVOID_CCMODE_COPIES -+ -+/* A C expression for the cost of a branch instruction. A value of -+ 1 is the default; other values are interpreted relative to that. */ -+ -+#define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost -+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0 -+ -+/* The LARCH port has several functions that return an instruction count. -+ Multiplying the count by this value gives the number of bytes that -+ the instructions occupy. */ -+#define BASE_INSN_LENGTH (4) -+ -+/* The length of a NOP in bytes. */ -+#define NOP_INSN_LENGTH (4) -+ -+/* If defined, modifies the length assigned to instruction INSN as a -+ function of the context in which it is used. LENGTH is an lvalue -+ that contains the initially computed length of the insn and should -+ be updated with the correct length of the insn. */ -+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \ -+ ((LENGTH) = loongarch_adjust_insn_length ((INSN), (LENGTH))) -+ -+/* Return the asm template for a conditional branch instruction. 
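-+
-+ (As a hypothetical use, LARCH_BRANCH ("beq", "%1,%2,%0") concatenates
-+ the string literals into "beq\t%1,%2,%0"; the operand numbers here
-+ are illustrative only.)
-+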
-+ OPCODE is the opcode's mnemonic and OPERANDS is the asm template for -+ its operands. */ -+#define LARCH_BRANCH(OPCODE, OPERANDS) \ -+ OPCODE "\t" OPERANDS -+ -+#define LARCH_BRANCH_C(OPCODE, OPERANDS) \ -+ OPCODE "%:\t" OPERANDS -+ -+/* Return an asm string that forces INSN to be treated as an absolute -+ J or JAL instruction instead of an assembler macro. */ -+#define LARCH_ABSOLUTE_JUMP(INSN) INSN -+ -+ -+/* Control the assembler format that we output. */ -+ -+/* Output to assembler file text saying following lines -+ may contain character constants, extra white space, comments, etc. */ -+ -+#ifndef ASM_APP_ON -+#define ASM_APP_ON " #APP\n" -+#endif -+ -+/* Output to assembler file text saying following lines -+ no longer contain unusual constructs. */ -+ -+#ifndef ASM_APP_OFF -+#define ASM_APP_OFF " #NO_APP\n" -+#endif -+ -+#define REGISTER_NAMES \ -+{ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", \ -+ "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", \ -+ "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", \ -+ "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", \ -+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \ -+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ -+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \ -+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \ -+ "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4","$fcc5","$fcc6","$fcc7", \ -+ "$arg", "$frame"} -+ -+/* List the "software" names for each register. Also list the numerical -+ names for $fp and $sp. */ -+ -+#define ADDITIONAL_REGISTER_NAMES \ -+{ \ -+ { "zero", 0 + GP_REG_FIRST }, \ -+ { "ra", 1 + GP_REG_FIRST }, \ -+ { "tp", 2 + GP_REG_FIRST }, \ -+ { "sp", 3 + GP_REG_FIRST }, \ -+ { "a0", 4 + GP_REG_FIRST }, \ -+ { "a1", 5 + GP_REG_FIRST }, \ -+ { "a2", 6 + GP_REG_FIRST }, \ -+ { "a3", 7 + GP_REG_FIRST }, \ -+ { "a4", 8 + GP_REG_FIRST }, \ -+ { "a5", 9 + GP_REG_FIRST }, \ -+ { "a6", 10 + GP_REG_FIRST }, \ -+ { "a7", 11 + GP_REG_FIRST }, \ -+ { "t0", 12 + GP_REG_FIRST }, \ -+ { "t1", 13 + GP_REG_FIRST }, \ -+ { "t2", 14 + GP_REG_FIRST }, \ -+ { "t3", 15 + GP_REG_FIRST }, \ -+ { "t4", 16 + GP_REG_FIRST }, \ -+ { "t5", 17 + GP_REG_FIRST }, \ -+ { "t6", 18 + GP_REG_FIRST }, \ -+ { "t7", 19 + GP_REG_FIRST }, \ -+ { "t8", 20 + GP_REG_FIRST }, \ -+ { "x", 21 + GP_REG_FIRST }, \ -+ { "fp", 22 + GP_REG_FIRST }, \ -+ { "s0", 23 + GP_REG_FIRST }, \ -+ { "s1", 24 + GP_REG_FIRST }, \ -+ { "s2", 25 + GP_REG_FIRST }, \ -+ { "s3", 26 + GP_REG_FIRST }, \ -+ { "s4", 27 + GP_REG_FIRST }, \ -+ { "s5", 28 + GP_REG_FIRST }, \ -+ { "s6", 29 + GP_REG_FIRST }, \ -+ { "s7", 30 + GP_REG_FIRST }, \ -+ { "s8", 31 + GP_REG_FIRST }, \ -+ { "v0", 4 + GP_REG_FIRST }, \ -+ { "v1", 5 + GP_REG_FIRST }, \ -+ { "vr0", 0 + FP_REG_FIRST }, \ -+ { "vr1", 1 + FP_REG_FIRST }, \ -+ { "vr2", 2 + FP_REG_FIRST }, \ -+ { "vr3", 3 + FP_REG_FIRST }, \ -+ { "vr4", 4 + FP_REG_FIRST }, \ -+ { "vr5", 5 + FP_REG_FIRST }, \ -+ { "vr6", 6 + FP_REG_FIRST }, \ -+ { "vr7", 7 + FP_REG_FIRST }, \ -+ { "vr8", 8 + FP_REG_FIRST }, \ -+ { "vr9", 9 + FP_REG_FIRST }, \ -+ { "vr10", 10 + FP_REG_FIRST }, \ -+ { "vr11", 11 + FP_REG_FIRST }, \ -+ { "vr12", 12 + FP_REG_FIRST }, \ -+ { "vr13", 13 + FP_REG_FIRST }, \ -+ { "vr14", 14 + FP_REG_FIRST }, \ -+ { "vr15", 15 + FP_REG_FIRST }, \ -+ { "vr16", 16 + FP_REG_FIRST }, \ -+ { "vr17", 17 + FP_REG_FIRST }, \ -+ { "vr18", 18 + FP_REG_FIRST }, \ -+ { "vr19", 19 + FP_REG_FIRST }, \ -+ { "vr20", 20 + FP_REG_FIRST }, \ -+ { "vr21", 21 + 
FP_REG_FIRST }, \ -+ { "vr22", 22 + FP_REG_FIRST }, \ -+ { "vr23", 23 + FP_REG_FIRST }, \ -+ { "vr24", 24 + FP_REG_FIRST }, \ -+ { "vr25", 25 + FP_REG_FIRST }, \ -+ { "vr26", 26 + FP_REG_FIRST }, \ -+ { "vr27", 27 + FP_REG_FIRST }, \ -+ { "vr28", 28 + FP_REG_FIRST }, \ -+ { "vr29", 29 + FP_REG_FIRST }, \ -+ { "vr30", 30 + FP_REG_FIRST }, \ -+ { "vr31", 31 + FP_REG_FIRST }, \ -+ { "xr0", 0 + FP_REG_FIRST }, \ -+ { "xr1", 1 + FP_REG_FIRST }, \ -+ { "xr2", 2 + FP_REG_FIRST }, \ -+ { "xr3", 3 + FP_REG_FIRST }, \ -+ { "xr4", 4 + FP_REG_FIRST }, \ -+ { "xr5", 5 + FP_REG_FIRST }, \ -+ { "xr6", 6 + FP_REG_FIRST }, \ -+ { "xr7", 7 + FP_REG_FIRST }, \ -+ { "xr8", 8 + FP_REG_FIRST }, \ -+ { "xr9", 9 + FP_REG_FIRST }, \ -+ { "xr10", 10 + FP_REG_FIRST }, \ -+ { "xr11", 11 + FP_REG_FIRST }, \ -+ { "xr12", 12 + FP_REG_FIRST }, \ -+ { "xr13", 13 + FP_REG_FIRST }, \ -+ { "xr14", 14 + FP_REG_FIRST }, \ -+ { "xr15", 15 + FP_REG_FIRST }, \ -+ { "xr16", 16 + FP_REG_FIRST }, \ -+ { "xr17", 17 + FP_REG_FIRST }, \ -+ { "xr18", 18 + FP_REG_FIRST }, \ -+ { "xr19", 19 + FP_REG_FIRST }, \ -+ { "xr20", 20 + FP_REG_FIRST }, \ -+ { "xr21", 21 + FP_REG_FIRST }, \ -+ { "xr22", 22 + FP_REG_FIRST }, \ -+ { "xr23", 23 + FP_REG_FIRST }, \ -+ { "xr24", 24 + FP_REG_FIRST }, \ -+ { "xr25", 25 + FP_REG_FIRST }, \ -+ { "xr26", 26 + FP_REG_FIRST }, \ -+ { "xr27", 27 + FP_REG_FIRST }, \ -+ { "xr28", 28 + FP_REG_FIRST }, \ -+ { "xr29", 29 + FP_REG_FIRST }, \ -+ { "xr30", 30 + FP_REG_FIRST }, \ -+ { "xr31", 31 + FP_REG_FIRST } \ -+} -+ -+#define DBR_OUTPUT_SEQEND(STREAM) \ -+do \ -+ { \ -+ /* Emit a blank line after the delay slot for emphasis. */ \ -+ fputs ("\n", STREAM); \ -+ } \ -+while (0) -+ -+/* The LARCH implementation uses some labels for its own purpose. The -+ following lists what labels are created, and are all formed by the -+ pattern $L[a-z].*. The machine independent portion of GCC creates -+ labels matching: $L[A-Z][0-9]+ and $L[0-9]+. -+ -+ LM[0-9]+ Silicon Graphics/ECOFF stabs label before each stmt. -+ $Lb[0-9]+ Begin blocks for LARCH debug support -+ $Lc[0-9]+ Label for use in s operation. -+ $Le[0-9]+ End blocks for LARCH debug support */ -+ -+#undef ASM_DECLARE_OBJECT_NAME -+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ -+ loongarch_declare_object (STREAM, NAME, "", ":\n") -+ -+/* Globalizing directive for a label. */ -+#define GLOBAL_ASM_OP "\t.globl\t" -+ -+/* This says how to define a global common symbol. */ -+ -+#define ASM_OUTPUT_ALIGNED_DECL_COMMON loongarch_output_aligned_decl_common -+ -+/* This says how to define a local common symbol (i.e., not visible to -+ linker). */ -+ -+#ifndef ASM_OUTPUT_ALIGNED_LOCAL -+#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \ -+ loongarch_declare_common_object (STREAM, NAME, "\n\t.lcomm\t", SIZE, ALIGN, false) -+#endif -+ -+/* This says how to output an external. It would be possible not to -+ output anything and let undefined symbol become external. However -+ the assembler uses length information on externals to allocate in -+ data/sdata bss/sbss, thereby saving exec time. */ -+ -+#undef ASM_OUTPUT_EXTERNAL -+#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \ -+ loongarch_output_external(STREAM,DECL,NAME) -+ -+/* This is how to declare a function name. The actual work of -+ emitting the label is moved to function_prologue, so that we can -+ get the line number correctly emitted before the .ent directive, -+ and after any .file directives. Define as empty so that the function -+ is not declared before the .ent directive elsewhere. 
*/ -+ -+#undef ASM_DECLARE_FUNCTION_NAME -+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \ -+ loongarch_declare_function_name(STREAM,NAME,DECL) -+ -+/* This is how to store into the string LABEL -+ the symbol_ref name of an internal numbered label where -+ PREFIX is the class of label and NUM is the number within the class. -+ This is suitable for output with `assemble_name'. */ -+ -+#undef ASM_GENERATE_INTERNAL_LABEL -+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \ -+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM)) -+ -+/* Print debug labels as "foo = ." rather than "foo:" because they should -+ represent a byte pointer rather than an ISA-encoded address. This is -+ particularly important for code like: -+ -+ $LFBxxx = . -+ .cfi_startproc -+ ... -+ .section .gcc_except_table,... -+ ... -+ .uleb128 foo-$LFBxxx -+ -+ The .uleb128 requies $LFBxxx to match the FDE start address, which is -+ likewise a byte pointer rather than an ISA-encoded address. -+ -+ At the time of writing, this hook is not used for the function end -+ label: -+ -+ $LFExxx: -+ .end foo -+ -+ */ -+ -+#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ -+ fprintf (FILE, "%s%s%d = .\n", LOCAL_LABEL_PREFIX, PREFIX, NUM) -+ -+/* This is how to output an element of a case-vector that is absolute. */ -+ -+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ -+ fprintf (STREAM, "\t%s\t%sL%d\n", \ -+ ptr_mode == DImode ? ".dword" : ".word", \ -+ LOCAL_LABEL_PREFIX, \ -+ VALUE) -+ -+/* This is how to output an element of a case-vector. We can make the -+ entries GP-relative when .gp(d)word is supported. */ -+ -+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ -+do { \ -+ if (TARGET_RTP_PIC) \ -+ { \ -+ /* Make the entry relative to the start of the function. */ \ -+ rtx fnsym = XEXP (DECL_RTL (current_function_decl), 0); \ -+ fprintf (STREAM, "\t%s\t%sL%d-", \ -+ Pmode == DImode ? ".dword" : ".word", \ -+ LOCAL_LABEL_PREFIX, VALUE); \ -+ assemble_name (STREAM, XSTR (fnsym, 0)); \ -+ fprintf (STREAM, "\n"); \ -+ } \ -+ else \ -+ fprintf (STREAM, "\t%s\t%sL%d-%sL%d\n", \ -+ ptr_mode == DImode ? ".dword" : ".word", \ -+ LOCAL_LABEL_PREFIX, VALUE, \ -+ LOCAL_LABEL_PREFIX, REL); \ -+} while (0) -+ -+/* Mark inline jump tables as data for the purpose of disassembly. For -+ simplicity embed the jump table's label number in the local symbol -+ produced so that multiple jump tables within a single function end -+ up marked with unique symbols. Retain the alignment setting from -+ `elfos.h' as we are replacing the definition from there. */ -+ -+#undef ASM_OUTPUT_BEFORE_CASE_LABEL -+#define ASM_OUTPUT_BEFORE_CASE_LABEL(STREAM, PREFIX, NUM, TABLE) \ -+ do \ -+ { \ -+ ASM_OUTPUT_ALIGN ((STREAM), 2); \ -+ if (JUMP_TABLES_IN_TEXT_SECTION) \ -+ loongarch_set_text_contents_type (STREAM, "__jump_", NUM, FALSE); \ -+ } \ -+ while (0) -+ -+/* Reset text marking to code after an inline jump table. Like with -+ the beginning of a jump table use the label number to keep symbols -+ unique. */ -+ -+#define ASM_OUTPUT_CASE_END(STREAM, NUM, TABLE) \ -+ do \ -+ if (JUMP_TABLES_IN_TEXT_SECTION) \ -+ loongarch_set_text_contents_type (STREAM, "__jend_", NUM, TRUE); \ -+ while (0) -+ -+/* This is how to output an assembler line -+ that says to advance the location counter -+ to a multiple of 2**LOG bytes. 
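-+ (For instance, LOG = 4 emits ".align 4", requesting 2**4 = 16-byte
-+ alignment.)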
*/ -+ -+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ -+ fprintf (STREAM, "\t.align\t%d\n", (LOG)) -+ -+#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM,LOG) \ -+ fprintf (STREAM, "\t.align\t%d,54525952,4\n", (LOG)) -+ -+ -+/* This is how to output an assembler line to advance the location -+ counter by SIZE bytes. */ -+ -+#undef ASM_OUTPUT_SKIP -+#define ASM_OUTPUT_SKIP(STREAM,SIZE) \ -+ fprintf (STREAM, "\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)) -+ -+/* This is how to output a string. */ -+#undef ASM_OUTPUT_ASCII -+#define ASM_OUTPUT_ASCII loongarch_output_ascii -+ -+ -+/* Default to -G 8 */ -+#ifndef LARCH_DEFAULT_GVALUE -+#define LARCH_DEFAULT_GVALUE 8 -+#endif -+ -+/* Define the strings to put out for each section in the object file. */ -+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */ -+#define DATA_SECTION_ASM_OP "\t.data" /* large data */ -+ -+#undef READONLY_DATA_SECTION_ASM_OP -+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* read-only data */ -+ -+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \ -+do \ -+ { \ -+ fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \ -+ TARGET_64BIT ? "daddiu" : "addiu", \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ TARGET_64BIT ? "sd" : "sw", \ -+ reg_names[REGNO], \ -+ reg_names[STACK_POINTER_REGNUM]); \ -+ } \ -+while (0) -+ -+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \ -+do \ -+ { \ -+ loongarch_push_asm_switch (&loongarch_noreorder); \ -+ fprintf (STREAM, "\t%s\t%s,0(%s)\n\t%s\t%s,%s,8\n", \ -+ TARGET_64BIT ? "ld" : "lw", \ -+ reg_names[REGNO], \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ TARGET_64BIT ? "daddu" : "addu", \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ reg_names[STACK_POINTER_REGNUM]); \ -+ loongarch_pop_asm_switch (&loongarch_noreorder); \ -+ } \ -+while (0) -+ -+/* How to start an assembler comment. -+ The leading space is important (the loongarch native assembler requires it). */ -+#ifndef ASM_COMMENT_START -+#define ASM_COMMENT_START " #" -+#endif -+ -+#undef SIZE_TYPE -+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int") -+ -+#undef PTRDIFF_TYPE -+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int") -+ -+/* The minimum alignment of any expanded block move. */ -+#define LARCH_MIN_MOVE_MEM_ALIGN 16 -+ -+/* The maximum number of bytes that can be copied by one iteration of -+ a movmemsi loop; see loongarch_block_move_loop. */ -+#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER \ -+ (UNITS_PER_WORD * 4) -+ -+/* The maximum number of bytes that can be copied by a straight-line -+ implementation of movmemsi; see loongarch_block_move_straight. We want -+ to make sure that any loop-based implementation will iterate at -+ least twice. */ -+#define LARCH_MAX_MOVE_BYTES_STRAIGHT \ -+ (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) -+ -+/* The base cost of a memcpy call, for MOVE_RATIO and friends. These -+ values were determined experimentally by benchmarking with CSiBE. -+*/ -+#define LARCH_CALL_RATIO 8 -+ -+/* Any loop-based implementation of movmemsi will have at least -+ LARCH_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory -+ moves, so allow individual copies of fewer elements. -+ -+ When movmemsi is not available, use a value approximating -+ the length of a memcpy call sequence, so that move_by_pieces -+ will generate inline code if it is shorter than a function call. -+ Since move_by_pieces_ninsns counts memory-to-memory moves, but -+ we'll have to generate a load/store pair for each, halve the -+ value of LARCH_CALL_RATIO to take that into account. 
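-+
-+ As a worked example, on a 64-bit target where UNITS_PER_WORD is 8:
-+ LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER is 32, so MOVE_RATIO is 4 when
-+ movmemsi is available; otherwise it is CLEAR_RATIO (speed) / 2,
-+ i.e. 7 when optimizing for speed and LARCH_CALL_RATIO / 2 == 4 when
-+ optimizing for size.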
*/ -+ -+#define MOVE_RATIO(speed) \ -+ (HAVE_movmemsi \ -+ ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \ -+ : CLEAR_RATIO (speed) / 2) -+ -+/* For CLEAR_RATIO, when optimizing for size, give a better estimate -+ of the length of a memset call, but use the default otherwise. */ -+ -+#define CLEAR_RATIO(speed)\ -+ ((speed) ? 15 : LARCH_CALL_RATIO) -+ -+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when -+ optimizing for size adjust the ratio to account for the overhead of -+ loading the constant and replicating it across the word. */ -+ -+#define SET_RATIO(speed) \ -+ ((speed) ? 15 : LARCH_CALL_RATIO - 2) -+ -+/* Since the bits of the _init and _fini function is spread across -+ many object files, each potentially with its own GP, we must assume -+ we need to load our GP. We don't preserve $gp or $ra, since each -+ init/fini chunk is supposed to initialize $gp, and crti/crtn -+ already take care of preserving $ra and, when appropriate, $gp. */ -+#if (defined _ABI64 && _LARCH_SIM == _ABI64) -+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ -+ asm (SECTION_OP "\n\ -+ .set push\n\ -+ la $r20, " USER_LABEL_PREFIX #FUNC "\n\ -+ jirl $r1, $r20, 0\n\ -+ .set pop\n\ -+ " TEXT_SECTION_ASM_OP); -+#endif -+#ifndef HAVE_AS_TLS -+#define HAVE_AS_TLS 0 -+#endif -+ -+#ifndef HAVE_AS_NAN -+#define HAVE_AS_NAN 0 -+#endif -+ -+#ifndef USED_FOR_TARGET -+/* Information about ".set noFOO; ...; .set FOO" blocks. */ -+struct loongarch_asm_switch { -+ /* The FOO in the description above. */ -+ const char *name; -+ -+ /* The current block nesting level, or 0 if we aren't in a block. */ -+ int nesting_level; -+}; -+ -+extern const enum reg_class loongarch_regno_to_class[]; -+extern const char *current_function_file; /* filename current function is in */ -+extern int num_source_filenames; /* current .file # */ -+extern int loongarch_dbx_regno[]; -+extern int loongarch_dwarf_regno[]; -+extern bool loongarch_split_p[]; -+extern bool loongarch_use_pcrel_pool_p[]; -+extern enum processor loongarch_arch; /* which cpu to codegen for */ -+extern enum processor loongarch_tune; /* which cpu to schedule for */ -+extern int loongarch_isa; /* architectural level */ -+extern int loongarch_isa_rev; -+extern const struct loongarch_cpu_info *loongarch_arch_info; -+extern const struct loongarch_cpu_info *loongarch_tune_info; -+extern unsigned int loongarch_base_compression_flags; -+ -+/* Information about a function's frame layout. */ -+struct GTY(()) loongarch_frame_info { -+ /* The size of the frame in bytes. */ -+ HOST_WIDE_INT total_size; -+ -+ /* The number of bytes allocated to variables. */ -+ HOST_WIDE_INT var_size; -+ -+ /* The number of bytes allocated to outgoing function arguments. */ -+ HOST_WIDE_INT args_size; -+ -+ /* The number of bytes allocated to the .cprestore slot, or 0 if there -+ is no such slot. */ -+ HOST_WIDE_INT cprestore_size; -+ -+ /* Bit X is set if the function saves or restores GPR X. */ -+ unsigned int mask; -+ -+ /* Likewise FPR X. */ -+ unsigned int fmask; -+ -+ /* Likewise doubleword accumulator X ($acX). */ -+ unsigned int acc_mask; -+ -+ /* The number of GPRs, FPRs, doubleword accumulators and COP0 -+ registers saved. */ -+ unsigned int num_gp; -+ unsigned int num_fp; -+ unsigned int num_acc; -+ unsigned int num_cop0_regs; -+ -+ /* The offset of the topmost GPR, FPR, accumulator and COP0-register -+ save slots from the top of the frame, or zero if no such slots are -+ needed. 
*/ -+ HOST_WIDE_INT gp_save_offset; -+ HOST_WIDE_INT fp_save_offset; -+ HOST_WIDE_INT acc_save_offset; -+ HOST_WIDE_INT cop0_save_offset; -+ -+ /* Likewise, but giving offsets from the bottom of the frame. */ -+ HOST_WIDE_INT gp_sp_offset; -+ HOST_WIDE_INT fp_sp_offset; -+ HOST_WIDE_INT acc_sp_offset; -+ HOST_WIDE_INT cop0_sp_offset; -+ -+ /* Similar, but the value passed to _mcount. */ -+ HOST_WIDE_INT ra_fp_offset; -+ -+ /* The offset of arg_pointer_rtx from the bottom of the frame. */ -+ HOST_WIDE_INT arg_pointer_offset; -+ -+ /* The offset of hard_frame_pointer_rtx from the bottom of the frame. */ -+ HOST_WIDE_INT hard_frame_pointer_offset; -+ -+ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */ -+ unsigned save_libcall_adjustment; -+ -+ /* Offset of virtual frame pointer from stack pointer/frame bottom */ -+ HOST_WIDE_INT frame_pointer_offset; -+}; -+ -+/* Enumeration for masked vectored (VI) and non-masked (EIC) interrupts. */ -+enum loongarch_int_mask -+{ -+ INT_MASK_EIC = -1, -+ INT_MASK_SW0 = 0, -+ INT_MASK_SW1 = 1, -+ INT_MASK_HW0 = 2, -+ INT_MASK_HW1 = 3, -+ INT_MASK_HW2 = 4, -+ INT_MASK_HW3 = 5, -+ INT_MASK_HW4 = 6, -+ INT_MASK_HW5 = 7 -+}; -+ -+/* Enumeration to mark the existence of the shadow register set. -+ SHADOW_SET_INTSTACK indicates a shadow register set with a valid stack -+ pointer. */ -+enum loongarch_shadow_set -+{ -+ SHADOW_SET_NO, -+ SHADOW_SET_YES, -+ SHADOW_SET_INTSTACK -+}; -+ -+struct GTY(()) machine_function { -+ /* The next floating-point condition-code register to allocate -+ for 8CC targets, relative to ST_REG_FIRST. */ -+ unsigned int next_fcc; -+ -+ /* The number of extra stack bytes taken up by register varargs. -+ This area is allocated by the callee at the very top of the frame. */ -+ int varargs_size; -+ -+ /* The current frame information, calculated by loongarch_compute_frame_info. */ -+ struct loongarch_frame_info frame; -+ -+ /* How many instructions it takes to load a label into $AT, or 0 if -+ this property hasn't yet been calculated. */ -+ unsigned int load_label_num_insns; -+ -+ /* True if loongarch_adjust_insn_length should ignore an instruction's -+ hazard attribute. */ -+ bool ignore_hazard_length_p; -+ -+ /* True if the whole function is suitable for .set noreorder and -+ .set nomacro. */ -+ bool all_noreorder_p; -+ -+ /* True if the function has "inflexible" and "flexible" references -+ to the global pointer. See loongarch_cfun_has_inflexible_gp_ref_p -+ and loongarch_cfun_has_flexible_gp_ref_p for details. */ -+ bool has_inflexible_gp_insn_p; -+ bool has_flexible_gp_insn_p; -+ -+ /* True if the function's prologue must load the global pointer -+ value into pic_offset_table_rtx and store the same value in -+ the function's cprestore slot (if any). Even if this value -+ is currently false, we may decide to set it to true later; -+ see loongarch_must_initialize_gp_p () for details. */ -+ bool must_initialize_gp_p; -+ -+ /* True if the current function must restore $gp after any potential -+ clobber. This value is only meaningful during the first post-epilogue -+ split_insns pass; see loongarch_must_initialize_gp_p () for details. */ -+ bool must_restore_gp_when_clobbered_p; -+ -+ /* True if this is an interrupt handler. */ -+ bool interrupt_handler_p; -+ -+ /* Records the way in which interrupts should be masked. Only used if -+ interrupts are not kept masked. */ -+ enum loongarch_int_mask int_mask; -+ -+ /* Records if this is an interrupt handler that uses shadow registers. 
*/ -+ enum loongarch_shadow_set use_shadow_register_set; -+ -+ /* True if this is an interrupt handler that should keep interrupts -+ masked. */ -+ bool keep_interrupts_masked_p; -+ -+ /* True if this is an interrupt handler that should use DERET -+ instead of ERET. */ -+ bool use_debug_exception_return_p; -+ -+ /* True if at least one of the formal parameters to a function must be -+ written to the frame header (probably so its address can be taken). */ -+ bool does_not_use_frame_header; -+ -+ /* True if none of the functions that are called by this function need -+ stack space allocated for their arguments. */ -+ bool optimize_call_stack; -+ -+ /* True if one of the functions calling this function may not allocate -+ a frame header. */ -+ bool callers_may_not_allocate_frame; -+ -+ /* True if GCC stored callee saved registers in the frame header. */ -+ bool use_frame_header_for_callee_saved_regs; -+}; -+#endif -+ -+/* Enable querying of DFA units. */ -+#define CPU_UNITS_QUERY 0 -+ -+/* As on most targets, we want the .eh_frame section to be read-only where -+ possible. And as on most targets, this means two things: -+ -+ (a) Non-locally-binding pointers must have an indirect encoding, -+ so that the addresses in the .eh_frame section itself become -+ locally-binding. -+ -+ (b) A shared library's .eh_frame section must encode locally-binding -+ pointers in a relative (relocation-free) form. -+ -+ However, LARCH has traditionally not allowed directives like: -+ -+ .long x-. -+ -+ in cases where "x" is in a different section, or is not defined in the -+ same assembly file. We are therefore unable to emit the PC-relative -+ form required by (b) at assembly time. -+ -+ Fortunately, the linker is able to convert absolute addresses into -+ PC-relative addresses on our behalf. Unfortunately, only certain -+ versions of the linker know how to do this for indirect pointers, -+ and for personality data. We must fall back on using writable -+ .eh_frame sections for shared libraries if the linker does not -+ support this feature. */ -+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ -+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr) -+ -+#define SWITCHABLE_TARGET 1 -+ -+/* Several named LARCH patterns depend on Pmode. These patterns have the -+ form _si for Pmode == SImode and _di for Pmode == DImode. -+ Add the appropriate suffix to generator function NAME and invoke it -+ with arguments ARGS. */ -+#define PMODE_INSN(NAME, ARGS) \ -+ (Pmode == SImode ? NAME ## _si ARGS : NAME ## _di ARGS) -+ -+/***********************/ -+/* N_LARCH-PORT */ -+/***********************/ -+/* The `Q' extension is not yet supported. */ -+/* TODO: according to march */ -+#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4) -+ -+/* The largest type that can be passed in floating-point registers. */ -+/* TODO: according to mabi */ -+#define UNITS_PER_FP_ARG (TARGET_HARD_FLOAT ? (TARGET_64BIT ? 8 : 4) : 0) -+ -+/* Internal macros to classify an ISA register's type. */ -+ -+#define GP_TEMP_FIRST (GP_REG_FIRST + 12) -+ -+#define CALLEE_SAVED_REG_NUMBER(REGNO) \ -+ ((REGNO) >= 22 && (REGNO) <= 31 ? 
(REGNO) - 22 : -1) -+ -+#define N_LARCH_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1) -+#define N_LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, N_LARCH_PROLOGUE_TEMP_REGNUM) -+ -+#define LIBCALL_VALUE(MODE) \ -+ loongarch_function_value (NULL_TREE, NULL_TREE, MODE) -+ -+#define FUNCTION_VALUE(VALTYPE, FUNC) \ -+ loongarch_function_value (VALTYPE, FUNC, VOIDmode) -+ -+#define FRAME_GROWS_DOWNWARD 1 -+ -+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN) -diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md -new file mode 100644 -index 000000000..be950c9e4 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch.md -@@ -0,0 +1,4320 @@ -+;; Loongarch.md Machine Description for LARCH based processors -+;; Copyright (C) 1989-2018 Free Software Foundation, Inc. -+;; Contributed by A. Lichnewsky, lich@inria.inria.fr -+;; Changes by Michael Meissner, meissner@osf.org -+ -+;; This file is part of GCC. -+ -+;; GCC is free software; you can redistribute it and/or modify -+;; it under the terms of the GNU General Public License as published by -+;; the Free Software Foundation; either version 3, or (at your option) -+;; any later version. -+ -+;; GCC is distributed in the hope that it will be useful, -+;; but WITHOUT ANY WARRANTY; without even the implied warranty of -+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+;; GNU General Public License for more details. -+ -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . -+ -+(define_enum "processor" [ -+ loongarch -+ loongarch64 -+ la464 -+]) -+ -+(define_c_enum "unspec" [ -+ ;; Integer operations that are too cumbersome to describe directly. -+ UNSPEC_WSBH -+ UNSPEC_DSBH -+ UNSPEC_DSHD -+ -+ ;; Floating-point moves. -+ UNSPEC_LOAD_LOW -+ UNSPEC_LOAD_HIGH -+ UNSPEC_STORE_WORD -+ UNSPEC_MOVGR2FRH -+ UNSPEC_MOVFRH2GR -+ -+ ;; Floating-point environment. -+ UNSPEC_MOVFCSR2GR -+ UNSPEC_MOVGR2FCSR -+ -+ ;; GP manipulation. -+ UNSPEC_EH_RETURN -+ -+ ;; -+ UNSPEC_FRINT -+ UNSPEC_FCLASS -+ UNSPEC_BYTEPICK_W -+ UNSPEC_BYTEPICK_D -+ UNSPEC_BITREV_4B -+ UNSPEC_BITREV_8B -+ -+ ;; Symbolic accesses. -+ UNSPEC_LOAD_CALL -+ -+ ;; Blockage and synchronisation. -+ UNSPEC_BLOCKAGE -+ UNSPEC_DBAR -+ UNSPEC_IBAR -+ -+ ;; CPUCFG -+ UNSPEC_CPUCFG -+ UNSPEC_ASRTLE_D -+ UNSPEC_ASRTGT_D -+ -+ UNSPEC_CSRRD -+ UNSPEC_CSRWR -+ UNSPEC_CSRXCHG -+ UNSPEC_IOCSRRD -+ UNSPEC_IOCSRWR -+ -+ ;; cacop -+ UNSPEC_CACOP -+ -+ ;; pte -+ UNSPEC_LDDIR -+ UNSPEC_LDPTE -+ -+ ;; Cache manipulation. -+ UNSPEC_LARCH_CACHE -+ -+ ;; Interrupt handling. -+ UNSPEC_ERTN -+ UNSPEC_DI -+ UNSPEC_EHB -+ UNSPEC_RDPGPR -+ -+ ;; Used in a call expression in place of args_size. It's present for PIC -+ ;; indirect calls where it contains args_size and the function symbol. -+ UNSPEC_CALL_ATTR -+ -+ -+ ;; Stack checking. -+ UNSPEC_PROBE_STACK_RANGE -+ -+ ;; The `.insn' pseudo-op. -+ UNSPEC_INSN_PSEUDO -+ -+ ;; TLS -+ UNSPEC_TLS_GD -+ UNSPEC_TLS_LD -+ UNSPEC_TLS_LE -+ UNSPEC_TLS_IE -+ -+ UNSPEC_LU52I_D -+ -+ UNSPEC_TIE -+ -+ ;; CRC -+ UNSPEC_CRC -+ UNSPEC_CRCC -+ UNSPEC_ADDRESS_FIRST -+]) -+ -+(define_c_enum "unspecv" [ -+ ;; Register save and restore. -+ UNSPECV_GPR_SAVE -+ UNSPECV_GPR_RESTORE -+ -+ UNSPECV_MOVE_EXTREME -+]) -+ -+ -+(define_constants -+ [(RETURN_ADDR_REGNUM 1) -+ (T0_REGNUM 12) -+ (T1_REGNUM 13) -+ (S0_REGNUM 23) -+ (S1_REGNUM 24) -+ (S2_REGNUM 25) -+ -+ ;; PIC long branch sequences are never longer than 100 bytes. 
-+ (MAX_PIC_BRANCH_LENGTH 100) -+]) -+ -+(include "predicates.md") -+(include "constraints.md") -+ -+;; .................... -+;; -+;; Attributes -+;; -+;; .................... -+ -+(define_attr "got" "unset,load" -+ (const_string "unset")) -+ -+;; For jal instructions, this attribute is DIRECT when the target address -+;; is symbolic and INDIRECT when it is a register. -+(define_attr "jal" "unset,direct,indirect" -+ (const_string "unset")) -+ -+ -+;; Classification of moves, extensions and truncations. Most values -+;; are as for "type" (see below) but there are also the following -+;; move-specific values: -+;; -+;; sll0 "sll DEST,SRC,0", which on 64-bit targets is guaranteed -+;; to produce a sign-extended DEST, even if SRC is not -+;; properly sign-extended -+;; pick_ins BSTRPICK.W, BSTRPICK.D, BSTRINS.W or BSTRINS.D instruction -+;; andi a single ANDI instruction -+;; shift_shift a shift left followed by a shift right -+;; -+;; This attribute is used to determine the instruction's length and -+;; scheduling type. For doubleword moves, the attribute always describes -+;; the split instructions; in some cases, it is more appropriate for the -+;; scheduling type to be "multi" instead. -+(define_attr "move_type" -+ "unknown,load,fpload,store,fpstore,mgtf,mftg,imul,move,fmove, -+ const,signext,pick_ins,logical,arith,sll0,andi,shift_shift" -+ (const_string "unknown")) -+ -+(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor,simd_add" -+ (const_string "unknown")) -+ -+;; Main data type used by the insn -+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,OI,SF,DF,TF,FCC, -+ V2DI,V4SI,V8HI,V16QI,V2DF,V4SF,V4DI,V8SI,V16HI,V32QI,V4DF,V8SF" -+ (const_string "unknown")) -+ -+;; True if the main data type is twice the size of a word. -+(define_attr "dword_mode" "no,yes" -+ (cond [(and (eq_attr "mode" "DI,DF") -+ (not (match_test "TARGET_64BIT"))) -+ (const_string "yes") -+ -+ (and (eq_attr "mode" "TI,TF") -+ (match_test "TARGET_64BIT")) -+ (const_string "yes")] -+ (const_string "no"))) -+ -+;; True if the main data type is four times of the size of a word. -+(define_attr "qword_mode" "no,yes" -+ (cond [(and (eq_attr "mode" "TI,TF") -+ (not (match_test "TARGET_64BIT"))) -+ (const_string "yes")] -+ (const_string "no"))) -+ -+;; True if the main data type is eight times of the size of a word. -+(define_attr "oword_mode" "no,yes" -+ (cond [(and (eq_attr "mode" "OI,V8SF,V4DF") -+ (not (match_test "TARGET_64BIT"))) -+ (const_string "yes")] -+ (const_string "no"))) -+ -+;; Attributes describing a sync loop. These loops have the form: -+;; -+;; if (RELEASE_BARRIER == YES) sync -+;; 1: OLDVAL = *MEM -+;; if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2 -+;; CMP = 0 [delay slot] -+;; $TMP1 = OLDVAL & EXCLUSIVE_MASK -+;; $TMP2 = INSN1 (OLDVAL, INSN1_OP2) -+;; $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK) -+;; $AT |= $TMP1 | $TMP3 -+;; if (!commit (*MEM = $AT)) goto 1. -+;; if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot] -+;; CMP = 1 -+;; if (ACQUIRE_BARRIER == YES) sync -+;; 2: -+;; -+;; where "$" values are temporaries and where the other values are -+;; specified by the attributes below. Values are specified as operand -+;; numbers and insns are specified as enums. If no operand number is -+;; specified, the following values are used instead: -+;; -+;; - OLDVAL: $AT -+;; - CMP: NONE -+;; - NEWVAL: $AT -+;; - INCLUSIVE_MASK: -1 -+;; - REQUIRED_OLDVAL: OLDVAL & INCLUSIVE_MASK -+;; - EXCLUSIVE_MASK: 0 -+;; -+;; MEM and INSN1_OP2 are required. 
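-+;;
-+;; (An illustrative, non-normative reading of the template above: an
-+;; atomic fetch-and-add loop would use "addu" for INSN1 with
-+;; SYNC_INSN1_OP2 naming the addend operand, keep INSN2 as "nop", and
-+;; leave the masks at their defaults.)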
-+;;
-+;; Ideally, the operand attributes would be integers, with -1 meaning "none",
-+;; but the gen* programs don't yet support that.
-+(define_attr "sync_mem" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_oldval" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_cmp" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_newval" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_inclusive_mask" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_exclusive_mask" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_required_oldval" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_insn1_op2" "none,0,1,2,3,4,5" (const_string "none"))
-+(define_attr "sync_insn1" "move,li,addu,addiu,subu,and,andi,or,ori,xor,xori"
-+ (const_string "move"))
-+(define_attr "sync_insn2" "nop,and,xor,not"
-+ (const_string "nop"))
-+;; Memory model specifier.
-+;; "0"-"9" values specify the operand that stores the memory model value.
-+;; "10" specifies MEMMODEL_ACQ_REL,
-+;; "11" specifies MEMMODEL_ACQUIRE.
-+(define_attr "sync_memmodel" "" (const_int 10))
-+
-+;; Accumulator operand for madd patterns.
-+(define_attr "accum_in" "none,0,1,2,3,4,5" (const_string "none"))
-+
-+;; Classification of each insn.
-+;; branch conditional branch
-+;; jump unconditional jump
-+;; call unconditional call
-+;; load load instruction(s)
-+;; fpload floating point load
-+;; fpidxload floating point indexed load
-+;; store store instruction(s)
-+;; fpstore floating point store
-+;; fpidxstore floating point indexed store
-+;; prefetch memory prefetch (register + offset)
-+;; prefetchx memory indexed prefetch (register + register)
-+;; condmove conditional moves
-+;; mgtf move general-purpose register to float register
-+;; mftg move float register to general-purpose register
-+;; const load constant
-+;; arith integer arithmetic instructions
-+;; logical integer logical instructions
-+;; shift integer shift instructions
-+;; slt set less than instructions
-+;; signext sign extend instructions
-+;; clz the clz and clo instructions
-+;; trap trap if instructions
-+;; imul integer multiply 2 operands
-+;; imul3 integer multiply 3 operands
-+;; idiv3 integer divide 3 operands
-+;; move integer register move ({,D}ADD{,U} with rt = 0)
-+;; fmove floating point register move
-+;; fadd floating point add/subtract
-+;; fmul floating point multiply
-+;; fmadd floating point multiply-add
-+;; fdiv floating point divide
-+;; frdiv floating point reciprocal divide
-+;; fabs floating point absolute value
-+;; fneg floating point negation
-+;; fcmp floating point compare
-+;; fcvt floating point convert
-+;; fsqrt floating point square root
-+;; frsqrt floating point reciprocal square root
-+;; multi multiword sequence (or user asm statements)
-+;; atomic atomic memory update instruction
-+;; syncloop memory atomic operation implemented as a sync loop
-+;; nop no operation
-+;; ghost an instruction that produces no real code
-+(define_attr "type"
-+ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
-+ prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical,
-+ shift,slt,signext,clz,trap,imul,imul3,idiv3,move,
-+ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcvt,fsqrt,
-+ frsqrt,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat,
-+ multi,atomic,syncloop,nop,ghost,
-+ simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd,
-+ simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp,
-+
simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill, -+ simd_permute,simd_shf,simd_sat,simd_pcnt,simd_copy,simd_branch,simd_clsx, -+ simd_fminmax,simd_logic,simd_move,simd_load,simd_store" -+ (cond [(eq_attr "jal" "!unset") (const_string "call") -+ (eq_attr "got" "load") (const_string "load") -+ -+ (eq_attr "alu_type" "add,sub") (const_string "arith") -+ -+ (eq_attr "alu_type" "not,nor,and,or,xor") (const_string "logical") -+ -+ ;; If a doubleword move uses these expensive instructions, -+ ;; it is usually better to schedule them in the same way -+ ;; as the singleword form, rather than as "multi". -+ (eq_attr "move_type" "load") (const_string "load") -+ (eq_attr "move_type" "fpload") (const_string "fpload") -+ (eq_attr "move_type" "store") (const_string "store") -+ (eq_attr "move_type" "fpstore") (const_string "fpstore") -+ (eq_attr "move_type" "mgtf") (const_string "mgtf") -+ (eq_attr "move_type" "mftg") (const_string "mftg") -+ -+ ;; These types of move are always single insns. -+ (eq_attr "move_type" "imul") (const_string "imul") -+ (eq_attr "move_type" "fmove") (const_string "fmove") -+ (eq_attr "move_type" "signext") (const_string "signext") -+ (eq_attr "move_type" "pick_ins") (const_string "arith") -+ (eq_attr "move_type" "arith") (const_string "arith") -+ (eq_attr "move_type" "logical") (const_string "logical") -+ (eq_attr "move_type" "sll0") (const_string "shift") -+ (eq_attr "move_type" "andi") (const_string "logical") -+ -+ ;; These types of move are always split. -+ (eq_attr "move_type" "shift_shift") -+ (const_string "multi") -+ -+ ;; These types of move are split for octaword modes only. -+ (and (eq_attr "move_type" "move,const") -+ (eq_attr "oword_mode" "yes")) -+ (const_string "multi") -+ -+ ;; These types of move are split for quadword modes only. -+ (and (eq_attr "move_type" "move,const") -+ (eq_attr "qword_mode" "yes")) -+ (const_string "multi") -+ -+ ;; These types of move are split for doubleword modes only. -+ (and (eq_attr "move_type" "move,const") -+ (eq_attr "dword_mode" "yes")) -+ (const_string "multi") -+ (eq_attr "move_type" "move") (const_string "move") -+ (eq_attr "move_type" "const") (const_string "const") -+ (eq_attr "sync_mem" "!none") (const_string "syncloop")] -+ (const_string "unknown"))) -+ -+(define_attr "compact_form" "always,maybe,never" -+ (cond [(eq_attr "jal" "direct") -+ (const_string "always") -+ (eq_attr "jal" "indirect") -+ (const_string "maybe") -+ (eq_attr "type" "jump") -+ (const_string "maybe")] -+ (const_string "never"))) -+ -+;; Mode for conversion types (fcvt) -+;; I2S integer to float single (SI/DI to SF) -+;; I2D integer to float double (SI/DI to DF) -+;; S2I float to integer (SF to SI/DI) -+;; D2I float to integer (DF to SI/DI) -+;; D2S double to float single -+;; S2D float single to double -+ -+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" -+ (const_string "unknown")) -+ -+(define_attr "compression" "none,all" -+ (const_string "none")) -+ -+;; The number of individual instructions that a non-branch pattern generates, -+;; using units of BASE_INSN_LENGTH. -+(define_attr "insn_count" "" -+ (cond [;; "Ghost" instructions occupy no space. -+ (eq_attr "type" "ghost") -+ (const_int 0) -+ -+ ;; Check for doubleword moves that are decomposed into two -+ ;; instructions. -+ (and (eq_attr "move_type" "mgtf,mftg,move") -+ (eq_attr "dword_mode" "yes")) -+ (const_int 2) -+ -+ ;; Check for quadword moves that are decomposed into four -+ ;; instructions. 
-+   (and (eq_attr "move_type" "mgtf,mftg,move")
-+        (eq_attr "qword_mode" "yes"))
-+   (const_int 4)
-+
-+   ;; Check for octaword moves that are decomposed into eight
-+   ;; instructions.
-+   (and (eq_attr "move_type" "mgtf,mftg,move")
-+        (eq_attr "oword_mode" "yes"))
-+   (const_int 8)
-+
-+   ;; Constants, loads and stores are handled by external routines.
-+   (and (eq_attr "move_type" "const")
-+        (eq_attr "dword_mode" "yes"))
-+   (symbol_ref "loongarch_split_const_insns (operands[1])")
-+   (eq_attr "move_type" "const")
-+   (symbol_ref "loongarch_const_insns (operands[1])")
-+   (eq_attr "move_type" "load,fpload")
-+   (symbol_ref "loongarch_load_store_insns (operands[1], insn)")
-+   (eq_attr "move_type" "store,fpstore")
-+   (symbol_ref "loongarch_load_store_insns (operands[0], insn)")
-+
-+   (eq_attr "type" "idiv3")
-+   (symbol_ref "loongarch_idiv_insns (GET_MODE (PATTERN (insn)))")]
-+(const_int 1)))
-+
-+;; Length of instruction in bytes.  The default is derived from "insn_count",
-+;; but there are special cases for branches (which must be handled here)
-+;; and for compressed single instructions.
-+
-+
-+
-+(define_attr "length" ""
-+  (cond [
-+    ;; Branch instructions have a range of [-0x20000,0x1fffc].
-+    ;; If a branch is outside this range, we have a choice of two
-+    ;; sequences.
-+    ;;
-+    ;; For PIC, an out-of-range branch like:
-+    ;;
-+    ;;    bne    r1,r2,target
-+    ;;
-+    ;; becomes the equivalent of:
-+    ;;
-+    ;;    beq    r1,r2,1f
-+    ;;    la     rd,target
-+    ;;    jr     rd
-+    ;; 1:
-+    ;;
-+    ;; The non-PIC case is similar except that we use a direct
-+    ;; jump instead of an la/jr pair.  Since the target of this
-+    ;; jump is an absolute 28-bit address (the other bits
-+    ;; coming from the address of the delay slot) this form cannot
-+    ;; cross a 256MB boundary.  We could provide the option of
-+    ;; using la/jr in this case too, but we do not do so at
-+    ;; present.
-+    ;;
-+    ;; All branch offsets are measured from the shorten_branches
-+    ;; reference address.
-+    (eq_attr "type" "branch")
-+    (cond [;; Any variant can handle the 17-bit range.
-+           (and (le (minus (match_dup 0) (pc)) (const_int 65532))
-+                (le (minus (pc) (match_dup 0)) (const_int 65534)))
-+           (const_int 4)
-+
-+           ;; The non-PIC case: branch and J.
-+           (match_test "TARGET_ABSOLUTE_JUMPS")
-+           (const_int 8)]
-+
-+      ;; Use MAX_PIC_BRANCH_LENGTH as a (gross) overestimate.
-+      ;; loongarch_adjust_insn_length substitutes the correct length.
-+      ;;
-+      ;; Note that we can't simply use (symbol_ref ...) here
-+      ;; because genattrtab needs to know the maximum length
-+      ;; of an insn.
-+      (const_int MAX_PIC_BRANCH_LENGTH))
-+   ]
-+   (symbol_ref "get_attr_insn_count (insn) * BASE_INSN_LENGTH")))
-+
-+;; Attribute describing the processor.
-+(define_enum_attr "cpu" "processor"
-+  (const (symbol_ref "loongarch_tune")))
-+
-+;; The type of hardware hazard associated with this instruction.
-+;; DELAY means that the next instruction cannot read the result
-+;; of this one.
-+(define_attr "hazard" "none,delay,forbidden_slot"
-+  (const_string "none"))
-+
-+;; Can the instruction be put into a delay slot?
-+(define_attr "can_delay" "no,yes"
-+  (if_then_else (and (eq_attr "type" "!branch,call,jump")
-+                     (eq_attr "hazard" "none")
-+                     (match_test "get_attr_insn_count (insn) == 1"))
-+                (const_string "yes")
-+                (const_string "no")))
-+
-+;; Describe a user's asm statement.
-+(define_asm_attributes
-+  [(set_attr "type" "multi")
-+   (set_attr "can_delay" "no")])
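The short-branch test in the "length" attribute above is easier to see as plain arithmetic. A minimal C sketch (illustrative only; branch_length is an invented name, and the constant 12 merely stands in for MAX_PIC_BRANCH_LENGTH, whose real value is defined elsewhere in the port):

    #include <stdint.h>

    /* Length in bytes chosen for a conditional branch: one 4-byte insn
       while the PC-relative distance fits the short-branch window,
       otherwise an inverted branch around a longer jump sequence.  */
    static int
    branch_length (int64_t target, int64_t pc, int absolute_jumps)
    {
      int64_t fwd = target - pc;   /* forward distance  */
      int64_t bwd = pc - target;   /* backward distance */

      if (fwd <= 65532 && bwd <= 65534)
        return 4;                  /* short conditional branch */
      if (absolute_jumps)
        return 8;                  /* inverted branch + direct jump */
      return 12;                   /* stand-in for MAX_PIC_BRANCH_LENGTH */
    }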
-+
-+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
-+;; from the same template.
-+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
-+
-+;; A copy of GPR that can be used when a pattern has two independent
-+;; modes.
-+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
-+
-+;; Likewise, but for XLEN-sized quantities.
-+(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")])
-+
-+(define_mode_iterator MOVEP1 [SI SF])
-+(define_mode_iterator MOVEP2 [SI SF])
-+(define_mode_iterator JOIN_MODE [HI
-+                                 SI
-+                                 (SF "TARGET_HARD_FLOAT")
-+                                 (DF "TARGET_HARD_FLOAT
-+                                      && TARGET_DOUBLE_FLOAT")])
-+
-+;; This mode iterator allows :P to be used for patterns that operate on
-+;; pointer-sized quantities.  Exactly one of the two alternatives will match.
-+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
-+
-+;; 32-bit integer moves for which we provide move patterns.
-+(define_mode_iterator IMOVE32
-+  [SI])
-+
-+;; 64-bit modes for which we provide move patterns.
-+(define_mode_iterator MOVE64
-+  [DI DF])
-+
-+;; 128-bit modes for which we provide move patterns on 64-bit targets.
-+(define_mode_iterator MOVE128 [TI TF])
-+
-+;; This mode iterator allows the QI and HI extension patterns to be
-+;; defined from the same template.
-+(define_mode_iterator SHORT [QI HI])
-+
-+;; Likewise the 64-bit truncate-and-shift patterns.
-+(define_mode_iterator SUBDI [QI HI SI])
-+
-+;; This mode iterator allows the QI HI SI and DI extension patterns to be
-+;; defined from the same template.
-+(define_mode_iterator QHWD [QI HI SI (DI "TARGET_64BIT")])
-+
-+
-+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
-+;; floating-point mode is allowed.
-+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
-+                            (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")])
-+
-+;; Like ANYF, but only applies to scalar modes.
-+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
-+                               (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")])
-+
-+;; A floating-point mode for which moves involving FPRs may need to be split.
-+(define_mode_iterator SPLITF
-+  [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
-+   (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
-+   (TF "TARGET_64BIT && TARGET_FLOAT64")])
-+
-+;; In GPR templates, a string like "mul.<d>" will expand to "mul.w" in the
-+;; 32-bit version and to "mul.d" in the 64-bit version.
-+(define_mode_attr d [(SI "w") (DI "d")])
-+
-+;; Same as <d> but upper-case.
-+(define_mode_attr D [(SI "") (DI "D")])
-+
-+;; This attribute gives the length suffix for a load or store instruction.
-+;; The same suffixes work for zero and sign extensions.
-+(define_mode_attr size [(QI "b") (HI "h") (SI "w") (DI "d")])
-+(define_mode_attr SIZE [(QI "B") (HI "H") (SI "W") (DI "D")])
-+
-+;; This attribute gives the mode mask of a SHORT.
-+(define_mode_attr mask [(QI "0x00ff") (HI "0xffff")])
-+
-+;; This attribute gives the position of the most significant bit of a SHORT.
-+(define_mode_attr qi_hi [(QI "7") (HI "15")])
-+
-+;; Mode attributes for GPR loads.
-+(define_mode_attr load [(SI "lw") (DI "ld")])
-+
-+(define_mode_attr load_l [(SI "ld.w") (DI "ld.d")])
-+;; Instruction names for stores.
-+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd")])
-+
-+;; Similarly for indexed FPR loads and stores.
-+(define_mode_attr floadx [(SF "fldx.s") (DF "fldx.d") (V2SF "fldx.d")])
-+(define_mode_attr fstorex [(SF "fstx.s") (DF "fstx.d") (V2SF "fstx.d")])
-+
-+;; Similarly for LOONGSON indexed GPR loads and stores.
-+(define_mode_attr loadx [(QI "ldx.b") -+ (HI "ldx.h") -+ (SI "ldx.w") -+ (DI "ldx.d")]) -+(define_mode_attr storex [(QI "stx.b") -+ (HI "stx.h") -+ (SI "stx.w") -+ (DI "stx.d")]) -+ -+;; This attribute gives the best constraint to use for registers of -+;; a given mode. -+(define_mode_attr reg [(SI "d") (DI "d") (FCC "z")]) -+ -+;; This attribute gives the format suffix for floating-point operations. -+(define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")]) -+ -+;; This attribute gives the upper-case mode name for one unit of a -+;; floating-point mode or vector mode. -+(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF") -+ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") -+ (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) -+ -+;; As above, but in lower case. -+(define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf") -+ (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") -+ (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") -+ (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") -+ (V8SF "sf") (V4DF "df")]) -+ -+;; This attribute gives the integer mode that has half the size of -+;; the controlling mode. -+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI") -+ (V2SI "SI") (V4HI "SI") (V8QI "SI") -+ (TF "DI")]) -+ -+(define_mode_attr p [(SI "") (DI "d")]) -+ -+;; This attribute works around the early SB-1 rev2 core "F2" erratum: -+;; -+;; In certain cases, div.s and div.ps may have a rounding error -+;; and/or wrong inexact flag. -+;; -+;; Therefore, we only allow div.s if not working around SB-1 rev2 -+;; errata or if a slight loss of precision is OK. -+(define_mode_attr divide_condition -+ [DF (SF "flag_unsafe_math_optimizations") -+ (V2SF "TARGET_SB1 && (flag_unsafe_math_optimizations)")]) -+ -+;; This attribute gives the conditions under which SQRT.fmt instructions -+;; can be used. -+(define_mode_attr sqrt_condition -+ [SF DF (V2SF "TARGET_SB1")]) -+ -+;; This code iterator allows signed and unsigned widening multiplications -+;; to use the same template. -+(define_code_iterator any_extend [sign_extend zero_extend]) -+ -+;; This code iterator allows the two right shift instructions to be -+;; generated from the same template. -+(define_code_iterator any_shiftrt [ashiftrt lshiftrt]) -+ -+;; This code iterator allows the three shift instructions to be generated -+;; from the same template. -+(define_code_iterator any_shift [ashift ashiftrt lshiftrt]) -+ -+;; This code iterator allows unsigned and signed division to be generated -+;; from the same template. -+(define_code_iterator any_div [div udiv]) -+ -+;; This code iterator allows unsigned and signed modulus to be generated -+;; from the same template. -+(define_code_iterator any_mod [mod umod]) -+ -+;; This code iterator allows addition and subtraction to be generated -+;; from the same template. -+(define_code_iterator addsub [plus minus]) -+ -+;; This code iterator allows addition and multiplication to be generated -+;; from the same template. -+(define_code_iterator addmul [plus mult]) -+ -+;; This code iterator allows addition subtraction and multiplication to be generated -+;; from the same template -+(define_code_iterator addsubmul [plus minus mult]) -+ -+;; This code iterator allows all native floating-point comparisons to be -+;; generated from the same template. -+(define_code_iterator fcond [unordered uneq unlt unle eq lt le ordered ltgt ne]) -+ -+;; This code iterator is used for comparisons that can be implemented -+;; by swapping the operands. 
-+(define_code_iterator swapped_fcond [ge gt unge ungt])
-+
-+;; Equality operators.
-+(define_code_iterator equality_op [eq ne])
-+
-+;; These code iterators allow the signed and unsigned scc operations to use
-+;; the same template.
-+(define_code_iterator any_gt [gt gtu])
-+(define_code_iterator any_ge [ge geu])
-+(define_code_iterator any_lt [lt ltu])
-+(define_code_iterator any_le [le leu])
-+
-+(define_code_iterator any_return [return simple_return])
-+
-+;; <u> expands to an empty string when doing a signed operation and
-+;; "u" when doing an unsigned operation.
-+(define_code_attr u [(sign_extend "") (zero_extend "u")
-+                     (div "") (udiv "u")
-+                     (mod "") (umod "u")
-+                     (gt "") (gtu "u")
-+                     (ge "") (geu "u")
-+                     (lt "") (ltu "u")
-+                     (le "") (leu "u")])
-+
-+;; <U> is like <u> except uppercase.
-+(define_code_attr U [(sign_extend "") (zero_extend "U")])
-+
-+;; <su> is like <u>, but the signed form expands to "s" rather than "".
-+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
-+
-+;; <optab> expands to the name of the optab for a particular code.
-+(define_code_attr optab [(ashift "ashl")
-+                         (ashiftrt "ashr")
-+                         (lshiftrt "lshr")
-+                         (ior "ior")
-+                         (xor "xor")
-+                         (and "and")
-+                         (plus "add")
-+                         (minus "sub")
-+                         (mult "mul")
-+                         (return "return")
-+                         (simple_return "simple_return")])
-+
-+;; <insn> expands to the name of the insn that implements a particular code.
-+(define_code_attr insn [(ashift "sll")
-+                        (ashiftrt "sra")
-+                        (lshiftrt "srl")
-+                        (ior "or")
-+                        (xor "xor")
-+                        (and "and")
-+                        (plus "addu")
-+                        (minus "subu")])
-+
-+;; <immediate_insn> expands to the name of the insn that implements
-+;; a particular code to operate on immediate values.
-+(define_code_attr immediate_insn [(ior "ori")
-+                                  (xor "xori")
-+                                  (and "andi")])
-+
-+;; <fcond> is the c.cond.fmt condition associated with a particular code.
-+(define_code_attr fcond [(unordered "cun")
-+                         (uneq "cueq")
-+                         (unlt "cult")
-+                         (unle "cule")
-+                         (eq "ceq")
-+                         (lt "slt")
-+                         (le "sle")
-+                         (ordered "cor")
-+                         (ltgt "sne")
-+                         (ne "cune")])
-+
-+;; Similar, but for swapped conditions.
-+(define_code_attr swapped_fcond [(ge "sle")
-+                                 (gt "slt")
-+                                 (unge "cule")
-+                                 (ungt "cult")])
-+
-+;; The value of the bit when the branch is taken for branch_bit patterns.
-+;; Comparison is always against zero so this depends on the operator.
-+(define_code_attr bbv [(eq "0") (ne "1")])
-+
-+;; This is the inverse value of bbv.
-+(define_code_attr bbinv [(eq "1") (ne "0")])
-+
-+;; The sel mnemonic to use depending on the condition test.
-+(define_code_attr sel [(eq "masknez") (ne "maskeqz")])
-+(define_code_attr selinv [(eq "maskeqz") (ne "masknez")])
-+
-+;; Pipeline descriptions.
-+;;
-+;; generic.md provides a fallback for processors without a specific
-+;; pipeline description.  It is derived from the old define_function_unit
-+;; version and uses the "alu" and "imuldiv" units declared below.
-+;;
-+;; Some of the processor-specific files are also derived from old
-+;; define_function_unit descriptions and simply override the parts of
-+;; generic.md that don't apply.  The other processor-specific files
-+;; are self-contained.
-+(define_automaton "alu,imuldiv")
-+
-+(define_cpu_unit "alu" "alu")
-+(define_cpu_unit "imuldiv" "imuldiv")
-+
-+;; Ghost instructions produce no real code and introduce no hazards.
-+;; They exist purely to express an effect on dataflow.
-+(define_insn_reservation "ghost" 0
-+  (eq_attr "type" "ghost")
-+  "nothing")
-+
-+(include "generic.md")
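The "sel"/"selinv" code attributes above name the two LoongArch mask instructions from which branchless conditional moves are built. A small C model of their semantics (illustrative; the function names are invented, not part of the patch):

    #include <stdint.h>

    /* maskeqz rd,rj,rk: rd = (rk != 0) ? rj : 0
       masknez rd,rj,rk: rd = (rk == 0) ? rj : 0  */
    static uint64_t maskeqz (uint64_t rj, uint64_t rk) { return rk != 0 ? rj : 0; }
    static uint64_t masknez (uint64_t rj, uint64_t rk) { return rk == 0 ? rj : 0; }

    /* A branch-free "cond ? t : f": exactly one of the two masked
       values is nonzero, so OR-ing them merges the result.  */
    static uint64_t
    cond_move (uint64_t cond, uint64_t t, uint64_t f)
    {
      return maskeqz (t, cond) | masknez (f, cond);
    }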
-+
-+;;
-+;;  ....................
-+;;
-+;;  CONDITIONAL TRAPS
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "trap"
-+  [(trap_if (const_int 1) (const_int 0))]
-+  ""
-+{
-+  return "break\t0";
-+}
-+  [(set_attr "type" "trap")])
-+
-+
-+
-+;;
-+;;  ....................
-+;;
-+;;  ADDITION
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "add<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+                   (match_operand:ANYF 2 "register_operand" "f")))]
-+  ""
-+  "fadd.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_expand "add<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand")
-+        (plus:GPR (match_operand:GPR 1 "register_operand")
-+                  (match_operand:GPR 2 "arith_operand")))]
-+  "")
-+
-+(define_insn "*add<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=r,r")
-+        (plus:GPR (match_operand:GPR 1 "register_operand" "r,r")
-+                  (match_operand:GPR 2 "arith_operand" "r,Q")))]
-+  ""
-+{
-+  if (which_alternative == 0)
-+    return "add.<d>\t%0,%1,%2";
-+  else
-+    return "addi.<d>\t%0,%1,%2";
-+}
-+  [(set_attr "alu_type" "add")
-+   (set_attr "compression" "*,*")
-+   (set_attr "mode" "<MODE>")])
-+
-+
-+(define_insn "*addsi3_extended"
-+  [(set (match_operand:DI 0 "register_operand" "=r,r")
-+        (sign_extend:DI
-+          (plus:SI (match_operand:SI 1 "register_operand" "r,r")
-+                   (match_operand:SI 2 "arith_operand" "r,Q"))))]
-+  "TARGET_64BIT"
-+  "@
-+   add.w\t%0,%1,%2
-+   addi.w\t%0,%1,%2"
-+  [(set_attr "alu_type" "add")
-+   (set_attr "mode" "SI")])
-+
-+
-+;;
-+;;  ....................
-+;;
-+;;  SUBTRACTION
-+;;
-+;;  ....................
-+;;
-+
-+(define_insn "sub<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+                    (match_operand:ANYF 2 "register_operand" "f")))]
-+  ""
-+  "fsub.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "sub<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=r")
-+        (minus:GPR (match_operand:GPR 1 "register_operand" "r")
-+                   (match_operand:GPR 2 "register_operand" "r")))]
-+  ""
-+  "sub.<d>\t%0,%1,%2"
-+  [(set_attr "alu_type" "sub")
-+   (set_attr "compression" "*")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "*subsi3_extended"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+        (sign_extend:DI
-+          (minus:SI (match_operand:SI 1 "register_operand" "r")
-+                    (match_operand:SI 2 "register_operand" "r"))))]
-+  "TARGET_64BIT"
-+  "sub.w\t%0,%1,%2"
-+  [(set_attr "alu_type" "sub")
-+   (set_attr "mode" "DI")])
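The *addsi3_extended and *subsi3_extended patterns above rely on the convention that SImode values live in 64-bit registers in sign-extended form, so a 32-bit operation plus a following sign_extend collapses into one insn. In C terms (a sketch; add_w is an invented name, and unsigned arithmetic is used to avoid signed-overflow undefined behaviour):

    #include <stdint.h>

    /* What add.w computes on a 64-bit register file: the low 32 bits
       of the sum, sign-extended to 64 bits.  */
    static int64_t
    add_w (int64_t rj, int64_t rk)
    {
      uint32_t sum = (uint32_t) rj + (uint32_t) rk;
      return (int64_t) (int32_t) sum;
    }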
-+
-+;;
-+;;  ....................
-+;;
-+;;  MULTIPLICATION
-+;;
-+;;  ....................
-+;;
-+
-+(define_expand "mul<mode>3"
-+  [(set (match_operand:SCALARF 0 "register_operand")
-+        (mult:SCALARF (match_operand:SCALARF 1 "register_operand")
-+                      (match_operand:SCALARF 2 "register_operand")))]
-+  ""
-+  "")
-+
-+(define_insn "*mul<mode>3"
-+  [(set (match_operand:SCALARF 0 "register_operand" "=f")
-+        (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
-+                      (match_operand:SCALARF 2 "register_operand" "f")))]
-+  ""
-+  "fmul.<fmt>\t%0,%1,%2"
-+  [(set_attr "type" "fmul")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+(define_insn "mul<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=r")
-+        (mult:GPR (match_operand:GPR 1 "register_operand" "r")
-+                  (match_operand:GPR 2 "register_operand" "r")))]
-+  ""
-+  "mul.<d>\t%0,%1,%2"
-+  [(set_attr "type" "imul3")
-+   (set_attr "mode" "<MODE>")])
-+
-+
-+
-+(define_insn "mulsidi3_64bit"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+        (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
-+                 (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
-+  ""
-+  "mul.d\t%0,%1,%2"
-+  [(set_attr "type" "imul3")
-+   (set_attr "mode" "DI")])
-+
-+
-+;;
-+;;  ........................
-+;;
-+;;  MULTIPLICATION HIGH-PART
-+;;
-+;;  ........................
-+;;
-+
-+
-+(define_expand "<u>mulditi3"
-+  [(set (match_operand:TI 0 "register_operand")
-+        (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
-+                 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
-+  "TARGET_64BIT"
-+{
-+  rtx low = gen_reg_rtx (DImode);
-+  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
-+
-+  rtx high = gen_reg_rtx (DImode);
-+  emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
-+
-+  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
-+  emit_move_insn (gen_highpart (DImode, operands[0]), high);
-+  DONE;
-+})
-+
-+(define_insn "<su>muldi3_highpart"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+        (truncate:DI
-+          (lshiftrt:TI
-+            (mult:TI (any_extend:TI
-+                       (match_operand:DI 1 "register_operand" " r"))
-+                     (any_extend:TI
-+                       (match_operand:DI 2 "register_operand" " r")))
-+            (const_int 64))))]
-+  "TARGET_64BIT"
-+  "mulh.d<u>\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "DI")])
-+
-+(define_expand "<u>mulsidi3"
-+  [(set (match_operand:DI 0 "register_operand" "=r")
-+        (mult:DI (any_extend:DI
-+                   (match_operand:SI 1 "register_operand" " r"))
-+                 (any_extend:DI
-+                   (match_operand:SI 2 "register_operand" " r"))))]
-+  "!TARGET_64BIT"
-+{
-+  rtx temp = gen_reg_rtx (SImode);
-+  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
-+  emit_insn (gen_<su>mulsi3_highpart (loongarch_subword (operands[0], true),
-+                                      operands[1], operands[2]));
-+  emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp));
-+  DONE;
-+})
-+
-+(define_insn "<su>mulsi3_highpart"
-+  [(set (match_operand:SI 0 "register_operand" "=r")
-+        (truncate:SI
-+          (lshiftrt:DI
-+            (mult:DI (any_extend:DI
-+                       (match_operand:SI 1 "register_operand" " r"))
-+                     (any_extend:DI
-+                       (match_operand:SI 2 "register_operand" " r")))
-+            (const_int 32))))]
-+  "!TARGET_64BIT"
-+  "mulh.w<u>\t%0,%1,%2"
-+  [(set_attr "type" "imul")
-+   (set_attr "mode" "SI")])
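To make the high-part patterns concrete: the <u>mulditi3 expander assembles a 128-bit product from two instructions. A C model of the unsigned case, using GCC's __int128 extension (illustrative only; the struct and function names are invented):

    #include <stdint.h>

    typedef struct { uint64_t lo; uint64_t hi; } ti_t;

    /* One mul.d yields the low 64 bits of the product; one mulh.du
       yields the high 64 bits.  */
    static ti_t
    umulditi3 (uint64_t rj, uint64_t rk)
    {
      ti_t r;
      r.lo = rj * rk;                                     /* mul.d   */
      r.hi = (uint64_t) (((__uint128_t) rj * rk) >> 64);  /* mulh.du */
      return r;
    }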
-+
-+;; Floating point multiply accumulate instructions.
-+
-+(define_expand "fma<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand")
-+        (fma:ANYF (match_operand:ANYF 1 "register_operand")
-+                  (match_operand:ANYF 2 "register_operand")
-+                  (match_operand:ANYF 3 "register_operand")))]
-+  "TARGET_HARD_FLOAT")
-+
-+(define_insn "*fma<mode>4_madd4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (fma:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+                  (match_operand:ANYF 2 "register_operand" "f")
-+                  (match_operand:ANYF 3 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT"
-+  "fmadd.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;; The fms, fnma, and fnms instructions can be used even when HONOR_NANS
-+;; is true because while IEEE 754-2008 requires the negate operation to
-+;; negate the sign of a NAN and the LARCH neg instruction does not do this,
-+;; the fma part of the instruction has no requirement on how the sign of
-+;; a NAN is handled and so the final sign bit of the entire operation is
-+;; undefined.
-+
-+(define_expand "fms<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand")
-+        (fma:ANYF (match_operand:ANYF 1 "register_operand")
-+                  (match_operand:ANYF 2 "register_operand")
-+                  (neg:ANYF (match_operand:ANYF 3 "register_operand"))))]
-+  "TARGET_HARD_FLOAT")
-+
-+
-+(define_insn "*fms<mode>4_msub4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (fma:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+                  (match_operand:ANYF 2 "register_operand" "f")
-+                  (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
-+  "TARGET_HARD_FLOAT"
-+  "fmsub.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;; fnma is defined in GCC as (fma (neg op1) op2 op3)
-+;; (-op1 * op2) + op3 ==> -(op1 * op2) + op3 ==> -((op1 * op2) - op3)
-+;; The loongarch nmsub instructions implement -((op1 * op2) - op3)
-+;; This transformation means we may return the wrong signed zero
-+;; so we check HONOR_SIGNED_ZEROS.
-+
-+(define_expand "fnma<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand")
-+        (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand"))
-+                  (match_operand:ANYF 2 "register_operand")
-+                  (match_operand:ANYF 3 "register_operand")))]
-+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)")
-+
-+(define_insn "*fnma<mode>4_nmsub4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
-+                  (match_operand:ANYF 2 "register_operand" "f")
-+                  (match_operand:ANYF 3 "register_operand" "f")))]
-+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
-+  "fnmsub.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
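The !HONOR_SIGNED_ZEROS guard on the fnma/fnms patterns matters because rewriting -(op1 * op2) + op3 as -((op1 * op2) - op3) is only an identity if the sign of a zero result is allowed to differ. A short C demonstration:

    #include <stdio.h>

    int
    main (void)
    {
      double a = 1.0, b = 0.0, c = 0.0;

      double fnma  = -(a * b) + c;    /* -0.0 + 0.0 = +0.0 */
      double nmsub = -((a * b) - c);  /* -(+0.0)    = -0.0 */

      /* Prints 0 then 1: same value, different zero signs.  */
      printf ("%d\n", __builtin_signbit (fnma)  != 0);
      printf ("%d\n", __builtin_signbit (nmsub) != 0);
      return 0;
    }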
-+
-+;; fnms is defined as: (fma (neg op1) op2 (neg op3))
-+;; ((-op1) * op2) - op3 ==> -(op1 * op2) - op3 ==> -((op1 * op2) + op3)
-+;; The loongarch nmadd instructions implement -((op1 * op2) + op3)
-+;; This transformation means we may return the wrong signed zero
-+;; so we check HONOR_SIGNED_ZEROS.
-+
-+(define_expand "fnms<mode>4"
-+  [(set (match_operand:ANYF 0 "register_operand")
-+        (fma:ANYF
-+          (neg:ANYF (match_operand:ANYF 1 "register_operand"))
-+          (match_operand:ANYF 2 "register_operand")
-+          (neg:ANYF (match_operand:ANYF 3 "register_operand"))))]
-+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)")
-+
-+(define_insn "*fnms<mode>4_nmadd4"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (fma:ANYF
-+          (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
-+          (match_operand:ANYF 2 "register_operand" "f")
-+          (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
-+  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
-+  "fnmadd.<fmt>\t%0,%1,%2,%3"
-+  [(set_attr "type" "fmadd")
-+   (set_attr "mode" "<UNITMODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;  DIVISION and REMAINDER
-+;;
-+;;  ....................
-+;;
-+
-+(define_expand "div<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand")
-+        (div:ANYF (match_operand:ANYF 1 "reg_or_1_operand")
-+                  (match_operand:ANYF 2 "register_operand")))]
-+  ""
-+{
-+  if (const_1_operand (operands[1], <MODE>mode))
-+    if (!(ISA_HAS_FP_RECIP_RSQRT (<MODE>mode)
-+          && flag_unsafe_math_optimizations))
-+      operands[1] = force_reg (<MODE>mode, operands[1]);
-+})
-+
-+;; These patterns work around the early SB-1 rev2 core "F1" erratum:
-+;;
-+;; If an mftg1 or dmftg1 happens to access the floating point register
-+;; file at the same time a long latency operation (div, sqrt, recip,
-+;; rsqrt) iterates an intermediate result back through the floating
-+;; point register file bypass, then instead of returning the correct
-+;; register value the mftg1 or dmftg1 operation returns the intermediate
-+;; result of the long latency operation.
-+;;
-+;; The workaround is to insert an unconditional 'mov' from/to the
-+;; long latency op destination register.
-+
-+(define_insn "*div<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
-+                  (match_operand:ANYF 2 "register_operand" "f")))]
-+  ""
-+{
-+  return "fdiv.<fmt>\t%0,%1,%2";
-+}
-+  [(set_attr "type" "fdiv")
-+   (set_attr "mode" "<UNITMODE>")
-+   (set_attr "insn_count" "1")])
-+
-+(define_insn "*recip<mode>3"
-+  [(set (match_operand:ANYF 0 "register_operand" "=f")
-+        (div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
-+                  (match_operand:ANYF 2 "register_operand" "f")))]
-+  "ISA_HAS_FP_RECIP_RSQRT (<MODE>mode) && flag_unsafe_math_optimizations"
-+{
-+  return "frecip.<fmt>\t%0,%2";
-+}
-+  [(set_attr "type" "frdiv")
-+   (set_attr "mode" "<UNITMODE>")
-+   (set_attr "insn_count" "1")])
-+
-+;; Integer division and modulus.
-+
-+(define_insn "<u>div<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=&r")
-+        (any_div:GPR (match_operand:GPR 1 "register_operand" "r")
-+                     (match_operand:GPR 2 "register_operand" "r")))]
-+  ""
-+  {
-+    return loongarch_output_division ("div.<d><u>\t%0,%1,%2", operands);
-+  }
-+  [(set_attr "type" "idiv3")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "<u>mod<mode>3"
-+  [(set (match_operand:GPR 0 "register_operand" "=&r")
-+        (any_mod:GPR (match_operand:GPR 1 "register_operand" "r")
-+                     (match_operand:GPR 2 "register_operand" "r")))]
-+  ""
-+  {
-+    return loongarch_output_division ("mod.<d><u>\t%0,%1,%2", operands);
-+  }
-+  [(set_attr "type" "idiv3")
-+   (set_attr "mode" "<MODE>")])
-+
-+;;
-+;;  ....................
-+;;
-+;;  SQUARE ROOT
-+;;
-+;;  ....................
-+
-+;; These patterns work around the early SB-1 rev2 core "F1" erratum (see
-+;; "*div[sd]f3" comment for details).
-+ -+(define_insn "sqrt2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -+ "" -+{ -+ return "fsqrt.\t%0,%1"; -+} -+ [(set_attr "type" "fsqrt") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ -+(define_insn "*rsqrta" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") -+ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] -+ "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" -+{ -+ return "frsqrt.\t%0,%2"; -+} -+ [(set_attr "type" "frsqrt") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ -+(define_insn "*rsqrtb" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") -+ (match_operand:ANYF 2 "register_operand" "f"))))] -+ "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" -+{ -+ return "frsqrt.\t%0,%2"; -+} -+ [(set_attr "type" "frsqrt") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ -+;; -+;; .................... -+;; -+;; ABSOLUTE VALUE -+;; -+;; .................... -+ -+;; Do not use the integer abs macro instruction, since that signals an -+;; exception on -2147483648 (sigh). -+ -+;; The "legacy" (as opposed to "2008") form of ABS.fmt is an arithmetic -+;; instruction that treats all NaN inputs as invalid; it does not clear -+;; their sign bit. We therefore can't use that form if the signs of -+;; NaNs matter. -+ -+(define_insn "abs2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -+ "" -+ "fabs.\t%0,%1" -+ [(set_attr "type" "fabs") -+ (set_attr "mode" "")]) -+ -+;; -+;; ................... -+;; -+;; Count leading zeroes. -+;; -+;; ................... -+;; -+ -+(define_insn "clz2" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (clz:GPR (match_operand:GPR 1 "register_operand" "r")))] -+ "" -+ "clz.\t%0,%1" -+ [(set_attr "type" "clz") -+ (set_attr "mode" "")]) -+ -+;; -+;; ................... -+;; -+;; Count trailing zeroes. -+;; -+;; ................... -+;; -+ -+(define_insn "ctz2" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (ctz:GPR (match_operand:GPR 1 "register_operand" "r")))] -+ "" -+ "ctz.\t%0,%1" -+ [(set_attr "type" "clz") -+ (set_attr "mode" "")]) -+ -+ -+ -+;; -+;; .................... -+;; -+;; NEGATION and ONE'S COMPLEMENT -+;; -+;; .................... -+ -+(define_insn "negsi2" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (neg:SI (match_operand:SI 1 "register_operand" "r")))] -+ "" -+{ -+ return "sub.w\t%0,%.,%1"; -+} -+ [(set_attr "alu_type" "sub") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "negdi2" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (neg:DI (match_operand:DI 1 "register_operand" "r")))] -+ "TARGET_64BIT" -+ "sub.d\t%0,%.,%1" -+ [(set_attr "alu_type" "sub") -+ (set_attr "mode" "DI")]) -+ -+;; The "legacy" (as opposed to "2008") form of NEG.fmt is an arithmetic -+;; instruction that treats all NaN inputs as invalid; it does not flip -+;; their sign bit. We therefore can't use that form if the signs of -+;; NaNs matter. 
-+ -+(define_insn "neg2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -+ "" -+ "fneg.\t%0,%1" -+ [(set_attr "type" "fneg") -+ (set_attr "mode" "")]) -+ -+(define_insn "one_cmpl2" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (not:GPR (match_operand:GPR 1 "register_operand" "r")))] -+ "" -+{ -+ return "nor\t%0,%.,%1"; -+} -+ [(set_attr "alu_type" "not") -+ (set_attr "compression" "*") -+ (set_attr "mode" "")]) -+ -+ -+;; -+;; .................... -+;; -+;; LOGICAL -+;; -+;; .................... -+;; -+ -+ -+(define_expand "and3" -+ [(set (match_operand:GPR 0 "register_operand") -+ (and:GPR (match_operand:GPR 1 "register_operand") -+ (match_operand:GPR 2 "and_reg_operand")))]) -+ -+;; The middle-end is not allowed to convert ANDing with 0xffff_ffff into a -+;; zero_extendsidi2 because of TARGET_TRULY_NOOP_TRUNCATION, so handle these -+;; here. Note that this variant does not trigger for SI mode because we -+;; require a 64-bit HOST_WIDE_INT and 0xffff_ffff wouldn't be a canonical -+;; sign-extended SImode value. -+;; -+;; These are possible combinations for operand 1 and 2. -+;; (r=register, mem=memory, x=match, S=split): -+;; -+;; \ op1 r/EXT r/!EXT mem -+;; op2 -+;; -+;; andi x x -+;; 0xff x x x -+;; 0xffff x x x -+;; 0xffff_ffff x S x -+;; low-bitmask x -+;; register x x -+;; register =op1 -+ -+(define_insn "*and3" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r") -+ (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "o,o,W,r,r,r,r") -+ (match_operand:GPR 2 "and_operand" "Yb,Yh,Yw,K,Yx,Yw,r")))] -+ " and_operands_ok (mode, operands[1], operands[2])" -+{ -+ int len; -+ -+ switch (which_alternative) -+ { -+ case 0: -+ operands[1] = gen_lowpart (QImode, operands[1]); -+ return "ld.bu\t%0,%1"; -+ case 1: -+ operands[1] = gen_lowpart (HImode, operands[1]); -+ return "ld.hu\t%0,%1"; -+ case 2: -+ operands[1] = gen_lowpart (SImode, operands[1]); -+ if (loongarch_14bit_shifted_offset_address_p (XEXP (operands[1], 0), SImode)) -+ return "ldptr.w\t%0,%1\n\tbstrins.d\t%0,$r0,63,32"; -+ else if (loongarch_12bit_offset_address_p (XEXP (operands[1], 0), SImode)) -+ return "ld.wu\t%0,%1"; -+ else -+ gcc_unreachable (); -+ case 3: -+ return "andi\t%0,%1,%x2"; -+ case 4: -+ len = low_bitmask_len (mode, INTVAL (operands[2])); -+ operands[2] = GEN_INT (len-1); -+ return "bstrpick.\t%0,%1,%2,0"; -+ case 5: -+ return "#"; -+ case 6: -+ return "and\t%0,%1,%2"; -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "move_type" "load,load,load,andi,pick_ins,shift_shift,logical") -+ (set_attr "compression" "*,*,*,*,*,*,*") -+ (set_attr "mode" "")]) -+ -+(define_expand "ior3" -+ [(set (match_operand:GPR 0 "register_operand") -+ (ior:GPR (match_operand:GPR 1 "register_operand") -+ (match_operand:GPR 2 "uns_arith_operand")))] -+ "" -+{ -+}) -+ -+(define_insn "*ior3" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (ior:GPR (match_operand:GPR 1 "register_operand" "r,r") -+ (match_operand:GPR 2 "uns_arith_operand" "r,K")))] -+ "" -+ "@ -+ or\t%0,%1,%2 -+ ori\t%0,%1,%x2" -+ [(set_attr "alu_type" "or") -+ (set_attr "compression" "*,*") -+ (set_attr "mode" "")]) -+ -+(define_insn "*iorhi3" -+ [(set (match_operand:HI 0 "register_operand" "=r,r") -+ (ior:HI (match_operand:HI 1 "register_operand" "r,r") -+ (match_operand:HI 2 "uns_arith_operand" "K,r")))] -+ "" -+ "@ -+ ori\t%0,%1,%x2 -+ or\t%0,%1,%2" -+ [(set_attr "alu_type" "or") -+ (set_attr "mode" "HI")]) -+ -+(define_expand "xor3" -+ [(set 
(match_operand:GPR 0 "register_operand") -+ (xor:GPR (match_operand:GPR 1 "register_operand") -+ (match_operand:GPR 2 "uns_arith_operand")))] -+ "" -+ "") -+ -+(define_insn "*xor3" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (xor:GPR (match_operand:GPR 1 "register_operand" "r,r") -+ (match_operand:GPR 2 "uns_arith_operand" "r,K")))] -+ "" -+ "@ -+ xor\t%0,%1,%2 -+ xori\t%0,%1,%x2" -+ [(set_attr "alu_type" "xor") -+ (set_attr "compression" "*,*") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "*nor3" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "r")) -+ (not:GPR (match_operand:GPR 2 "register_operand" "r"))))] -+ "" -+ "nor\t%0,%1,%2" -+ [(set_attr "alu_type" "nor") -+ (set_attr "mode" "")]) -+ -+;; -+;; .................... -+;; -+;; TRUNCATION -+;; -+;; .................... -+ -+ -+ -+(define_insn "truncdfsf2" -+ [(set (match_operand:SF 0 "register_operand" "=f") -+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "fcvt.s.d\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "cnv_mode" "D2S") -+ (set_attr "mode" "SF")]) -+ -+;; Integer truncation patterns. Truncating SImode values to smaller -+;; modes is a no-op, as it is for most other GCC ports. Truncating -+;; DImode values to SImode is not a no-op for TARGET_64BIT since we -+;; need to make sure that the lower 32 bits are properly sign-extended -+;; (see TARGET_TRULY_NOOP_TRUNCATION). Truncating DImode values into modes -+;; smaller than SImode is equivalent to two separate truncations: -+;; -+;; A B -+;; DI ---> HI == DI ---> SI ---> HI -+;; DI ---> QI == DI ---> SI ---> QI -+;; -+;; Step A needs a real instruction but step B does not. -+ -+(define_insn "truncdisi2" -+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,ZC,m") -+ (truncate:SI (match_operand:DI 1 "register_operand" "r,r,r")))] -+ "TARGET_64BIT" -+ "@ -+ slli.w\t%0,%1,0 -+ stptr.w\t%1,%0 -+ st.w\t%1,%0" -+ [(set_attr "move_type" "sll0,store,store") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "truncdi2" -+ [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,m") -+ (truncate:SHORT (match_operand:DI 1 "register_operand" "r,r")))] -+ "TARGET_64BIT" -+ "@ -+ slli.w\t%0,%1,0 -+ st.\t%1,%0" -+ [(set_attr "move_type" "sll0,store") -+ (set_attr "mode" "SI")]) -+ -+;; Combiner patterns to optimize shift/truncate combinations. -+ -+(define_insn "*ashr_trunc" -+ [(set (match_operand:SUBDI 0 "register_operand" "=r") -+ (truncate:SUBDI -+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand:DI 2 "const_arith_operand" ""))))] -+ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)" -+ "srai.d\t%0,%1,%2" -+ [(set_attr "type" "shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "*lshr32_trunc" -+ [(set (match_operand:SUBDI 0 "register_operand" "=r") -+ (truncate:SUBDI -+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r") -+ (const_int 32))))] -+ "TARGET_64BIT" -+ "srai.d\t%0,%1,32" -+ [(set_attr "type" "shift") -+ (set_attr "mode" "")]) -+ -+ -+ -+;; -+;; .................... -+;; -+;; ZERO EXTENSION -+;; -+;; .................... -+ -+;; Extension insns. 
-+ -+(define_expand "zero_extendsidi2" -+ [(set (match_operand:DI 0 "register_operand") -+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] -+ "TARGET_64BIT") -+ -+(define_insn "*zero_extendsidi2_dext" -+ [(set (match_operand:DI 0 "register_operand" "=r,r,r") -+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,ZC,W")))] -+ "TARGET_64BIT" -+ "@ -+ bstrpick.d\t%0,%1,31,0 -+ ldptr.w\t%0,%1\n\tlu32i.d\t%0,0 -+ ld.wu\t%0,%1" -+ [(set_attr "move_type" "arith,load,load") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "1,2,1")]) -+ -+;; See the comment before the *and3 pattern why this is generated by -+;; combine. -+ -+(define_expand "zero_extend2" -+ [(set (match_operand:GPR 0 "register_operand") -+ (zero_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] -+ "" -+{ -+}) -+ -+(define_insn "*zero_extend2" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (zero_extend:GPR -+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] -+ "" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "bstrpick.\t%0,%1,,0"; -+ case 1: -+ return "ld.u\t%0,%1"; -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "move_type" "pick_ins,load") -+ (set_attr "compression" "*,*") -+ (set_attr "mode" "")]) -+ -+ -+(define_expand "zero_extendqihi2" -+ [(set (match_operand:HI 0 "register_operand") -+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] -+ "" -+{ -+}) -+ -+(define_insn "*zero_extendqihi2" -+ [(set (match_operand:HI 0 "register_operand" "=r,r") -+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] -+ "" -+ "@ -+ andi\t%0,%1,0x00ff -+ ld.bu\t%0,%1" -+ [(set_attr "move_type" "andi,load") -+ (set_attr "mode" "HI")]) -+ -+;; Combiner patterns to optimize truncate/zero_extend combinations. -+ -+(define_insn "*zero_extend_trunc" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (zero_extend:GPR -+ (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] -+ "TARGET_64BIT" -+ "bstrpick.\t%0,%1,,0" -+ [(set_attr "move_type" "pick_ins") -+ (set_attr "mode" "")]) -+ -+(define_insn "*zero_extendhi_truncqi" -+ [(set (match_operand:HI 0 "register_operand" "=r") -+ (zero_extend:HI -+ (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] -+ "TARGET_64BIT" -+ "andi\t%0,%1,0xff" -+ [(set_attr "alu_type" "and") -+ (set_attr "mode" "HI")]) -+ -+;; -+;; .................... -+;; -+;; SIGN EXTENSION -+;; -+;; .................... -+ -+;; Extension insns. -+;; Those for integer source operand are ordered widest source type first. -+ -+;; When TARGET_64BIT, all SImode integer and accumulator registers -+;; should already be in sign-extended form (see TARGET_TRULY_NOOP_TRUNCATION -+;; and truncdisi2). We can therefore get rid of register->register -+;; instructions if we constrain the source to be in the same register as -+;; the destination. -+;; -+;; Only the pre-reload scheduler sees the type of the register alternatives; -+;; we split them into nothing before the post-reload scheduler runs. -+;; These alternatives therefore have type "move" in order to reflect -+;; what happens if the two pre-reload operands cannot be tied, and are -+;; instead allocated two separate GPRs. We don't distinguish between -+;; the GPR and LO cases because we don't usually know during pre-reload -+;; scheduling whether an operand will be LO or not. 
-+(define_insn_and_split "extendsidi2" -+ [(set (match_operand:DI 0 "register_operand" "=r,r,r") -+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m")))] -+ "TARGET_64BIT" -+ "@ -+ # -+ ldptr.w\t%0,%1 -+ ld.w\t%0,%1" -+ "&& reload_completed && register_operand (operands[1], VOIDmode)" -+ [(const_int 0)] -+{ -+ emit_note (NOTE_INSN_DELETED); -+ DONE; -+} -+ [(set_attr "move_type" "move,load,load") -+ (set_attr "mode" "DI")]) -+ -+(define_expand "extend2" -+ [(set (match_operand:GPR 0 "register_operand") -+ (sign_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] -+ "") -+ -+ -+(define_insn "*extend2_se" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (sign_extend:GPR -+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] -+ "" -+ "@ -+ ext.w.\t%0,%1 -+ ld.\t%0,%1" -+ [(set_attr "move_type" "signext,load") -+ (set_attr "mode" "")]) -+ -+(define_expand "extendqihi2" -+ [(set (match_operand:HI 0 "register_operand") -+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] -+ "") -+ -+(define_insn "*extendqihi2_seb" -+ [(set (match_operand:HI 0 "register_operand" "=r,r") -+ (sign_extend:HI -+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))] -+ "" -+ "@ -+ ext.w.b\t%0,%1 -+ ld.b\t%0,%1" -+ [(set_attr "move_type" "signext,load") -+ (set_attr "mode" "SI")]) -+ -+;; Combiner patterns for truncate/sign_extend combinations. The SI versions -+;; use the shift/truncate patterns. -+ -+(define_insn_and_split "*extenddi_truncate" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (sign_extend:DI -+ (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] -+ "TARGET_64BIT" -+ "#" -+ "&& reload_completed" -+ [(set (match_dup 2) -+ (ashift:DI (match_dup 1) -+ (match_dup 3))) -+ (set (match_dup 0) -+ (ashiftrt:DI (match_dup 2) -+ (match_dup 3)))] -+{ -+ operands[2] = gen_lowpart (DImode, operands[0]); -+ operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); -+} -+ [(set_attr "move_type" "shift_shift") -+ (set_attr "mode" "DI")]) -+ -+(define_insn_and_split "*extendsi_truncate" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (sign_extend:SI -+ (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] -+ "TARGET_64BIT" -+ "#" -+ "&& reload_completed" -+ [(set (match_dup 2) -+ (ashift:DI (match_dup 1) -+ (match_dup 3))) -+ (set (match_dup 0) -+ (truncate:SI (ashiftrt:DI (match_dup 2) -+ (match_dup 3))))] -+{ -+ operands[2] = gen_lowpart (DImode, operands[0]); -+ operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); -+} -+ [(set_attr "move_type" "shift_shift") -+ (set_attr "mode" "SI")]) -+ -+(define_insn_and_split "*extendhi_truncateqi" -+ [(set (match_operand:HI 0 "register_operand" "=r") -+ (sign_extend:HI -+ (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] -+ "TARGET_64BIT" -+ "#" -+ "&& reload_completed" -+ [(set (match_dup 2) -+ (ashift:DI (match_dup 1) -+ (const_int 56))) -+ (set (match_dup 0) -+ (truncate:HI (ashiftrt:DI (match_dup 2) -+ (const_int 56))))] -+{ -+ operands[2] = gen_lowpart (DImode, operands[0]); -+} -+ [(set_attr "move_type" "shift_shift") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "extendsfdf2" -+ [(set (match_operand:DF 0 "register_operand" "=f") -+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "fcvt.d.s\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "cnv_mode" "S2D") -+ (set_attr "mode" "DF")]) -+ -+;; -+;; .................... -+;; -+;; CONVERSIONS -+;; -+;; .................... 
-+ -+(define_expand "fix_truncdfsi2" -+ [(set (match_operand:SI 0 "register_operand") -+ (fix:SI (match_operand:DF 1 "register_operand")))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+"" -+) -+ -+(define_insn "fix_truncdfsi2_insn" -+ [(set (match_operand:SI 0 "register_operand" "=f") -+ (fix:SI (match_operand:DF 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "ftintrz.w.d %0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "DF") -+ (set_attr "cnv_mode" "D2I")]) -+ -+ -+(define_expand "fix_truncsfsi2" -+ [(set (match_operand:SI 0 "register_operand") -+ (fix:SI (match_operand:SF 1 "register_operand")))] -+ "TARGET_HARD_FLOAT" -+"" -+) -+ -+(define_insn "fix_truncsfsi2_insn" -+ [(set (match_operand:SI 0 "register_operand" "=f") -+ (fix:SI (match_operand:SF 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT" -+ "ftintrz.w.s %0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "SF") -+ (set_attr "cnv_mode" "S2I")]) -+ -+ -+(define_insn "fix_truncdfdi2" -+ [(set (match_operand:DI 0 "register_operand" "=f") -+ (fix:DI (match_operand:DF 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -+ "ftintrz.l.d %0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "DF") -+ (set_attr "cnv_mode" "D2I")]) -+ -+ -+(define_insn "fix_truncsfdi2" -+ [(set (match_operand:DI 0 "register_operand" "=f") -+ (fix:DI (match_operand:SF 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -+ "ftintrz.l.s %0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "SF") -+ (set_attr "cnv_mode" "S2I")]) -+ -+ -+(define_insn "floatsidf2" -+ [(set (match_operand:DF 0 "register_operand" "=f") -+ (float:DF (match_operand:SI 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "ffint.d.w\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "DF") -+ (set_attr "cnv_mode" "I2D")]) -+ -+ -+(define_insn "floatdidf2" -+ [(set (match_operand:DF 0 "register_operand" "=f") -+ (float:DF (match_operand:DI 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -+ "ffint.d.l\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "DF") -+ (set_attr "cnv_mode" "I2D")]) -+ -+ -+(define_insn "floatsisf2" -+ [(set (match_operand:SF 0 "register_operand" "=f") -+ (float:SF (match_operand:SI 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT" -+ "ffint.s.w\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "SF") -+ (set_attr "cnv_mode" "I2S")]) -+ -+ -+(define_insn "floatdisf2" -+ [(set (match_operand:SF 0 "register_operand" "=f") -+ (float:SF (match_operand:DI 1 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -+ "ffint.s.l\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "SF") -+ (set_attr "cnv_mode" "I2S")]) -+ -+ -+(define_expand "fixuns_truncdfsi2" -+ [(set (match_operand:SI 0 "register_operand") -+ (unsigned_fix:SI (match_operand:DF 1 "register_operand")))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+{ -+ rtx reg1 = gen_reg_rtx (DFmode); -+ rtx reg2 = gen_reg_rtx (DFmode); -+ rtx reg3 = gen_reg_rtx (SImode); -+ rtx_code_label *label1 = gen_label_rtx (); -+ rtx_code_label *label2 = gen_label_rtx (); -+ rtx test; -+ REAL_VALUE_TYPE offset; -+ -+ real_2expN (&offset, 31, DFmode); -+ -+ if (reg1) /* Turn off complaints about unreached code. 
*/ -+ { -+ loongarch_emit_move (reg1, const_double_from_real_value (offset, DFmode)); -+ do_pending_stack_adjust (); -+ -+ test = gen_rtx_GE (VOIDmode, operands[1], reg1); -+ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); -+ -+ emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1])); -+ emit_jump_insn (gen_rtx_SET (pc_rtx, -+ gen_rtx_LABEL_REF (VOIDmode, label2))); -+ emit_barrier (); -+ -+ emit_label (label1); -+ loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); -+ loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode -+ (BITMASK_HIGH, SImode))); -+ -+ emit_insn (gen_fix_truncdfsi2 (operands[0], reg2)); -+ emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); -+ -+ emit_label (label2); -+ -+ /* Allow REG_NOTES to be set on last insn (labels don't have enough -+ fields, and can't be used for REG_NOTES anyway). */ -+ emit_use (stack_pointer_rtx); -+ DONE; -+ } -+}) -+ -+ -+(define_expand "fixuns_truncdfdi2" -+ [(set (match_operand:DI 0 "register_operand") -+ (unsigned_fix:DI (match_operand:DF 1 "register_operand")))] -+ "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" -+{ -+ rtx reg1 = gen_reg_rtx (DFmode); -+ rtx reg2 = gen_reg_rtx (DFmode); -+ rtx reg3 = gen_reg_rtx (DImode); -+ rtx_code_label *label1 = gen_label_rtx (); -+ rtx_code_label *label2 = gen_label_rtx (); -+ rtx test; -+ REAL_VALUE_TYPE offset; -+ -+ real_2expN (&offset, 63, DFmode); -+ -+ loongarch_emit_move (reg1, const_double_from_real_value (offset, DFmode)); -+ do_pending_stack_adjust (); -+ -+ test = gen_rtx_GE (VOIDmode, operands[1], reg1); -+ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); -+ -+ emit_insn (gen_fix_truncdfdi2 (operands[0], operands[1])); -+ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); -+ emit_barrier (); -+ -+ emit_label (label1); -+ loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); -+ loongarch_emit_move (reg3, GEN_INT (BITMASK_HIGH)); -+ emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32))); -+ -+ emit_insn (gen_fix_truncdfdi2 (operands[0], reg2)); -+ emit_insn (gen_iordi3 (operands[0], operands[0], reg3)); -+ -+ emit_label (label2); -+ -+ /* Allow REG_NOTES to be set on last insn (labels don't have enough -+ fields, and can't be used for REG_NOTES anyway). 
*/ -+ emit_use (stack_pointer_rtx); -+ DONE; -+}) -+ -+ -+(define_expand "fixuns_truncsfsi2" -+ [(set (match_operand:SI 0 "register_operand") -+ (unsigned_fix:SI (match_operand:SF 1 "register_operand")))] -+ "TARGET_HARD_FLOAT" -+{ -+ rtx reg1 = gen_reg_rtx (SFmode); -+ rtx reg2 = gen_reg_rtx (SFmode); -+ rtx reg3 = gen_reg_rtx (SImode); -+ rtx_code_label *label1 = gen_label_rtx (); -+ rtx_code_label *label2 = gen_label_rtx (); -+ rtx test; -+ REAL_VALUE_TYPE offset; -+ -+ real_2expN (&offset, 31, SFmode); -+ -+ loongarch_emit_move (reg1, const_double_from_real_value (offset, SFmode)); -+ do_pending_stack_adjust (); -+ -+ test = gen_rtx_GE (VOIDmode, operands[1], reg1); -+ emit_jump_insn (gen_cbranchsf4 (test, operands[1], reg1, label1)); -+ -+ emit_insn (gen_fix_truncsfsi2 (operands[0], operands[1])); -+ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); -+ emit_barrier (); -+ -+ emit_label (label1); -+ loongarch_emit_move (reg2, gen_rtx_MINUS (SFmode, operands[1], reg1)); -+ loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode -+ (BITMASK_HIGH, SImode))); -+ -+ emit_insn (gen_fix_truncsfsi2 (operands[0], reg2)); -+ emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); -+ -+ emit_label (label2); -+ -+ /* Allow REG_NOTES to be set on last insn (labels don't have enough -+ fields, and can't be used for REG_NOTES anyway). */ -+ emit_use (stack_pointer_rtx); -+ DONE; -+}) -+ -+ -+(define_expand "fixuns_truncsfdi2" -+ [(set (match_operand:DI 0 "register_operand") -+ (unsigned_fix:DI (match_operand:SF 1 "register_operand")))] -+ "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" -+{ -+ rtx reg1 = gen_reg_rtx (SFmode); -+ rtx reg2 = gen_reg_rtx (SFmode); -+ rtx reg3 = gen_reg_rtx (DImode); -+ rtx_code_label *label1 = gen_label_rtx (); -+ rtx_code_label *label2 = gen_label_rtx (); -+ rtx test; -+ REAL_VALUE_TYPE offset; -+ -+ real_2expN (&offset, 63, SFmode); -+ -+ loongarch_emit_move (reg1, const_double_from_real_value (offset, SFmode)); -+ do_pending_stack_adjust (); -+ -+ test = gen_rtx_GE (VOIDmode, operands[1], reg1); -+ emit_jump_insn (gen_cbranchsf4 (test, operands[1], reg1, label1)); -+ -+ emit_insn (gen_fix_truncsfdi2 (operands[0], operands[1])); -+ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); -+ emit_barrier (); -+ -+ emit_label (label1); -+ loongarch_emit_move (reg2, gen_rtx_MINUS (SFmode, operands[1], reg1)); -+ loongarch_emit_move (reg3, GEN_INT (BITMASK_HIGH)); -+ emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32))); -+ -+ emit_insn (gen_fix_truncsfdi2 (operands[0], reg2)); -+ emit_insn (gen_iordi3 (operands[0], operands[0], reg3)); -+ -+ emit_label (label2); -+ -+ /* Allow REG_NOTES to be set on last insn (labels don't have enough -+ fields, and can't be used for REG_NOTES anyway). */ -+ emit_use (stack_pointer_rtx); -+ DONE; -+}) -+ -+;; -+;; .................... -+;; -+;; DATA MOVEMENT -+;; -+;; .................... 
-+ -+(define_expand "extzv" -+ [(set (match_operand:GPR 0 "register_operand") -+ (zero_extract:GPR (match_operand:GPR 1 "register_operand") -+ (match_operand 2 "const_int_operand") -+ (match_operand 3 "const_int_operand")))] -+ "" -+{ -+ if (!loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), -+ INTVAL (operands[3]))) -+ FAIL; -+}) -+ -+(define_insn "*extzv" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r") -+ (match_operand 2 "const_int_operand" "") -+ (match_operand 3 "const_int_operand" "")))] -+ "loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), -+ INTVAL (operands[3]))" -+{ -+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]) -1 ); -+ return "bstrpick.\t%0,%1,%2,%3"; -+} -+ [(set_attr "type" "arith") -+ (set_attr "mode" "")]) -+ -+(define_expand "insv" -+ [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand") -+ (match_operand 1 "const_int_operand") -+ (match_operand 2 "const_int_operand")) -+ (match_operand:GPR 3 "reg_or_0_operand"))] -+ "" -+{ -+ if (!loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), -+ INTVAL (operands[2]))) -+ FAIL; -+}) -+ -+(define_insn "*insv" -+ [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand" "+r") -+ (match_operand:SI 1 "const_int_operand" "") -+ (match_operand:SI 2 "const_int_operand" "")) -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ"))] -+ "loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), -+ INTVAL (operands[2]))" -+{ -+ operands[1] = GEN_INT (INTVAL (operands[1]) + INTVAL (operands[2]) -1 ); -+ return "bstrins.\t%0,%z3,%1,%2"; -+} -+ [(set_attr "type" "arith") -+ (set_attr "mode" "")]) -+ -+;; Allow combine to split complex const_int load sequences, using operand 2 -+;; to store the intermediate results. See move_operand for details. -+(define_split -+ [(set (match_operand:GPR 0 "register_operand") -+ (match_operand:GPR 1 "splittable_const_int_operand")) -+ (clobber (match_operand:GPR 2 "register_operand"))] -+ "" -+ [(const_int 0)] -+{ -+ loongarch_move_integer (operands[2], operands[0], INTVAL (operands[1])); -+ DONE; -+}) -+ -+;; 64-bit integer moves -+ -+;; Unlike most other insns, the move insns can't be split with -+;; different predicates, because register spilling and other parts of -+;; the compiler, have memoized the insn number already. 
-+ -+(define_expand "movdi" -+ [(set (match_operand:DI 0 "") -+ (match_operand:DI 1 ""))] -+ "" -+{ -+ if (loongarch_legitimize_move (DImode, operands[0], operands[1])) -+ DONE; -+}) -+ -+ -+(define_insn "*movdi_32bit" -+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") -+ (match_operand:DI 1 "move_operand" "r,i,ZC,r,m,r,*J*r,*m,*f,*f"))] -+ "!TARGET_64BIT -+ && (register_operand (operands[0], DImode) -+ || reg_or_0_operand (operands[1], DImode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") -+ (set (attr "mode") -+ (if_then_else (eq_attr "move_type" "imul") -+ (const_string "SI") -+ (const_string "DI")))]) -+ -+ -+(define_insn "*movdi_64bit" -+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") -+ (match_operand:DI 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f"))] -+ "TARGET_64BIT -+ && (register_operand (operands[0], DImode) -+ || reg_or_0_operand (operands[1], DImode)) -+ && !((GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == LABEL_REF) -+ && symbolic_operand (operands[1], VOIDmode) -+ && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") -+ (set_attr "mode" "DI")]) -+ -+(define_insn "movdi_extreme" -+ [(parallel [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec_volatile:DI [(match_operand:DI 1 "symbolic_operand" "")] -+ UNSPECV_MOVE_EXTREME)) -+ (use (match_operand:DI 2 "register_operand" "=&r"))])] -+ "TARGET_64BIT && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)" -+ { -+ if (!loongarch_global_symbol_p (operands[1]) -+ || loongarch_symbol_binds_local_p (operands[1])) -+ return "la.local\t%0,%2,%1"; -+ else -+ return "la.global\t%0,%2,%1"; -+ } -+ [(set_attr "move_type" "const") -+ (set_attr "mode" "DI")]) -+;; 32-bit Integer moves -+ -+;; Unlike most other insns, the move insns can't be split with -+;; different predicates, because register spilling and other parts of -+;; the compiler, have memoized the insn number already. -+ -+(define_expand "mov" -+ [(set (match_operand:IMOVE32 0 "") -+ (match_operand:IMOVE32 1 ""))] -+ "" -+{ -+ if (loongarch_legitimize_move (mode, operands[0], operands[1])) -+ DONE; -+}) -+ -+;; The difference between these two is whether or not ints are allowed -+;; in FP registers (off by default, use -mdebugh to enable). -+ -+(define_insn "*mov_internal" -+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m,*r,*z") -+ (match_operand:IMOVE32 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f,*z,*r"))] -+ "(register_operand (operands[0], mode) -+ || reg_or_0_operand (operands[1], mode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") -+ (set_attr "compression" "all,*,*,*,*,*,*,*,*,*,*,*") -+ (set_attr "mode" "SI")]) -+ -+ -+ -+;; LARCH supports loading and storing a floating point register from -+;; the sum of two general registers. We use two versions for each of -+;; these four instructions: one where the two general registers are -+;; SImode, and one where they are DImode. This is because general -+;; registers will be in SImode when they hold 32-bit values, but, -+;; since the 32-bit values are always sign extended, the [ls][wd]xc1 -+;; instructions will still work correctly. 
-+ -+;; ??? Perhaps it would be better to support these instructions by -+;; modifying TARGET_LEGITIMATE_ADDRESS_P and friends. However, since -+;; these instructions can only be used to load and store floating -+;; point registers, that would probably cause trouble in reload. -+ -+(define_insn "*_" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") -+ (match_operand:P 2 "register_operand" "r"))))] -+ "" -+ "\t%0,%1,%2" -+ [(set_attr "type" "fpidxload") -+ (set_attr "mode" "")]) -+ -+(define_insn "*_" -+ [(set (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") -+ (match_operand:P 2 "register_operand" "r"))) -+ (match_operand:ANYF 0 "register_operand" "f"))] -+ "TARGET_HARD_FLOAT" -+ "\t%0,%1,%2" -+ [(set_attr "type" "fpidxstore") -+ (set_attr "mode" "")]) -+ -+;; Loongson index address load and store. -+(define_insn "*_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (mem:GPR -+ (plus:P (match_operand:P 1 "register_operand" "r") -+ (match_operand:P 2 "register_operand" "r"))))] -+ "" -+ "\t%0,%1,%2" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "*_" -+ [(set (mem:GPR (plus:P (match_operand:P 1 "register_operand" "r") -+ (match_operand:P 2 "register_operand" "r"))) -+ (match_operand:GPR 0 "register_operand" "r"))] -+ "" -+ "\t%0,%1,%2" -+ [(set_attr "type" "store") -+ (set_attr "mode" "")]) -+ -+;; SHORT mode sign_extend. -+(define_insn "*extend__" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (sign_extend:GPR -+ (mem:SHORT -+ (plus:P (match_operand:P 1 "register_operand" "r") -+ (match_operand:P 2 "register_operand" "r")))))] -+ "" -+ "\t%0,%1,%2" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "*extend_" -+ [(set (mem:SHORT (plus:P (match_operand:P 1 "register_operand" "r") -+ (match_operand:P 2 "register_operand" "r"))) -+ (match_operand:SHORT 0 "register_operand" "r"))] -+ "" -+ "\t%0,%1,%2" -+ [(set_attr "type" "store") -+ (set_attr "mode" "SI")]) -+ -+ -+;; 16-bit Integer moves -+ -+;; Unlike most other insns, the move insns can't be split with -+;; different predicates, because register spilling and other parts of -+;; the compiler, have memoized the insn number already. -+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND. -+ -+(define_expand "movhi" -+ [(set (match_operand:HI 0 "") -+ (match_operand:HI 1 ""))] -+ "" -+{ -+ if (loongarch_legitimize_move (HImode, operands[0], operands[1])) -+ DONE; -+}) -+ -+(define_insn "*movhi_internal" -+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m") -+ (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ"))] -+ "(register_operand (operands[0], HImode) -+ || reg_or_0_operand (operands[1], HImode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,const,load,store") -+ (set_attr "compression" "all,all,*,*,*") -+ (set_attr "mode" "HI")]) -+ -+;; 8-bit Integer moves -+ -+;; Unlike most other insns, the move insns can't be split with -+;; different predicates, because register spilling and other parts of -+;; the compiler, have memoized the insn number already. -+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND. 
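;; (Illustration for the indexed-address load/store patterns above.)
;; Because a load whose address is the sum of two registers matches these
;; patterns, array accesses with a runtime index are expected to need no
;; separate address add; e.g., assuming -O2 and the LP64 ABI (register
;; names illustrative):
;;
;;   double get (double *p, long i)
;;   {
;;     return p[i];   /* slli.d $r5,$r5,3 then fldx.d $f0,$r4,$r5 */
;;   }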
-+ -+(define_expand "movqi" -+ [(set (match_operand:QI 0 "") -+ (match_operand:QI 1 ""))] -+ "" -+{ -+ if (loongarch_legitimize_move (QImode, operands[0], operands[1])) -+ DONE; -+}) -+ -+(define_insn "*movqi_internal" -+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m") -+ (match_operand:QI 1 "move_operand" "r,I,m,rJ"))] -+ "(register_operand (operands[0], QImode) -+ || reg_or_0_operand (operands[1], QImode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,load,store") -+ (set_attr "compression" "all,*,*,*") -+ (set_attr "mode" "QI")]) -+ -+;; 32-bit floating point moves -+ -+(define_expand "movsf" -+ [(set (match_operand:SF 0 "") -+ (match_operand:SF 1 ""))] -+ "" -+{ -+ if (loongarch_legitimize_move (SFmode, operands[0], operands[1])) -+ DONE; -+}) -+ -+(define_insn "*movsf_hardfloat" -+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") -+ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))] -+ "TARGET_HARD_FLOAT -+ && (register_operand (operands[0], SFmode) -+ || reg_or_0_operand (operands[1], SFmode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") -+ (set_attr "mode" "SF")]) -+ -+(define_insn "*movsf_softfloat" -+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m") -+ (match_operand:SF 1 "move_operand" "Gr,m,r"))] -+ "TARGET_SOFT_FLOAT -+ && (register_operand (operands[0], SFmode) -+ || reg_or_0_operand (operands[1], SFmode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,load,store") -+ (set_attr "mode" "SF")]) -+ -+ -+;; 64-bit floating point moves -+ -+(define_expand "movdf" -+ [(set (match_operand:DF 0 "") -+ (match_operand:DF 1 ""))] -+ "" -+{ -+ if (loongarch_legitimize_move (DFmode, operands[0], operands[1])) -+ DONE; -+}) -+ -+(define_insn "*movdf_hardfloat" -+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") -+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT -+ && (register_operand (operands[0], DFmode) -+ || reg_or_0_operand (operands[1], DFmode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") -+ (set_attr "mode" "DF")]) -+ -+(define_insn "*movdf_softfloat" -+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m") -+ (match_operand:DF 1 "move_operand" "rG,m,rG"))] -+ "(TARGET_SOFT_FLOAT || TARGET_SINGLE_FLOAT) -+ && (register_operand (operands[0], DFmode) -+ || reg_or_0_operand (operands[1], DFmode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,load,store") -+ (set_attr "mode" "DF")]) -+ -+ -+;; 128-bit integer moves -+ -+(define_expand "movti" -+ [(set (match_operand:TI 0) -+ (match_operand:TI 1))] -+ "TARGET_64BIT" -+{ -+ if (loongarch_legitimize_move (TImode, operands[0], operands[1])) -+ DONE; -+}) -+ -+(define_insn "*movti" -+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m") -+ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))] -+ "TARGET_64BIT -+ && (register_operand (operands[0], TImode) -+ || reg_or_0_operand (operands[1], TImode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,load,store") -+ (set (attr "mode") -+ (if_then_else (eq_attr "move_type" "imul") -+ (const_string "SI") -+ 
(const_string "TI")))]) -+ -+ -+;; 128-bit floating point moves -+ -+(define_expand "movtf" -+ [(set (match_operand:TF 0) -+ (match_operand:TF 1))] -+ "TARGET_64BIT" -+{ -+ if (loongarch_legitimize_move (TFmode, operands[0], operands[1])) -+ DONE; -+}) -+ -+;; This pattern handles both hard- and soft-float cases. -+(define_insn "*movtf" -+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,r,m,f,r,f,m") -+ (match_operand:TF 1 "move_operand" "rG,m,rG,rG,f,m,f"))] -+ "TARGET_64BIT -+ && (register_operand (operands[0], TFmode) -+ || reg_or_0_operand (operands[1], TFmode))" -+ "#" -+ [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore") -+ (set_attr "mode" "TF")]) -+ -+ -+(define_split -+ [(set (match_operand:MOVE64 0 "nonimmediate_operand") -+ (match_operand:MOVE64 1 "move_operand"))] -+ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ [(const_int 0)] -+{ -+ loongarch_split_move_insn (operands[0], operands[1], curr_insn); -+ DONE; -+}) -+ -+(define_split -+ [(set (match_operand:MOVE128 0 "nonimmediate_operand") -+ (match_operand:MOVE128 1 "move_operand"))] -+ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ [(const_int 0)] -+{ -+ loongarch_split_move_insn (operands[0], operands[1], curr_insn); -+ DONE; -+}) -+ -+;; Emit a doubleword move in which exactly one of the operands is -+;; a floating-point register. We can't just emit two normal moves -+;; because of the constraints imposed by the FPU register model; -+;; see loongarch_cannot_change_mode_class for details. Instead, we keep -+;; the FPR whole and use special patterns to refer to each word of -+;; the other operand. -+ -+(define_expand "move_doubleword_fpr" -+ [(set (match_operand:SPLITF 0) -+ (match_operand:SPLITF 1))] -+ "" -+{ -+ if (FP_REG_RTX_P (operands[0])) -+ { -+ rtx low = loongarch_subword (operands[1], 0); -+ rtx high = loongarch_subword (operands[1], 1); -+ emit_insn (gen_load_low (operands[0], low)); -+ if (!TARGET_64BIT) -+ emit_insn (gen_movgr2frh (operands[0], high, operands[0])); -+ else -+ emit_insn (gen_load_high (operands[0], high, operands[0])); -+ } -+ else -+ { -+ rtx low = loongarch_subword (operands[0], 0); -+ rtx high = loongarch_subword (operands[0], 1); -+ emit_insn (gen_store_word (low, operands[1], const0_rtx)); -+ if (!TARGET_64BIT) -+ emit_insn (gen_movfrh2gr (high, operands[1])); -+ else -+ emit_insn (gen_store_word (high, operands[1], const1_rtx)); -+ } -+ DONE; -+}) -+ -+;; Load the low word of operand 0 with operand 1. -+(define_insn "load_low" -+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f") -+ (unspec:SPLITF [(match_operand: 1 "general_operand" "rJ,m")] -+ UNSPEC_LOAD_LOW))] -+ "TARGET_HARD_FLOAT" -+{ -+ operands[0] = loongarch_subword (operands[0], 0); -+ return loongarch_output_move (operands[0], operands[1]); -+} -+ [(set_attr "move_type" "mgtf,fpload") -+ (set_attr "mode" "")]) -+ -+;; Load the high word of operand 0 from operand 1, preserving the value -+;; in the low word. -+(define_insn "load_high" -+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f") -+ (unspec:SPLITF [(match_operand: 1 "general_operand" "rJ,m") -+ (match_operand:SPLITF 2 "register_operand" "0,0")] -+ UNSPEC_LOAD_HIGH))] -+ "TARGET_HARD_FLOAT" -+{ -+ operands[0] = loongarch_subword (operands[0], 1); -+ return loongarch_output_move (operands[0], operands[1]); -+} -+ [(set_attr "move_type" "mgtf,fpload") -+ (set_attr "mode" "")]) -+ -+;; Store one word of operand 1 in operand 0. 
Operand 2 is 1 to store the -+;; high word and 0 to store the low word. -+(define_insn "store_word" -+ [(set (match_operand: 0 "nonimmediate_operand" "=r,m") -+ (unspec: [(match_operand:SPLITF 1 "register_operand" "f,f") -+ (match_operand 2 "const_int_operand")] -+ UNSPEC_STORE_WORD))] -+ "TARGET_HARD_FLOAT" -+{ -+ operands[1] = loongarch_subword (operands[1], INTVAL (operands[2])); -+ return loongarch_output_move (operands[0], operands[1]); -+} -+ [(set_attr "move_type" "mftg,fpstore") -+ (set_attr "mode" "")]) -+ -+;; Move operand 1 to the high word of operand 0 using movgr2frh, preserving the -+;; value in the low word. -+(define_insn "movgr2frh" -+ [(set (match_operand:SPLITF 0 "register_operand" "=f") -+ (unspec:SPLITF [(match_operand: 1 "reg_or_0_operand" "rJ") -+ (match_operand:SPLITF 2 "register_operand" "0")] -+ UNSPEC_MOVGR2FRH))] -+ "TARGET_HARD_FLOAT && TARGET_FLOAT64" -+ "movgr2frh.w\t%z1,%0" -+ [(set_attr "move_type" "mgtf") -+ (set_attr "mode" "")]) -+ -+;; Move high word of operand 1 to operand 0 using movfrh2gr. -+(define_insn "movfrh2gr" -+ [(set (match_operand: 0 "register_operand" "=r") -+ (unspec: [(match_operand:SPLITF 1 "register_operand" "f")] -+ UNSPEC_MOVFRH2GR))] -+ "TARGET_HARD_FLOAT && TARGET_FLOAT64" -+ "movfrh2gr.s\t%0,%1" -+ [(set_attr "move_type" "mftg") -+ (set_attr "mode" "")]) -+ -+;; Expand in-line code to clear the instruction cache between operand[0] and -+;; operand[1]. -+(define_expand "clear_cache" -+ [(match_operand 0 "pmode_register_operand") -+ (match_operand 1 "pmode_register_operand")] -+ "" -+ " -+{ -+ emit_insn (gen_ibar (const0_rtx)); -+ DONE; -+}") -+ -+(define_insn "ibar" -+ [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_IBAR)] -+ "" -+ "ibar\t%0") -+ -+(define_insn "dbar" -+ [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_DBAR)] -+ "" -+ "dbar\t%0") -+ -+ -+ -+;; Privileged state instruction -+ -+(define_insn "cpucfg" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_CPUCFG))] -+ "" -+ "cpucfg\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "asrtle_d" -+ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") -+ (match_operand:DI 1 "register_operand" "r")] -+ UNSPEC_ASRTLE_D)] -+ "TARGET_64BIT" -+ "asrtle.d\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "DI")]) -+ -+(define_insn "asrtgt_d" -+ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") -+ (match_operand:DI 1 "register_operand" "r")] -+ UNSPEC_ASRTGT_D)] -+ "TARGET_64BIT" -+ "asrtgt.d\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "DI")]) -+ -+(define_insn "
csrrd" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (unspec_volatile:GPR [(match_operand 1 "const_uimm14_operand")] -+ UNSPEC_CSRRD))] -+ "" -+ "csrrd\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "
csrwr" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (unspec_volatile:GPR -+ [(match_operand:GPR 1 "register_operand" "0") -+ (match_operand 2 "const_uimm14_operand")] -+ UNSPEC_CSRWR))] -+ "" -+ "csrwr\t%0,%2" -+ [(set_attr "type" "store") -+ (set_attr "mode" "")]) -+ -+(define_insn "
csrxchg" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (unspec_volatile:GPR -+ [(match_operand:GPR 1 "register_operand" "0") -+ (match_operand:GPR 2 "register_operand" "q") -+ (match_operand 3 "const_uimm14_operand")] -+ UNSPEC_CSRXCHG))] -+ "" -+ "csrxchg\t%0,%2,%3" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "iocsrrd_" -+ [(set (match_operand:QHWD 0 "register_operand" "=r") -+ (unspec_volatile:QHWD [(match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_IOCSRRD))] -+ "" -+ "iocsrrd.\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "iocsrwr_" -+ [(unspec_volatile:QHWD [(match_operand:QHWD 0 "register_operand" "r") -+ (match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_IOCSRWR)] -+ "" -+ "iocsrwr.\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "
cacop" -+ [(unspec_volatile:X [(match_operand 0 "const_uimm5_operand") -+ (match_operand:X 1 "register_operand" "r") -+ (match_operand 2 "const_imm12_operand")] -+ UNSPEC_CACOP)] -+ "" -+ "cacop\t%0,%1,%2" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "
lddir" -+ [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") -+ (match_operand:X 1 "register_operand" "r") -+ (match_operand 2 "const_uimm5_operand")] -+ UNSPEC_LDDIR)] -+ "" -+ "lddir\t%0,%1,%2" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "
ldpte" -+ [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") -+ (match_operand 1 "const_uimm5_operand")] -+ UNSPEC_LDPTE)] -+ "" -+ "ldpte\t%0,%1" -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) -+ -+ -+;; Block moves, see loongarch.c for more details. -+;; Argument 0 is the destination -+;; Argument 1 is the source -+;; Argument 2 is the length -+;; Argument 3 is the alignment -+ -+(define_expand "movmemsi" -+ [(parallel [(set (match_operand:BLK 0 "general_operand") -+ (match_operand:BLK 1 "general_operand")) -+ (use (match_operand:SI 2 "")) -+ (use (match_operand:SI 3 "const_int_operand"))])] -+ " !TARGET_MEMCPY" -+{ -+ if (loongarch_expand_block_move (operands[0], operands[1], operands[2])) -+ DONE; -+ else -+ FAIL; -+}) -+ -+;; -+;; .................... -+;; -+;; SHIFTS -+;; -+;; .................... -+ -+(define_expand "3" -+ [(set (match_operand:GPR 0 "register_operand") -+ (any_shift:GPR (match_operand:GPR 1 "register_operand") -+ (match_operand:SI 2 "arith_operand")))] -+ "" -+{ -+}) -+ -+(define_insn "*3" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (any_shift:GPR (match_operand:GPR 1 "register_operand" "r") -+ (match_operand:SI 2 "arith_operand" "rI")))] -+ "" -+{ -+ if (CONST_INT_P (operands[2])) -+ { -+ operands[2] = GEN_INT (INTVAL (operands[2]) -+ & (GET_MODE_BITSIZE (mode) - 1)); -+ return "i.\t%0,%1,%2"; -+ } else -+ return ".\t%0,%1,%2"; -+} -+ [(set_attr "type" "shift") -+ (set_attr "compression" "none") -+ (set_attr "mode" "")]) -+ -+(define_insn "*si3_extend" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (sign_extend:DI -+ (any_shift:SI (match_operand:SI 1 "register_operand" "r") -+ (match_operand:SI 2 "arith_operand" "rI"))))] -+ "TARGET_64BIT" -+{ -+ if (CONST_INT_P (operands[2])) -+ { -+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); -+ return "i.w\t%0,%1,%2"; -+ } else -+ return ".w\t%0,%1,%2"; -+} -+ [(set_attr "type" "shift") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "zero_extend_ashift1" -+ [ (set (match_operand:DI 0 "register_operand" "=r") -+ (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) -+ (match_operand 2 "const_immlsa_operand" "")) -+ (match_operand 3 "shift_mask_operand" "")))] -+"" -+"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" -+[(set_attr "type" "arith") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "2")]) -+ -+(define_insn "zero_extend_ashift2" -+ [ (set (match_operand:DI 0 "register_operand" "=r") -+ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand 2 "const_immlsa_operand" "")) -+ (match_operand 3 "shift_mask_operand" "")))] -+"" -+"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" -+[(set_attr "type" "arith") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "2")]) -+ -+(define_insn "alsl_paired1" -+ [(set (match_operand:DI 0 "register_operand" "=&r") -+ (plus:DI (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) -+ (match_operand 2 "const_immlsa_operand" "")) -+ (match_operand 3 "shift_mask_operand" "")) -+ (match_operand:DI 4 "register_operand" "r")))] -+ "" -+ "bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,%4,%2" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "2")]) -+ -+(define_insn "alsl_paired2" -+ [(set (match_operand:DI 0 "register_operand" "=&r") -+ (plus:DI (match_operand:DI 1 "register_operand" "r") -+ (and:DI (ashift:DI (match_operand:DI 2 "register_operand" "r") -+ (match_operand 3 "const_immlsa_operand" "")) -+ (match_operand 4 "shift_mask_operand" ""))))] -+ 
"" -+ "bstrpick.d\t%0,%2,31,0\n\talsl.d\t%0,%0,%1,%3" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "2")]) -+ -+(define_insn "alsl_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (plus:GPR (ashift:GPR (match_operand:GPR 1 "register_operand" "r") -+ (match_operand 2 "const_immlsa_operand" "")) -+ (match_operand:GPR 3 "register_operand" "r")))] -+ "ISA_HAS_LSA" -+ "alsl.\t%0,%1,%3,%2" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "rotr3" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (rotatert:GPR (match_operand:GPR 1 "register_operand" "r") -+ (match_operand:SI 2 "arith_operand" "rI")))] -+ "" -+{ -+ if (CONST_INT_P (operands[2])) -+ { -+ return "rotri.\t%0,%1,%2"; -+ } else -+ return "rotr.\t%0,%1,%2"; -+} -+ [(set_attr "type" "shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "bswaphi2" -+ [(set (match_operand:HI 0 "register_operand" "=r") -+ (bswap:HI (match_operand:HI 1 "register_operand" "r")))] -+ "" -+ "revb.2h\t%0,%1" -+ [(set_attr "type" "shift")]) -+ -+(define_insn_and_split "bswapsi2" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (bswap:SI (match_operand:SI 1 "register_operand" "r")))] -+ "" -+ "#" -+ "" -+ [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_WSBH)) -+ (set (match_dup 0) (rotatert:SI (match_dup 0) (const_int 16)))] -+ "" -+ [(set_attr "insn_count" "2")]) -+ -+(define_insn_and_split "bswapdi2" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (bswap:DI (match_operand:DI 1 "register_operand" "r")))] -+ "TARGET_64BIT" -+ "#" -+ "" -+ [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_DSBH)) -+ (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_DSHD))] -+ "" -+ [(set_attr "insn_count" "2")]) -+ -+(define_insn "wsbh" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_WSBH))] -+ "" -+ "revb.2h\t%0,%1" -+ [(set_attr "type" "shift")]) -+ -+(define_insn "dsbh" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSBH))] -+ "TARGET_64BIT" -+ "revb.4h\t%0,%1" -+ [(set_attr "type" "shift")]) -+ -+(define_insn "dshd" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSHD))] -+ "TARGET_64BIT" -+ "revh.d\t%0,%1" -+ [(set_attr "type" "shift")]) -+ -+;; -+;; .................... -+;; -+;; CONDITIONAL BRANCHES -+;; -+;; .................... -+ -+;; Conditional branches on floating-point equality tests. -+ -+(define_insn "*branch_fp_fcc" -+ [(set (pc) -+ (if_then_else -+ (match_operator 1 "equality_operator" -+ [(match_operand:FCC 2 "register_operand" "z") -+ (const_int 0)]) -+ (label_ref (match_operand 0 "" "")) -+ (pc)))] -+ "TARGET_HARD_FLOAT" -+{ -+ return loongarch_output_conditional_branch (insn, operands, -+ LARCH_BRANCH ("b%F1", "%Z2%0"), -+ LARCH_BRANCH ("b%W1", "%Z2%0")); -+} -+ [(set_attr "type" "branch")]) -+ -+(define_insn "*branch_fp_inverted_fcc" -+ [(set (pc) -+ (if_then_else -+ (match_operator 1 "equality_operator" -+ [(match_operand:FCC 2 "register_operand" "z") -+ (const_int 0)]) -+ (pc) -+ (label_ref (match_operand 0 "" ""))))] -+ "TARGET_HARD_FLOAT" -+{ -+ return loongarch_output_conditional_branch (insn, operands, -+ LARCH_BRANCH ("b%W1", "%Z2%0"), -+ LARCH_BRANCH ("b%F1", "%Z2%0")); -+} -+ [(set_attr "type" "branch")]) -+ -+;; Conditional branches on ordered comparisons with zero. 
-+ -+(define_insn "*branch_order" -+ [(set (pc) -+ (if_then_else -+ (match_operator 1 "order_operator" -+ [(match_operand:GPR 2 "register_operand" "r,r") -+ (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) -+ (label_ref (match_operand 0 "" "")) -+ (pc)))] -+ "" -+ { return loongarch_output_order_conditional_branch (insn, operands, false); } -+ [(set_attr "type" "branch") -+ (set_attr "compact_form" "maybe,always") -+ (set_attr "hazard" "forbidden_slot")]) -+ -+(define_insn "*branch_order_inverted" -+ [(set (pc) -+ (if_then_else -+ (match_operator 1 "order_operator" -+ [(match_operand:GPR 2 "register_operand" "r,r") -+ (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) -+ (pc) -+ (label_ref (match_operand 0 "" ""))))] -+ "" -+ { return loongarch_output_order_conditional_branch (insn, operands, true); } -+ [(set_attr "type" "branch") -+ (set_attr "compact_form" "maybe,always") -+ (set_attr "hazard" "forbidden_slot")]) -+ -+;; Conditional branch on equality comparison. -+ -+(define_insn "*branch_equality" -+ [(set (pc) -+ (if_then_else -+ (match_operator 1 "equality_operator" -+ [(match_operand:GPR 2 "register_operand" "r") -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) -+ (label_ref (match_operand 0 "" "")) -+ (pc)))] -+ "" -+ { return loongarch_output_equal_conditional_branch (insn, operands, false); } -+ [(set_attr "type" "branch") -+ (set_attr "compact_form" "maybe") -+ (set_attr "hazard" "forbidden_slot")]) -+ -+ -+(define_insn "*branch_equality_inverted" -+ [(set (pc) -+ (if_then_else -+ (match_operator 1 "equality_operator" -+ [(match_operand:GPR 2 "register_operand" "r") -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) -+ (pc) -+ (label_ref (match_operand 0 "" ""))))] -+ "" -+ { return loongarch_output_equal_conditional_branch (insn, operands, true); } -+ [(set_attr "type" "branch") -+ (set_attr "compact_form" "maybe") -+ (set_attr "hazard" "forbidden_slot")]) -+ -+ -+(define_expand "cbranch4" -+ [(set (pc) -+ (if_then_else (match_operator 0 "comparison_operator" -+ [(match_operand:GPR 1 "register_operand") -+ (match_operand:GPR 2 "nonmemory_operand")]) -+ (label_ref (match_operand 3 "")) -+ (pc)))] -+ "" -+{ -+ loongarch_expand_conditional_branch (operands); -+ DONE; -+}) -+ -+(define_expand "cbranch4" -+ [(set (pc) -+ (if_then_else (match_operator 0 "comparison_operator" -+ [(match_operand:SCALARF 1 "register_operand") -+ (match_operand:SCALARF 2 "register_operand")]) -+ (label_ref (match_operand 3 "")) -+ (pc)))] -+ "" -+{ -+ loongarch_expand_conditional_branch (operands); -+ DONE; -+}) -+ -+;; Used to implement built-in functions. -+(define_expand "condjump" -+ [(set (pc) -+ (if_then_else (match_operand 0) -+ (label_ref (match_operand 1)) -+ (pc)))]) -+ -+ -+ -+;; -+;; .................... -+;; -+;; SETTING A REGISTER FROM A COMPARISON -+;; -+;; .................... -+ -+;; Destination is always set in SI mode. 
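;; For illustration (registers illustrative; assuming -O2): through the
;; branch patterns above, an ordered comparison feeding a branch is one
;; fused instruction, and through the cstore expander below, a comparison
;; result materialized in a register is one set instruction:
;;
;;   extern void g (void);
;;   void f (long n)
;;   {
;;     if (n > 0)    /* one blt/bge against $r0; no separate slt + bnez */
;;       g ();
;;   }
;;
;;   int lt (long a, long b) { return a < b; }   /* slt   $r4,$r4,$r5 */
;;   int eqz (long a)        { return a == 0; }  /* sltui $r4,$r4,1   */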
-+ -+(define_expand "cstore4" -+ [(set (match_operand:SI 0 "register_operand") -+ (match_operator:SI 1 "loongarch_cstore_operator" -+ [(match_operand:GPR 2 "register_operand") -+ (match_operand:GPR 3 "nonmemory_operand")]))] -+ "" -+{ -+ loongarch_expand_scc (operands); -+ DONE; -+}) -+ -+(define_insn "*seq_zero_" -+ [(set (match_operand:GPR2 0 "register_operand" "=r") -+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r") -+ (const_int 0)))] -+ "" -+ "sltui\t%0,%1,1" -+ [(set_attr "type" "slt") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "*sne_zero_" -+ [(set (match_operand:GPR2 0 "register_operand" "=r") -+ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r") -+ (const_int 0)))] -+ "" -+ "sltu\t%0,%.,%1" -+ [(set_attr "type" "slt") -+ (set_attr "mode" "")]) -+ -+(define_insn "*sgt_" -+ [(set (match_operand:GPR2 0 "register_operand" "=r") -+ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r") -+ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))] -+ "" -+ "slt\t%0,%z2,%1" -+ [(set_attr "type" "slt") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "*sge_" -+ [(set (match_operand:GPR2 0 "register_operand" "=r") -+ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r") -+ (const_int 1)))] -+ "" -+ "slti\t%0,%.,%1" -+ [(set_attr "type" "slt") -+ (set_attr "mode" "")]) -+ -+(define_insn "*slt_" -+ [(set (match_operand:GPR2 0 "register_operand" "=r") -+ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r") -+ (match_operand:GPR 2 "arith_operand" "rI")))] -+ "" -+{ -+ if (CONST_INT_P (operands[2])) -+ { -+ return "slti\t%0,%1,%2"; -+ } else -+ return "slt\t%0,%1,%2"; -+} -+ [(set_attr "type" "slt") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "*sle_" -+ [(set (match_operand:GPR2 0 "register_operand" "=r") -+ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r") -+ (match_operand:GPR 2 "sle_operand" "")))] -+ "" -+{ -+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1); -+ return "slti\t%0,%1,%2"; -+} -+ [(set_attr "type" "slt") -+ (set_attr "mode" "")]) -+ -+ -+;; -+;; .................... -+;; -+;; FLOATING POINT COMPARISONS -+;; -+;; .................... -+ -+(define_insn "s__using_fcc" -+ [(set (match_operand:FCC 0 "register_operand" "=z") -+ (fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") -+ (match_operand:SCALARF 2 "register_operand" "f")))] -+ "" -+ "fcmp..\t%Z0%1,%2" -+ [(set_attr "type" "fcmp") -+ (set_attr "mode" "FCC")]) -+ -+(define_insn "s__using_fcc" -+ [(set (match_operand:FCC 0 "register_operand" "=z") -+ (swapped_fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") -+ (match_operand:SCALARF 2 "register_operand" "f")))] -+ "" -+ "fcmp..\t%Z0%2,%1" -+ [(set_attr "type" "fcmp") -+ (set_attr "mode" "FCC")]) -+ -+;; -+;; .................... -+;; -+;; UNCONDITIONAL BRANCHES -+;; -+;; .................... -+ -+;; Unconditional branches. 
-+ -+(define_expand "jump" -+ [(set (pc) -+ (label_ref (match_operand 0)))]) -+ -+(define_insn "*jump_absolute" -+ [(set (pc) -+ (label_ref (match_operand 0)))] -+ "TARGET_ABSOLUTE_JUMPS" -+{ -+ return LARCH_ABSOLUTE_JUMP ("b\t%l0"); -+} -+ [(set_attr "type" "branch") -+ (set_attr "compact_form" "maybe")]) -+ -+(define_insn "*jump_pic" -+ [(set (pc) -+ (label_ref (match_operand 0)))] -+ "!TARGET_ABSOLUTE_JUMPS" -+{ -+ return "b\t%0"; -+} -+ [(set_attr "type" "branch") -+ (set_attr "compact_form" "maybe")]) -+ -+ -+ -+(define_expand "indirect_jump" -+ [(set (pc) (match_operand 0 "register_operand"))] -+ "" -+{ -+ operands[0] = force_reg (Pmode, operands[0]); -+ emit_jump_insn (PMODE_INSN (gen_indirect_jump, (operands[0]))); -+ DONE; -+}) -+ -+(define_insn "indirect_jump_" -+ [(set (pc) (match_operand:P 0 "register_operand" "r"))] -+ "" -+ { -+ return "jr\t%0"; -+ } -+ [(set_attr "type" "jump") -+ (set_attr "mode" "none")]) -+ -+(define_expand "tablejump" -+ [(set (pc) -+ (match_operand 0 "register_operand")) -+ (use (label_ref (match_operand 1 "")))] -+ "" -+{ -+ if (flag_pic) -+ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0], -+ gen_rtx_LABEL_REF (Pmode, operands[1]), -+ NULL_RTX, 0, OPTAB_DIRECT); -+ emit_jump_insn (PMODE_INSN (gen_tablejump, (operands[0], operands[1]))); -+ DONE; -+}) -+ -+(define_insn "tablejump_" -+ [(set (pc) -+ (match_operand:P 0 "register_operand" "r")) -+ (use (label_ref (match_operand 1 "" "")))] -+ "" -+ { -+ return "jr\t%0"; -+ } -+ [(set_attr "type" "jump") -+ (set_attr "mode" "none")]) -+ -+ -+;; -+;; .................... -+;; -+;; Function prologue/epilogue -+;; -+;; .................... -+;; -+ -+(define_expand "prologue" -+ [(const_int 1)] -+ "" -+{ -+ loongarch_expand_prologue (); -+ DONE; -+}) -+ -+;; Block any insns from being moved before this point, since the -+;; profiling call to mcount can use various registers that aren't -+;; saved or used to pass arguments. -+ -+(define_insn "blockage" -+ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)] -+ "" -+ "" -+ [(set_attr "type" "ghost") -+ (set_attr "mode" "none")]) -+ -+(define_insn "probe_stack_range_" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec_volatile:P [(match_operand:P 1 "register_operand" "0") -+ (match_operand:P 2 "register_operand" "r") -+ (match_operand:P 3 "register_operand" "r")] -+ UNSPEC_PROBE_STACK_RANGE))] -+ "" -+ { return loongarch_output_probe_stack_range (operands[0], operands[2], operands[3]); } -+ [(set_attr "type" "unknown") -+ (set_attr "can_delay" "no") -+ (set_attr "mode" "")]) -+ -+(define_expand "epilogue" -+ [(const_int 2)] -+ "" -+{ -+ loongarch_expand_epilogue (false); -+ DONE; -+}) -+ -+(define_expand "sibcall_epilogue" -+ [(const_int 2)] -+ "" -+{ -+ loongarch_expand_epilogue (true); -+ DONE; -+}) -+ -+;; Trivial return. Make it look like a normal return insn as that -+;; allows jump optimizations to work better. -+ -+(define_expand "return" -+ [(simple_return)] -+ "loongarch_can_use_return_insn ()" -+ { }) -+ -+(define_expand "simple_return" -+ [(simple_return)] -+ "" -+ { }) -+ -+(define_insn "*" -+ [(any_return)] -+ "" -+ { -+ operands[0] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); -+ return "jr\t%0"; -+ } -+ [(set_attr "type" "jump") -+ (set_attr "mode" "none")]) -+ -+;; Normal return. -+ -+(define_insn "_internal" -+ [(any_return) -+ (use (match_operand 0 "pmode_register_operand" ""))] -+ "" -+ { -+ return "jr\t%0"; -+ } -+ [(set_attr "type" "jump") -+ (set_attr "mode" "none")]) -+ -+;; Exception return. 
-+(define_insn "loongarch_ertn" -+ [(return) -+ (unspec_volatile [(const_int 0)] UNSPEC_ERTN)] -+ "" -+ "ertn" -+ [(set_attr "type" "trap") -+ (set_attr "mode" "none")]) -+ -+;; Disable interrupts. -+(define_insn "loongarch_di" -+ [(unspec_volatile [(const_int 0)] UNSPEC_DI)] -+ "" -+ "di" -+ [(set_attr "type" "trap") -+ (set_attr "mode" "none")]) -+ -+;; Execution hazard barrier. -+(define_insn "loongarch_ehb" -+ [(unspec_volatile [(const_int 0)] UNSPEC_EHB)] -+ "" -+ "ehb" -+ [(set_attr "type" "trap") -+ (set_attr "mode" "none")]) -+ -+;; Read GPR from previous shadow register set. -+(define_insn "loongarch_rdpgpr_" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec_volatile:P [(match_operand:P 1 "register_operand" "r")] -+ UNSPEC_RDPGPR))] -+ "" -+ "rdpgpr\t%0,%1" -+ [(set_attr "type" "move") -+ (set_attr "mode" "")]) -+ -+;; This is used in compiling the unwind routines. -+(define_expand "eh_return" -+ [(use (match_operand 0 "general_operand"))] -+ "" -+{ -+ if (GET_MODE (operands[0]) != word_mode) -+ operands[0] = convert_to_mode (word_mode, operands[0], 0); -+ if (TARGET_64BIT) -+ emit_insn (gen_eh_set_lr_di (operands[0])); -+ else -+ emit_insn (gen_eh_set_lr_si (operands[0])); -+ DONE; -+}) -+ -+;; Clobber the return address on the stack. We can't expand this -+;; until we know where it will be put in the stack frame. -+ -+(define_insn "eh_set_lr_si" -+ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN) -+ (clobber (match_scratch:SI 1 "=&r"))] -+ "! TARGET_64BIT" -+ "#") -+ -+(define_insn "eh_set_lr_di" -+ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN) -+ (clobber (match_scratch:DI 1 "=&r"))] -+ "TARGET_64BIT" -+ "#") -+ -+(define_split -+ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN) -+ (clobber (match_scratch 1))] -+ "reload_completed" -+ [(const_int 0)] -+{ -+ loongarch_set_return_address (operands[0], operands[1]); -+ DONE; -+}) -+ -+ -+ -+;; -+;; .................... -+;; -+;; FUNCTION CALLS -+;; -+;; .................... -+ -+ -+;; Sibling calls. All these patterns use jump instructions. -+ -+;; If TARGET_SIBCALLS, call_insn_operand will only accept constant -+;; addresses if a direct jump is acceptable. Since the 'S' constraint -+;; is defined in terms of call_insn_operand, the same is true of the -+;; constraints. -+ -+;; When we use an indirect jump, we need a register that will be -+;; preserved by the epilogue. 
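;; For illustration of the sibling-call case described above (assuming
;; -O2, which enables -foptimize-sibling-calls):
;;
;;   extern int g (int);
;;   int f (int x) { return g (x + 1); }
;;
;; is expected to end in a direct tail jump -- "addi.w $r4,$r4,1" then
;; "b g" (or "b %plt(g)" when a PLT is in use) -- with no bl/jirl and no
;; jr return inside f (registers illustrative).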
-+ -+(define_expand "sibcall" -+ [(parallel [(call (match_operand 0 "") -+ (match_operand 1 "")) -+ (use (match_operand 2 "")) ;; next_arg_reg -+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx -+ "TARGET_SIBCALLS" -+{ -+ rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); -+ -+ emit_call_insn (gen_sibcall_internal (target, operands[1])); -+ DONE; -+}) -+ -+(define_insn "sibcall_internal" -+ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,a,t,h")) -+ (match_operand 1 "" ""))] -+ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "jr\t%0"; -+ case 1: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" -+ "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; -+ else -+ return "b\t%0"; -+ case 2: -+ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -+ return "b\t%0"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; -+ else -+ return "la.global\t$r12,%0\n\tjr\t$r12"; -+ case 3: -+ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; -+ else -+ return "la.global\t$r12,%0\n\tjr\t$r12"; -+ case 4: -+ if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -+ return "b\t%%plt(%0)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; -+ else -+ sorry ("cmodel extreme and tiny static not support plt."); -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "jal" "indirect,direct,direct,direct,direct")]) -+ -+(define_expand "sibcall_value" -+ [(parallel [(set (match_operand 0 "") -+ (call (match_operand 1 "") -+ (match_operand 2 ""))) -+ (use (match_operand 3 ""))])] ;; next_arg_reg -+ "TARGET_SIBCALLS" -+{ -+ rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); -+ -+ /* Handle return values created by loongarch_return_fpr_pair. */ -+ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) -+ { -+ emit_call_insn (gen_sibcall_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), -+ target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); -+ } -+ else -+ { -+ /* Handle return values created by loongarch_return_fpr_single. 
*/ -+ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) -+ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); -+ -+ emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2])); -+ } -+ DONE; -+}) -+ -+(define_insn "sibcall_value_internal" -+ [(set (match_operand 0 "register_operand" "") -+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) -+ (match_operand 2 "" "")))] -+ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "jr\t%1"; -+ case 1: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.local\t$r12,$r13,%1\n\t" -+ "jr\t$r12"; -+ else -+ return "b\t%1"; -+ case 2: -+ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -+ return "b\t%1"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\t" -+ "jr\t$r12"; -+ else -+ return "la.global\t$r12,%1\n\t" -+ "jr\t$r12"; -+ case 3: -+ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\t" -+ "jr\t$r12"; -+ else -+ return "la.global\t$r12,%1\n\t" -+ "jr\t$r12"; -+ case 4: -+ if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -+ return " b\t%%plt(%1)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ else -+ sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "jal" "indirect,direct,direct,direct,direct")]) -+ -+(define_insn "sibcall_value_multiple_internal" -+ [(set (match_operand 0 "register_operand" "") -+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) -+ (match_operand 2 "" ""))) -+ (set (match_operand 3 "register_operand" "") -+ (call (mem:SI (match_dup 1)) -+ (match_dup 2)))] -+ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "jr\t%1"; -+ case 1: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.local\t$r12,$r13,%1\n\t" -+ "jr\t$r12"; -+ else -+ return "b\t%1"; -+ case 2: -+ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -+ return "b\t%1"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\t" -+ "jr\t$r12"; -+ else -+ return "la.global\t$r12,%1\n\t" -+ "jr\t$r12"; -+ case 3: -+ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\t" -+ "jr\t$r12"; -+ else -+ return "la.global\t$r12,%1\n\t" -+ "jr\t$r12"; -+ case 4: -+ if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -+ return "b\t%%plt(%1)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ else -+ sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "jal" "indirect,direct,direct,direct,direct")]) -+ -+(define_expand "call" -+ [(parallel [(call (match_operand 0 "") -+ 
(match_operand 1 "")) -+ (use (match_operand 2 "")) ;; next_arg_reg -+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx -+ "" -+{ -+ rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); -+ -+ emit_call_insn (gen_call_internal (target, operands[1])); -+ DONE; -+}) -+;; In the last case, we can generate the individual instructions with -+;; a define_split. There are several things to be wary of: -+;; -+;; - We can't expose the load of $gp before reload. If we did, -+;; it might get removed as dead, but reload can introduce new -+;; uses of $gp by rematerializing constants. -+;; -+;; - We shouldn't restore $gp after calls that never return. -+;; It isn't valid to insert instructions between a noreturn -+;; call and the following barrier. -+;; -+;; - The splitter deliberately changes the liveness of $gp. The unsplit -+;; instruction preserves $gp and so have no effect on its liveness. -+;; But once we generate the separate insns, it becomes obvious that -+;; $gp is not live on entry to the call. -+;; -+ -+(define_insn "call_internal" -+ [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,a,t,h")) -+ (match_operand 1 "" "")) -+ (clobber (reg:SI RETURN_ADDR_REGNUM))] -+ "" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "jirl\t$r1,%0,0"; -+ case 1: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.local\t$r1,$r12,%0\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "bl\t%0"; -+ case 2: -+ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -+ return "bl\t%0"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%0\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "la.global\t$r1,%0\n\t" -+ "jirl\t$r1,$r1,0"; -+ case 3: -+ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%0\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "la.global\t$r1,%0\n\t" -+ "jirl\t$r1,$r1,0"; -+ case 4: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -+ return "bl\t%%plt(%0)"; -+ else -+ sorry ("cmodel extreme and tiny-static not support plt."); -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "jal" "indirect,direct,direct,direct,direct") -+ (set_attr "insn_count" "1,2,3,3,2")]) -+ -+ -+(define_expand "call_value" -+ [(parallel [(set (match_operand 0 "") -+ (call (match_operand 1 "") -+ (match_operand 2 ""))) -+ (use (match_operand 3 ""))])] ;; next_arg_reg -+ "" -+{ -+ rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); -+ /* Handle return values created by loongarch_return_fpr_pair. */ -+ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) -+ emit_call_insn (gen_call_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), -+ target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); -+ else -+ { -+ /* Handle return values created by loongarch_return_fpr_single. */ -+ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) -+ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); -+ -+ emit_call_insn (gen_call_value_internal (operands[0], target, operands[2])); -+ } -+ DONE; -+}) -+ -+;; See comment for call_internal. 
-+(define_insn "call_value_internal" -+ [(set (match_operand 0 "register_operand" "") -+ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) -+ (match_operand 2 "" ""))) -+ (clobber (reg:SI RETURN_ADDR_REGNUM))] -+ "" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "jirl\t$r1,%1,0"; -+ case 1: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.local\t$r1,$r12,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "bl\t%1"; -+ case 2: -+ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -+ return "bl\t%1"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "la.global\t$r1,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ case 3: -+ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "la.global\t$r1,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ case 4: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -+ return "bl\t%%plt(%1)"; -+ else -+ sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "jal" "indirect,direct,direct,direct,direct") -+ (set_attr "insn_count" "1,2,3,3,2")]) -+ -+;; See comment for call_internal. -+(define_insn "call_value_multiple_internal" -+ [(set (match_operand 0 "register_operand" "") -+ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) -+ (match_operand 2 "" ""))) -+ (set (match_operand 3 "register_operand" "") -+ (call (mem:SI (match_dup 1)) -+ (match_dup 2))) -+ (clobber (reg:SI RETURN_ADDR_REGNUM))] -+ "" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "jirl\t$r1,%1,0"; -+ case 1: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.local\t$r1,$r12,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "bl\t%1"; -+ case 2: -+ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -+ return "bl\t%1"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\t" -+ "jirl\t$r1,$r1,0 "; -+ else -+ return "la.global\t$r1,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ case 3: -+ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ else -+ return "la.global\t$r1,%1\n\t" -+ "jirl\t$r1,$r1,0"; -+ case 4: -+ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -+ return "bl\t%%plt(%1)"; -+ else -+ sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "jal" "indirect,direct,direct,direct,direct") -+ (set_attr "insn_count" "1,2,3,3,2")]) -+ -+ -+;; Call subroutine returning any type. 
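;; (Example for the call patterns above.)  The selected code model
;; (loongarch_cmodel_var) changes how a plain external call is emitted;
;; for
;;
;;   extern void h (void);
;;   void f (void) { h (); }
;;
;; the normal model is expected to produce "bl %plt(h)", while the large
;; model uses the two-instruction "pcaddu18i $r1,..." / "jirl $r1,$r1,..."
;; sequence shown in call_internal.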
-+ -+(define_expand "untyped_call" -+ [(parallel [(call (match_operand 0 "") -+ (const_int 0)) -+ (match_operand 1 "") -+ (match_operand 2 "")])] -+ "" -+{ -+ int i; -+ -+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx)); -+ -+ for (i = 0; i < XVECLEN (operands[2], 0); i++) -+ { -+ rtx set = XVECEXP (operands[2], 0, i); -+ loongarch_emit_move (SET_DEST (set), SET_SRC (set)); -+ } -+ -+ emit_insn (gen_blockage ()); -+ DONE; -+}) -+ -+;; -+;; .................... -+;; -+;; MISC. -+;; -+;; .................... -+;; -+ -+ -+(define_insn "*prefetch_indexed_" -+ [(prefetch (plus:P (match_operand:P 0 "register_operand" "r") -+ (match_operand:P 1 "register_operand" "r")) -+ (match_operand 2 "const_int_operand" "n") -+ (match_operand 3 "const_int_operand" "n"))] -+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+{ -+ operands[2] = loongarch_prefetch_cookie (operands[2], operands[3]); -+ return "prefx\t%2,%1(%0)"; -+} -+ [(set_attr "type" "prefetchx")]) -+ -+(define_insn "nop" -+ [(const_int 0)] -+ "" -+ "nop" -+ [(set_attr "type" "nop") -+ (set_attr "mode" "none")]) -+ -+;; Like nop, but commented out when outside a .set noreorder block. -+(define_insn "hazard_nop" -+ [(const_int 1)] -+ "" -+ { -+ return "#nop"; -+ } -+ [(set_attr "type" "nop")]) -+ -+;; The `.insn' pseudo-op. -+(define_insn "insn_pseudo" -+ [(unspec_volatile [(const_int 0)] UNSPEC_INSN_PSEUDO)] -+ "" -+ ".insn" -+ [(set_attr "mode" "none") -+ (set_attr "insn_count" "0")]) -+ -+;; Conditional move instructions. -+ -+(define_insn "*sel_using_" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (if_then_else:GPR -+ (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "r,r") -+ (const_int 0)) -+ (match_operand:GPR 2 "reg_or_0_operand" "r,J") -+ (match_operand:GPR 3 "reg_or_0_operand" "J,r")))] -+ "register_operand (operands[2], mode) -+ != register_operand (operands[3], mode)" -+ "@ -+ \t%0,%2,%1 -+ \t%0,%3,%1" -+ [(set_attr "type" "condmove") -+ (set_attr "mode" "")]) -+ -+;; sel.fmt copies the 3rd argument when the 1st is non-zero and the 2nd -+;; argument if the 1st is zero. This means operand 2 and 3 are -+;; inverted in the instruction. -+ -+(define_insn "*sel" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (if_then_else:SCALARF -+ (ne:FCC (match_operand:FCC 1 "register_operand" "z") -+ (const_int 0)) -+ (match_operand:SCALARF 2 "reg_or_0_operand" "f") -+ (match_operand:SCALARF 3 "reg_or_0_operand" "f")))] -+ "" -+ "fsel\t%0,%3,%2,%1" -+ [(set_attr "type" "condmove") -+ (set_attr "mode" "")]) -+ -+;; These are the main define_expand's used to make conditional moves. 
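;; For illustration: via the movcc expanders below, a floating-point
;; select can be emitted branchlessly (register allocation illustrative):
;;
;;   double pick (double a, double b) { return a < b ? a : b; }
;;
;; is expected to become "fcmp.slt.d $fcc0,$f0,$f1" followed by
;; "fsel $f0,$f1,$f0,$fcc0" -- the two value operands swapped exactly as
;; the comment on the fsel pattern above describes.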
-+ -+(define_expand "movcc" -+ [(set (match_operand:GPR 0 "register_operand") -+ (if_then_else:GPR (match_operator 1 "comparison_operator" -+ [(match_operand:GPR 2 "reg_or_0_operand") -+ (match_operand:GPR 3 "reg_or_0_operand")])))] -+ "TARGET_COND_MOVE_INT" -+{ -+ if (!INTEGRAL_MODE_P (GET_MODE (XEXP (operands[1], 0)))) -+ FAIL; -+ -+ loongarch_expand_conditional_move (operands); -+ DONE; -+}) -+ -+(define_expand "movcc" -+ [(set (match_operand:SCALARF 0 "register_operand") -+ (if_then_else:SCALARF (match_operator 1 "comparison_operator" -+ [(match_operand:SCALARF 2 "reg_or_0_operand") -+ (match_operand:SCALARF 3 "reg_or_0_operand")])))] -+ "TARGET_COND_MOVE_FLOAT" -+{ -+ if (!FLOAT_MODE_P (GET_MODE (XEXP (operands[1], 0)))) -+ FAIL; -+ -+ loongarch_expand_conditional_move (operands); -+ DONE; -+}) -+ -+(define_split -+ [(match_operand 0 "small_data_pattern")] -+ "reload_completed" -+ [(match_dup 0)] -+ { operands[0] = loongarch_rewrite_small_data (operands[0]); }) -+ -+;; Thread-Local Storage -+ -+(define_insn "got_load_tls_gd" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_GD))] -+ "" -+ "la.tls.gd\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "got_load_tls_ld" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_LD))] -+ "" -+ "la.tls.ld\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "got_load_tls_le" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_LE))] -+ "" -+ "la.tls.le\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "got_load_tls_ie" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_IE))] -+ "" -+ "la.tls.ie\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "loongarch_movfcsr2gr" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand")] UNSPEC_MOVFCSR2GR))] -+ "TARGET_HARD_FLOAT" -+ "movfcsr2gr\t%0,$r%1") -+ -+(define_insn "loongarch_movgr2fcsr" -+ [(unspec_volatile [(match_operand 0 "const_uimm5_operand") -+ (match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_MOVGR2FCSR)] -+ "TARGET_HARD_FLOAT" -+ "movgr2fcsr\t$r%0,%1") -+ -+ -+;; Match paired HI/SI/SF/DFmode load/stores. -+(define_insn "*join2_load_store" -+ [(set (match_operand:JOIN_MODE 0 "nonimmediate_operand" "=r,f,m,m,r,ZC") -+ (match_operand:JOIN_MODE 1 "nonimmediate_operand" "m,m,r,f,ZC,r")) -+ (set (match_operand:JOIN_MODE 2 "nonimmediate_operand" "=r,f,m,m,r,ZC") -+ (match_operand:JOIN_MODE 3 "nonimmediate_operand" "m,m,r,f,ZC,r"))] -+ "reload_completed" -+ { -+ bool load_p = (which_alternative == 0 || which_alternative == 1); -+ /* Reg-renaming pass reuses base register if it is dead after bonded loads. -+ Hardware does not bond those loads, even when they are consecutive. -+ However, order of the loads need to be checked for correctness. 
*/ -+ if (!load_p || !reg_overlap_mentioned_p (operands[0], operands[1])) -+ { -+ output_asm_insn (loongarch_output_move (operands[0], operands[1]), -+ operands); -+ output_asm_insn (loongarch_output_move (operands[2], operands[3]), -+ &operands[2]); -+ } -+ else -+ { -+ output_asm_insn (loongarch_output_move (operands[2], operands[3]), -+ &operands[2]); -+ output_asm_insn (loongarch_output_move (operands[0], operands[1]), -+ operands); -+ } -+ return ""; -+ } -+ [(set_attr "move_type" "load,fpload,store,fpstore,load,store") -+ (set_attr "insn_count" "2,2,2,2,2,2")]) -+ -+;; 2 HI/SI/SF/DF loads are joined. -+;; P5600 does not support bonding of two LBs, hence QI mode is not included. -+;; The loads must be non-volatile as they might be reordered at the time of asm -+;; generation. -+(define_peephole2 -+ [(set (match_operand:JOIN_MODE 0 "register_operand") -+ (match_operand:JOIN_MODE 1 "non_volatile_mem_operand")) -+ (set (match_operand:JOIN_MODE 2 "register_operand") -+ (match_operand:JOIN_MODE 3 "non_volatile_mem_operand"))] -+ "loongarch_load_store_bonding_p (operands, mode, true)" -+ [(parallel [(set (match_dup 0) -+ (match_dup 1)) -+ (set (match_dup 2) -+ (match_dup 3))])] -+ "") -+ -+;; 2 HI/SI/SF/DF stores are joined. -+;; P5600 does not support bonding of two SBs, hence QI mode is not included. -+(define_peephole2 -+ [(set (match_operand:JOIN_MODE 0 "memory_operand") -+ (match_operand:JOIN_MODE 1 "register_operand")) -+ (set (match_operand:JOIN_MODE 2 "memory_operand") -+ (match_operand:JOIN_MODE 3 "register_operand"))] -+ "loongarch_load_store_bonding_p (operands, mode, false)" -+ [(parallel [(set (match_dup 0) -+ (match_dup 1)) -+ (set (match_dup 2) -+ (match_dup 3))])] -+ "") -+ -+;; Match paired HImode loads. -+(define_insn "*join2_loadhi" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand" "m"))) -+ (set (match_operand:SI 2 "register_operand" "=r") -+ (any_extend:SI (match_operand:HI 3 "non_volatile_mem_operand" "m")))] -+ "reload_completed" -+ { -+ /* Reg-renaming pass reuses base register if it is dead after bonded loads. -+ Hardware does not bond those loads, even when they are consecutive. -+ However, order of the loads need to be checked for correctness. */ -+ if (!reg_overlap_mentioned_p (operands[0], operands[1])) -+ { -+ output_asm_insn ("ld.h\t%0,%1", operands); -+ output_asm_insn ("ld.h\t%2,%3", operands); -+ } -+ else -+ { -+ output_asm_insn ("ld.h\t%2,%3", operands); -+ output_asm_insn ("ld.h\t%0,%1", operands); -+ } -+ -+ return ""; -+ } -+ [(set_attr "move_type" "load") -+ (set_attr "insn_count" "2")]) -+ -+ -+;; 2 HI loads are joined. -+(define_peephole2 -+ [(set (match_operand:SI 0 "register_operand") -+ (any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand"))) -+ (set (match_operand:SI 2 "register_operand") -+ (any_extend:SI (match_operand:HI 3 "non_volatile_mem_operand")))] -+ "loongarch_load_store_bonding_p (operands, HImode, true)" -+ [(parallel [(set (match_dup 0) -+ (any_extend:SI (match_dup 1))) -+ (set (match_dup 2) -+ (any_extend:SI (match_dup 3)))])] -+ "") -+ -+ -+;; Logical AND NOT. -+(define_insn "loongson_gsandn" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (and:GPR -+ (not:GPR (match_operand:GPR 1 "register_operand" "r")) -+ (match_operand:GPR 2 "register_operand" "r")))] -+ "" -+ "andn\t%0,%2,%1" -+ [(set_attr "type" "logical")]) -+ -+;; Logical AND NOT. 
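;; For illustration: the gsandn pattern above and the gsorn pattern just
;; below (an OR NOT, despite the duplicated "Logical AND NOT" banner)
;; match the usual C idioms directly (registers illustrative):
;;
;;   long f (long a, long b) { return ~a & b; }   /* andn $r4,$r5,$r4 */
;;   long g (long a, long b) { return ~a | b; }   /* orn  $r4,$r5,$r4 */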
-+(define_insn "loongson_gsorn" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (ior:GPR -+ (not:GPR (match_operand:GPR 1 "register_operand" "r")) -+ (match_operand:GPR 2 "register_operand" "r")))] -+ "" -+ "orn\t%0,%2,%1" -+ [(set_attr "type" "logical")]) -+ -+(define_insn "smax3" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (smax:SCALARF (match_operand:SCALARF 1 "register_operand" "f") -+ (match_operand:SCALARF 2 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT" -+ "fmax.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_insn "smin3" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (smin:SCALARF (match_operand:SCALARF 1 "register_operand" "f") -+ (match_operand:SCALARF 2 "register_operand" "f")))] -+ "TARGET_HARD_FLOAT" -+ "fmin.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_insn "smaxa3" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (if_then_else:SCALARF -+ (gt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) -+ (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "TARGET_HARD_FLOAT" -+ "fmaxa.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_insn "smina3" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (if_then_else:SCALARF -+ (lt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) -+ (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "TARGET_HARD_FLOAT" -+ "fmina.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_insn "frint_" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] -+ UNSPEC_FRINT))] -+ "" -+ "frint.\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "")]) -+ -+(define_insn "fclass_" -+ [(set (match_operand:SCALARF 0 "register_operand" "=f") -+ (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] -+ UNSPEC_FCLASS))] -+ "" -+ "fclass.\t%0,%1" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "")]) -+ -+(define_insn "bytepick_w" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:SI 1 "register_operand" "r") -+ (match_operand:SI 2 "register_operand" "r") -+ (match_operand:SI 3 "const_0_to_3_operand" "n")] -+ UNSPEC_BYTEPICK_W))] -+ "" -+ "bytepick.w\t%0,%1,%2,%z3" -+ [(set_attr "type" "dspalu") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "bytepick_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r") -+ (match_operand:DI 2 "register_operand" "r") -+ (match_operand:DI 3 "const_0_to_7_operand" "n")] -+ UNSPEC_BYTEPICK_D))] -+ "" -+ "bytepick.d\t%0,%1,%2,%z3" -+ [(set_attr "type" "dspalu") -+ (set_attr "mode" "DI")]) -+ -+(define_insn "bitrev_4b" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_BITREV_4B))] -+ "" -+ "bitrev.4b\t%0,%1" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "bitrev_8b" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] -+ UNSPEC_BITREV_8B))] -+ "" -+ "bitrev.8b\t%0,%1" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "DI")]) -+ -+ -+ -+(define_insn "lu32i_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (ior:DI -+ (zero_extend:DI -+ (subreg:SI (match_operand:DI 1 
"register_operand" "0") 0)) -+ (match_operand:DI 2 "const_lu32i_operand" "u")))] -+ "TARGET_64BIT" -+ "lu32i.d\t%0,%X2>>32" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI")]) -+ -+(define_insn "lu52i_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (ior:DI -+ (and:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand 2 "lu52i_mask_operand")) -+ (match_operand 3 "const_lu52i_operand" "v")))] -+ "TARGET_64BIT" -+ "lu52i.d\t%0,%1,%X3>>52" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI")]) -+ -+(define_mode_iterator QHSD [QI HI SI DI]) -+ -+(define_insn "crc_w__w" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") -+ (match_operand:SI 2 "register_operand" "r")] -+ UNSPEC_CRC))] -+ "" -+ "crc.w..w\t%0,%1,%2" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "")]) -+ -+(define_insn "crcc_w__w" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") -+ (match_operand:SI 2 "register_operand" "r")] -+ UNSPEC_CRCC))] -+ "" -+ "crcc.w..w\t%0,%1,%2" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "")]) -+ -+;; Synchronization instructions. -+ -+(include "sync.md") -+ -+; The LoongArch SX Instructions. -+(include "lsx.md") -+ -+; The MSA2.0 Instructions. -+(include "lsx2.md") -+ -+; The LoongArch ASX Instructions. -+(include "lasx.md") -+ -+;; Is copying of this instruction disallowed? -+(define_attr "cannot_copy" "no,yes" (const_string "no")) -+ -+(define_insn "stack_tie" -+ [(set (mem:BLK (scratch)) -+ (unspec:BLK [(match_operand:X 0 "register_operand" "r") -+ (match_operand:X 1 "register_operand" "r")] -+ UNSPEC_TIE))] -+ "" -+ "" -+ [(set_attr "length" "0")] -+) -+ -+(define_insn "gpr_save" -+ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_SAVE) -+ (clobber (reg:SI T0_REGNUM)) -+ (clobber (reg:SI T1_REGNUM))] -+ "" -+ { return loongarch_output_gpr_save (INTVAL (operands[0])); }) -+ -+(define_insn "gpr_restore" -+ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)] -+ "" -+ "tail\t__loongarch_restore_%0") -+ -+(define_insn "gpr_restore_return" -+ [(return) -+ (use (match_operand 0 "pmode_register_operand" "")) -+ (const_int 0)] -+ "" -+ "") -+ -diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt -new file mode 100644 -index 000000000..660de3674 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch.opt -@@ -0,0 +1,171 @@ -+ -+; -+; Copyright (C) 2005-2018 Free Software Foundation, Inc. -+; -+; This file is part of GCC. -+; -+; GCC is free software; you can redistribute it and/or modify it under -+; the terms of the GNU General Public License as published by the Free -+; Software Foundation; either version 3, or (at your option) any later -+; version. -+; -+; GCC is distributed in the hope that it will be useful, but WITHOUT -+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+; License for more details. -+; -+; You should have received a copy of the GNU General Public License -+; along with GCC; see the file COPYING3. If not see -+; . -+ -+HeaderInclude -+config/loongarch/loongarch-opts.h -+ -+mabi= -+Target RejectNegative Joined Enum(loongarch_abi) Var(loongarch_abi) Init(LARCH_ABI_DEFAULT) -+-mabi=ABI Generate code that conforms to the given ABI. 
-+
-+Enum
-+Name(loongarch_abi) Type(int)
-+Known LoongArch ABIs (for use with the -mabi= option):
-+
-+EnumValue
-+Enum(loongarch_abi) String(lp32) Value(ABILP32)
-+
-+EnumValue
-+Enum(loongarch_abi) String(lpx32) Value(ABILPX32)
-+
-+EnumValue
-+Enum(loongarch_abi) String(lp64) Value(ABILP64)
-+
-+march=
-+Target RejectNegative Joined Var(loongarch_arch_option) ToLower Enum(loongarch_arch_opt_value)
-+-march=ISA	Generate code for the given ISA.
-+
-+mbranch-cost=
-+Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
-+-mbranch-cost=COST	Set the cost of branches to roughly COST instructions.
-+
-+mcheck-zero-division
-+Target Report Mask(CHECK_ZERO_DIV)
-+Trap on integer divide by zero.
-+
-+mdouble-float
-+Target Report RejectNegative InverseMask(SINGLE_FLOAT, DOUBLE_FLOAT)
-+Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations.
-+
-+mflush-func=
-+Target RejectNegative Joined Var(loongarch_cache_flush_func) Init(CACHE_FLUSH_FUNC)
-+-mflush-func=FUNC	Use FUNC to flush the cache before calling stack trampolines.
-+
-+Mask(64BIT)
-+
-+Mask(FLOAT64)
-+
-+mhard-float
-+Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
-+Allow the use of hardware floating-point ABI and instructions.
-+
-+mlong-calls
-+Target Report Var(TARGET_LONG_CALLS)
-+Use indirect calls.
-+
-+mmemcpy
-+Target Report Mask(MEMCPY)
-+Don't optimize block moves.
-+
-+mno-float
-+Target Report RejectNegative Var(TARGET_NO_FLOAT) Condition(TARGET_SUPPORTS_NO_FLOAT)
-+Prevent the use of all floating-point operations.
-+
-+mno-flush-func
-+Target RejectNegative
-+Do not use a cache-flushing function before calling stack trampolines.
-+
-+mrelax-pic-calls
-+Target Report Mask(RELAX_PIC_CALLS)
-+Try to allow the linker to turn PIC calls into direct calls.
-+
-+mshared
-+Target Report Var(TARGET_SHARED) Init(1)
-+When generating -mabicalls code, make the code suitable for use in shared libraries.
-+
-+msingle-float
-+Target Report RejectNegative Mask(SINGLE_FLOAT)
-+Restrict the use of hardware floating-point instructions to 32-bit operations.
-+
-+msoft-float
-+Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
-+Prevent the use of all hardware floating-point instructions.
-+
-+mlra
-+Target Report Var(loongarch_lra_flag) Init(1) Save
-+Use LRA instead of reload.
-+
-+mtune=
-+Target RejectNegative Joined Var(loongarch_tune_option) ToLower Enum(loongarch_arch_opt_value)
-+-mtune=PROCESSOR	Optimize the output for PROCESSOR.
-+
-+mframe-header-opt
-+Target Report Var(flag_frame_header_optimization) Optimization
-+Optimize frame header.
-+
-+noasmopt
-+Driver
-+
-+mstrict-align
-+Target Report Mask(STRICT_ALIGN) Save
-+Do not generate unaligned memory accesses.
-+
-+mlsx
-+Target Report Mask(LSX)
-+Use LoongArch SX Extension instructions.
-+
-+mlasx
-+Target Report Var(TARGET_LASX)
-+Use LoongArch ASX Extension instructions.
-+
-+malign-llsc-target
-+Target Report Var(TARGET_ALIGN_LLSC_TARGET)
-+Align the branch targets of LL/SC loops.
-+
-+mmax-inline-memcpy-size=
-+Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024)
-+-mmax-inline-memcpy-size=SIZE	Set the maximum size of memcpy to inline; the default is 1024.
-+
-+mvecarg
-+Target Report Var(TARGET_VECARG) Init(1)
-+Pass vector arguments in vector registers.
-+
-+mcond-move-int
-+Target Report Var(TARGET_COND_MOVE_INT) Init(1)
-+Conditional moves for integers are enabled.
-+
-+mcond-move-float
-+Target Report Var(TARGET_COND_MOVE_FLOAT) Init(1)
-+Conditional moves for floats are enabled.
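-+
-+; For example, a native LoongArch64 compile might combine the options
-+; above as:
-+;   gcc -mabi=lp64 -mlsx -mstrict-align -mmax-inline-memcpy-size=2048 foo.c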
-+ -+; The code model option names for -mcmodel. -+ -+Enum -+Name(cmodel) Type(enum loongarch_code_model) -+The code model option names for -mcmodel: -+ -+EnumValue -+Enum(cmodel) String(normal) Value(LARCH_CMODEL_NORMAL) -+ -+EnumValue -+Enum(cmodel) String(tiny) Value(LARCH_CMODEL_TINY) -+ -+EnumValue -+Enum(cmodel) String(tiny-static) Value(LARCH_CMODEL_TINY_STATIC) -+ -+EnumValue -+Enum(cmodel) String(large) Value(LARCH_CMODEL_LARGE) -+ -+EnumValue -+Enum(cmodel) String(extreme) Value(LARCH_CMODEL_EXTREME) -+ -+mcmodel= -+Target RejectNegative Joined Enum(cmodel) Var(loongarch_cmodel_var) Init(LARCH_CMODEL_NORMAL) Save -+Specify the code model. -diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md -new file mode 100644 -index 000000000..1f7034366 ---- /dev/null -+++ b/gcc/config/loongarch/lsx.md -@@ -0,0 +1,3181 @@ -+;; Machine Description for LARCH Loongson SX ASE -+;; -+;; Copyright (C) 2018 Free Software Foundation, Inc. -+;; -+;; This file is part of GCC. -+;; -+;; GCC is free software; you can redistribute it and/or modify -+;; it under the terms of the GNU General Public License as published by -+;; the Free Software Foundation; either version 3, or (at your option) -+;; any later version. -+;; -+;; GCC is distributed in the hope that it will be useful, -+;; but WITHOUT ANY WARRANTY; without even the implied warranty of -+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+;; GNU General Public License for more details. -+;; -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . -+;; -+ -+(define_c_enum "unspec" [ -+ UNSPEC_LSX_ASUB_S -+ UNSPEC_LSX_VABSD_U -+ UNSPEC_LSX_VAVG_S -+ UNSPEC_LSX_VAVG_U -+ UNSPEC_LSX_VAVGR_S -+ UNSPEC_LSX_VAVGR_U -+ UNSPEC_LSX_VBITCLR -+ UNSPEC_LSX_VBITCLRI -+ UNSPEC_LSX_VBITREV -+ UNSPEC_LSX_VBITREVI -+ UNSPEC_LSX_VBITSET -+ UNSPEC_LSX_VBITSETI -+ UNSPEC_LSX_BRANCH_V -+ UNSPEC_LSX_BRANCH -+ UNSPEC_LSX_VFCMP_CAF -+ UNSPEC_LSX_VFCLASS -+ UNSPEC_LSX_VFCMP_CUNE -+ UNSPEC_LSX_VFCVT -+ UNSPEC_LSX_VFCVTH -+ UNSPEC_LSX_VFCVTL -+ UNSPEC_LSX_VFLOGB -+ UNSPEC_LSX_VFRECIP -+ UNSPEC_LSX_VFRINT -+ UNSPEC_LSX_VFRSQRT -+ UNSPEC_LSX_VFCMP_SAF -+ UNSPEC_LSX_VFCMP_SEQ -+ UNSPEC_LSX_VFCMP_SLE -+ UNSPEC_LSX_VFCMP_SLT -+ UNSPEC_LSX_VFCMP_SNE -+ UNSPEC_LSX_VFCMP_SOR -+ UNSPEC_LSX_VFCMP_SUEQ -+ UNSPEC_LSX_VFCMP_SULE -+ UNSPEC_LSX_VFCMP_SULT -+ UNSPEC_LSX_VFCMP_SUN -+ UNSPEC_LSX_VFCMP_SUNE -+ UNSPEC_LSX_VFTINT_S -+ UNSPEC_LSX_VFTINT_U -+ UNSPEC_LSX_VCLO -+ UNSPEC_LSX_VSAT_S -+ UNSPEC_LSX_VSAT_U -+ UNSPEC_LSX_VREPLVE -+ UNSPEC_LSX_VREPLVEI -+ UNSPEC_LSX_VSRAR -+ UNSPEC_LSX_VSRARI -+ UNSPEC_LSX_VSRLR -+ UNSPEC_LSX_VSRLRI -+ UNSPEC_LSX_VSSUB_S -+ UNSPEC_LSX_VSSUB_U -+ UNSPEC_LSX_VSHUF -+ UNSPEC_LSX_VABS -+ UNSPEC_LSX_VMUH_S -+ UNSPEC_LSX_VMUH_U -+ UNSPEC_LSX_VEXTW_S -+ UNSPEC_LSX_VEXTW_U -+ UNSPEC_LSX_VSLLWIL_S -+ UNSPEC_LSX_VSLLWIL_U -+ UNSPEC_LSX_VSRAN -+ UNSPEC_LSX_VSSRAN_S -+ UNSPEC_LSX_VSSRAN_U -+ UNSPEC_LSX_VSRAIN -+ UNSPEC_LSX_VSRAINS_S -+ UNSPEC_LSX_VSRAINS_U -+ UNSPEC_LSX_VSRARN -+ UNSPEC_LSX_VSRLN -+ UNSPEC_LSX_VSRLRN -+ UNSPEC_LSX_VSSRLRN_U -+ UNSPEC_LSX_VFRSTPI -+ UNSPEC_LSX_VFRSTP -+ UNSPEC_LSX_VSHUF4I -+ UNSPEC_LSX_VBSRL_V -+ UNSPEC_LSX_VBSLL_V -+ UNSPEC_LSX_VEXTRINS -+ UNSPEC_LSX_VMSKLTZ -+ UNSPEC_LSX_VSIGNCOV -+ UNSPEC_LSX_VFTINTRNE -+ UNSPEC_LSX_VFTINTRP -+ UNSPEC_LSX_VFTINTRM -+ UNSPEC_LSX_VFTINT_W_D -+ UNSPEC_LSX_VFFINT_S_L -+ UNSPEC_LSX_VFTINTRZ_W_D -+ UNSPEC_LSX_VFTINTRP_W_D -+ UNSPEC_LSX_VFTINTRM_W_D -+ UNSPEC_LSX_VFTINTRNE_W_D -+ 
UNSPEC_LSX_VFTINTL_L_S -+ UNSPEC_LSX_VFFINTH_D_W -+ UNSPEC_LSX_VFFINTL_D_W -+ UNSPEC_LSX_VFTINTRZL_L_S -+ UNSPEC_LSX_VFTINTRZH_L_S -+ UNSPEC_LSX_VFTINTRPL_L_S -+ UNSPEC_LSX_VFTINTRPH_L_S -+ UNSPEC_LSX_VFTINTRMH_L_S -+ UNSPEC_LSX_VFTINTRML_L_S -+ UNSPEC_LSX_VFTINTRNEL_L_S -+ UNSPEC_LSX_VFTINTRNEH_L_S -+ UNSPEC_LSX_VFTINTH_L_H -+ UNSPEC_LSX_VFRINTRNE_S -+ UNSPEC_LSX_VFRINTRNE_D -+ UNSPEC_LSX_VFRINTRZ_S -+ UNSPEC_LSX_VFRINTRZ_D -+ UNSPEC_LSX_VFRINTRP_S -+ UNSPEC_LSX_VFRINTRP_D -+ UNSPEC_LSX_VFRINTRM_S -+ UNSPEC_LSX_VFRINTRM_D -+ UNSPEC_LSX_VSSRARN_S -+ UNSPEC_LSX_VSSRARN_U -+ UNSPEC_LSX_VSSRLN_U -+ UNSPEC_LSX_VSSRLN -+ UNSPEC_LSX_VSSRLRN -+ UNSPEC_LSX_VLDI -+ UNSPEC_LSX_VSHUF_B -+ UNSPEC_LSX_VLDX -+ UNSPEC_LSX_VSTX -+ UNSPEC_LSX_VEXTL_QU_DU -+ UNSPEC_LSX_VSETEQZ_V -+]) -+ -+;; This attribute gives suffix for integers in VHMODE. -+(define_mode_attr dlsxfmt -+ [(V2DI "q") -+ (V4SI "d") -+ (V8HI "w") -+ (V16QI "h")]) -+ -+(define_mode_attr dlsxfmt_u -+ [(V2DI "qu") -+ (V4SI "du") -+ (V8HI "wu") -+ (V16QI "hu")]) -+ -+ -+;; All vector modes with 128 bits. -+(define_mode_iterator LSX [V2DF V4SF V2DI V4SI V8HI V16QI]) -+ -+;; Same as LSX. Used by vcond to iterate two modes. -+(define_mode_iterator LSX_2 [V2DF V4SF V2DI V4SI V8HI V16QI]) -+ -+;; Only used for splitting insert_d and copy_{u,s}.d. -+(define_mode_iterator LSX_D [V2DI V2DF]) -+ -+;; Only used for copy_{u,s}.w. -+(define_mode_iterator LSX_W [V4SI V4SF]) -+ -+;; Only integer modes. -+(define_mode_iterator ILSX [V2DI V4SI V8HI V16QI]) -+ -+;; As ILSX but excludes V16QI. -+(define_mode_iterator ILSX_DWH [V2DI V4SI V8HI]) -+ -+;; As ILSX but excludes V2DI. -+(define_mode_iterator ILSX_WHB [V4SI V8HI V16QI]) -+ -+;; Only integer modes equal or larger than a word. -+(define_mode_iterator ILSX_DW [V2DI V4SI]) -+ -+;; Only integer modes smaller than a word. -+(define_mode_iterator ILSX_HB [V8HI V16QI]) -+ -+;;;; Only integer modes for fixed-point madd_q/maddr_q. -+;;(define_mode_iterator ILSX_WH [V4SI V8HI]) -+ -+;; Only floating-point modes. -+(define_mode_iterator FLSX [V2DF V4SF]) -+ -+;; Only used for immediate set shuffle elements instruction. -+(define_mode_iterator LSX_WHB_W [V4SI V8HI V16QI V4SF]) -+ -+;; The attribute gives the integer vector mode with same size. -+(define_mode_attr VIMODE -+ [(V2DF "V2DI") -+ (V4SF "V4SI") -+ (V2DI "V2DI") -+ (V4SI "V4SI") -+ (V8HI "V8HI") -+ (V16QI "V16QI")]) -+ -+;; The attribute gives half modes for vector modes. -+(define_mode_attr VHMODE -+ [(V8HI "V16QI") -+ (V4SI "V8HI") -+ (V2DI "V4SI")]) -+ -+;; The attribute gives double modes for vector modes. -+(define_mode_attr VDMODE -+ [(V2DI "V2DI") -+ (V4SI "V2DI") -+ (V8HI "V4SI") -+ (V16QI "V8HI")]) -+ -+;; The attribute gives half modes with same number of elements for vector modes. -+(define_mode_attr VTRUNCMODE -+ [(V8HI "V8QI") -+ (V4SI "V4HI") -+ (V2DI "V2SI")]) -+ -+;; This attribute gives the mode of the result for "vpickve2gr_b, copy_u_b" etc. -+(define_mode_attr VRES -+ [(V2DF "DF") -+ (V4SF "SF") -+ (V2DI "DI") -+ (V4SI "SI") -+ (V8HI "SI") -+ (V16QI "SI")]) -+ -+;; Only used with LSX_D iterator. -+(define_mode_attr lsx_d -+ [(V2DI "reg_or_0") -+ (V2DF "register")]) -+ -+;; This attribute gives the integer vector mode with same size. -+(define_mode_attr mode_i -+ [(V2DF "v2di") -+ (V4SF "v4si") -+ (V2DI "v2di") -+ (V4SI "v4si") -+ (V8HI "v8hi") -+ (V16QI "v16qi")]) -+ -+;; This attribute gives suffix for LSX instructions. 
-+(define_mode_attr lsxfmt -+ [(V2DF "d") -+ (V4SF "w") -+ (V2DI "d") -+ (V4SI "w") -+ (V8HI "h") -+ (V16QI "b")]) -+ -+;; This attribute gives suffix for LSX instructions. -+(define_mode_attr lsxfmt_u -+ [(V2DF "du") -+ (V4SF "wu") -+ (V2DI "du") -+ (V4SI "wu") -+ (V8HI "hu") -+ (V16QI "bu")]) -+ -+;; This attribute gives suffix for integers in VHMODE. -+(define_mode_attr hlsxfmt -+ [(V2DI "w") -+ (V4SI "h") -+ (V8HI "b")]) -+ -+;; This attribute gives suffix for integers in VHMODE. -+(define_mode_attr hlsxfmt_u -+ [(V2DI "wu") -+ (V4SI "hu") -+ (V8HI "bu")]) -+ -+;; This attribute gives define_insn suffix for LSX instructions that need -+;; distinction between integer and floating point. -+(define_mode_attr lsxfmt_f -+ [(V2DF "d_f") -+ (V4SF "w_f") -+ (V2DI "d") -+ (V4SI "w") -+ (V8HI "h") -+ (V16QI "b")]) -+ -+(define_mode_attr flsxfmt_f -+ [(V2DF "d_f") -+ (V4SF "s_f") -+ (V2DI "d") -+ (V4SI "w") -+ (V8HI "h") -+ (V16QI "b")]) -+ -+(define_mode_attr flsxfmt -+ [(V2DF "d") -+ (V4SF "s") -+ (V2DI "d") -+ (V4SI "s")]) -+ -+(define_mode_attr ilsxfmt -+ [(V2DF "l") -+ (V4SF "w")]) -+ -+(define_mode_attr ilsxfmt_u -+ [(V2DF "lu") -+ (V4SF "wu")]) -+ -+;; This is used to form an immediate operand constraint using -+;; "const__operand". -+(define_mode_attr indeximm -+ [(V2DF "0_or_1") -+ (V4SF "0_to_3") -+ (V2DI "0_or_1") -+ (V4SI "0_to_3") -+ (V8HI "uimm3") -+ (V16QI "uimm4")]) -+ -+;; This attribute represents bitmask needed for vec_merge using -+;; "const__operand". -+(define_mode_attr bitmask -+ [(V2DF "exp_2") -+ (V4SF "exp_4") -+ (V2DI "exp_2") -+ (V4SI "exp_4") -+ (V8HI "exp_8") -+ (V16QI "exp_16")]) -+ -+;; This attribute is used to form an immediate operand constraint using -+;; "const__operand". -+(define_mode_attr bitimm -+ [(V16QI "uimm3") -+ (V8HI "uimm4") -+ (V4SI "uimm5") -+ (V2DI "uimm6")]) -+ -+(define_expand "vec_init" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand:LSX 1 "")] -+ "ISA_HAS_LSX" -+{ -+ loongarch_expand_vector_init (operands[0], operands[1]); -+ DONE; -+}) -+ -+;; vpickev pattern with implicit type conversion. 
-+(define_insn "vec_pack_trunc_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vec_concat: -+ (truncate: -+ (match_operand:ILSX_DWH 1 "register_operand" "f")) -+ (truncate: -+ (match_operand:ILSX_DWH 2 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vpickev.\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "")]) -+ -+(define_expand "vec_unpacks_hi_v4sf" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (float_extend:V2DF -+ (vec_select:V2SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_dup 2))))] -+ "ISA_HAS_LSX" -+{ -+ operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, true/*high_p*/); -+}) -+ -+(define_expand "vec_unpacks_lo_v4sf" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (float_extend:V2DF -+ (vec_select:V2SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_dup 2))))] -+ "ISA_HAS_LSX" -+{ -+ operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, false/*high_p*/); -+}) -+ -+(define_expand "vec_unpacks_hi_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX_WHB 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, true/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_unpacks_lo_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX_WHB 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_unpacku_hi_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX_WHB 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_unpacku_lo_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX_WHB 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/); -+ DONE; -+}) -+ -+(define_expand "vec_extract" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX 1 "register_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LSX" -+{ -+ if (mode == QImode || mode == HImode) -+ { -+ rtx dest1 = gen_reg_rtx (SImode); -+ emit_insn (gen_lsx_vpickve2gr_ (dest1, operands[1], operands[2])); -+ emit_move_insn (operands[0], -+ gen_lowpart (mode, dest1)); -+ } -+ else -+ emit_insn (gen_lsx_vpickve2gr_ (operands[0], operands[1], operands[2])); -+ DONE; -+}) -+ -+(define_expand "vec_extract" -+ [(match_operand: 0 "register_operand") -+ (match_operand:FLSX 1 "register_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx temp; -+ HOST_WIDE_INT val = INTVAL (operands[2]); -+ -+ if (val == 0) -+ temp = operands[1]; -+ else -+ { -+ rtx n = GEN_INT (val * GET_MODE_SIZE (mode)); -+ temp = gen_reg_rtx (mode); -+ emit_insn (gen_lsx_vbsrl_ (temp, operands[1], n)); -+ } -+ emit_insn (gen_lsx_vec_extract_ (operands[0], temp)); -+ DONE; -+}) -+ -+(define_insn_and_split "lsx_vec_extract_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vec_select: -+ (match_operand:FLSX 1 "register_operand" "f") -+ (parallel [(const_int 0)])))] -+ "ISA_HAS_LSX" -+ "#" -+ "&& reload_completed" -+ [(set (match_dup 0) (match_dup 1))] -+{ -+ operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); -+} -+ [(set_attr "move_type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_expand "vec_set" -+ [(match_operand:ILSX 0 "register_operand") -+ (match_operand: 1 "reg_or_0_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LSX" 
-+{ -+ rtx index = GEN_INT (1 << INTVAL (operands[2])); -+ emit_insn (gen_lsx_vinsgr2vr_ (operands[0], operands[1], -+ operands[0], index)); -+ DONE; -+}) -+ -+(define_expand "vec_set" -+ [(match_operand:FLSX 0 "register_operand") -+ (match_operand: 1 "register_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx index = GEN_INT (1 << INTVAL (operands[2])); -+ emit_insn (gen_lsx_vextrins__scalar (operands[0], operands[1], -+ operands[0], index)); -+ DONE; -+}) -+ -+(define_expand "vcondu" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand:LSX 1 "reg_or_m1_operand") -+ (match_operand:LSX 2 "reg_or_0_operand") -+ (match_operator 3 "" -+ [(match_operand:ILSX 4 "register_operand") -+ (match_operand:ILSX 5 "register_operand")])] -+ "ISA_HAS_LSX -+ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" -+{ -+ loongarch_expand_vec_cond_expr (mode, mode, operands); -+ DONE; -+}) -+ -+(define_expand "vcond" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand:LSX 1 "reg_or_m1_operand") -+ (match_operand:LSX 2 "reg_or_0_operand") -+ (match_operator 3 "" -+ [(match_operand:LSX_2 4 "register_operand") -+ (match_operand:LSX_2 5 "register_operand")])] -+ "ISA_HAS_LSX -+ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" -+{ -+ loongarch_expand_vec_cond_expr (mode, mode, operands); -+ DONE; -+}) -+ -+(define_insn "lsx_vinsgr2vr_" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (vec_merge:LSX -+ (vec_duplicate:LSX -+ (match_operand: 1 "reg_or_0_operand" "rJ")) -+ (match_operand:LSX 2 "register_operand" "0") -+ (match_operand 3 "const__operand" "")))] -+ "ISA_HAS_LSX" -+{ -+ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) -+ return "#"; -+ else -+ return "vinsgr2vr.\t%w0,%z1,%y3"; -+} -+ [(set_attr "type" "simd_insert") -+ (set_attr "mode" "")]) -+ -+(define_split -+ [(set (match_operand:LSX_D 0 "register_operand") -+ (vec_merge:LSX_D -+ (vec_duplicate:LSX_D -+ (match_operand: 1 "_operand")) -+ (match_operand:LSX_D 2 "register_operand") -+ (match_operand 3 "const__operand")))] -+ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" -+ [(const_int 0)] -+{ -+ loongarch_split_lsx_insert_d (operands[0], operands[2], operands[3], operands[1]); -+ DONE; -+}) -+ -+(define_insn "lsx_vextrins__internal" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (vec_merge:LSX -+ (vec_duplicate:LSX -+ (vec_select: -+ (match_operand:LSX 1 "register_operand" "f") -+ (parallel [(const_int 0)]))) -+ (match_operand:LSX 2 "register_operand" "0") -+ (match_operand 3 "const__operand" "")))] -+ "ISA_HAS_LSX" -+ "vextrins.\t%w0,%w1,%y3<<4" -+ [(set_attr "type" "simd_insert") -+ (set_attr "mode" "")]) -+ -+;; Operand 3 is a scalar. 
-+(define_insn "lsx_vextrins__scalar" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (vec_merge:FLSX -+ (vec_duplicate:FLSX -+ (match_operand: 1 "register_operand" "f")) -+ (match_operand:FLSX 2 "register_operand" "0") -+ (match_operand 3 "const__operand" "")))] -+ "ISA_HAS_LSX" -+ "vextrins.\t%w0,%w1,%y3<<4" -+ [(set_attr "type" "simd_insert") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vpickve2gr_" -+ [(set (match_operand: 0 "register_operand" "=r") -+ (any_extend: -+ (vec_select: -+ (match_operand:ILSX_HB 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const__operand" "")]))))] -+ "ISA_HAS_LSX" -+ "vpickve2gr.\t%0,%w1,%2" -+ [(set_attr "type" "simd_copy") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vpickve2gr_" -+ [(set (match_operand: 0 "register_operand" "=r") -+ (any_extend: -+ (vec_select: -+ (match_operand:LSX_W 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const__operand" "")]))))] -+ "ISA_HAS_LSX" -+ "vpickve2gr.\t%0,%w1,%2" -+ [(set_attr "type" "simd_copy") -+ (set_attr "mode" "")]) -+ -+(define_insn_and_split "lsx_vpickve2gr_du" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (vec_select:DI -+ (match_operand:V2DI 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_0_or_1_operand" "")])))] -+ "ISA_HAS_LSX" -+{ -+ if (TARGET_64BIT) -+ return "vpickve2gr.du\t%0,%w1,%2"; -+ else -+ return "#"; -+} -+ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" -+ [(const_int 0)] -+{ -+ loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2], -+ gen_lsx_vpickve2gr_wu); -+ DONE; -+} -+ [(set_attr "type" "simd_copy") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn_and_split "lsx_vpickve2gr_" -+ [(set (match_operand: 0 "register_operand" "=r") -+ (vec_select: -+ (match_operand:LSX_D 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const__operand" "")])))] -+ "ISA_HAS_LSX" -+{ -+ if (TARGET_64BIT) -+ return "vpickve2gr.\t%0,%w1,%2"; -+ else -+ return "#"; -+} -+ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" -+ [(const_int 0)] -+{ -+ loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2], -+ gen_lsx_vpickve2gr_w); -+ DONE; -+} -+ [(set_attr "type" "simd_copy") -+ (set_attr "mode" "")]) -+ -+ -+(define_expand "abs2" -+ [(match_operand:ILSX 0 "register_operand" "=f") -+ (abs:ILSX (match_operand:ILSX 1 "register_operand" "f"))] -+ "ISA_HAS_LSX" -+{ -+ if (ISA_HAS_LSX) -+ { -+ emit_insn (gen_vabs2 (operands[0], operands[1])); -+ DONE; -+ } else { -+ rtx reg = gen_reg_rtx (mode); -+ emit_move_insn (reg, CONST0_RTX (mode)); -+ emit_insn (gen_lsx_vadda_ (operands[0], operands[1], reg)); -+ DONE; -+ } -+}) -+ -+(define_expand "neg2" -+ [(set (match_operand:ILSX 0 "register_operand") -+ (neg:ILSX (match_operand:ILSX 1 "register_operand")))] -+ "ISA_HAS_LSX" -+{ -+ emit_insn (gen_vneg2 (operands[0], operands[1])); -+ DONE; -+}) -+ -+(define_expand "neg2" -+ [(set (match_operand:FLSX 0 "register_operand") -+ (neg:FLSX (match_operand:FLSX 1 "register_operand")))] -+ "ISA_HAS_LSX" -+{ -+ rtx reg = gen_reg_rtx (mode); -+ emit_move_insn (reg, CONST0_RTX (mode)); -+ emit_insn(gen_sub3(operands[0], reg, operands[1])); -+ DONE; -+}) -+ -+(define_expand "lsx_vrepli" -+ [(match_operand:ILSX 0 "register_operand") -+ (match_operand 1 "const_imm10_operand")] -+ "ISA_HAS_LSX" -+{ -+ if (mode == V16QImode) -+ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), -+ mode)); -+ emit_move_insn (operands[0], -+ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); -+ DONE; -+}) -+ -+(define_insn "lsx_vshuf_" 
-+ [(set (match_operand:ILSX_DWH 0 "register_operand" "=f") -+ (unspec:ILSX_DWH [(match_operand:ILSX_DWH 1 "register_operand" "0") -+ (match_operand:ILSX_DWH 2 "register_operand" "f") -+ (match_operand:ILSX_DWH 3 "register_operand" "f")] -+ UNSPEC_LSX_VSHUF))] -+ "ISA_HAS_LSX" -+ "vshuf.\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_sld") -+ (set_attr "mode" "")]) -+ -+(define_expand "mov" -+ [(set (match_operand:LSX 0) -+ (match_operand:LSX 1))] -+ "ISA_HAS_LSX" -+{ -+ if (loongarch_legitimize_move (mode, operands[0], operands[1])) -+ DONE; -+}) -+ -+(define_expand "movmisalign" -+ [(set (match_operand:LSX 0) -+ (match_operand:LSX 1))] -+ "ISA_HAS_LSX" -+{ -+ if (loongarch_legitimize_move (mode, operands[0], operands[1])) -+ DONE; -+}) -+ -+;; 128-bit LSX modes can only exist in LSX registers or memory. An exception -+;; is allowing LSX modes for GP registers for arguments and return values. -+(define_insn "mov_lsx" -+ [(set (match_operand:LSX 0 "nonimmediate_operand" "=f,f,R,*r,*f") -+ (match_operand:LSX 1 "move_operand" "fYGYI,R,f,*f,*r"))] -+ "ISA_HAS_LSX" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") -+ (set_attr "mode" "")]) -+ -+(define_split -+ [(set (match_operand:LSX 0 "nonimmediate_operand") -+ (match_operand:LSX 1 "move_operand"))] -+ "reload_completed && ISA_HAS_LSX -+ && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ [(const_int 0)] -+{ -+ loongarch_split_move_insn (operands[0], operands[1], curr_insn); -+ DONE; -+}) -+ -+;; Offset load -+(define_expand "lsx_ld_" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq10_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); -+ DONE; -+}) -+ -+;; Offset store -+(define_expand "lsx_st_" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq10_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]); -+ DONE; -+}) -+ -+;; Integer operations -+(define_insn "add3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f,f") -+ (plus:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] -+ "ISA_HAS_LSX" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "vadd.\t%w0,%w1,%w2"; -+ case 1: -+ { -+ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0)); -+ -+ operands[2] = GEN_INT (-val); -+ return "vsubi.\t%w0,%w1,%d2"; -+ } -+ case 2: -+ return "vaddi.\t%w0,%w1,%E2"; -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "alu_type" "simd_add") -+ (set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "sub3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (minus:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] -+ "ISA_HAS_LSX" -+ "@ -+ vsub.\t%w0,%w1,%w2 -+ vsubi.\t%w0,%w1,%E2" -+ [(set_attr "alu_type" "simd_add") -+ (set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "mul3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (mult:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 
"register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vmul.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_mul") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vmadd_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (plus:ILSX (mult:ILSX (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand:ILSX 3 "register_operand" "f")) -+ (match_operand:ILSX 1 "register_operand" "0")))] -+ "ISA_HAS_LSX" -+ "vmadd.\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_mul") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vmsub_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (minus:ILSX (match_operand:ILSX 1 "register_operand" "0") -+ (mult:ILSX (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand:ILSX 3 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vmsub.\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_mul") -+ (set_attr "mode" "")]) -+ -+(define_insn "div3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (div:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ { return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "udiv3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (udiv:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ { return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "mod3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (mod:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ { return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "umod3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (umod:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ { return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); } -+ [(set_attr "type" "simd_div") -+ (set_attr "mode" "")]) -+ -+(define_insn "xor3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f,f") -+ (xor:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] -+ "ISA_HAS_LSX" -+ "@ -+ vxor.v\t%w0,%w1,%w2 -+ vbitrevi.%v0\t%w0,%w1,%V2 -+ vxori.b\t%w0,%w1,%B2" -+ [(set_attr "type" "simd_logic,simd_bit,simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "ior3" -+ [(set (match_operand:LSX 0 "register_operand" "=f,f,f") -+ (ior:LSX -+ (match_operand:LSX 1 "register_operand" "f,f,f") -+ (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] -+ "ISA_HAS_LSX" -+ "@ -+ vor.v\t%w0,%w1,%w2 -+ vbitseti.%v0\t%w0,%w1,%V2 -+ vori.b\t%w0,%w1,%B2" -+ [(set_attr "type" "simd_logic,simd_bit,simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "and3" -+ [(set (match_operand:LSX 0 "register_operand" "=f,f,f") -+ (and:LSX -+ (match_operand:LSX 1 "register_operand" "f,f,f") -+ (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] -+ "ISA_HAS_LSX" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "vand.v\t%w0,%w1,%w2"; -+ case 1: -+ { -+ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); -+ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); -+ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); -+ 
return "vbitclri.%v0\t%w0,%w1,%V2"; -+ } -+ case 2: -+ return "vandi.b\t%w0,%w1,%B2"; -+ default: -+ gcc_unreachable (); -+ } -+} -+ [(set_attr "type" "simd_logic,simd_bit,simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "one_cmpl2" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (not:ILSX (match_operand:ILSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vnor.v\t%w0,%w1,%w1" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "TI")]) -+ -+(define_insn "vlshr3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (lshiftrt:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] -+ "ISA_HAS_LSX" -+ "@ -+ vsrl.\t%w0,%w1,%w2 -+ vsrli.\t%w0,%w1,%E2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "vashr3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (ashiftrt:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] -+ "ISA_HAS_LSX" -+ "@ -+ vsra.\t%w0,%w1,%w2 -+ vsrai.\t%w0,%w1,%E2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "vashl3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (ashift:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] -+ "ISA_HAS_LSX" -+ "@ -+ vsll.\t%w0,%w1,%w2 -+ vslli.\t%w0,%w1,%E2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+;; Floating-point operations -+(define_insn "add3" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (plus:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfadd.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "sub3" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (minus:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfsub.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "mul3" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (mult:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfmul.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fmul") -+ (set_attr "mode" "")]) -+ -+(define_insn "div3" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (div:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfdiv.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "fma4" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f") -+ (match_operand:FLSX 3 "register_operand" "0")))] -+ "ISA_HAS_LSX" -+ "vfmadd.\t%w0,%w1,%w2,%w0" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "fnma4" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (fma:FLSX (neg:FLSX (match_operand:FLSX 1 "register_operand" "f")) -+ (match_operand:FLSX 2 "register_operand" "f") -+ (match_operand:FLSX 3 "register_operand" "0")))] -+ "ISA_HAS_LSX" -+ "vfnmsub.\t%w0,%w1,%w2,%w0" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "sqrt2" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (sqrt:FLSX 
(match_operand:FLSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfsqrt.\t%w0,%w1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+;; Built-in functions -+(define_insn "lsx_vadda_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (plus:ILSX (abs:ILSX (match_operand:ILSX 1 "register_operand" "f")) -+ (abs:ILSX (match_operand:ILSX 2 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vadda.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "ssadd3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (ss_plus:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vsadd.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "usadd3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (us_plus:ILSX (match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vsadd.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vabsd_s_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_ASUB_S))] -+ "ISA_HAS_LSX" -+ "vabsd.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vabsd_u_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VABSD_U))] -+ "ISA_HAS_LSX" -+ "vabsd.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vavg_s_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VAVG_S))] -+ "ISA_HAS_LSX" -+ "vavg.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vavg_u_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VAVG_U))] -+ "ISA_HAS_LSX" -+ "vavg.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vavgr_s_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VAVGR_S))] -+ "ISA_HAS_LSX" -+ "vavgr.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vavgr_u_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VAVGR_U))] -+ "ISA_HAS_LSX" -+ "vavgr.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitclr_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VBITCLR))] -+ "ISA_HAS_LSX" -+ "vbitclr.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitclri_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX 
[(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VBITCLRI))] -+ "ISA_HAS_LSX" -+ "vbitclri.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitrev_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VBITREV))] -+ "ISA_HAS_LSX" -+ "vbitrev.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitrevi_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const_lsx_branch_operand" "")] -+ UNSPEC_LSX_VBITREVI))] -+ "ISA_HAS_LSX" -+ "vbitrevi.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitsel_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (ior:ILSX (and:ILSX (not:ILSX -+ (match_operand:ILSX 3 "register_operand" "f")) -+ (match_operand:ILSX 1 "register_operand" "f")) -+ (and:ILSX (match_dup 3) -+ (match_operand:ILSX 2 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vbitsel.v\t%w0,%w1,%w2,%w3" -+ [(set_attr "type" "simd_bitmov") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitseli_b" -+ [(set (match_operand:V16QI 0 "register_operand" "=f") -+ (ior:V16QI (and:V16QI (not:V16QI -+ (match_operand:V16QI 1 "register_operand" "0")) -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (and:V16QI (match_dup 1) -+ (match_operand:V16QI 3 "const_vector_same_val_operand" "Urv8"))))] -+ "ISA_HAS_LSX" -+ "vbitseli.b\t%w0,%w2,%B3" -+ [(set_attr "type" "simd_bitmov") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vbitset_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VBITSET))] -+ "ISA_HAS_LSX" -+ "vbitset.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbitseti_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VBITSETI))] -+ "ISA_HAS_LSX" -+ "vbitseti.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_code_iterator ICC [eq le leu lt ltu]) -+ -+(define_code_attr icc -+ [(eq "eq") -+ (le "le") -+ (leu "le") -+ (lt "lt") -+ (ltu "lt")]) -+ -+(define_code_attr icci -+ [(eq "eqi") -+ (le "lei") -+ (leu "lei") -+ (lt "lti") -+ (ltu "lti")]) -+ -+(define_code_attr cmpi -+ [(eq "s") -+ (le "s") -+ (leu "u") -+ (lt "s") -+ (ltu "u")]) -+ -+(define_code_attr cmpi_1 -+ [(eq "") -+ (le "") -+ (leu "u") -+ (lt "") -+ (ltu "u")]) -+ -+(define_insn "lsx_vs_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (ICC:ILSX -+ (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] -+ "ISA_HAS_LSX" -+ "@ -+ vs.\t%w0,%w1,%w2 -+ vs.\t%w0,%w1,%E2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfclass_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFCLASS))] -+ "ISA_HAS_LSX" -+ "vfclass.\t%w0,%w1" -+ [(set_attr "type" "simd_fclass") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfcmp_caf_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: 
[(match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VFCMP_CAF))] -+ "ISA_HAS_LSX" -+ "vfcmp.caf.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfcmp_cune_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VFCMP_CUNE))] -+ "ISA_HAS_LSX" -+ "vfcmp.cune.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+(define_code_iterator vfcond [unordered ordered eq ne le lt uneq unle unlt]) -+ -+(define_code_attr fcc -+ [(unordered "cun") -+ (ordered "cor") -+ (eq "ceq") -+ (ne "cne") -+ (uneq "cueq") -+ (unle "cule") -+ (unlt "cult") -+ (le "cle") -+ (lt "clt")]) -+ -+(define_int_iterator FSC_UNS [UNSPEC_LSX_VFCMP_SAF UNSPEC_LSX_VFCMP_SUN UNSPEC_LSX_VFCMP_SOR -+ UNSPEC_LSX_VFCMP_SEQ UNSPEC_LSX_VFCMP_SNE UNSPEC_LSX_VFCMP_SUEQ -+ UNSPEC_LSX_VFCMP_SUNE UNSPEC_LSX_VFCMP_SULE UNSPEC_LSX_VFCMP_SULT -+ UNSPEC_LSX_VFCMP_SLE UNSPEC_LSX_VFCMP_SLT]) -+ -+(define_int_attr fsc -+ [(UNSPEC_LSX_VFCMP_SAF "saf") -+ (UNSPEC_LSX_VFCMP_SUN "sun") -+ (UNSPEC_LSX_VFCMP_SOR "sor") -+ (UNSPEC_LSX_VFCMP_SEQ "seq") -+ (UNSPEC_LSX_VFCMP_SNE "sne") -+ (UNSPEC_LSX_VFCMP_SUEQ "sueq") -+ (UNSPEC_LSX_VFCMP_SUNE "sune") -+ (UNSPEC_LSX_VFCMP_SULE "sule") -+ (UNSPEC_LSX_VFCMP_SULT "sult") -+ (UNSPEC_LSX_VFCMP_SLE "sle") -+ (UNSPEC_LSX_VFCMP_SLT "slt")]) -+ -+(define_insn "lsx_vfcmp__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vfcond: (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfcmp..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfcmp__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")] -+ FSC_UNS))] -+ "ISA_HAS_LSX" -+ "vfcmp..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "")]) -+ -+(define_mode_attr fint -+ [(V4SF "v4si") -+ (V2DF "v2di")]) -+ -+(define_mode_attr FINTCNV -+ [(V4SF "I2S") -+ (V2DF "I2D")]) -+ -+(define_mode_attr FINTCNV_2 -+ [(V4SF "S2I") -+ (V2DF "D2I")]) -+ -+(define_insn "float2" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (float:FLSX (match_operand: 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vffint..\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "floatuns2" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unsigned_float:FLSX -+ (match_operand: 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vffint..\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_mode_attr FFQ -+ [(V4SF "V8HI") -+ (V2DF "V4SI")]) -+ -+(define_insn "lsx_vreplgr2vr_" -+ [(set (match_operand:LSX 0 "register_operand" "=f,f") -+ (vec_duplicate:LSX -+ (match_operand: 1 "reg_or_0_operand" "r,J")))] -+ "ISA_HAS_LSX" -+{ -+ if (which_alternative == 1) -+ return "ldi.\t%w0,0"; -+ -+ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) -+ return "#"; -+ else -+ return "vreplgr2vr.\t%w0,%z1"; -+} -+ [(set_attr "type" "simd_fill") -+ (set_attr "mode" "")]) -+ -+(define_split -+ [(set (match_operand:LSX_D 0 "register_operand") -+ (vec_duplicate:LSX_D -+ (match_operand: 1 "register_operand")))] -+ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" -+ 
[(const_int 0)] -+{ -+ loongarch_split_lsx_fill_d (operands[0], operands[1]); -+ DONE; -+}) -+ -+(define_insn "lsx_vflogb_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFLOGB))] -+ "ISA_HAS_LSX" -+ "vflogb.\t%w0,%w1" -+ [(set_attr "type" "simd_flog2") -+ (set_attr "mode" "")]) -+ -+(define_insn "smax3" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (smax:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfmax.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfmaxa_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (if_then_else:FLSX -+ (gt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f")) -+ (abs:FLSX (match_operand:FLSX 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "ISA_HAS_LSX" -+ "vfmaxa.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "smin3" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (smin:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfmin.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfmina_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (if_then_else:FLSX -+ (lt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f")) -+ (abs:FLSX (match_operand:FLSX 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "ISA_HAS_LSX" -+ "vfmina.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fminmax") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfrecip_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRECIP))] -+ "ISA_HAS_LSX" -+ "vfrecip.\t%w0,%w1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfrint_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINT))] -+ "ISA_HAS_LSX" -+ "vfrint.\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfrsqrt_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRSQRT))] -+ "ISA_HAS_LSX" -+ "vfrsqrt.\t%w0,%w1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vftint_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINT_S))] -+ "ISA_HAS_LSX" -+ "vftint..\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vftint_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINT_U))] -+ "ISA_HAS_LSX" -+ "vftint..\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "fix_trunc2" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (fix: (match_operand:FLSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vftintrz..\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "fixuns_trunc2" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unsigned_fix: (match_operand:FLSX 1 
"register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vftintrz..\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "cnv_mode" "") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vhw_h_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (addsub:V8HI -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))))] -+ "ISA_HAS_LSX" -+ "vhw.h.b\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vhw_w_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (addsub:V4SI -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LSX" -+ "vhw.w.h\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vhw_d_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (addsub:V2DI -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3)]))) -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2)])))))] -+ "ISA_HAS_LSX" -+ "vhw.d.w\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vpackev_b" -+ [(set (match_operand:V16QI 0 "register_operand" "=f") -+ (vec_select:V16QI -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 16) -+ (const_int 2) (const_int 18) -+ (const_int 4) (const_int 20) -+ (const_int 6) (const_int 22) -+ (const_int 8) (const_int 24) -+ (const_int 10) (const_int 26) -+ (const_int 12) (const_int 28) -+ (const_int 14) (const_int 30)])))] -+ "ISA_HAS_LSX" -+ "vpackev.b\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vpackev_h" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (vec_select:V8HI -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (match_operand:V8HI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 8) -+ (const_int 2) (const_int 10) -+ (const_int 4) (const_int 12) -+ (const_int 6) (const_int 14)])))] -+ "ISA_HAS_LSX" -+ "vpackev.h\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vpackev_w" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (vec_select:V4SI -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (match_operand:V4SI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 4) -+ (const_int 2) (const_int 6)])))] -+ "ISA_HAS_LSX" -+ "vpackev.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vpackev_w_f" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_select:V4SF -+ (vec_concat:V8SF -+ 
(match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 4) -+ (const_int 2) (const_int 6)])))] -+ "ISA_HAS_LSX" -+ "vpackev.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vilvh_b" -+ [(set (match_operand:V16QI 0 "register_operand" "=f") -+ (vec_select:V16QI -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (parallel [(const_int 8) (const_int 24) -+ (const_int 9) (const_int 25) -+ (const_int 10) (const_int 26) -+ (const_int 11) (const_int 27) -+ (const_int 12) (const_int 28) -+ (const_int 13) (const_int 29) -+ (const_int 14) (const_int 30) -+ (const_int 15) (const_int 31)])))] -+ "ISA_HAS_LSX" -+ "vilvh.b\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vilvh_h" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (vec_select:V8HI -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (match_operand:V8HI 2 "register_operand" "f")) -+ (parallel [(const_int 4) (const_int 12) -+ (const_int 5) (const_int 13) -+ (const_int 6) (const_int 14) -+ (const_int 7) (const_int 15)])))] -+ "ISA_HAS_LSX" -+ "vilvh.h\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vilvh_w" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (vec_select:V4SI -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (match_operand:V4SI 2 "register_operand" "f")) -+ (parallel [(const_int 2) (const_int 6) -+ (const_int 3) (const_int 7)])))] -+ "ISA_HAS_LSX" -+ "vilvh.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vilvh_w_f" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_select:V4SF -+ (vec_concat:V8SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")) -+ (parallel [(const_int 2) (const_int 6) -+ (const_int 3) (const_int 7)])))] -+ "ISA_HAS_LSX" -+ "vilvh.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vilvh_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (vec_select:V2DI -+ (vec_concat:V4DI -+ (match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3)])))] -+ "ISA_HAS_LSX" -+ "vilvh.d\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vilvh_d_f" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (vec_select:V2DF -+ (vec_concat:V4DF -+ (match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3)])))] -+ "ISA_HAS_LSX" -+ "vilvh.d\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vpackod_b" -+ [(set (match_operand:V16QI 0 "register_operand" "=f") -+ (vec_select:V16QI -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 17) -+ (const_int 3) (const_int 19) -+ (const_int 5) (const_int 21) -+ (const_int 7) (const_int 23) -+ (const_int 9) (const_int 25) -+ (const_int 11) (const_int 27) -+ (const_int 13) (const_int 29) -+ (const_int 15) (const_int 31)])))] -+ "ISA_HAS_LSX" -+ "vpackod.b\t%w0,%w2,%w1" -+ 
[(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vpackod_h" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (vec_select:V8HI -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (match_operand:V8HI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 9) -+ (const_int 3) (const_int 11) -+ (const_int 5) (const_int 13) -+ (const_int 7) (const_int 15)])))] -+ "ISA_HAS_LSX" -+ "vpackod.h\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vpackod_w" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (vec_select:V4SI -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (match_operand:V4SI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 5) -+ (const_int 3) (const_int 7)])))] -+ "ISA_HAS_LSX" -+ "vpackod.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vpackod_w_f" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_select:V4SF -+ (vec_concat:V8SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 5) -+ (const_int 3) (const_int 7)])))] -+ "ISA_HAS_LSX" -+ "vpackod.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vilvl_b" -+ [(set (match_operand:V16QI 0 "register_operand" "=f") -+ (vec_select:V16QI -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 16) -+ (const_int 1) (const_int 17) -+ (const_int 2) (const_int 18) -+ (const_int 3) (const_int 19) -+ (const_int 4) (const_int 20) -+ (const_int 5) (const_int 21) -+ (const_int 6) (const_int 22) -+ (const_int 7) (const_int 23)])))] -+ "ISA_HAS_LSX" -+ "vilvl.b\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vilvl_h" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (vec_select:V8HI -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (match_operand:V8HI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 8) -+ (const_int 1) (const_int 9) -+ (const_int 2) (const_int 10) -+ (const_int 3) (const_int 11)])))] -+ "ISA_HAS_LSX" -+ "vilvl.h\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vilvl_w" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (vec_select:V4SI -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (match_operand:V4SI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 4) -+ (const_int 1) (const_int 5)])))] -+ "ISA_HAS_LSX" -+ "vilvl.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vilvl_w_f" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_select:V4SF -+ (vec_concat:V8SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 4) -+ (const_int 1) (const_int 5)])))] -+ "ISA_HAS_LSX" -+ "vilvl.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vilvl_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (vec_select:V2DI -+ (vec_concat:V4DI -+ (match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")) -+ 
(parallel [(const_int 0) (const_int 2)])))] -+ "ISA_HAS_LSX" -+ "vilvl.d\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vilvl_d_f" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (vec_select:V2DF -+ (vec_concat:V4DF -+ (match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2)])))] -+ "ISA_HAS_LSX" -+ "vilvl.d\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "smax3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (smax:ILSX (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] -+ "ISA_HAS_LSX" -+ "@ -+ vmax.\t%w0,%w1,%w2 -+ vmaxi.\t%w0,%w1,%E2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "umax3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (umax:ILSX (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] -+ "ISA_HAS_LSX" -+ "@ -+ vmax.\t%w0,%w1,%w2 -+ vmaxi.\t%w0,%w1,%B2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "smin3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (smin:ILSX (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] -+ "ISA_HAS_LSX" -+ "@ -+ vmin.\t%w0,%w1,%w2 -+ vmini.\t%w0,%w1,%E2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "umin3" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (umin:ILSX (match_operand:ILSX 1 "register_operand" "f,f") -+ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] -+ "ISA_HAS_LSX" -+ "@ -+ vmin.\t%w0,%w1,%w2 -+ vmini.\t%w0,%w1,%B2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vclo_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VCLO))] -+ "ISA_HAS_LSX" -+ "vclo.\t%w0,%w1" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "clz2" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (clz:ILSX (match_operand:ILSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vclz.\t%w0,%w1" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_nor_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (and:ILSX (not:ILSX (match_operand:ILSX 1 "register_operand" "f,f")) -+ (not:ILSX (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] -+ "ISA_HAS_LSX" -+ "@ -+ vnor.v\t%w0,%w1,%w2 -+ vnori.b\t%w0,%w1,%B2" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vpickev_b" -+[(set (match_operand:V16QI 0 "register_operand" "=f") -+ (vec_select:V16QI -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14) -+ (const_int 16) (const_int 18) -+ (const_int 20) (const_int 22) -+ (const_int 24) (const_int 26) -+ (const_int 28) (const_int 30)])))] -+ "ISA_HAS_LSX" -+ "vpickev.b\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vpickev_h" -+[(set (match_operand:V8HI 0 "register_operand" "=f") -+ 
(vec_select:V8HI -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (match_operand:V8HI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))] -+ "ISA_HAS_LSX" -+ "vpickev.h\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vpickev_w" -+[(set (match_operand:V4SI 0 "register_operand" "=f") -+ (vec_select:V4SI -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (match_operand:V4SI 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))] -+ "ISA_HAS_LSX" -+ "vpickev.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vpickev_w_f" -+[(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_select:V4SF -+ (vec_concat:V8SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")) -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))] -+ "ISA_HAS_LSX" -+ "vpickev.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vpickod_b" -+[(set (match_operand:V16QI 0 "register_operand" "=f") -+ (vec_select:V16QI -+ (vec_concat:V32QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (match_operand:V16QI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15) -+ (const_int 17) (const_int 19) -+ (const_int 21) (const_int 23) -+ (const_int 25) (const_int 27) -+ (const_int 29) (const_int 31)])))] -+ "ISA_HAS_LSX" -+ "vpickod.b\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V16QI")]) -+ -+(define_insn "lsx_vpickod_h" -+[(set (match_operand:V8HI 0 "register_operand" "=f") -+ (vec_select:V8HI -+ (vec_concat:V16HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (match_operand:V8HI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)])))] -+ "ISA_HAS_LSX" -+ "vpickod.h\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vpickod_w" -+[(set (match_operand:V4SI 0 "register_operand" "=f") -+ (vec_select:V4SI -+ (vec_concat:V8SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (match_operand:V4SI 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)])))] -+ "ISA_HAS_LSX" -+ "vpickod.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vpickod_w_f" -+[(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_select:V4SF -+ (vec_concat:V8SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")) -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)])))] -+ "ISA_HAS_LSX" -+ "vpickod.w\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_permute") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "popcount2" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (popcount:ILSX (match_operand:ILSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vpcnt.\t%w0,%w1" -+ [(set_attr "type" "simd_pcnt") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsat_s_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 
"register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSAT_S))] -+ "ISA_HAS_LSX" -+ "vsat.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_sat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsat_u_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSAT_U))] -+ "ISA_HAS_LSX" -+ "vsat.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_sat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vshuf4i_" -+ [(set (match_operand:LSX_WHB_W 0 "register_operand" "=f") -+ (vec_select:LSX_WHB_W -+ (match_operand:LSX_WHB_W 1 "register_operand" "f") -+ (match_operand 2 "par_const_vector_shf_set_operand" "")))] -+ "ISA_HAS_LSX" -+{ -+ HOST_WIDE_INT val = 0; -+ unsigned int i; -+ -+ /* We convert the selection to an immediate. */ -+ for (i = 0; i < 4; i++) -+ val |= INTVAL (XVECEXP (operands[2], 0, i)) << (2 * i); -+ -+ operands[2] = GEN_INT (val); -+ return "vshuf4i.\t%w0,%w1,%X2"; -+} -+ [(set_attr "type" "simd_shf") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrar_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VSRAR))] -+ "ISA_HAS_LSX" -+ "vsrar.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrari_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSRARI))] -+ "ISA_HAS_LSX" -+ "vsrari.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrlr_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VSRLR))] -+ "ISA_HAS_LSX" -+ "vsrlr.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrlri_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSRLRI))] -+ "ISA_HAS_LSX" -+ "vsrlri.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssub_s_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSUB_S))] -+ "ISA_HAS_LSX" -+ "vssub.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssub_u_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSUB_U))] -+ "ISA_HAS_LSX" -+ "vssub.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vreplve_" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (unspec:LSX [(match_operand:LSX 1 "register_operand" "f") -+ (match_operand:SI 2 "register_operand" "r")] -+ UNSPEC_LSX_VREPLVE))] -+ "ISA_HAS_LSX" -+ "vreplve.\t%w0,%w1,%z2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vreplvei_" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (vec_duplicate:LSX -+ (vec_select: -+ (match_operand:LSX 1 "register_operand" "f") -+ (parallel 
[(match_operand 2 "const__operand" "")]))))] -+ "ISA_HAS_LSX" -+ "vreplvei.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vreplvei__scalar" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand: 1 "register_operand" "f")] -+ UNSPEC_LSX_VREPLVEI))] -+ "ISA_HAS_LSX" -+ "vreplvei.\t%w0,%w1,0" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfcvt_h_s" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "f") -+ (match_operand:V4SF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFCVT))] -+ "ISA_HAS_LSX" -+ "vfcvt.h.s\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vfcvt_s_d" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFCVT))] -+ "ISA_HAS_LSX" -+ "vfcvt.s.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "vec_pack_trunc_v2df" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (vec_concat:V4SF -+ (float_truncate:V2SF (match_operand:V2DF 1 "register_operand" "f")) -+ (float_truncate:V2SF (match_operand:V2DF 2 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vfcvt.s.d\t%w0,%w2,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfcvth_s_h" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")] -+ UNSPEC_LSX_VFCVTH))] -+ "ISA_HAS_LSX" -+ "vfcvth.s.h\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfcvth_d_s" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (float_extend:V2DF -+ (vec_select:V2SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (parallel [(const_int 2) (const_int 3)]))))] -+ "ISA_HAS_LSX" -+ "vfcvth.d.s\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vfcvtl_s_h" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")] -+ UNSPEC_LSX_VFCVTL))] -+ "ISA_HAS_LSX" -+ "vfcvtl.s.h\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfcvtl_d_s" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (float_extend:V2DF -+ (vec_select:V2SF -+ (match_operand:V4SF 1 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 1)]))))] -+ "ISA_HAS_LSX" -+ "vfcvtl.d.s\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V2DF")]) -+ -+(define_code_attr lsxbr -+ [(eq "bz") -+ (ne "bnz")]) -+ -+(define_code_attr lsxeq_v -+ [(eq "eqz") -+ (ne "nez")]) -+ -+(define_code_attr lsxne_v -+ [(eq "nez") -+ (ne "eqz")]) -+ -+(define_code_attr lsxeq -+ [(eq "anyeqz") -+ (ne "allnez")]) -+ -+(define_code_attr lsxne -+ [(eq "allnez") -+ (ne "anyeqz")]) -+ -+(define_insn "lsx__" -+ [(set (pc) (if_then_else -+ (equality_op -+ (unspec:SI [(match_operand:LSX 1 "register_operand" "f")] -+ UNSPEC_LSX_BRANCH) -+ (match_operand:SI 2 "const_0_operand")) -+ (label_ref (match_operand 0)) -+ (pc))) -+ (clobber (match_scratch:FCC 3 "=z"))] -+ "ISA_HAS_LSX" -+{ -+ return loongarch_output_conditional_branch (insn, operands, -+ "vset.\t%Z3%w1\n\tbcnez\t%Z3%0", -+ "vset.\t%Z3%w1\n\tbcnez\t%Z3%0"); -+} -+ [(set_attr "type" "simd_branch") -+ (set_attr "mode" "") -+ (set_attr 
"compact_form" "never")]) -+ -+(define_insn "lsx__v_" -+ [(set (pc) (if_then_else -+ (equality_op -+ (unspec:SI [(match_operand:LSX 1 "register_operand" "f")] -+ UNSPEC_LSX_BRANCH_V) -+ (match_operand:SI 2 "const_0_operand")) -+ (label_ref (match_operand 0)) -+ (pc))) -+ (clobber (match_scratch:FCC 3 "=z"))] -+ "ISA_HAS_LSX" -+{ -+ return loongarch_output_conditional_branch (insn, operands, -+ "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0", -+ "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0"); -+} -+ [(set_attr "type" "simd_branch") -+ (set_attr "mode" "TI") -+ (set_attr "compact_form" "never")]) -+ -+;; vec_concate -+(define_expand "vec_concatv2di" -+ [(set (match_operand:V2DI 0 "register_operand") -+ (vec_concat:V2DI -+ (match_operand:DI 1 "register_operand") -+ (match_operand:DI 2 "register_operand")))] -+ "ISA_HAS_LSX" -+{ -+ emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[1], -+ operands[0], GEN_INT(0))); -+ emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[2], -+ operands[0], GEN_INT(1))); -+ DONE; -+}) -+ -+ -+(define_insn "vandn3" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (and:LSX (not:LSX (match_operand:LSX 1 "register_operand" "f")) -+ (match_operand:LSX 2 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vandn.v\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "vabs2" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (abs:ILSX (match_operand:ILSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vsigncov.\t%w0,%w1,%w1" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "vneg2" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (neg:ILSX (match_operand:ILSX 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vneg.\t%w0,%w1" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vmuh_s_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VMUH_S))] -+ "ISA_HAS_LSX" -+ "vmuh.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vmuh_u_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VMUH_U))] -+ "ISA_HAS_LSX" -+ "vmuh.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vextw_s_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")] -+ UNSPEC_LSX_VEXTW_S))] -+ "ISA_HAS_LSX" -+ "vextw_s.d\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vextw_u_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")] -+ UNSPEC_LSX_VEXTW_U))] -+ "ISA_HAS_LSX" -+ "vextw_u.d\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vsllwil_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSLLWIL_S))] -+ "ISA_HAS_LSX" -+ "vsllwil..\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsllwil_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ 
UNSPEC_LSX_VSLLWIL_U))] -+ "ISA_HAS_LSX" -+ "vsllwil..\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsran__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSRAN))] -+ "ISA_HAS_LSX" -+ "vsran..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssran_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSRAN_S))] -+ "ISA_HAS_LSX" -+ "vssran..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssran_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSRAN_U))] -+ "ISA_HAS_LSX" -+ "vssran..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrain_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSRAIN))] -+ "ISA_HAS_LSX" -+ "vsrain.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+;; FIXME: bitimm -+(define_insn "lsx_vsrains_s_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSRAINS_S))] -+ "ISA_HAS_LSX" -+ "vsrains_s.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+;; FIXME: bitimm -+(define_insn "lsx_vsrains_u_" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VSRAINS_U))] -+ "ISA_HAS_LSX" -+ "vsrains_u.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrarn__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSRARN))] -+ "ISA_HAS_LSX" -+ "vsrarn..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrarn_s__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSRARN_S))] -+ "ISA_HAS_LSX" -+ "vssrarn..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrarn_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSRARN_U))] -+ "ISA_HAS_LSX" -+ "vssrarn..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrln__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSRLN))] -+ "ISA_HAS_LSX" -+ "vsrln..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrln_u__" -+ [(set (match_operand: 
0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSRLN_U))] -+ "ISA_HAS_LSX" -+ "vssrln..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrlrn__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSRLRN))] -+ "ISA_HAS_LSX" -+ "vsrlrn..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrlrn_u__" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") -+ (match_operand:ILSX_DWH 2 "register_operand" "f")] -+ UNSPEC_LSX_VSSRLRN_U))] -+ "ISA_HAS_LSX" -+ "vssrlrn..\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfrstpi_" -+ [(set (match_operand:ILSX_HB 0 "register_operand" "=f") -+ (unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0") -+ (match_operand:ILSX_HB 2 "register_operand" "f") -+ (match_operand 3 "const_uimm5_operand" "")] -+ UNSPEC_LSX_VFRSTPI))] -+ "ISA_HAS_LSX" -+ "vfrstpi.\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vfrstp_" -+ [(set (match_operand:ILSX_HB 0 "register_operand" "=f") -+ (unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0") -+ (match_operand:ILSX_HB 2 "register_operand" "f") -+ (match_operand:ILSX_HB 3 "register_operand" "f")] -+ UNSPEC_LSX_VFRSTP))] -+ "ISA_HAS_LSX" -+ "vfrstp.\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vshuf4i_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") -+ (match_operand:V2DI 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand")] -+ UNSPEC_LSX_VSHUF4I))] -+ "ISA_HAS_LSX" -+ "vshuf4i.d\t%w0,%w2,%3" -+ [(set_attr "type" "simd_sld") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vbsrl_" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (unspec:LSX [(match_operand:LSX 1 "register_operand" "f") -+ (match_operand 2 "const_uimm5_operand" "")] -+ UNSPEC_LSX_VBSRL_V))] -+ "ISA_HAS_LSX" -+ "vbsrl.v\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vbsll_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const_uimm5_operand" "")] -+ UNSPEC_LSX_VBSLL_V))] -+ "ISA_HAS_LSX" -+ "vbsll.v\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vextrins_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VEXTRINS))] -+ "ISA_HAS_LSX" -+ "vextrins.\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vmskltz_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VMSKLTZ))] -+ "ISA_HAS_LSX" -+ "vmskltz.\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsigncov_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 
"register_operand" "f") -+ (match_operand:ILSX 2 "register_operand" "f")] -+ UNSPEC_LSX_VSIGNCOV))] -+ "ISA_HAS_LSX" -+ "vsigncov.\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "")]) -+ -+(define_expand "copysign3" -+ [(set (match_dup 4) -+ (and:FLSX -+ (not:FLSX (match_dup 3)) -+ (match_operand:FLSX 1 "register_operand"))) -+ (set (match_dup 5) -+ (and:FLSX (match_dup 3) -+ (match_operand:FLSX 2 "register_operand"))) -+ (set (match_operand:FLSX 0 "register_operand") -+ (ior:FLSX (match_dup 4) (match_dup 5)))] -+ "ISA_HAS_LSX" -+{ -+ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); -+ -+ operands[4] = gen_reg_rtx (mode); -+ operands[5] = gen_reg_rtx (mode); -+}) -+ -+(define_insn "absv2df2" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (abs:V2DF (match_operand:V2DF 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vbitclri.d\t%w0,%w1,63" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "absv4sf2" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (abs:V4SF (match_operand:V4SF 1 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vbitclri.w\t%w0,%w1,31" -+ [(set_attr "type" "simd_logic") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "vfmadd4" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f") -+ (match_operand:FLSX 3 "register_operand" "f")))] -+ "ISA_HAS_LSX" -+ "vfmadd.\t%w0,%w1,$w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "vfmsub4" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f") -+ (neg:FLSX (match_operand:FLSX 3 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vfmsub.\t%w0,%w1,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "vfnmsub4_nmsub4" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (neg:FLSX -+ (fma:FLSX -+ (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f") -+ (neg:FLSX (match_operand:FLSX 3 "register_operand" "f")))))] -+ "ISA_HAS_LSX" -+ "vfnmsub.\t%w0,%w1,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+ -+(define_insn "vfnmadd4_nmadd4" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (neg:FLSX -+ (fma:FLSX -+ (match_operand:FLSX 1 "register_operand" "f") -+ (match_operand:FLSX 2 "register_operand" "f") -+ (match_operand:FLSX 3 "register_operand" "f"))))] -+ "ISA_HAS_LSX" -+ "vfnmadd.\t%w0,%w1,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vftintrne_w_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRNE))] -+ "ISA_HAS_LSX" -+ "vftintrne.w.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrne_l_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRNE))] -+ "ISA_HAS_LSX" -+ "vftintrne.l.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftintrp_w_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRP))] -+ "ISA_HAS_LSX" -+ "vftintrp.w.s\t%w0,%w1" -+ [(set_attr "type" 
"simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrp_l_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRP))] -+ "ISA_HAS_LSX" -+ "vftintrp.l.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftintrm_w_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRM))] -+ "ISA_HAS_LSX" -+ "vftintrm.w.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrm_l_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRM))] -+ "ISA_HAS_LSX" -+ "vftintrm.l.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftint_w_d" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFTINT_W_D))] -+ "ISA_HAS_LSX" -+ "vftint.w.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vffint_s_l" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VFFINT_S_L))] -+ "ISA_HAS_LSX" -+ "vffint.s.l\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vftintrz_w_d" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRZ_W_D))] -+ "ISA_HAS_LSX" -+ "vftintrz.w.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftintrp_w_d" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRP_W_D))] -+ "ISA_HAS_LSX" -+ "vftintrp.w.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftintrm_w_d" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRM_W_D))] -+ "ISA_HAS_LSX" -+ "vftintrm.w.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftintrne_w_d" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") -+ (match_operand:V2DF 2 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRNE_W_D))] -+ "ISA_HAS_LSX" -+ "vftintrne.w.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vftinth_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTH_L_H))] -+ "ISA_HAS_LSX" -+ "vftinth.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintl_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTL_L_S))] -+ "ISA_HAS_LSX" -+ "vftintl.l.s\t%w0,%w1" -+ 
[(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vffinth_d_w" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")] -+ UNSPEC_LSX_VFFINTH_D_W))] -+ "ISA_HAS_LSX" -+ "vffinth.d.w\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vffintl_d_w" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")] -+ UNSPEC_LSX_VFFINTL_D_W))] -+ "ISA_HAS_LSX" -+ "vffintl.d.w\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vftintrzh_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRZH_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrzh.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrzl_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRZL_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrzl.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrph_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRPH_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrph.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrpl_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRPL_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrpl.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrmh_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRMH_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrmh.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrml_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRML_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrml.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrneh_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRNEH_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrneh.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vftintrnel_l_s" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFTINTRNEL_L_S))] -+ "ISA_HAS_LSX" -+ "vftintrnel.l.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfrintrne_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRNE_S))] -+ "ISA_HAS_LSX" -+ "vfrintrne.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfrintrne_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRNE_D))] -+ "ISA_HAS_LSX" -+ 
"vfrintrne.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vfrintrz_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRZ_S))] -+ "ISA_HAS_LSX" -+ "vfrintrz.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfrintrz_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRZ_D))] -+ "ISA_HAS_LSX" -+ "vfrintrz.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vfrintrp_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRP_S))] -+ "ISA_HAS_LSX" -+ "vfrintrp.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfrintrp_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRP_D))] -+ "ISA_HAS_LSX" -+ "vfrintrp.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+(define_insn "lsx_vfrintrm_s" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRM_S))] -+ "ISA_HAS_LSX" -+ "vfrintrm.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "lsx_vfrintrm_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINTRM_D))] -+ "ISA_HAS_LSX" -+ "vfrintrm.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+;; Offset load and broadcast -+(define_expand "lsx_vldrepl_" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq12_operand")] -+ "ISA_HAS_LSX" -+{ -+ emit_insn (gen_lsx_vldrepl__insn -+ (operands[0], operands[1], operands[2])); -+ DONE; -+}) -+ -+(define_insn "lsx_vldrepl__insn" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (vec_duplicate:LSX -+ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand 2 "aq12_operand" )))))] -+ "ISA_HAS_LSX" -+{ -+ return "vldrepl.\t%w0,%1,%2"; -+} -+ [(set_attr "type" "simd_load") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ -+;; Offset store by sel -+(define_expand "lsx_vstelm_" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand 3 "const__operand") -+ (match_operand 2 "aq8_operand") -+ (match_operand 1 "pmode_register_operand")] -+ "ISA_HAS_LSX" -+{ -+ emit_insn (gen_lsx_vstelm__insn -+ (operands[1], operands[2], operands[0], operands[3])); -+ DONE; -+}) -+ -+(define_insn "lsx_vstelm__insn" -+ [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r") -+ (match_operand 1 "aq8_operand" ))) -+ (vec_select: -+ (match_operand:LSX 2 "register_operand" "f") -+ (parallel [(match_operand 3 "const__operand" "")])))] -+ -+ "ISA_HAS_LSX" -+{ -+ return "vstelm.\t%w2,%0,%1,%3"; -+} -+ [(set_attr "type" "simd_store") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ -+(define_expand "lsx_vld" -+ [(match_operand:V16QI 0 "register_operand") -+ (match_operand 1 "pmode_register_operand") -+ (match_operand 2 "aq12b_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], -+ INTVAL (operands[2])); -+ 
loongarch_emit_move (operands[0], gen_rtx_MEM (V16QImode, addr));
-+  DONE;
-+})
-+
-+(define_expand "lsx_vst"
-+  [(match_operand:V16QI 0 "register_operand")
-+   (match_operand 1 "pmode_register_operand")
-+   (match_operand 2 "aq12b_operand")]
-+  "ISA_HAS_LSX"
-+{
-+  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
-+			    INTVAL (operands[2]));
-+  loongarch_emit_move (gen_rtx_MEM (V16QImode, addr), operands[0]);
-+  DONE;
-+})
-+
-+(define_insn "lsx_vssrln_<hlsxfmt>_<lsxfmt>"
-+  [(set (match_operand:<VHMODE> 0 "register_operand" "=f")
-+	(unspec:<VHMODE> [(match_operand:ILSX_DWH 1 "register_operand" "f")
-+			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
-+			 UNSPEC_LSX_VSSRLN))]
-+  "ISA_HAS_LSX"
-+  "vssrln.<hlsxfmt>.<lsxfmt>\t%w0,%w1,%w2"
-+  [(set_attr "type" "simd_int_arith")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "lsx_vssrlrn_<hlsxfmt>_<lsxfmt>"
-+  [(set (match_operand:<VHMODE> 0 "register_operand" "=f")
-+	(unspec:<VHMODE> [(match_operand:ILSX_DWH 1 "register_operand" "f")
-+			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
-+			 UNSPEC_LSX_VSSRLRN))]
-+  "ISA_HAS_LSX"
-+  "vssrlrn.<hlsxfmt>.<lsxfmt>\t%w0,%w1,%w2"
-+  [(set_attr "type" "simd_int_arith")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "vorn<mode>3"
-+  [(set (match_operand:ILSX 0 "register_operand" "=f")
-+	(ior:ILSX (not:ILSX (match_operand:ILSX 2 "register_operand" "f"))
-+		  (match_operand:ILSX 1 "register_operand" "f")))]
-+  "ISA_HAS_LSX"
-+  "vorn.v\t%w0,%w1,%w2"
-+  [(set_attr "type" "simd_logic")
-+   (set_attr "mode" "<MODE>")])
-+
-+(define_insn "lsx_vldi"
-+  [(set (match_operand:V2DI 0 "register_operand" "=f")
-+	(unspec:V2DI [(match_operand 1 "const_imm13_operand")]
-+		     UNSPEC_LSX_VLDI))]
-+  "ISA_HAS_LSX"
-+{
-+  HOST_WIDE_INT val = INTVAL (operands[1]);
-+  if (val < 0)
-+    {
-+      HOST_WIDE_INT mode_val = (val & 0xf00) >> 8;
-+      if (mode_val >= 13)
-+	sorry ("for const_imm13_operand, only 0000 to 1100 are supported "
-+	       "in bits '12...9' when bit '13' is 1");
-+    }
-+  return "vldi\t%w0,%1";
-+}
-+  [(set_attr "type" "simd_load")
-+   (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vshuf_b"
-+  [(set (match_operand:V16QI 0 "register_operand" "=f")
-+	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")
-+		       (match_operand:V16QI 2 "register_operand" "f")
-+		       (match_operand:V16QI 3 "register_operand" "f")]
-+		      UNSPEC_LSX_VSHUF_B))]
-+  "ISA_HAS_LSX"
-+  "vshuf.b\t%w0,%w1,%w2,%w3"
-+  [(set_attr "type" "simd_shf")
-+   (set_attr "mode" "V16QI")])
-+
-+(define_insn "lsx_vldx"
-+  [(set (match_operand:V16QI 0 "register_operand" "=f")
-+	(unspec:V16QI [(match_operand:DI 1 "register_operand" "r")
-+		       (match_operand:DI 2 "reg_or_0_operand" "rJ")]
-+		      UNSPEC_LSX_VLDX))]
-+  "ISA_HAS_LSX"
-+{
-+  return "vldx\t%w0,%1,%z2";
-+}
-+  [(set_attr "type" "simd_load")
-+   (set_attr "mode" "V16QI")])
-+
-+(define_insn "lsx_vstx"
-+  [(set (mem:V16QI (plus:DI (match_operand:DI 1 "register_operand" "r")
-+			    (match_operand:DI 2 "reg_or_0_operand" "rJ")))
-+	(unspec:V16QI [(match_operand:V16QI 0 "register_operand" "f")]
-+		      UNSPEC_LSX_VSTX))]
-+  "ISA_HAS_LSX"
-+{
-+  return "vstx\t%w0,%1,%z2";
-+}
-+  [(set_attr "type" "simd_store")
-+   (set_attr "mode" "DI")])
-+
-+(define_insn "lsx_vextl_qu_du"
-+  [(set (match_operand:V2DI 0 "register_operand" "=f")
-+	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
-+		     UNSPEC_LSX_VEXTL_QU_DU))]
-+  "ISA_HAS_LSX"
-+  "vextl.qu.du\t%w0,%w1"
-+  [(set_attr "type" "simd_bit")
-+   (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vseteqz_v"
-+  [(set (match_operand:FCC 0 "register_operand" "=z")
-+	(eq:FCC
-+	  (unspec:SI [(match_operand:V16QI 1 "register_operand" "f")]
-+		     UNSPEC_LSX_VSETEQZ_V)
-+
(match_operand:SI 2 "const_0_operand")))] -+ "ISA_HAS_LSX" -+{ -+ return "vseteqz.v\t%0,%1"; -+} -+ [(set_attr "type" "simd_fcmp") -+ (set_attr "mode" "FCC")]) -diff --git a/gcc/config/loongarch/lsx2.md b/gcc/config/loongarch/lsx2.md -new file mode 100644 -index 000000000..2f56acfc4 ---- /dev/null -+++ b/gcc/config/loongarch/lsx2.md -@@ -0,0 +1,1091 @@ -+;; Machine Description for LARCH Loongson SX ASE -+;; -+;; Copyright (C) 2018 Free Software Foundation, Inc. -+;; -+;; This file is part of GCC. -+;; -+;; GCC is free software; you can redistribute it and/or modify -+;; it under the terms of the GNU General Public License as published by -+;; the Free Software Foundation; either version 3, or (at your option) -+;; any later version. -+;; -+;; GCC is distributed in the hope that it will be useful, -+;; but WITHOUT ANY WARRANTY; without even the implied warranty of -+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+;; GNU General Public License for more details. -+;; -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . -+;; -+ -+(define_mode_attr d2lsxfmt -+ [(V4SI "q") -+ (V8HI "d") -+ (V16QI "w")]) -+ -+(define_mode_attr d2lsxfmt_u -+ [(V4SI "qu") -+ (V8HI "du") -+ (V16QI "wu")]) -+ -+;; The attribute gives two double modes for vector modes. -+(define_mode_attr VD2MODE -+ [(V4SI "V2DI") -+ (V8HI "V2DI") -+ (V16QI "V4SI")]) -+ -+(define_c_enum "unspec" [ -+ UNSPEC_LSX_VADDWEV -+ UNSPEC_LSX_VADDWEV2 -+ UNSPEC_LSX_VADDWEV3 -+ UNSPEC_LSX_VADDWOD -+ UNSPEC_LSX_VADDWOD2 -+ UNSPEC_LSX_VADDWOD3 -+ UNSPEC_LSX_VSUBWEV -+ UNSPEC_LSX_VSUBWEV2 -+ UNSPEC_LSX_VSUBWOD -+ UNSPEC_LSX_VSUBWOD2 -+ UNSPEC_LSX_VMULWEV -+ UNSPEC_LSX_VMULWEV2 -+ UNSPEC_LSX_VMULWEV3 -+ UNSPEC_LSX_VMULWOD -+ UNSPEC_LSX_VMULWOD2 -+ UNSPEC_LSX_VMULWOD3 -+ UNSPEC_LSX_VHADDW_Q_D -+ UNSPEC_LSX_VHADDW_QU_DU -+ UNSPEC_LSX_VHSUBW_Q_D -+ UNSPEC_LSX_VHSUBW_QU_DU -+ UNSPEC_LSX_VMADDWEV -+ UNSPEC_LSX_VMADDWEV2 -+ UNSPEC_LSX_VMADDWEV3 -+ UNSPEC_LSX_VMADDWOD -+ UNSPEC_LSX_VMADDWOD2 -+ UNSPEC_LSX_VMADDWOD3 -+ UNSPEC_LSX_VROTR -+ UNSPEC_LSX_VADD_Q -+ UNSPEC_LSX_VSUB_Q -+ UNSPEC_LSX_VEXTH_Q_D -+ UNSPEC_LSX_VEXTH_QU_DU -+ UNSPEC_LSX_VMSKGEZ -+ UNSPEC_LSX_VMSKNZ -+ UNSPEC_LSX_VROTRI -+ UNSPEC_LSX_VEXTL_Q_D -+ UNSPEC_LSX_VSRLNI -+ UNSPEC_LSX_VSRLRNI -+ UNSPEC_LSX_VSSRLNI -+ UNSPEC_LSX_VSSRLNI2 -+ UNSPEC_LSX_VSSRLRNI -+ UNSPEC_LSX_VSSRLRNI2 -+ UNSPEC_LSX_VSRANI -+ UNSPEC_LSX_VSRARNI -+ UNSPEC_LSX_VSSRANI -+ UNSPEC_LSX_VSSRANI2 -+ UNSPEC_LSX_VSSRARNI -+ UNSPEC_LSX_VSSRARNI2 -+ UNSPEC_LSX_VPERMI -+]) -+ -+(define_insn "lsx_vwev_d_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (addsubmul:V2DI -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2)]))) -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2)])))))] -+ "ISA_HAS_LSX" -+ "vwev.d.w\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vwev_w_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (addsubmul:V4SI -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LSX" -+ 
"vwev.w.h\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vwev_h_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (addsubmul:V8HI -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))))] -+ "ISA_HAS_LSX" -+ "vwev.h.b\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vwod_d_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (addsubmul:V2DI -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3)]))) -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3)])))))] -+ "ISA_HAS_LSX" -+ "vwod.d.w\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vwod_w_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (addsubmul:V4SI -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)])))))] -+ "ISA_HAS_LSX" -+ "vwod.w.h\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vwod_h_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (addsubmul:V8HI -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)])))))] -+ "ISA_HAS_LSX" -+ "vwod.h.b\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vwev_d_wu_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (addmul:V2DI -+ (zero_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2)]))) -+ (sign_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2)])))))] -+ "ISA_HAS_LSX" -+ "vwev.d.wu.w\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vwev_w_hu_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (addmul:V4SI -+ (zero_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (sign_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LSX" -+ "vwev.w.hu.h\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") 
-+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vwev_h_bu_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (addmul:V8HI -+ (zero_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (sign_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)])))))] -+ "ISA_HAS_LSX" -+ "vwev.h.bu.b\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vwod_d_wu_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (addmul:V2DI -+ (zero_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3)]))) -+ (sign_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3)])))))] -+ "ISA_HAS_LSX" -+ "vwod.d.wu.w\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vwod_w_hu_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (addmul:V4SI -+ (zero_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (sign_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)])))))] -+ "ISA_HAS_LSX" -+ "vwod.w.hu.h\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vwod_h_bu_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (addmul:V8HI -+ (zero_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 1 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (sign_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)])))))] -+ "ISA_HAS_LSX" -+ "vwod.h.bu.b\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vaddwev_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VADDWEV))] -+ "ISA_HAS_LSX" -+ "vaddwev.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vaddwev_q_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VADDWEV2))] -+ "ISA_HAS_LSX" -+ "vaddwev.q.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vaddwod_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VADDWOD))] -+ "ISA_HAS_LSX" -+ "vaddwod.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn 
"lsx_vaddwod_q_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VADDWOD2))] -+ "ISA_HAS_LSX" -+ "vaddwod.q.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vsubwev_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VSUBWEV))] -+ "ISA_HAS_LSX" -+ "vsubwev.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vsubwev_q_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VSUBWEV2))] -+ "ISA_HAS_LSX" -+ "vsubwev.q.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vsubwod_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VSUBWOD))] -+ "ISA_HAS_LSX" -+ "vsubwod.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vsubwod_q_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VSUBWOD2))] -+ "ISA_HAS_LSX" -+ "vsubwod.q.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vaddwev_q_du_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VADDWEV3))] -+ "ISA_HAS_LSX" -+ "vaddwev.q.du.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vaddwod_q_du_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VADDWOD3))] -+ "ISA_HAS_LSX" -+ "vaddwod.q.du.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmulwev_q_du_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VMULWEV3))] -+ "ISA_HAS_LSX" -+ "vmulwev.q.du.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmulwod_q_du_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VMULWOD3))] -+ "ISA_HAS_LSX" -+ "vmulwod.q.du.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmulwev_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VMULWEV))] -+ "ISA_HAS_LSX" -+ "vmulwev.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmulwev_q_du" -+ [(set (match_operand:V2DI 0 
"register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VMULWEV2))] -+ "ISA_HAS_LSX" -+ "vmulwev.q.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmulwod_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VMULWOD))] -+ "ISA_HAS_LSX" -+ "vmulwod.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmulwod_q_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VMULWOD2))] -+ "ISA_HAS_LSX" -+ "vmulwod.q.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vhaddw_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VHADDW_Q_D))] -+ "ISA_HAS_LSX" -+ "vhaddw.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vhaddw_qu_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VHADDW_QU_DU))] -+ "ISA_HAS_LSX" -+ "vhaddw.qu.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vhsubw_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VHSUBW_Q_D))] -+ "ISA_HAS_LSX" -+ "vhsubw.q.d\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vhsubw_qu_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") -+ (match_operand:V2DI 2 "register_operand" "f")] -+ UNSPEC_LSX_VHSUBW_QU_DU))] -+ "ISA_HAS_LSX" -+ "vhsubw.qu.du\t%w0,%w1,%w2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmaddwev_d_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (plus:V2DI -+ (match_operand:V2DI 1 "register_operand" "0") -+ (mult:V2DI -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2)]))) -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwev.d.w\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmaddwev_w_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (plus:V4SI -+ (match_operand:V4SI 1 "register_operand" "0") -+ (mult:V4SI -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwev.w.h\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4SI")]) 
-+
-+(define_insn "lsx_vmaddwev_h_b<u>"
-+ [(set (match_operand:V8HI 0 "register_operand" "=f")
-+ (plus:V8HI
-+ (match_operand:V8HI 1 "register_operand" "0")
-+ (mult:V8HI
-+ (any_extend:V8HI
-+ (vec_select:V8QI
-+ (match_operand:V16QI 2 "register_operand" "%f")
-+ (parallel [(const_int 0) (const_int 2)
-+ (const_int 4) (const_int 6)
-+ (const_int 8) (const_int 10)
-+ (const_int 12) (const_int 14)])))
-+ (any_extend:V8HI
-+ (vec_select:V8QI
-+ (match_operand:V16QI 3 "register_operand" "f")
-+ (parallel [(const_int 0) (const_int 2)
-+ (const_int 4) (const_int 6)
-+ (const_int 8) (const_int 10)
-+ (const_int 12) (const_int 14)]))))))]
-+ "ISA_HAS_LSX"
-+ "vmaddwev.h.b<u>\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_fmadd")
-+ (set_attr "mode" "V8HI")])
-+
-+(define_insn "lsx_vmaddwod_d_w<u>"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (plus:V2DI
-+ (match_operand:V2DI 1 "register_operand" "0")
-+ (mult:V2DI
-+ (any_extend:V2DI
-+ (vec_select:V2SI
-+ (match_operand:V4SI 2 "register_operand" "%f")
-+ (parallel [(const_int 1) (const_int 3)])))
-+ (any_extend:V2DI
-+ (vec_select:V2SI
-+ (match_operand:V4SI 3 "register_operand" "f")
-+ (parallel [(const_int 1) (const_int 3)]))))))]
-+ "ISA_HAS_LSX"
-+ "vmaddwod.d.w<u>\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_fmadd")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmaddwod_w_h<u>"
-+ [(set (match_operand:V4SI 0 "register_operand" "=f")
-+ (plus:V4SI
-+ (match_operand:V4SI 1 "register_operand" "0")
-+ (mult:V4SI
-+ (any_extend:V4SI
-+ (vec_select:V4HI
-+ (match_operand:V8HI 2 "register_operand" "%f")
-+ (parallel [(const_int 1) (const_int 3)
-+ (const_int 5) (const_int 7)])))
-+ (any_extend:V4SI
-+ (vec_select:V4HI
-+ (match_operand:V8HI 3 "register_operand" "f")
-+ (parallel [(const_int 1) (const_int 3)
-+ (const_int 5) (const_int 7)]))))))]
-+ "ISA_HAS_LSX"
-+ "vmaddwod.w.h<u>\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_fmadd")
-+ (set_attr "mode" "V4SI")])
-+
-+(define_insn "lsx_vmaddwod_h_b<u>"
-+ [(set (match_operand:V8HI 0 "register_operand" "=f")
-+ (plus:V8HI
-+ (match_operand:V8HI 1 "register_operand" "0")
-+ (mult:V8HI
-+ (any_extend:V8HI
-+ (vec_select:V8QI
-+ (match_operand:V16QI 2 "register_operand" "%f")
-+ (parallel [(const_int 1) (const_int 3)
-+ (const_int 5) (const_int 7)
-+ (const_int 9) (const_int 11)
-+ (const_int 13) (const_int 15)])))
-+ (any_extend:V8HI
-+ (vec_select:V8QI
-+ (match_operand:V16QI 3 "register_operand" "f")
-+ (parallel [(const_int 1) (const_int 3)
-+ (const_int 5) (const_int 7)
-+ (const_int 9) (const_int 11)
-+ (const_int 13) (const_int 15)]))))))]
-+ "ISA_HAS_LSX"
-+ "vmaddwod.h.b<u>\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_fmadd")
-+ (set_attr "mode" "V8HI")])
-+
-+(define_insn "lsx_vmaddwev_d_wu_w"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (plus:V2DI
-+ (match_operand:V2DI 1 "register_operand" "0")
-+ (mult:V2DI
-+ (zero_extend:V2DI
-+ (vec_select:V2SI
-+ (match_operand:V4SI 2 "register_operand" "%f")
-+ (parallel [(const_int 0) (const_int 2)])))
-+ (sign_extend:V2DI
-+ (vec_select:V2SI
-+ (match_operand:V4SI 3 "register_operand" "f")
-+ (parallel [(const_int 0) (const_int 2)]))))))]
-+ "ISA_HAS_LSX"
-+ "vmaddwev.d.wu.w\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_fmadd")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmaddwev_w_hu_h"
-+ [(set (match_operand:V4SI 0 "register_operand" "=f")
-+ (plus:V4SI
-+ (match_operand:V4SI 1 "register_operand" "0")
-+ (mult:V4SI
-+ (zero_extend:V4SI
-+ (vec_select:V4HI
-+ (match_operand:V8HI 2 "register_operand" "%f")
-+ (parallel [(const_int 0) (const_int 2)
-+ (const_int 4) (const_int 6)]))) -+ (sign_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwev.w.hu.h\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vmaddwev_h_bu_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (plus:V8HI -+ (match_operand:V8HI 1 "register_operand" "0") -+ (mult:V8HI -+ (zero_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))) -+ (sign_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 3 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6) -+ (const_int 8) (const_int 10) -+ (const_int 12) (const_int 14)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwev.h.bu.b\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vmaddwod_d_wu_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (plus:V2DI -+ (match_operand:V2DI 1 "register_operand" "0") -+ (mult:V2DI -+ (zero_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3)]))) -+ (sign_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwod.d.wu.w\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmaddwod_w_hu_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (plus:V4SI -+ (match_operand:V4SI 1 "register_operand" "0") -+ (mult:V4SI -+ (zero_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))) -+ (sign_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwod.w.hu.h\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vmaddwod_h_bu_b" -+ [(set (match_operand:V8HI 0 "register_operand" "=f") -+ (plus:V8HI -+ (match_operand:V8HI 1 "register_operand" "0") -+ (mult:V8HI -+ (zero_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 2 "register_operand" "%f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))) -+ (sign_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 3 "register_operand" "f") -+ (parallel [(const_int 1) (const_int 3) -+ (const_int 5) (const_int 7) -+ (const_int 9) (const_int 11) -+ (const_int 13) (const_int 15)]))))))] -+ "ISA_HAS_LSX" -+ "vmaddwod.h.bu.b\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_fmadd") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vmaddwev_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") -+ (match_operand:V2DI 2 "register_operand" "f") -+ (match_operand:V2DI 3 "register_operand" "f")] -+ UNSPEC_LSX_VMADDWEV))] -+ "ISA_HAS_LSX" -+ "vmaddwev.q.d\t%w0,%w2,%w3" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vmaddwod_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI 
[(match_operand:V2DI 1 "register_operand" "0")
-+ (match_operand:V2DI 2 "register_operand" "f")
-+ (match_operand:V2DI 3 "register_operand" "f")]
-+ UNSPEC_LSX_VMADDWOD))]
-+ "ISA_HAS_LSX"
-+ "vmaddwod.q.d\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmaddwev_q_du"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
-+ (match_operand:V2DI 2 "register_operand" "f")
-+ (match_operand:V2DI 3 "register_operand" "f")]
-+ UNSPEC_LSX_VMADDWEV2))]
-+ "ISA_HAS_LSX"
-+ "vmaddwev.q.du\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmaddwod_q_du"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
-+ (match_operand:V2DI 2 "register_operand" "f")
-+ (match_operand:V2DI 3 "register_operand" "f")]
-+ UNSPEC_LSX_VMADDWOD2))]
-+ "ISA_HAS_LSX"
-+ "vmaddwod.q.du\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmaddwev_q_du_d"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
-+ (match_operand:V2DI 2 "register_operand" "f")
-+ (match_operand:V2DI 3 "register_operand" "f")]
-+ UNSPEC_LSX_VMADDWEV3))]
-+ "ISA_HAS_LSX"
-+ "vmaddwev.q.du.d\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmaddwod_q_du_d"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
-+ (match_operand:V2DI 2 "register_operand" "f")
-+ (match_operand:V2DI 3 "register_operand" "f")]
-+ UNSPEC_LSX_VMADDWOD3))]
-+ "ISA_HAS_LSX"
-+ "vmaddwod.q.du.d\t%w0,%w2,%w3"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vrotr_<lsxfmt>"
-+ [(set (match_operand:ILSX 0 "register_operand" "=f")
-+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
-+ (match_operand:ILSX 2 "register_operand" "f")]
-+ UNSPEC_LSX_VROTR))]
-+ "ISA_HAS_LSX"
-+ "vrotr.<lsxfmt>\t%w0,%w1,%w2"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "<MODE>")])
-+
-+(define_insn "lsx_vadd_q"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
-+ (match_operand:V2DI 2 "register_operand" "f")]
-+ UNSPEC_LSX_VADD_Q))]
-+ "ISA_HAS_LSX"
-+ "vadd.q\t%w0,%w1,%w2"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vsub_q"
-+ [(set (match_operand:V2DI 0 "register_operand" "=f")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
-+ (match_operand:V2DI 2 "register_operand" "f")]
-+ UNSPEC_LSX_VSUB_Q))]
-+ "ISA_HAS_LSX"
-+ "vsub.q\t%w0,%w1,%w2"
-+ [(set_attr "type" "simd_int_arith")
-+ (set_attr "mode" "V2DI")])
-+
-+(define_insn "lsx_vmskgez_b"
-+ [(set (match_operand:V16QI 0 "register_operand" "=f")
-+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")]
-+ UNSPEC_LSX_VMSKGEZ))]
-+ "ISA_HAS_LSX"
-+ "vmskgez.b\t%w0,%w1"
-+ [(set_attr "type" "simd_bit")
-+ (set_attr "mode" "V16QI")])
-+
-+(define_insn "lsx_vmsknz_b"
-+ [(set (match_operand:V16QI 0 "register_operand" "=f")
-+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")]
-+ UNSPEC_LSX_VMSKNZ))]
-+ "ISA_HAS_LSX"
-+ "vmsknz.b\t%w0,%w1"
-+ [(set_attr "type" "simd_bit")
-+ (set_attr "mode" "V16QI")])
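The two mask patterns just above reduce a vector to a per-byte bit mask. A scalar sketch of vmsknz.b, offered for illustration only under the usual reading of the instruction:

/* Scalar reference for vmsknz.b (illustrative sketch, not patch code):
   bit i of the result's low 16 bits is set iff byte i of the source
   vector is nonzero.  */
#include <stdint.h>

static uint16_t
vmsknz_b_ref (const uint8_t v[16])
{
  uint16_t m = 0;
  for (int i = 0; i < 16; i++)
    m |= (uint16_t) (v[i] != 0) << i;
  return m;
}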
"register_operand" "=f") -+ (any_extend:V8HI -+ (vec_select:V8QI -+ (match_operand:V16QI 1 "register_operand" "f") -+ (parallel [(const_int 8) (const_int 9) -+ (const_int 10) (const_int 11) -+ (const_int 12) (const_int 13) -+ (const_int 14) (const_int 15)]))))] -+ "ISA_HAS_LSX" -+ "vexth.h.b\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V8HI")]) -+ -+(define_insn "lsx_vexth_w_h" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (any_extend:V4SI -+ (vec_select:V4HI -+ (match_operand:V8HI 1 "register_operand" "f") -+ (parallel [(const_int 4) (const_int 5) -+ (const_int 6) (const_int 7)]))))] -+ "ISA_HAS_LSX" -+ "vexth.w.h\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V4SI")]) -+ -+(define_insn "lsx_vexth_d_w" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (any_extend:V2DI -+ (vec_select:V2SI -+ (match_operand:V4SI 1 "register_operand" "f") -+ (parallel [(const_int 2) (const_int 3)]))))] -+ "ISA_HAS_LSX" -+ "vexth.d.w\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vexth_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] -+ UNSPEC_LSX_VEXTH_Q_D))] -+ "ISA_HAS_LSX" -+ "vexth.q.d\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vexth_qu_du" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] -+ UNSPEC_LSX_VEXTH_QU_DU))] -+ "ISA_HAS_LSX" -+ "vexth.qu.du\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vrotri_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") -+ (match_operand 2 "const__operand" "")] -+ UNSPEC_LSX_VROTRI))] -+ "ISA_HAS_LSX" -+ "vrotri.\t%w0,%w1,%2" -+ [(set_attr "type" "simd_shf") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vextl_q_d" -+ [(set (match_operand:V2DI 0 "register_operand" "=f") -+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] -+ UNSPEC_LSX_VEXTL_Q_D))] -+ "ISA_HAS_LSX" -+ "vextl.q.d\t%w0,%w1" -+ [(set_attr "type" "simd_fcvt") -+ (set_attr "mode" "V2DI")]) -+ -+(define_insn "lsx_vsrlni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSRLNI))] -+ "ISA_HAS_LSX" -+ "vsrlni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrlrni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSRLRNI))] -+ "ISA_HAS_LSX" -+ "vsrlrni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrlni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRLNI))] -+ "ISA_HAS_LSX" -+ "vssrlni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrlni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 
2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRLNI2))] -+ "ISA_HAS_LSX" -+ "vssrlni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrlrni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRLRNI))] -+ "ISA_HAS_LSX" -+ "vssrlrni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrlrni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRLRNI2))] -+ "ISA_HAS_LSX" -+ "vssrlrni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrani__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSRANI))] -+ "ISA_HAS_LSX" -+ "vsrani..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vsrarni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSRARNI))] -+ "ISA_HAS_LSX" -+ "vsrarni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrani__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRANI))] -+ "ISA_HAS_LSX" -+ "vssrani..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrani__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRANI2))] -+ "ISA_HAS_LSX" -+ "vssrani..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrarni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRARNI))] -+ "ISA_HAS_LSX" -+ "vssrarni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vssrarni__" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VSSRARNI2))] -+ "ISA_HAS_LSX" -+ "vssrarni..\t%w0,%w2,%3" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ -+(define_insn "lsx_vpermi_w" -+ [(set (match_operand:V4SI 0 "register_operand" "=f") -+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") -+ (match_operand:V4SI 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LSX_VPERMI))] -+ "ISA_HAS_LSX" -+ 
"vpermi.w\t%w0,%w2,%3" -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "V4SI")]) -+ -diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h -new file mode 100644 -index 000000000..fe3043e3d ---- /dev/null -+++ b/gcc/config/loongarch/lsxintrin.h -@@ -0,0 +1,4980 @@ -+/* LARCH Loongson SX intrinsics include file. -+ -+ Copyright (C) 2018 Free Software Foundation, Inc. -+ -+ This file is part of GCC. -+ -+ GCC is free software; you can redistribute it and/or modify it -+ under the terms of the GNU General Public License as published -+ by the Free Software Foundation; either version 3, or (at your -+ option) any later version. -+ -+ GCC is distributed in the hope that it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+ License for more details. -+ -+ Under Section 7 of GPL version 3, you are granted additional -+ permissions described in the GCC Runtime Library Exception, version -+ 3.1, as published by the Free Software Foundation. -+ -+ You should have received a copy of the GNU General Public License and -+ a copy of the GCC Runtime Library Exception along with this program; -+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+ . */ -+ -+#ifndef _GCC_LOONGSON_SXINTRIN_H -+#define _GCC_LOONGSON_SXINTRIN_H 1 -+ -+#if defined(__loongarch_sx) -+typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); -+typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); -+typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); -+typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); -+typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); -+typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); -+typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); -+typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); -+typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); -+typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); -+typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8))); -+typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); -+typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); -+typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); -+typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); -+typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); -+ -+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); -+typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); -+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsll_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsll_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. 
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsll_h(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsll_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsll_w(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsll_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsll_d(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsll_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui3. */
-+/* Data types in instruction templates: V16QI, V16QI, UQI. */
-+#define __lsx_vslli_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vslli_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui4. */
-+/* Data types in instruction templates: V8HI, V8HI, UQI. */
-+#define __lsx_vslli_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vslli_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V4SI, V4SI, UQI. */
-+#define __lsx_vslli_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslli_w((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui6. */
-+/* Data types in instruction templates: V2DI, V2DI, UQI. */
-+#define __lsx_vslli_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vslli_d((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsra_b(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsra_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsra_h(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsra_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsra_w(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsra_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsra_d(__m128i _1, __m128i _2)
-+{
-+ return (__m128i)__builtin_lsx_vsra_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui3. */
-+/* Data types in instruction templates: V16QI, V16QI, UQI. */
-+#define __lsx_vsrai_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrai_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui4. */
-+/* Data types in instruction templates: V8HI, V8HI, UQI. */
-+#define __lsx_vsrai_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrai_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5.
*/ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vsrai_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrai_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. */ -+#define __lsx_vsrai_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrai_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrar_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrar_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrar_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrar_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrar_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrar_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrar_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrar_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vsrari_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrari_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V8HI, V8HI, UQI. */ -+#define __lsx_vsrari_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrari_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vsrari_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrari_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. */ -+#define __lsx_vsrari_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrari_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrl_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrl_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrl_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrl_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrl_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrl_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrl_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrl_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vsrli_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrli_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V8HI, V8HI, UQI. */ -+#define __lsx_vsrli_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrli_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vsrli_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrli_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. */ -+#define __lsx_vsrli_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrli_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlr_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlr_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlr_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlr_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlr_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlr_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlr_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlr_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vsrlri_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrlri_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V8HI, V8HI, UQI. */ -+#define __lsx_vsrlri_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrlri_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vsrlri_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrlri_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. 
*/ -+#define __lsx_vsrlri_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrlri_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitclr_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitclr_b((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitclr_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitclr_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitclr_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitclr_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitclr_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitclr_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ -+#define __lsx_vbitclri_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vbitclri_b((v16u8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ -+#define __lsx_vbitclri_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vbitclri_h((v8u16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ -+#define __lsx_vbitclri_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbitclri_w((v4u32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ -+#define __lsx_vbitclri_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vbitclri_d((v2u64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitset_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitset_b((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitset_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitset_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitset_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitset_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitset_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitset_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ -+#define __lsx_vbitseti_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vbitseti_b((v16u8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ -+#define __lsx_vbitseti_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vbitseti_h((v8u16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ -+#define __lsx_vbitseti_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbitseti_w((v4u32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ -+#define __lsx_vbitseti_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vbitseti_d((v2u64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitrev_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitrev_b((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitrev_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitrev_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitrev_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitrev_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vbitrev_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vbitrev_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ -+#define __lsx_vbitrevi_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vbitrevi_b((v16u8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ -+#define __lsx_vbitrevi_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vbitrevi_h((v8u16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ -+#define __lsx_vbitrevi_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbitrevi_w((v4u32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ -+#define __lsx_vbitrevi_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vbitrevi_d((v2u64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vadd_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vadd_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vadd_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vadd_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vadd_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vadd_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vadd_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vadd_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vaddi_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_bu((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, UQI. */ -+#define __lsx_vaddi_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_hu((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vaddi_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_wu((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. */ -+#define __lsx_vaddi_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_du((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsub_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsub_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsub_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsub_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsub_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsub_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsub_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsub_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. 
*/ -+#define __lsx_vsubi_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_bu((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, UQI. */ -+#define __lsx_vsubi_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_hu((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vsubi_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_wu((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. */ -+#define __lsx_vsubi_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_du((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V16QI, V16QI, QI. */ -+#define __lsx_vmaxi_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V8HI, V8HI, QI. */ -+#define __lsx_vmaxi_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V4SI, V4SI, QI. */ -+#define __lsx_vmaxi_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V2DI, V2DI, QI. */ -+#define __lsx_vmaxi_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmax_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmax_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ -+#define __lsx_vmaxi_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_bu((v16u8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ -+#define __lsx_vmaxi_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_hu((v8u16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ -+#define __lsx_vmaxi_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_wu((v4u32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ -+#define __lsx_vmaxi_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_du((v2u64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V16QI, V16QI, QI. */ -+#define __lsx_vmini_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V8HI, V8HI, QI. */ -+#define __lsx_vmini_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. 
*/ -+/* Data types in instruction templates: V4SI, V4SI, QI. */ -+#define __lsx_vmini_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V2DI, V2DI, QI. */ -+#define __lsx_vmini_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmin_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmin_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ -+#define __lsx_vmini_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_bu((v16u8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ -+#define __lsx_vmini_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_hu((v8u16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ -+#define __lsx_vmini_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_wu((v4u32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ -+#define __lsx_vmini_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_du((v2u64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vseq_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vseq_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vseq_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vseq_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vseq_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vseq_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vseq_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vseq_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V16QI, V16QI, QI. */ -+#define __lsx_vseqi_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V8HI, V8HI, QI. */ -+#define __lsx_vseqi_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V4SI, V4SI, QI. */ -+#define __lsx_vseqi_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V2DI, V2DI, QI. */ -+#define __lsx_vseqi_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V16QI, V16QI, QI. */ -+#define __lsx_vslti_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vslt_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vslt_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vslt_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vslt_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vslt_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vslt_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vslt_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vslt_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V8HI, V8HI, QI. */ -+#define __lsx_vslti_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V4SI, V4SI, QI. */ -+#define __lsx_vslti_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, si5. */ -+/* Data types in instruction templates: V2DI, V2DI, QI. 
-+#define __lsx_vslti_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_d((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vslt_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vslt_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vslt_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vslt_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vslt_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vslt_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vslt_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vslt_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V16QI, UV16QI, UQI. */
-+#define __lsx_vslti_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_bu((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V8HI, UV8HI, UQI. */
-+#define __lsx_vslti_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_hu((v8u16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V4SI, UV4SI, UQI. */
-+#define __lsx_vslti_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_wu((v4u32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V2DI, UV2DI, UQI. */
-+#define __lsx_vslti_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_du((v2u64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, si5. */
-+/* Data types in instruction templates: V16QI, V16QI, QI. */
-+#define __lsx_vslei_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, si5. */
-+/* Data types in instruction templates: V8HI, V8HI, QI. */
-+#define __lsx_vslei_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, si5. */
-+/* Data types in instruction templates: V4SI, V4SI, QI. */
-+#define __lsx_vslei_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_w((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, si5. */
-+/* Data types in instruction templates: V2DI, V2DI, QI. */
-+#define __lsx_vslei_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_d((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsle_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsle_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V16QI, UV16QI, UQI. */
-+#define __lsx_vslei_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_bu((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V8HI, UV8HI, UQI. */
-+#define __lsx_vslei_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_hu((v8u16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V4SI, UV4SI, UQI. */
-+#define __lsx_vslei_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_wu((v4u32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V2DI, UV2DI, UQI. */
-+#define __lsx_vslei_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_du((v2u64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui3. */
-+/* Data types in instruction templates: V16QI, V16QI, UQI. */
-+#define __lsx_vsat_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsat_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui4. */
-+/* Data types in instruction templates: V8HI, V8HI, UQI. */
-+#define __lsx_vsat_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsat_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: V4SI, V4SI, UQI. */
-+#define __lsx_vsat_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsat_w((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui6. */
-+/* Data types in instruction templates: V2DI, V2DI, UQI. */
-+#define __lsx_vsat_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsat_d((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui3. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */
-+#define __lsx_vsat_bu(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsat_bu((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui4. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UQI. */
-+#define __lsx_vsat_hu(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsat_hu((v8u16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui5. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UQI. */
-+#define __lsx_vsat_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsat_wu((v4u32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui6. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UQI. */
-+#define __lsx_vsat_du(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsat_du((v2u64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vadda_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vadda_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vadda_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vadda_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vadda_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vadda_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vadda_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vadda_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vsadd_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vsadd_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavg_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavg_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vavgr_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vavgr_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vssub_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vssub_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vabsd_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vabsd_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmul_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmul_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmul_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmul_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmul_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmul_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmul_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmul_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmadd_b(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmadd_b((v16i8)_1, (v16i8)_2, (v16i8)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmadd_h(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmadd_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmadd_w(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmadd_w((v4i32)_1, (v4i32)_2, (v4i32)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmadd_d(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmadd_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmsub_b(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmsub_b((v16i8)_1, (v16i8)_2, (v16i8)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmsub_h(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmsub_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmsub_w(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmsub_w((v4i32)_1, (v4i32)_2, (v4i32)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmsub_d(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vmsub_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vdiv_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vdiv_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhaddw_h_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhaddw_h_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhaddw_w_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhaddw_w_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhaddw_d_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhaddw_d_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhaddw_hu_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhaddw_hu_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhaddw_wu_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhaddw_wu_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhaddw_du_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhaddw_du_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhsubw_h_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhsubw_h_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhsubw_w_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhsubw_w_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhsubw_d_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhsubw_d_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhsubw_hu_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhsubw_hu_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhsubw_wu_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhsubw_wu_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vhsubw_du_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vhsubw_du_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_bu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_bu((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_hu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_hu((v8u16)_1, (v8u16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_wu(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_wu((v4u32)_1, (v4u32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vmod_du(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vmod_du((v2u64)_1, (v2u64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, rk. */
-+/* Data types in instruction templates: V16QI, V16QI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplve_b(__m128i _1, int _2)
-+{
-+  return (__m128i)__builtin_lsx_vreplve_b((v16i8)_1, (int)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, rk. */
-+/* Data types in instruction templates: V8HI, V8HI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplve_h(__m128i _1, int _2)
-+{
-+  return (__m128i)__builtin_lsx_vreplve_h((v8i16)_1, (int)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, rk. */
-+/* Data types in instruction templates: V4SI, V4SI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplve_w(__m128i _1, int _2)
-+{
-+  return (__m128i)__builtin_lsx_vreplve_w((v4i32)_1, (int)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, rk. */
-+/* Data types in instruction templates: V2DI, V2DI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplve_d(__m128i _1, int _2)
-+{
-+  return (__m128i)__builtin_lsx_vreplve_d((v2i64)_1, (int)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui4. */
-+/* Data types in instruction templates: V16QI, V16QI, UQI. */
-+#define __lsx_vreplvei_b(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vreplvei_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui3. */
-+/* Data types in instruction templates: V8HI, V8HI, UQI. */
-+#define __lsx_vreplvei_h(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vreplvei_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui2. */
-+/* Data types in instruction templates: V4SI, V4SI, UQI. */
-+#define __lsx_vreplvei_w(/*__m128i*/ _1, /*ui2*/ _2) ((__m128i)__builtin_lsx_vreplvei_w((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui1. */
-+/* Data types in instruction templates: V2DI, V2DI, UQI. */
-+#define __lsx_vreplvei_d(/*__m128i*/ _1, /*ui1*/ _2) ((__m128i)__builtin_lsx_vreplvei_d((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickev_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickev_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickev_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickev_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickev_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickev_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickev_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickev_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickod_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickod_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickod_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickod_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickod_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickod_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpickod_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpickod_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvh_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvh_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvh_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvh_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvh_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvh_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvh_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvh_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvl_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvl_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvl_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvl_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvl_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvl_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vilvl_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vilvl_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackev_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackev_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackev_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackev_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackev_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackev_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackev_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackev_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V16QI, V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackod_b(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackod_b((v16i8)_1, (v16i8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackod_h(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackod_h((v8i16)_1, (v8i16)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackod_w(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackod_w((v4i32)_1, (v4i32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpackod_d(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vpackod_d((v2i64)_1, (v2i64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vshuf_h(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vshuf_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vshuf_w(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vshuf_w((v4i32)_1, (v4i32)_2, (v4i32)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vshuf_d(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vshuf_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vand_v(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vand_v((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */
-+#define __lsx_vandi_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vandi_b((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vor_v(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vor_v((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */
-+#define __lsx_vori_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vori_b((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vnor_v(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vnor_v((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */
-+#define __lsx_vnori_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vnori_b((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vxor_v(__m128i _1, __m128i _2)
-+{
-+  return (__m128i)__builtin_lsx_vxor_v((v16u8)_1, (v16u8)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UQI. */
-+#define __lsx_vxori_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vxori_b((v16u8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, vk, va. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, UV16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vbitsel_v(__m128i _1, __m128i _2, __m128i _3)
-+{
-+  return (__m128i)__builtin_lsx_vbitsel_v((v16u8)_1, (v16u8)_2, (v16u8)_3);
-+}
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, USI. */
-+#define __lsx_vbitseli_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vbitseli_b((v16u8)(_1), (v16u8)(_2), (_3)))
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: V16QI, V16QI, USI. */
-+#define __lsx_vshuf4i_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vshuf4i_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: V8HI, V8HI, USI. */
-+#define __lsx_vshuf4i_h(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vshuf4i_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, vj, ui8. */
-+/* Data types in instruction templates: V4SI, V4SI, USI. */
-+#define __lsx_vshuf4i_w(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vshuf4i_w((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, rj. */
-+/* Data types in instruction templates: V16QI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplgr2vr_b(int _1)
-+{
-+  return (__m128i)__builtin_lsx_vreplgr2vr_b((int)_1);
-+}
-+
-+/* Assembly instruction format: vd, rj. */
-+/* Data types in instruction templates: V8HI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplgr2vr_h(int _1)
-+{
-+  return (__m128i)__builtin_lsx_vreplgr2vr_h((int)_1);
-+}
-+
-+/* Assembly instruction format: vd, rj. */
-+/* Data types in instruction templates: V4SI, SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplgr2vr_w(int _1)
-+{
-+  return (__m128i)__builtin_lsx_vreplgr2vr_w((int)_1);
-+}
-+
-+/* Assembly instruction format: vd, rj. */
-+/* Data types in instruction templates: V2DI, DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vreplgr2vr_d(long int _1)
-+{
-+  return (__m128i)__builtin_lsx_vreplgr2vr_d((long int)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpcnt_b(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vpcnt_b((v16i8)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpcnt_h(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vpcnt_h((v8i16)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpcnt_w(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vpcnt_w((v4i32)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vpcnt_d(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vpcnt_d((v2i64)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclo_b(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclo_b((v16i8)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclo_h(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclo_h((v8i16)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclo_w(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclo_w((v4i32)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclo_d(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclo_d((v2i64)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V16QI, V16QI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclz_b(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclz_b((v16i8)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V8HI, V8HI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclz_h(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclz_h((v8i16)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V4SI, V4SI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclz_w(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclz_w((v4i32)_1);
-+}
-+
-+/* Assembly instruction format: vd, vj. */
-+/* Data types in instruction templates: V2DI, V2DI. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128i __lsx_vclz_d(__m128i _1)
-+{
-+  return (__m128i)__builtin_lsx_vclz_d((v2i64)_1);
-+}
-+
-+/* Assembly instruction format: rd, vj, ui4. */
-+/* Data types in instruction templates: SI, V16QI, UQI. */
-+#define __lsx_vpickve2gr_b(/*__m128i*/ _1, /*ui4*/ _2) ((int)__builtin_lsx_vpickve2gr_b((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui3. */
-+/* Data types in instruction templates: SI, V8HI, UQI. */
-+#define __lsx_vpickve2gr_h(/*__m128i*/ _1, /*ui3*/ _2) ((int)__builtin_lsx_vpickve2gr_h((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui2. */
-+/* Data types in instruction templates: SI, V4SI, UQI. */
-+#define __lsx_vpickve2gr_w(/*__m128i*/ _1, /*ui2*/ _2) ((int)__builtin_lsx_vpickve2gr_w((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui1. */
-+/* Data types in instruction templates: DI, V2DI, UQI. */
-+#define __lsx_vpickve2gr_d(/*__m128i*/ _1, /*ui1*/ _2) ((long int)__builtin_lsx_vpickve2gr_d((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui4. */
-+/* Data types in instruction templates: USI, V16QI, UQI. */
-+#define __lsx_vpickve2gr_bu(/*__m128i*/ _1, /*ui4*/ _2) ((unsigned int)__builtin_lsx_vpickve2gr_bu((v16i8)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui3. */
-+/* Data types in instruction templates: USI, V8HI, UQI. */
-+#define __lsx_vpickve2gr_hu(/*__m128i*/ _1, /*ui3*/ _2) ((unsigned int)__builtin_lsx_vpickve2gr_hu((v8i16)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui2. */
-+/* Data types in instruction templates: USI, V4SI, UQI. */
-+#define __lsx_vpickve2gr_wu(/*__m128i*/ _1, /*ui2*/ _2) ((unsigned int)__builtin_lsx_vpickve2gr_wu((v4i32)(_1), (_2)))
-+
-+/* Assembly instruction format: rd, vj, ui1. */
-+/* Data types in instruction templates: UDI, V2DI, UQI. */
-+#define __lsx_vpickve2gr_du(/*__m128i*/ _1, /*ui1*/ _2) ((unsigned long int)__builtin_lsx_vpickve2gr_du((v2i64)(_1), (_2)))
-+
-+/* Assembly instruction format: vd, rj, ui4. */
-+/* Data types in instruction templates: V16QI, V16QI, SI, UQI. */
-+#define __lsx_vinsgr2vr_b(/*__m128i*/ _1, /*int*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_b((v16i8)(_1), (int)(_2), (_3)))
-+
-+/* Assembly instruction format: vd, rj, ui3. */
-+/* Data types in instruction templates: V8HI, V8HI, SI, UQI. */
-+#define __lsx_vinsgr2vr_h(/*__m128i*/ _1, /*int*/ _2, /*ui3*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_h((v8i16)(_1), (int)(_2), (_3)))
-+
-+/* Assembly instruction format: vd, rj, ui2. */
-+/* Data types in instruction templates: V4SI, V4SI, SI, UQI. */
-+#define __lsx_vinsgr2vr_w(/*__m128i*/ _1, /*int*/ _2, /*ui2*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_w((v4i32)(_1), (int)(_2), (_3)))
-+
-+/* Assembly instruction format: vd, rj, ui1. */
-+/* Data types in instruction templates: V2DI, V2DI, DI, UQI. */
-+#define __lsx_vinsgr2vr_d(/*__m128i*/ _1, /*long int*/ _2, /*ui1*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_d((v2i64)(_1), (long int)(_2), (_3)))
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SF, V4SF, V4SF. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128 __lsx_vfadd_s(__m128 _1, __m128 _2)
-+{
-+  return (__m128)__builtin_lsx_vfadd_s((v4f32)_1, (v4f32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DF, V2DF, V2DF. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128d __lsx_vfadd_d(__m128d _1, __m128d _2)
-+{
-+  return (__m128d)__builtin_lsx_vfadd_d((v2f64)_1, (v2f64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SF, V4SF, V4SF. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128 __lsx_vfsub_s(__m128 _1, __m128 _2)
-+{
-+  return (__m128)__builtin_lsx_vfsub_s((v4f32)_1, (v4f32)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V2DF, V2DF, V2DF. */
-+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-+__m128d __lsx_vfsub_d(__m128d _1, __m128d _2)
-+{
-+  return (__m128d)__builtin_lsx_vfsub_d((v2f64)_1, (v2f64)_2);
-+}
-+
-+/* Assembly instruction format: vd, vj, vk. */
-+/* Data types in instruction templates: V4SF, V4SF, V4SF. */
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmul_s(__m128 _1, __m128 _2) -+{ -+ return (__m128)__builtin_lsx_vfmul_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmul_d(__m128d _1, __m128d _2) -+{ -+ return (__m128d)__builtin_lsx_vfmul_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfdiv_s(__m128 _1, __m128 _2) -+{ -+ return (__m128)__builtin_lsx_vfdiv_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfdiv_d(__m128d _1, __m128d _2) -+{ -+ return (__m128d)__builtin_lsx_vfdiv_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcvt_h_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcvt_h_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfcvt_s_d(__m128d _1, __m128d _2) -+{ -+ return (__m128)__builtin_lsx_vfcvt_s_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmin_s(__m128 _1, __m128 _2) -+{ -+ return (__m128)__builtin_lsx_vfmin_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmin_d(__m128d _1, __m128d _2) -+{ -+ return (__m128d)__builtin_lsx_vfmin_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmina_s(__m128 _1, __m128 _2) -+{ -+ return (__m128)__builtin_lsx_vfmina_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmina_d(__m128d _1, __m128d _2) -+{ -+ return (__m128d)__builtin_lsx_vfmina_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmax_s(__m128 _1, __m128 _2) -+{ -+ return (__m128)__builtin_lsx_vfmax_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF. 
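The single-precision arithmetic intrinsics compose in the obvious elementwise way; a minimal sketch (no handling of division by zero or other special values):

/* Per-lane (a * b) / c over 4 x float (illustrative). */
static inline __m128 muldiv_ps(__m128 a, __m128 b, __m128 c)
{
  return __lsx_vfdiv_s(__lsx_vfmul_s(a, b), c);
}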
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmax_d(__m128d _1, __m128d _2) -+{ -+ return (__m128d)__builtin_lsx_vfmax_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmaxa_s(__m128 _1, __m128 _2) -+{ -+ return (__m128)__builtin_lsx_vfmaxa_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmaxa_d(__m128d _1, __m128d _2) -+{ -+ return (__m128d)__builtin_lsx_vfmaxa_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfclass_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vfclass_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfclass_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vfclass_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfsqrt_s(__m128 _1) -+{ -+ return (__m128)__builtin_lsx_vfsqrt_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfsqrt_d(__m128d _1) -+{ -+ return (__m128d)__builtin_lsx_vfsqrt_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfrecip_s(__m128 _1) -+{ -+ return (__m128)__builtin_lsx_vfrecip_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfrecip_d(__m128d _1) -+{ -+ return (__m128d)__builtin_lsx_vfrecip_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfrint_s(__m128 _1) -+{ -+ return (__m128)__builtin_lsx_vfrint_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfrint_d(__m128d _1) -+{ -+ return (__m128d)__builtin_lsx_vfrint_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfrsqrt_s(__m128 _1) -+{ -+ return (__m128)__builtin_lsx_vfrsqrt_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V2DF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfrsqrt_d(__m128d _1) -+{ -+ return (__m128d)__builtin_lsx_vfrsqrt_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vflogb_s(__m128 _1) -+{ -+ return (__m128)__builtin_lsx_vflogb_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vflogb_d(__m128d _1) -+{ -+ return (__m128d)__builtin_lsx_vflogb_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfcvth_s_h(__m128i _1) -+{ -+ return (__m128)__builtin_lsx_vfcvth_s_h((v8i16)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfcvth_d_s(__m128 _1) -+{ -+ return (__m128d)__builtin_lsx_vfcvth_d_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfcvtl_s_h(__m128i _1) -+{ -+ return (__m128)__builtin_lsx_vfcvtl_s_h((v8i16)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfcvtl_d_s(__m128 _1) -+{ -+ return (__m128d)__builtin_lsx_vfcvtl_d_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftint_w_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftint_w_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftint_l_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftint_l_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftint_wu_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftint_wu_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftint_lu_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftint_lu_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrz_w_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrz_w_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrz_l_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrz_l_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrz_wu_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrz_wu_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrz_lu_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrz_lu_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vffint_s_w(__m128i _1) -+{ -+ return (__m128)__builtin_lsx_vffint_s_w((v4i32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vffint_d_l(__m128i _1) -+{ -+ return (__m128d)__builtin_lsx_vffint_d_l((v2i64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SF, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vffint_s_wu(__m128i _1) -+{ -+ return (__m128)__builtin_lsx_vffint_s_wu((v4u32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vffint_d_lu(__m128i _1) -+{ -+ return (__m128d)__builtin_lsx_vffint_d_lu((v2u64)_1); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vandn_v(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vandn_v((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vneg_b(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vneg_b((v16i8)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vneg_h(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vneg_h((v8i16)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vneg_w(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vneg_w((v4i32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vneg_d(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vneg_d((v2i64)_1); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. 
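A sketch of a float/int round trip built from these conversions, assuming every lane value fits in int32 (vftintrz rounds toward zero, vffint converts back):

/* Truncate each float lane to int32 and convert back (illustrative). */
static inline __m128 trunc_ps(__m128 v)
{
  __m128i t = __lsx_vftintrz_w_s(v);
  return __lsx_vffint_s_w(t);
}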
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmuh_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmuh_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: V8HI, V16QI, UQI. */ -+#define __lsx_vsllwil_h_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsllwil_h_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V4SI, V8HI, UQI. */ -+#define __lsx_vsllwil_w_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsllwil_w_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V2DI, V4SI, UQI. */ -+#define __lsx_vsllwil_d_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsllwil_d_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: UV8HI, UV16QI, UQI. */ -+#define __lsx_vsllwil_hu_bu(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsllwil_hu_bu((v16u8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV4SI, UV8HI, UQI. 
*/ -+#define __lsx_vsllwil_wu_hu(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsllwil_wu_hu((v8u16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV2DI, UV4SI, UQI. */ -+#define __lsx_vsllwil_du_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsllwil_du_wu((v4u32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsran_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsran_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsran_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsran_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsran_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsran_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssran_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssran_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssran_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssran_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssran_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssran_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssran_bu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssran_bu_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssran_hu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssran_hu_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssran_wu_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssran_wu_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. 
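Since vsllwil sign- or zero-extends the low half of the source before shifting, a shift amount of 0 gives a pure widen; a sketch under that reading:

/* Widen the low eight int8 lanes to int16 lanes (shift by 0 == plain sign-extend). */
static inline __m128i widen_low_i8(__m128i v)
{
  return __lsx_vsllwil_h_b(v, 0);
}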
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrarn_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrarn_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrarn_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrarn_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrarn_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrarn_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrarn_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrarn_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrarn_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrarn_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrarn_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrarn_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrarn_bu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrarn_bu_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrarn_hu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrarn_hu_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrarn_wu_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrarn_wu_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrln_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrln_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrln_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrln_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. 
*/ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrln_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrln_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrln_bu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrln_bu_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrln_hu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrln_hu_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrln_wu_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrln_wu_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlrn_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlrn_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlrn_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlrn_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsrlrn_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsrlrn_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrlrn_bu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrlrn_bu_h((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrlrn_hu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrlrn_hu_w((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrlrn_wu_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrlrn_wu_d((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, UQI. 
*/ -+#define __lsx_vfrstpi_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vfrstpi_b((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, UQI. */ -+#define __lsx_vfrstpi_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vfrstpi_h((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrstp_b(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vfrstp_b((v16i8)_1, (v16i8)_2, (v16i8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrstp_h(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vfrstp_h((v8i16)_1, (v8i16)_2, (v8i16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, ui8. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vshuf4i_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vshuf4i_d((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vbsrl_v(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbsrl_v((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vbsll_v(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbsll_v((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui8. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vextrins_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_b((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui8. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vextrins_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_h((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui8. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vextrins_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_w((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui8. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vextrins_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_d((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmskltz_b(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vmskltz_b((v16i8)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmskltz_h(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vmskltz_h((v8i16)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmskltz_w(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vmskltz_w((v4i32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmskltz_d(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vmskltz_d((v2i64)_1); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsigncov_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsigncov_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsigncov_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsigncov_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsigncov_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsigncov_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsigncov_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsigncov_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmadd_s(__m128 _1, __m128 _2, __m128 _3) -+{ -+ return (__m128)__builtin_lsx_vfmadd_s((v4f32)_1, (v4f32)_2, (v4f32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmadd_d(__m128d _1, __m128d _2, __m128d _3) -+{ -+ return (__m128d)__builtin_lsx_vfmadd_d((v2f64)_1, (v2f64)_2, (v2f64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfmsub_s(__m128 _1, __m128 _2, __m128 _3) -+{ -+ return (__m128)__builtin_lsx_vfmsub_s((v4f32)_1, (v4f32)_2, (v4f32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfmsub_d(__m128d _1, __m128d _2, __m128d _3) -+{ -+ return (__m128d)__builtin_lsx_vfmsub_d((v2f64)_1, (v2f64)_2, (v2f64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. 
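The four-operand forms map directly onto fused multiply-add; a minimal sketch:

/* Per-lane fused a * b + c over 4 x float (single rounding step; illustrative). */
static inline __m128 fmadd_ps(__m128 a, __m128 b, __m128 c)
{
  return __lsx_vfmadd_s(a, b, c);
}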
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfnmadd_s(__m128 _1, __m128 _2, __m128 _3) -+{ -+ return (__m128)__builtin_lsx_vfnmadd_s((v4f32)_1, (v4f32)_2, (v4f32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfnmadd_d(__m128d _1, __m128d _2, __m128d _3) -+{ -+ return (__m128d)__builtin_lsx_vfnmadd_d((v2f64)_1, (v2f64)_2, (v2f64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vfnmsub_s(__m128 _1, __m128 _2, __m128 _3) -+{ -+ return (__m128)__builtin_lsx_vfnmsub_s((v4f32)_1, (v4f32)_2, (v4f32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vfnmsub_d(__m128d _1, __m128d _2, __m128d _3) -+{ -+ return (__m128d)__builtin_lsx_vfnmsub_d((v2f64)_1, (v2f64)_2, (v2f64)_3); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrne_w_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrne_w_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrne_l_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrne_l_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrp_w_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrp_w_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrp_l_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrp_l_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrm_w_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrm_w_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrm_l_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrm_l_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftint_w_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vftint_w_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SF, V2DI, V2DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128 __lsx_vffint_s_l(__m128i _1, __m128i _2) -+{ -+ return (__m128)__builtin_lsx_vffint_s_l((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrz_w_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vftintrz_w_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrp_w_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vftintrp_w_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrm_w_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vftintrm_w_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrne_w_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vftintrne_w_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintl_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintl_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftinth_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftinth_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vffinth_d_w(__m128i _1) -+{ -+ return (__m128d)__builtin_lsx_vffinth_d_w((v4i32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DF, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128d __lsx_vffintl_d_w(__m128i _1) -+{ -+ return (__m128d)__builtin_lsx_vffintl_d_w((v4i32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrzl_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrzl_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrzh_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrzh_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrpl_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrpl_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrph_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrph_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrml_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrml_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrmh_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrmh_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrnel_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrnel_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vftintrneh_l_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vftintrneh_l_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrne_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrne_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrne_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrne_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrz_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrz_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrz_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrz_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrp_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrp_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrp_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrp_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V4SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrm_s(__m128 _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrm_s((v4f32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfrintrm_d(__m128d _1) -+{ -+ return (__m128i)__builtin_lsx_vfrintrm_d((v2f64)_1); -+} -+ -+/* Assembly instruction format: vd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI, UQI. */ -+#define __lsx_vstelm_b(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_b((v16i8)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: vd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V8HI, CVPOINTER, SI, UQI. */ -+#define __lsx_vstelm_h(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_h((v8i16)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: vd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V4SI, CVPOINTER, SI, UQI. */ -+#define __lsx_vstelm_w(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_w((v4i32)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: vd, rj, si8, idx. */ -+/* Data types in instruction templates: VOID, V2DI, CVPOINTER, SI, UQI. */ -+#define __lsx_vstelm_d(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_d((v2i64)(_1), (void *)(_2), (_3), (_4))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_d_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_d_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_w_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_w_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_h_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_h_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_d_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_d_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_w_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_w_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V16QI, V16QI. 
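A sketch of storing a single lane with vstelm (a byte offset of 0 sidesteps any question of how nonzero si8 offsets are scaled; p is assumed suitably aligned):

/* Store 32-bit lane 2 of v at p (illustrative). */
static inline void store_lane2(void *p, __m128i v)
{
  __lsx_vstelm_w(v, p, 0, 2);
}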
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_h_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_h_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_d_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_d_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_w_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_w_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_h_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_h_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_d_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_d_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_w_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_w_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_h_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_h_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_d_wu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_d_wu_w((v4u32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_w_hu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_w_hu_h((v8u16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_h_bu_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_h_bu_b((v16u8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_d_wu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_d_wu_w((v4u32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_w_hu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_w_hu_h((v8u16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_h_bu_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_h_bu_b((v16u8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_d_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_d_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_w_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_w_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_h_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_h_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_d_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_d_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_w_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_w_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_h_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_h_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_d_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_d_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_w_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_w_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_h_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_h_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_d_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_d_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_w_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_w_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_h_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_h_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_q_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_q_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_q_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_q_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. 
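The even/odd widening adds pair up naturally: under the usual reading, "ev" combines lanes 0 and 2 and "od" lanes 1 and 3, each producing a 64-bit sum. A sketch:

/* 64-bit sums of the even- and odd-indexed int32 lane pairs (illustrative). */
static inline void add_widen(__m128i a, __m128i b, __m128i *ev, __m128i *od)
{
  *ev = __lsx_vaddwev_d_w(a, b);
  *od = __lsx_vaddwod_d_w(a, b);
}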
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwev_q_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwev_q_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsubwod_q_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsubwod_q_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwev_q_du_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwev_q_du_d((v2u64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vaddwod_q_du_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vaddwod_q_du_d((v2u64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_d_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_d_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_w_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_w_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_h_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_h_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_d_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_d_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_w_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_w_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V16QI, V16QI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_h_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_h_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_d_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_d_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_w_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_w_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_h_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_h_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_d_wu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_d_wu((v4u32)_1, (v4u32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_w_hu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_w_hu((v8u16)_1, (v8u16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_h_bu(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_h_bu((v16u8)_1, (v16u8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_d_wu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_d_wu_w((v4u32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_w_hu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_w_hu_h((v8u16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_h_bu_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_h_bu_b((v16u8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV4SI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_d_wu_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_d_wu_w((v4u32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_w_hu_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_w_hu_h((v8u16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_h_bu_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_h_bu_b((v16u8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_q_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_q_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_q_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_q_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwev_q_du_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwev_q_du_d((v2u64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmulwod_q_du_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vmulwod_q_du_d((v2u64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vhaddw_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vhaddw_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vhaddw_qu_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vhaddw_qu_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vhsubw_q_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vhsubw_q_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vhsubw_qu_du(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vhsubw_qu_du((v2u64)_1, (v2u64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_d_w(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_d_w((v2i64)_1, (v4i32)_2, (v4i32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_w_h(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_w_h((v4i32)_1, (v8i16)_2, (v8i16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_h_b(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_h_b((v8i16)_1, (v16i8)_2, (v16i8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_d_wu(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_d_wu((v2u64)_1, (v4u32)_2, (v4u32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_w_hu(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_w_hu((v4u32)_1, (v8u16)_2, (v8u16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_h_bu(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_h_bu((v8u16)_1, (v16u8)_2, (v16u8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_d_w(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_d_w((v2i64)_1, (v4i32)_2, (v4i32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_w_h(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_w_h((v4i32)_1, (v8i16)_2, (v8i16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_h_b(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_h_b((v8i16)_1, (v16i8)_2, (v16i8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_d_wu(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_d_wu((v2u64)_1, (v4u32)_2, (v4u32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_w_hu(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_w_hu((v4u32)_1, (v8u16)_2, (v8u16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_h_bu(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_h_bu((v8u16)_1, (v16u8)_2, (v16u8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_d_wu_w(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_d_wu_w((v2i64)_1, (v4u32)_2, (v4i32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_w_hu_h(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_w_hu_h((v4i32)_1, (v8u16)_2, (v8i16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_h_bu_b(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_h_bu_b((v8i16)_1, (v16u8)_2, (v16i8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_d_wu_w(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_d_wu_w((v2i64)_1, (v4u32)_2, (v4i32)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_w_hu_h(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_w_hu_h((v4i32)_1, (v8u16)_2, (v8i16)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_h_bu_b(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_h_bu_b((v8i16)_1, (v16u8)_2, (v16i8)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_q_d(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_q_d((v2i64)_1, (v2i64)_2, (v2i64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_q_d(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_q_d((v2i64)_1, (v2i64)_2, (v2i64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_q_du(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_q_du((v2u64)_1, (v2u64)_2, (v2u64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_q_du(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_q_du((v2u64)_1, (v2u64)_2, (v2u64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwev_q_du_d(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwev_q_du_d((v2i64)_1, (v2u64)_2, (v2i64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmaddwod_q_du_d(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vmaddwod_q_du_d((v2i64)_1, (v2u64)_2, (v2i64)_3); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vrotr_b(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vrotr_b((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vrotr_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vrotr_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vrotr_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vrotr_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vrotr_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vrotr_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vadd_q(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vadd_q((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vsub_q(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vsub_q((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, rj, si12. */ -+/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ -+#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vldrepl_b((void *)(_1), (_2))) -+ -+/* Assembly instruction format: vd, rj, si11. */ -+/* Data types in instruction templates: V8HI, CVPOINTER, SI. */ -+#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m128i)__builtin_lsx_vldrepl_h((void *)(_1), (_2))) -+ -+/* Assembly instruction format: vd, rj, si10. */ -+/* Data types in instruction templates: V4SI, CVPOINTER, SI. */ -+#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m128i)__builtin_lsx_vldrepl_w((void *)(_1), (_2))) -+ -+/* Assembly instruction format: vd, rj, si9. */ -+/* Data types in instruction templates: V2DI, CVPOINTER, SI. */ -+#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m128i)__builtin_lsx_vldrepl_d((void *)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmskgez_b(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vmskgez_b((v16i8)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vmsknz_b(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vmsknz_b((v16i8)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V8HI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_h_b(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_h_b((v16i8)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V4SI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_w_h(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_w_h((v8i16)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V4SI. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_d_w(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_d_w((v4i32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_q_d(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_q_d((v2i64)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV8HI, UV16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_hu_bu(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_hu_bu((v16u8)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV4SI, UV8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_wu_hu(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_wu_hu((v8u16)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV2DI, UV4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_du_wu(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_du_wu((v4u32)_1); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vexth_qu_du(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vexth_qu_du((v2u64)_1); -+} -+ -+/* Assembly instruction format: vd, vj, ui3. */ -+/* Data types in instruction templates: V16QI, V16QI, UQI. */ -+#define __lsx_vrotri_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vrotri_b((v16i8)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V8HI, V8HI, UQI. */ -+#define __lsx_vrotri_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vrotri_h((v8i16)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V4SI, V4SI, UQI. */ -+#define __lsx_vrotri_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vrotri_w((v4i32)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V2DI, V2DI, UQI. */ -+#define __lsx_vrotri_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vrotri_d((v2i64)(_1), (_2))) -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vextl_q_d(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vextl_q_d((v2i64)_1); -+} -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vsrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrlni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vsrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrlni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. 
*/ -+#define __lsx_vsrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrlni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vsrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrlni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vsrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrlrni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vsrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrlrni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vsrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrlrni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vsrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrlrni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vssrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vssrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vssrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vssrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ -+#define __lsx_vssrlni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlni_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ -+#define __lsx_vssrlni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlni_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ -+#define __lsx_vssrlni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlni_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. 
*/ -+#define __lsx_vssrlni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlni_du_q((v2u64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vssrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlrni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vssrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlrni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vssrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlrni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vssrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlrni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ -+#define __lsx_vssrlrni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlrni_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ -+#define __lsx_vssrlrni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlrni_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ -+#define __lsx_vssrlrni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlrni_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ -+#define __lsx_vssrlrni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlrni_du_q((v2u64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vsrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrani_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vsrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrani_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vsrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrani_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vsrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrani_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. 
*/ -+#define __lsx_vsrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrarni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vsrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrarni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vsrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrarni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vsrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrarni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vssrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrani_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ -+#define __lsx_vssrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrani_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vssrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrani_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vssrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrani_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ -+#define __lsx_vssrani_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrani_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ -+#define __lsx_vssrani_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrani_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ -+#define __lsx_vssrani_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrani_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ -+#define __lsx_vssrani_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrani_du_q((v2u64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ -+#define __lsx_vssrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrarni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. 
*/ -+#define __lsx_vssrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrarni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vssrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrarni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ -+#define __lsx_vssrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrarni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui4. */ -+/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ -+#define __lsx_vssrarni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrarni_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui5. */ -+/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ -+#define __lsx_vssrarni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrarni_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui6. */ -+/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ -+#define __lsx_vssrarni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrarni_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui7. */ -+/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ -+#define __lsx_vssrarni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrarni_du_q((v2u64)(_1), (v2i64)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, ui8. */ -+/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ -+#define __lsx_vpermi_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vpermi_w((v4i32)(_1), (v4i32)(_2), (_3))) -+ -+/* Assembly instruction format: vd, rj, si12. */ -+/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ -+#define __lsx_vld(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vld((void *)(_1), (_2))) -+ -+/* Assembly instruction format: vd, rj, si12. */ -+/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI. */ -+#define __lsx_vst(/*__m128i*/ _1, /*void **/ _2, /*si12*/ _3) ((void)__builtin_lsx_vst((v16i8)(_1), (void *)(_2), (_3))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrlrn_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrlrn_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrlrn_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrlrn_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrlrn_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrlrn_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. 
*/ -+/* Data types in instruction templates: V16QI, V8HI, V8HI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrln_b_h(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrln_b_h((v8i16)_1, (v8i16)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V8HI, V4SI, V4SI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrln_h_w(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrln_h_w((v4i32)_1, (v4i32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V2DI, V2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vssrln_w_d(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vssrln_w_d((v2i64)_1, (v2i64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vorn_v(__m128i _1, __m128i _2) -+{ -+ return (__m128i)__builtin_lsx_vorn_v((v16i8)_1, (v16i8)_2); -+} -+ -+/* Assembly instruction format: vd, i13. */ -+/* Data types in instruction templates: V2DI, HI. */ -+#define __lsx_vldi(/*i13*/ _1) ((__m128i)__builtin_lsx_vldi((_1))) -+ -+/* Assembly instruction format: vd, vj, vk, va. */ -+/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vshuf_b(__m128i _1, __m128i _2, __m128i _3) -+{ -+ return (__m128i)__builtin_lsx_vshuf_b((v16i8)_1, (v16i8)_2, (v16i8)_3); -+} -+ -+/* Assembly instruction format: vd, rj, rk. */ -+/* Data types in instruction templates: V16QI, CVPOINTER, DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vldx(void * _1, long int _2) -+{ -+ return (__m128i)__builtin_lsx_vldx((void *)_1, (long int)_2); -+} -+ -+/* Assembly instruction format: vd, rj, rk. */ -+/* Data types in instruction templates: VOID, V16QI, CVPOINTER, DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+void __lsx_vstx(__m128i _1, void * _2, long int _3) -+{ -+ return (void)__builtin_lsx_vstx((v16i8)_1, (void *)_2, (long int)_3); -+} -+ -+/* Assembly instruction format: vd, vj. */ -+/* Data types in instruction templates: UV2DI, UV2DI. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vextl_qu_du(__m128i _1) -+{ -+ return (__m128i)__builtin_lsx_vextl_qu_du((v2u64)_1); -+} -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV16QI. */ -+#define __lsx_bnz_b(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_b((v16u8)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV2DI. */ -+#define __lsx_bnz_d(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_d((v2u64)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV8HI. */ -+#define __lsx_bnz_h(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_h((v8u16)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV16QI. */ -+#define __lsx_bnz_v(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_v((v16u8)(_1))) -+ -+/* Assembly instruction format: cd, vj. 
*/ -+/* Data types in instruction templates: SI, UV4SI. */ -+#define __lsx_bnz_w(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_w((v4u32)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV16QI. */ -+#define __lsx_bz_b(/*__m128i*/ _1) ((int)__builtin_lsx_bz_b((v16u8)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV2DI. */ -+#define __lsx_bz_d(/*__m128i*/ _1) ((int)__builtin_lsx_bz_d((v2u64)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV8HI. */ -+#define __lsx_bz_h(/*__m128i*/ _1) ((int)__builtin_lsx_bz_h((v8u16)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV16QI. */ -+#define __lsx_bz_v(/*__m128i*/ _1) ((int)__builtin_lsx_bz_v((v16u8)(_1))) -+ -+/* Assembly instruction format: cd, vj. */ -+/* Data types in instruction templates: SI, UV4SI. */ -+#define __lsx_bz_w(/*__m128i*/ _1) ((int)__builtin_lsx_bz_w((v4u32)(_1))) -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_caf_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_caf_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_caf_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_caf_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_ceq_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_ceq_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_ceq_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_ceq_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cle_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cle_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cle_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cle_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_clt_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_clt_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_clt_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_clt_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cne_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cne_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cne_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cne_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cor_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cor_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cor_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cor_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cueq_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cueq_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cueq_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cueq_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cule_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cule_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cule_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cule_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cult_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cult_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cult_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cult_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. 
*/ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cun_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cun_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cune_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cune_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cune_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cune_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_cun_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_cun_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_saf_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_saf_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_saf_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_saf_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_seq_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_seq_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_seq_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_seq_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sle_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sle_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sle_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sle_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. 
*/ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_slt_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_slt_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_slt_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_slt_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sne_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sne_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sne_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sne_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sor_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sor_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sor_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sor_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sueq_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sueq_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sueq_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sueq_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sule_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sule_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sule_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sule_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sult_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sult_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. 
*/ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sult_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sult_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sun_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sun_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V2DI, V2DF, V2DF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sune_d(__m128d _1, __m128d _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sune_d((v2f64)_1, (v2f64)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sune_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sune_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, vj, vk. */ -+/* Data types in instruction templates: V4SI, V4SF, V4SF. */ -+extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -+__m128i __lsx_vfcmp_sun_s(__m128 _1, __m128 _2) -+{ -+ return (__m128i)__builtin_lsx_vfcmp_sun_s((v4f32)_1, (v4f32)_2); -+} -+ -+/* Assembly instruction format: vd, si10. */ -+/* Data types in instruction templates: V16QI, HI. */ -+#define __lsx_vrepli_b(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_b((_1))) -+ -+/* Assembly instruction format: vd, si10. */ -+/* Data types in instruction templates: V2DI, HI. */ -+#define __lsx_vrepli_d(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_d((_1))) -+ -+/* Assembly instruction format: vd, si10. */ -+/* Data types in instruction templates: V8HI, HI. */ -+#define __lsx_vrepli_h(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_h((_1))) -+ -+/* Assembly instruction format: vd, si10. */ -+/* Data types in instruction templates: V4SI, HI. */ -+#define __lsx_vrepli_w(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_w((_1))) -+ -+#endif /* defined(__loongarch_sx) */ -+#endif /* _GCC_LOONGSON_SXINTRIN_H */ -diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md -new file mode 100644 -index 000000000..20638559d ---- /dev/null -+++ b/gcc/config/loongarch/predicates.md -@@ -0,0 +1,639 @@ -+;; Predicate definitions for LARCH. -+;; Copyright (C) 2004-2018 Free Software Foundation, Inc. -+;; -+;; This file is part of GCC. -+;; -+;; GCC is free software; you can redistribute it and/or modify -+;; it under the terms of the GNU General Public License as published by -+;; the Free Software Foundation; either version 3, or (at your option) -+;; any later version. -+;; -+;; GCC is distributed in the hope that it will be useful, -+;; but WITHOUT ANY WARRANTY; without even the implied warranty of -+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+;; GNU General Public License for more details. -+;; -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . 
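[Reader's aid, not part of the patch: the __lsx_vfcmp_* wrappers that end just above all return a per-lane all-ones/all-zeros mask, which is normally consumed by a bitwise select. A minimal usage sketch in C, assuming the __lsx_vfcmp_clt_d and __lsx_vbitsel_v intrinsics defined elsewhere in the same header, with vbitsel.v's (vj & ~mask) | (vk & mask) semantics — treat both names and the operand order as assumptions.]

#include <lsxintrin.h>

/* Per-lane minimum of two f64x2 vectors.  The compare yields all-ones
   in each lane where a < b; vbitsel then takes bits from its second
   argument where the mask is set, and from its first elsewhere.  */
__m128d
vec_min_f64 (__m128d a, __m128d b)
{
  __m128i lt = __lsx_vfcmp_clt_d (a, b);   /* 0xffff... where a < b */
  return (__m128d) __lsx_vbitsel_v ((__m128i) b, (__m128i) a, lt);
}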
-+ -+(define_predicate "const_uns_arith_operand" -+ (and (match_code "const_int") -+ (match_test "SMALL_OPERAND_UNSIGNED (INTVAL (op))"))) -+ -+(define_predicate "uns_arith_operand" -+ (ior (match_operand 0 "const_uns_arith_operand") -+ (match_operand 0 "register_operand"))) -+ -+(define_predicate "const_lu32i_operand" -+ (and (match_code "const_int") -+ (match_test "LU32I_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "const_lu52i_operand" -+ (and (match_code "const_int") -+ (match_test "LU52I_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "const_arith_operand" -+ (and (match_code "const_int") -+ (match_test "IMM12_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "const_imm16_operand" -+ (and (match_code "const_int") -+ (match_test "IMM16_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "arith_operand" -+ (ior (match_operand 0 "const_arith_operand") -+ (match_operand 0 "register_operand"))) -+ -+(define_predicate "const_immlsa_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 1, 4)"))) -+ -+(define_predicate "const_lsx_branch_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), -1024, 1023)"))) -+ -+(define_predicate "const_uimm3_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) -+ -+(define_predicate "const_uimm4_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 15)"))) -+ -+(define_predicate "const_uimm5_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 31)"))) -+ -+(define_predicate "const_uimm6_operand" -+ (and (match_code "const_int") -+ (match_test "UIMM6_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "const_uimm7_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 127)"))) -+ -+(define_predicate "const_uimm8_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 255)"))) -+ -+(define_predicate "const_uimm14_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 16383)"))) -+ -+(define_predicate "const_uimm15_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 32767)"))) -+ -+(define_predicate "const_imm5_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), -16, 15)"))) -+ -+(define_predicate "const_imm10_operand" -+ (and (match_code "const_int") -+ (match_test "IMM10_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "const_imm12_operand" -+ (and (match_code "const_int") -+ (match_test "IMM12_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "const_imm13_operand" -+ (and (match_code "const_int") -+ (match_test "IMM13_OPERAND (INTVAL (op))"))) -+ -+(define_predicate "reg_imm10_operand" -+ (ior (match_operand 0 "const_imm10_operand") -+ (match_operand 0 "register_operand"))) -+ -+(define_predicate "aq8b_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) -+ -+(define_predicate "aq8h_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 1)"))) -+ -+(define_predicate "aq8w_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 2)"))) -+ -+(define_predicate "aq8d_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) -+ -+(define_predicate "aq10b_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 0)"))) -+ 
-+(define_predicate "aq10h_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 1)"))) -+ -+(define_predicate "aq10w_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)"))) -+ -+(define_predicate "aq10d_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)"))) -+ -+(define_predicate "aq12b_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)"))) -+ -+(define_predicate "aq12h_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 11, 1)"))) -+ -+(define_predicate "aq12w_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)"))) -+ -+(define_predicate "aq12d_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 9, 3)"))) -+ -+(define_predicate "sle_operand" -+ (and (match_code "const_int") -+ (match_test "SMALL_OPERAND (INTVAL (op) + 1)"))) -+ -+(define_predicate "sleu_operand" -+ (and (match_operand 0 "sle_operand") -+ (match_test "INTVAL (op) + 1 != 0"))) -+ -+(define_predicate "const_0_operand" -+ (and (match_code "const_int,const_double,const_vector") -+ (match_test "op == CONST0_RTX (GET_MODE (op))"))) -+ -+(define_predicate "const_m1_operand" -+ (and (match_code "const_int,const_double,const_vector") -+ (match_test "op == CONSTM1_RTX (GET_MODE (op))"))) -+ -+(define_predicate "reg_or_m1_operand" -+ (ior (match_operand 0 "const_m1_operand") -+ (match_operand 0 "register_operand"))) -+ -+(define_predicate "reg_or_0_operand" -+ (ior (match_operand 0 "const_0_operand") -+ (match_operand 0 "register_operand"))) -+ -+(define_predicate "const_1_operand" -+ (and (match_code "const_int,const_double,const_vector") -+ (match_test "op == CONST1_RTX (GET_MODE (op))"))) -+ -+(define_predicate "reg_or_1_operand" -+ (ior (match_operand 0 "const_1_operand") -+ (match_operand 0 "register_operand"))) -+ -+;; These are used in vec_merge, hence accept bitmask as const_int. -+(define_predicate "const_exp_2_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 1)"))) -+ -+(define_predicate "const_exp_4_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 3)"))) -+ -+(define_predicate "const_exp_8_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 7)"))) -+ -+(define_predicate "const_exp_16_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 15)"))) -+ -+(define_predicate "const_exp_32_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 31)"))) -+ -+;; This is used for indexing into vectors, and hence only accepts const_int. 
-+(define_predicate "const_0_or_1_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 1)"))) -+ -+(define_predicate "const_2_or_3_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 2, 3)"))) -+ -+(define_predicate "const_0_to_3_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 3)"))) -+ -+(define_predicate "const_0_to_7_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) -+ -+(define_predicate "const_4_to_7_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 4, 7)"))) -+ -+(define_predicate "const_8_to_15_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) -+ -+(define_predicate "const_16_to_31_operand" -+ (and (match_code "const_int") -+ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) -+ -+(define_predicate "qi_mask_operand" -+ (and (match_code "const_int") -+ (match_test "UINTVAL (op) == 0xff"))) -+ -+(define_predicate "hi_mask_operand" -+ (and (match_code "const_int") -+ (match_test "UINTVAL (op) == 0xffff"))) -+ -+(define_predicate "lu52i_mask_operand" -+ (and (match_code "const_int") -+ (match_test "UINTVAL (op) == 0xfffffffffffff"))) -+ -+(define_predicate "shift_mask_operand" -+ (and (match_code "const_int") -+ (ior (match_test "UINTVAL (op) == 0x3fffffffc") -+ (match_test "UINTVAL (op) == 0x1fffffffe") -+ (match_test "UINTVAL (op) == 0x7fffffff8") -+ (match_test "UINTVAL (op) == 0xffffffff0")))) -+ -+ -+ -+(define_predicate "si_mask_operand" -+ (and (match_code "const_int") -+ (match_test "UINTVAL (op) == 0xffffffff"))) -+ -+(define_predicate "and_load_operand" -+ (ior (match_operand 0 "qi_mask_operand") -+ (match_operand 0 "hi_mask_operand") -+ (match_operand 0 "si_mask_operand"))) -+ -+(define_predicate "low_bitmask_operand" -+ (and (match_code "const_int") -+ (match_test "low_bitmask_len (mode, INTVAL (op)) > 12"))) -+ -+(define_predicate "and_reg_operand" -+ (ior (match_operand 0 "register_operand") -+ (match_operand 0 "const_uns_arith_operand") -+ (match_operand 0 "low_bitmask_operand") -+ (match_operand 0 "si_mask_operand"))) -+ -+(define_predicate "and_operand" -+ (ior (match_operand 0 "and_load_operand") -+ (match_operand 0 "and_reg_operand"))) -+ -+(define_predicate "d_operand" -+ (and (match_code "reg") -+ (match_test "GP_REG_P (REGNO (op))"))) -+ -+(define_predicate "lwsp_swsp_operand" -+ (and (match_code "mem") -+ (match_test "lwsp_swsp_address_p (XEXP (op, 0), mode)"))) -+ -+(define_predicate "db4_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)"))) -+ -+(define_predicate "db7_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)"))) -+ -+(define_predicate "db8_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)"))) -+ -+(define_predicate "ib3_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)"))) -+ -+(define_predicate "sb4_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 4, 0)"))) -+ -+(define_predicate "sb5_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 5, 0)"))) -+ -+(define_predicate "sb8_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) -+ -+(define_predicate 
"sd8_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) -+ -+(define_predicate "ub4_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 0)"))) -+ -+(define_predicate "ub8_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 0)"))) -+ -+(define_predicate "uh4_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 1)"))) -+ -+(define_predicate "uw4_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 2)"))) -+ -+(define_predicate "uw5_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 5, 2)"))) -+ -+(define_predicate "uw6_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 6, 2)"))) -+ -+(define_predicate "uw8_operand" -+ (and (match_code "const_int") -+ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 2)"))) -+ -+(define_predicate "addiur2_operand" -+ (and (match_code "const_int") -+ (ior (match_test "INTVAL (op) == -1") -+ (match_test "INTVAL (op) == 1") -+ (match_test "INTVAL (op) == 4") -+ (match_test "INTVAL (op) == 8") -+ (match_test "INTVAL (op) == 12") -+ (match_test "INTVAL (op) == 16") -+ (match_test "INTVAL (op) == 20") -+ (match_test "INTVAL (op) == 24")))) -+ -+(define_predicate "addiusp_operand" -+ (and (match_code "const_int") -+ (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))") -+ (match_test "(IN_RANGE (INTVAL (op), -258, -3))")))) -+ -+(define_predicate "andi16_operand" -+ (and (match_code "const_int") -+ (ior (match_test "IN_RANGE (INTVAL (op), 1, 4)") -+ (match_test "IN_RANGE (INTVAL (op), 7, 8)") -+ (match_test "IN_RANGE (INTVAL (op), 15, 16)") -+ (match_test "IN_RANGE (INTVAL (op), 31, 32)") -+ (match_test "IN_RANGE (INTVAL (op), 63, 64)") -+ (match_test "INTVAL (op) == 255") -+ (match_test "INTVAL (op) == 32768") -+ (match_test "INTVAL (op) == 65535")))) -+ -+(define_predicate "movep_src_register" -+ (and (match_code "reg") -+ (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)")) -+ (match_test ("IN_RANGE (REGNO (op), 16, 20)"))))) -+ -+(define_predicate "movep_src_operand" -+ (ior (match_operand 0 "const_0_operand") -+ (match_operand 0 "movep_src_register"))) -+ -+(define_predicate "fcc_reload_operand" -+ (and (match_code "reg,subreg") -+ (match_test "ST_REG_P (true_regnum (op))"))) -+ -+(define_predicate "muldiv_target_operand" -+ (match_operand 0 "register_operand")) -+ -+(define_predicate "const_call_insn_operand" -+ (match_code "const,symbol_ref,label_ref") -+{ -+ enum loongarch_symbol_type symbol_type; -+ -+ if (!loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_CALL, &symbol_type)) -+ return false; -+ -+ switch (symbol_type) -+ { -+ case SYMBOL_GOT_DISP: -+ /* Without explicit relocs, there is no special syntax for -+ loading the address of a call destination into a register. -+ Using "la $25,foo; jal $25" would prevent the lazy binding -+ of "foo", so keep the address of global symbols with the -+ jal macro. 
*/ -+ return 1; -+ -+ default: -+ return false; -+ } -+}) -+ -+(define_predicate "call_insn_operand" -+ (ior (match_operand 0 "const_call_insn_operand") -+ (match_operand 0 "register_operand"))) -+ -+(define_predicate "is_const_call_local_symbol" -+ (and (match_operand 0 "const_call_insn_operand") -+ (ior (match_test "loongarch_global_symbol_p (op) == 0") -+ (match_test "loongarch_symbol_binds_local_p (op) != 0")) -+ (match_test "CONSTANT_P (op)"))) -+ -+(define_predicate "is_const_call_weak_symbol" -+ (and (match_operand 0 "const_call_insn_operand") -+ (not (match_operand 0 "is_const_call_local_symbol")) -+ (match_test "loongarch_weak_symbol_p (op) != 0") -+ (match_test "CONSTANT_P (op)"))) -+ -+(define_predicate "is_const_call_plt_symbol" -+ (and (match_operand 0 "const_call_insn_operand") -+ (match_test "flag_plt != 0") -+ (match_test "loongarch_global_symbol_noweak_p (op) != 0") -+ (match_test "CONSTANT_P (op)"))) -+ -+(define_predicate "is_const_call_global_noplt_symbol" -+ (and (match_operand 0 "const_call_insn_operand") -+ (match_test "flag_plt == 0") -+ (match_test "loongarch_global_symbol_noweak_p (op) != 0") -+ (match_test "CONSTANT_P (op)"))) -+ -+;; A legitimate CONST_INT operand that takes more than one instruction -+;; to load. -+(define_predicate "splittable_const_int_operand" -+ (match_code "const_int") -+{ -+ -+ /* Don't handle multi-word moves this way; we don't want to introduce -+ the individual word-mode moves until after reload. */ -+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) -+ return false; -+ -+ /* Otherwise check whether the constant can be loaded in a single -+ instruction. */ -+// return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op); -+ return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op) -+ && !LU52I_INT (op); -+}) -+ -+(define_predicate "move_operand" -+ (match_operand 0 "general_operand") -+{ -+ enum loongarch_symbol_type symbol_type; -+ -+ /* The thinking here is as follows: -+ -+ (1) The move expanders should split complex load sequences into -+ individual instructions. Those individual instructions can -+ then be optimized by all rtl passes. -+ -+ (2) The target of pre-reload load sequences should not be used -+ to store temporary results. If the target register is only -+ assigned one value, reload can rematerialize that value -+ on demand, rather than spill it to the stack. -+ -+ (3) If we allowed pre-reload passes like combine and cse to recreate -+ complex load sequences, we would want to be able to split the -+ sequences before reload as well, so that the pre-reload scheduler -+ can see the individual instructions. This falls foul of (2); -+ the splitter would be forced to reuse the target register for -+ intermediate results. -+ -+ (4) We want to define complex load splitters for combine. These -+ splitters can request a temporary scratch register, which avoids -+ the problem in (2). They allow things like: -+ -+ (set (reg T1) (high SYM)) -+ (set (reg T2) (low (reg T1) SYM)) -+ (set (reg X) (plus (reg T2) (const_int OFFSET))) -+ -+ to be combined into: -+ -+ (set (reg T3) (high SYM+OFFSET)) -+ (set (reg X) (lo_sum (reg T3) SYM+OFFSET)) -+ -+ if T2 is only used this once. 
*/ -+ switch (GET_CODE (op)) -+ { -+ case CONST_INT: -+ return !splittable_const_int_operand (op, mode); -+ -+ case CONST: -+ case SYMBOL_REF: -+ case LABEL_REF: -+ return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)); -+ default: -+ return true; -+ } -+}) -+ -+(define_predicate "consttable_operand" -+ (match_test "CONSTANT_P (op)")) -+ -+(define_predicate "symbolic_operand" -+ (match_code "const,symbol_ref,label_ref") -+{ -+ enum loongarch_symbol_type type; -+ return loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type); -+}) -+ -+(define_predicate "force_to_mem_operand" -+ (match_code "const,symbol_ref,label_ref") -+{ -+ enum loongarch_symbol_type symbol_type; -+ return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type) -+ && loongarch_use_pcrel_pool_p[(int) symbol_type]); -+}) -+ -+(define_predicate "got_disp_operand" -+ (match_code "const,symbol_ref,label_ref") -+{ -+ enum loongarch_symbol_type type; -+ return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type) -+ && type == SYMBOL_GOT_DISP); -+}) -+ -+(define_predicate "symbol_ref_operand" -+ (match_code "symbol_ref")) -+ -+(define_predicate "stack_operand" -+ (and (match_code "mem") -+ (match_test "loongarch_stack_address_p (XEXP (op, 0), GET_MODE (op))"))) -+ -+ -+ -+(define_predicate "equality_operator" -+ (match_code "eq,ne")) -+ -+(define_predicate "extend_operator" -+ (match_code "zero_extend,sign_extend")) -+ -+(define_predicate "trap_comparison_operator" -+ (match_code "eq,ne,lt,ltu,ge,geu")) -+ -+(define_predicate "order_operator" -+ (match_code "lt,ltu,le,leu,ge,geu,gt,gtu")) -+ -+;; For NE, cstore uses sltu instructions in which the first operand is $0. -+ -+(define_predicate "loongarch_cstore_operator" -+ (ior (match_code "eq,gt,gtu,ge,geu,lt,ltu,le,leu") -+ (match_code "ne"))) -+ -+(define_predicate "small_data_pattern" -+ (and (match_code "set,parallel,unspec,unspec_volatile,prefetch") -+ (match_test "loongarch_small_data_pattern_p (op)"))) -+ -+(define_predicate "mem_noofs_operand" -+ (and (match_code "mem") -+ (match_code "reg" "0"))) -+ -+;; Return 1 if the operand is in non-volatile memory. 
-+(define_predicate "non_volatile_mem_operand" -+ (and (match_operand 0 "memory_operand") -+ (not (match_test "MEM_VOLATILE_P (op)")))) -+ -+(define_predicate "const_vector_same_val_operand" -+ (match_code "const_vector") -+{ -+ return loongarch_const_vector_same_val_p (op, mode); -+}) -+ -+(define_predicate "const_vector_same_simm5_operand" -+ (match_code "const_vector") -+{ -+ return loongarch_const_vector_same_int_p (op, mode, -16, 15); -+}) -+ -+(define_predicate "const_vector_same_uimm5_operand" -+ (match_code "const_vector") -+{ -+ return loongarch_const_vector_same_int_p (op, mode, 0, 31); -+}) -+ -+(define_predicate "const_vector_same_ximm5_operand" -+ (match_code "const_vector") -+{ -+ return loongarch_const_vector_same_int_p (op, mode, -31, 31); -+}) -+ -+(define_predicate "const_vector_same_uimm6_operand" -+ (match_code "const_vector") -+{ -+ return loongarch_const_vector_same_int_p (op, mode, 0, 63); -+}) -+ -+(define_predicate "const_vector_same_uimm8_operand" -+ (match_code "const_vector") -+{ -+ return loongarch_const_vector_same_int_p (op, mode, 0, 255); -+}) -+ -+(define_predicate "par_const_vector_shf_set_operand" -+ (match_code "parallel") -+{ -+ return loongarch_const_vector_shuffle_set_p (op, mode); -+}) -+ -+(define_predicate "reg_or_vector_same_val_operand" -+ (ior (match_operand 0 "register_operand") -+ (match_operand 0 "const_vector_same_val_operand"))) -+ -+(define_predicate "reg_or_vector_same_simm5_operand" -+ (ior (match_operand 0 "register_operand") -+ (match_operand 0 "const_vector_same_simm5_operand"))) -+ -+(define_predicate "reg_or_vector_same_uimm5_operand" -+ (ior (match_operand 0 "register_operand") -+ (match_operand 0 "const_vector_same_uimm5_operand"))) -+ -+(define_predicate "reg_or_vector_same_ximm5_operand" -+ (ior (match_operand 0 "register_operand") -+ (match_operand 0 "const_vector_same_ximm5_operand"))) -+ -+(define_predicate "reg_or_vector_same_uimm6_operand" -+ (ior (match_operand 0 "register_operand") -+ (match_operand 0 "const_vector_same_uimm6_operand"))) -diff --git a/gcc/config/loongarch/rtems.h b/gcc/config/loongarch/rtems.h -new file mode 100644 -index 000000000..bbb70b040 ---- /dev/null -+++ b/gcc/config/loongarch/rtems.h -@@ -0,0 +1,39 @@ -+/* Definitions for rtems targeting a LARCH using ELF. -+ Copyright (C) 1996-2018 Free Software Foundation, Inc. -+ Contributed by Joel Sherrill (joel@OARcorp.com). -+ -+ This file is part of GCC. -+ -+ GCC is free software; you can redistribute it and/or modify it -+ under the terms of the GNU General Public License as published -+ by the Free Software Foundation; either version 3, or (at your -+ option) any later version. -+ -+ GCC is distributed in the hope that it will be useful, but WITHOUT -+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+ License for more details. -+ -+ Under Section 7 of GPL version 3, you are granted additional -+ permissions described in the GCC Runtime Library Exception, version -+ 3.1, as published by the Free Software Foundation. -+ -+ You should have received a copy of the GNU General Public License and -+ a copy of the GCC Runtime Library Exception along with this program; -+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+ . */ -+ -+/* Specify predefined symbols in preprocessor. 
*/ -+ -+#define TARGET_OS_CPP_BUILTINS() \ -+do { \ -+ builtin_define ("__rtems__"); \ -+ builtin_define ("__USE_INIT_FINI__"); \ -+ builtin_assert ("system=rtems"); \ -+} while (0) -+ -+/* No sdata. -+ * The RTEMS BSPs expect -G0 -+ */ -+#undef LARCH_DEFAULT_GVALUE -+#define LARCH_DEFAULT_GVALUE 0 -diff --git a/gcc/config/loongarch/sde.opt b/gcc/config/loongarch/sde.opt -new file mode 100644 -index 000000000..321217d51 ---- /dev/null -+++ b/gcc/config/loongarch/sde.opt -@@ -0,0 +1,28 @@ -+; LARCH SDE options. -+; -+; Copyright (C) 2010-2018 Free Software Foundation, Inc. -+; -+; This file is part of GCC. -+; -+; GCC is free software; you can redistribute it and/or modify it under -+; the terms of the GNU General Public License as published by the Free -+; Software Foundation; either version 3, or (at your option) any later -+; version. -+; -+; GCC is distributed in the hope that it will be useful, but WITHOUT -+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+; License for more details. -+; -+; You should have received a copy of the GNU General Public License -+; along with GCC; see the file COPYING3. If not see -+; . -+ -+; -mcode-xonly is a traditional alias for -mcode-readable=pcrel and -+; -mno-data-in-code is a traditional alias for -mcode-readable=no. -+ -+mno-data-in-code -+Target RejectNegative Alias(mcode-readable=, no) -+ -+mcode-xonly -+Target RejectNegative Alias(mcode-readable=, pcrel) -diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md -new file mode 100644 -index 000000000..e3eb43e16 ---- /dev/null -+++ b/gcc/config/loongarch/sync.md -@@ -0,0 +1,616 @@ -+;; Machine description for LARCH atomic operations. -+;; Copyright (C) 2011-2018 Free Software Foundation, Inc. -+;; Contributed by Andrew Waterman (andrew@sifive.com). -+;; Based on LARCH target for GNU compiler. -+ -+;; This file is part of GCC. -+ -+;; GCC is free software; you can redistribute it and/or modify -+;; it under the terms of the GNU General Public License as published by -+;; the Free Software Foundation; either version 3, or (at your option) -+;; any later version. -+ -+;; GCC is distributed in the hope that it will be useful, -+;; but WITHOUT ANY WARRANTY; without even the implied warranty of -+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+;; GNU General Public License for more details. -+ -+;; You should have received a copy of the GNU General Public License -+;; along with GCC; see the file COPYING3. If not see -+;; . -+ -+(define_c_enum "unspec" [ -+ UNSPEC_COMPARE_AND_SWAP -+ UNSPEC_COMPARE_AND_SWAP_ADD -+ UNSPEC_COMPARE_AND_SWAP_SUB -+ UNSPEC_COMPARE_AND_SWAP_AND -+ UNSPEC_COMPARE_AND_SWAP_XOR -+ UNSPEC_COMPARE_AND_SWAP_OR -+ UNSPEC_COMPARE_AND_SWAP_NAND -+ UNSPEC_SYNC_OLD_OP -+ UNSPEC_SYNC_EXCHANGE -+ UNSPEC_ATOMIC_STORE -+ UNSPEC_MEMORY_BARRIER -+]) -+ -+(define_code_iterator any_atomic [plus ior xor and]) -+(define_code_attr atomic_optab -+ [(plus "add") (ior "or") (xor "xor") (and "and")]) -+ -+;; This attribute gives the format suffix for atomic memory operations. -+(define_mode_attr amo [(SI "w") (DI "d")]) -+ -+;; expands to the name of the atomic operand that implements a particular code. -+(define_code_attr amop [(ior "or") -+ (xor "xor") -+ (and "and") -+ (plus "add")]) -+;; Memory barriers. 
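[Reader's aid, not part of the patch: the expanders and insns that follow are what lower the C11/C++11 atomic builtins on this target. A hedged sketch of the source-level mapping, read off the templates below — a non-relaxed fence becomes a full "dbar 0" barrier, and a word-sized fetch-op becomes a single am*-family instruction, while 8/16-bit variants fall back to the ll/sc loops defined further down.]

#include <stdatomic.h>

/* Lowered via mem_thread_fence -> mem_thread_fence_1: dbar 0.  */
void
full_fence (void)
{
  atomic_thread_fence (memory_order_seq_cst);
}

/* Lowered via atomic_fetch_<amop><mode>: one amadd.w-family insn.  */
int
fetch_add (atomic_int *p, int v)
{
  return atomic_fetch_add_explicit (p, v, memory_order_acq_rel);
}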
-+ -+(define_expand "mem_thread_fence" -+ [(match_operand:SI 0 "const_int_operand" "")] ;; model -+ "" -+{ -+ if (INTVAL (operands[0]) != MEMMODEL_RELAXED) -+ { -+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); -+ MEM_VOLATILE_P (mem) = 1; -+ emit_insn (gen_mem_thread_fence_1 (mem, operands[0])); -+ } -+ DONE; -+}) -+ -+;; Until the LARCH memory model (hence its mapping from C++) is finalized, -+;; conservatively emit a full FENCE. -+(define_insn "mem_thread_fence_1" -+ [(set (match_operand:BLK 0 "" "") -+ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER)) -+ (match_operand:SI 1 "const_int_operand" "")] ;; model -+ "" -+ "dbar\t0") -+ -+;; Atomic memory operations. -+ -+;; Implement atomic stores with amoswap. Fall back to fences for atomic loads. -+(define_insn "atomic_store" -+ [(set (match_operand:GPR 0 "memory_operand" "+ZB") -+ (unspec_volatile:GPR -+ [(match_operand:GPR 1 "reg_or_0_operand" "rJ") -+ (match_operand:SI 2 "const_int_operand")] ;; model -+ UNSPEC_ATOMIC_STORE))] -+ "" -+ "amswap%A2.\t$zero,%z1,%0" -+ [(set (attr "length") (const_int 8))]) -+ -+(define_insn "atomic_" -+ [(set (match_operand:GPR 0 "memory_operand" "+ZB") -+ (unspec_volatile:GPR -+ [(any_atomic:GPR (match_dup 0) -+ (match_operand:GPR 1 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 2 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+ "am%A2.\t$zero,%z1,%0" -+ [(set (attr "length") (const_int 8))]) -+ -+(define_insn "atomic_fetch_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") -+ (match_operand:GPR 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR -+ [(any_atomic:GPR (match_dup 1) -+ (match_operand:GPR 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+ "am%A3.\t%0,%z2,%1" -+ [(set (attr "length") (const_int 8))]) -+ -+(define_insn "atomic_exchange" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") -+ (unspec_volatile:GPR -+ [(match_operand:GPR 1 "memory_operand" "+ZB") -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_EXCHANGE)) -+ (set (match_dup 1) -+ (match_operand:GPR 2 "register_operand" "r"))] -+ "" -+ "amswap%A3.\t%0,%z2,%1" -+ [(set (attr "length") (const_int 8))]) -+ -+(define_insn "atomic_cas_value_strong" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") -+ (match_operand:SI 4 "const_int_operand") ;; mod_s -+ (match_operand:SI 5 "const_int_operand")] ;; mod_f -+ UNSPEC_COMPARE_AND_SWAP)) -+ (clobber (match_scratch:GPR 6 "=&r"))] -+ "" -+{ -+ return "%G5\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "bne\t%0,%z2,2f\n\t" -+ "or%i3\t%6,$zero,%3\n\t" -+ "sc.\t%6,%1\n\t" -+ "beq\t$zero,%6,1b\n\t" -+ "b\t3f\n\t" -+ "2:\n\t" -+ "dbar\t0x700\n\t" -+ "3:\n\t"; -+} -+ [(set (attr "length") (const_int 32))]) -+ -+(define_expand "atomic_compare_and_swap" -+ [(match_operand:SI 0 "register_operand" "") ;; bool output -+ (match_operand:GPR 1 "register_operand" "") ;; val output -+ (match_operand:GPR 2 "memory_operand" "") ;; memory -+ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value -+ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value -+ (match_operand:SI 5 "const_int_operand" "") ;; is_weak -+ (match_operand:SI 6 "const_int_operand" "") ;; mod_s -+ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f -+ "" -+{ -+ emit_insn (gen_atomic_cas_value_strong 
(operands[1], operands[2], -+ operands[3], operands[4], -+ operands[6], operands[7])); -+ -+ rtx compare = operands[1]; -+ if (operands[3] != const0_rtx) -+ { -+ rtx difference = gen_rtx_MINUS (mode, operands[1], operands[3]); -+ compare = gen_reg_rtx (mode); -+ emit_insn (gen_rtx_SET (compare, difference)); -+ } -+ -+ if (word_mode != mode) -+ { -+ rtx reg = gen_reg_rtx (word_mode); -+ emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); -+ compare = reg; -+ } -+ -+ emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); -+ DONE; -+}) -+ -+(define_expand "atomic_test_and_set" -+ [(match_operand:QI 0 "register_operand" "") ;; bool output -+ (match_operand:QI 1 "memory_operand" "+ZB") ;; memory -+ (match_operand:SI 2 "const_int_operand" "")] ;; model -+ "" -+{ -+ /* We have no QImode atomics, so use the address LSBs to form a mask, -+ then use an aligned SImode atomic. */ -+ rtx result = operands[0]; -+ rtx mem = operands[1]; -+ rtx model = operands[2]; -+ rtx addr = force_reg (Pmode, XEXP (mem, 0)); -+ rtx tmp_reg = gen_reg_rtx (Pmode); -+ rtx zero_reg = gen_rtx_REG (Pmode, 0); -+ -+ rtx aligned_addr = gen_reg_rtx (Pmode); -+ emit_move_insn (tmp_reg, gen_rtx_PLUS (Pmode, zero_reg, GEN_INT (-4))); -+ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, tmp_reg)); -+ -+ rtx aligned_mem = change_address (mem, SImode, aligned_addr); -+ set_mem_alias_set (aligned_mem, 0); -+ -+ rtx offset = gen_reg_rtx (SImode); -+ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr), -+ GEN_INT (3))); -+ -+ rtx tmp = gen_reg_rtx (SImode); -+ emit_move_insn (tmp, GEN_INT (1)); -+ -+ rtx shmt = gen_reg_rtx (SImode); -+ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3))); -+ -+ rtx word = gen_reg_rtx (SImode); -+ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt)); -+ -+ tmp = gen_reg_rtx (SImode); -+ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model)); -+ -+ emit_move_insn (gen_lowpart (SImode, result), -+ gen_rtx_LSHIFTRT (SImode, tmp, shmt)); -+ DONE; -+}) -+ -+ -+ -+(define_insn "atomic_cas_value_cmp_and_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP)) -+ (clobber (match_scratch:GPR 7 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%2\n\t" -+ "bne\t%7,%z4,2f\n\t" -+ "and\t%7,%0,%z3\n\t" -+ "or%i5\t%7,%7,%5\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b\n\t" -+ "b\t3f\n\t" -+ "2:\n\t" -+ "dbar\t0x700\n\t" -+ "3:\n\t"; -+} -+ [(set (attr "length") (const_int 40))]) -+ -+ -+(define_expand "atomic_compare_and_swap" -+ [(match_operand:SI 0 "register_operand" "") ;; bool output -+ (match_operand:SHORT 1 "register_operand" "") ;; val output -+ (match_operand:SHORT 2 "memory_operand" "") ;; memory -+ (match_operand:SHORT 3 "reg_or_0_operand" "") ;; expected value -+ (match_operand:SHORT 4 "reg_or_0_operand" "") ;; desired value -+ (match_operand:SI 5 "const_int_operand" "") ;; is_weak -+ (match_operand:SI 6 "const_int_operand" "") ;; mod_s -+ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; -+ 
loongarch_expand_atomic_qihi (generator, -+ operands[1], -+ operands[2], -+ operands[3], -+ operands[4], -+ operands[7]); -+ -+ rtx compare = operands[1]; -+ if (operands[3] != const0_rtx) -+ { -+ machine_mode mode = GET_MODE (operands[3]); -+ rtx op1 = convert_modes (SImode, mode, operands[1], true); -+ rtx op3 = convert_modes (SImode, mode, operands[3], true); -+ rtx difference = gen_rtx_MINUS (SImode, op1, op3); -+ compare = gen_reg_rtx (SImode); -+ emit_insn (gen_rtx_SET (compare, difference)); -+ } -+ -+ if (word_mode != mode) -+ { -+ rtx reg = gen_reg_rtx (word_mode); -+ emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); -+ compare = reg; -+ } -+ -+ emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); -+ DONE; -+}) -+ -+ -+ -+ -+(define_insn "atomic_cas_value_add_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP_ADD)) -+ (clobber (match_scratch:GPR 7 "=&r")) -+ (clobber (match_scratch:GPR 8 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%3\n\t" -+ "add.w\t%8,%0,%z5\n\t" -+ "and\t%8,%8,%z2\n\t" -+ "or%i8\t%7,%7,%8\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b"; -+} -+ -+ [(set (attr "length") (const_int 32))]) -+ -+ -+ -+(define_insn "atomic_cas_value_sub_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP_SUB)) -+ (clobber (match_scratch:GPR 7 "=&r")) -+ (clobber (match_scratch:GPR 8 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%3\n\t" -+ "sub.w\t%8,%0,%z5\n\t" -+ "and\t%8,%8,%z2\n\t" -+ "or%i8\t%7,%7,%8\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b"; -+} -+ [(set (attr "length") (const_int 32))]) -+ -+ -+ -+(define_insn "atomic_cas_value_and_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP_AND)) -+ (clobber (match_scratch:GPR 7 "=&r")) -+ (clobber (match_scratch:GPR 8 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%3\n\t" -+ "and\t%8,%0,%z5\n\t" -+ "and\t%8,%8,%z2\n\t" -+ "or%i8\t%7,%7,%8\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b"; -+} -+ [(set (attr "length") (const_int 32))]) -+ -+(define_insn "atomic_cas_value_xor_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ 
(set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP_XOR)) -+ (clobber (match_scratch:GPR 7 "=&r")) -+ (clobber (match_scratch:GPR 8 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%3\n\t" -+ "xor\t%8,%0,%z5\n\t" -+ "and\t%8,%8,%z2\n\t" -+ "or%i8\t%7,%7,%8\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b"; -+} -+ -+ [(set (attr "length") (const_int 32))]) -+ -+(define_insn "atomic_cas_value_or_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP_OR)) -+ (clobber (match_scratch:GPR 7 "=&r")) -+ (clobber (match_scratch:GPR 8 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%3\n\t" -+ "or\t%8,%0,%z5\n\t" -+ "and\t%8,%8,%z2\n\t" -+ "or%i8\t%7,%7,%8\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b"; -+} -+ -+ [(set (attr "length") (const_int 32))]) -+ -+(define_insn "atomic_cas_value_nand_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_COMPARE_AND_SWAP_NAND)) -+ (clobber (match_scratch:GPR 7 "=&r")) -+ (clobber (match_scratch:GPR 8 "=&r"))] -+ "" -+{ -+ return "%G6\n\t" -+ "1:\n\t" -+ "ll.\t%0,%1\n\t" -+ "and\t%7,%0,%3\n\t" -+ "and\t%8,%0,%z5\n\t" -+ "xor\t%8,%8,%z2\n\t" -+ "or%i8\t%7,%7,%8\n\t" -+ "sc.\t%7,%1\n\t" -+ "beq\t$zero,%7,1b"; -+} -+ [(set (attr "length") (const_int 32))]) -+ -+(define_expand "atomic_exchange" -+ [(set (match_operand:SHORT 0 "register_operand") -+ (unspec_volatile:SHORT -+ [(match_operand:SHORT 1 "memory_operand") -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_EXCHANGE)) -+ (set (match_dup 1) -+ (match_operand:SHORT 2 "register_operand"))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ operands[2], -+ operands[3]); -+ DONE; -+}) -+ -+ -+(define_expand "atomic_fetch_add" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(plus:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_add_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ 
operands[2], -+ operands[3]); -+ DONE; -+}) -+ -+(define_expand "atomic_fetch_sub" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(minus:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_sub_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ operands[2], -+ operands[3]); -+ DONE; -+}) -+ -+(define_expand "atomic_fetch_and" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(and:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_and_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ operands[2], -+ operands[3]); -+ DONE; -+}) -+ -+(define_expand "atomic_fetch_xor" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(xor:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_xor_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ operands[2], -+ operands[3]); -+ DONE; -+}) -+ -+(define_expand "atomic_fetch_or" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(ior:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_or_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ operands[2], -+ operands[3]); -+ DONE; -+}) -+ -+(define_expand "atomic_fetch_nand" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(not:SHORT (and:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+{ -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_nand_7_si; -+ loongarch_expand_atomic_qihi (generator, -+ operands[0], -+ operands[1], -+ operands[1], -+ operands[2], -+ operands[3]); -+ DONE; -+}) -diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux -new file mode 100644 -index 000000000..479f4293e ---- /dev/null -+++ b/gcc/config/loongarch/t-linux -@@ -0,0 +1,23 @@ -+# Copyright (C) 2003-2018 Free Software Foundation, Inc. -+# -+# This file is part of GCC. 
-+# -+# GCC is free software; you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation; either version 3, or (at your option) -+# any later version. -+# -+# GCC is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GCC; see the file COPYING3. If not see -+# . -+ -+MULTILIB_OSDIRNAMES := ../lib$(call if_multiarch,:loongarch64-linux-gnu) -+MULTIARCH_DIRNAME := $(call if_multiarch,loongarch64-linux-gnu) -+ -+# haven't supported lp32 yet -+MULTILIB_EXCEPTIONS = mabi=lp32 -diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch -new file mode 100644 -index 000000000..5689da44a ---- /dev/null -+++ b/gcc/config/loongarch/t-loongarch -@@ -0,0 +1,45 @@ -+# Copyright (C) 2002-2018 Free Software Foundation, Inc. -+# -+# This file is part of GCC. -+# -+# GCC is free software; you can redistribute it and/or modify -+# it under the terms of the GNU General Public License as published by -+# the Free Software Foundation; either version 3, or (at your option) -+# any later version. -+# -+# GCC is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+# GNU General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GCC; see the file COPYING3. If not see -+# . -+ -+$(srcdir)/config/loongarch/loongarch-tables.opt: $(srcdir)/config/loongarch/genopt.sh \ -+ $(srcdir)/config/loongarch/loongarch-cpus.def -+ $(SHELL) $(srcdir)/config/loongarch/genopt.sh $(srcdir)/config/loongarch > \ -+ $(srcdir)/config/loongarch/loongarch-tables.opt -+ -+frame-header-opt.o: $(srcdir)/config/loongarch/frame-header-opt.c -+ $(COMPILE) $< -+ $(POSTCOMPILE) -+ -+loongarch-c.o: $(srcdir)/config/loongarch/loongarch-c.c $(CONFIG_H) $(SYSTEM_H) \ -+ coretypes.h $(TM_H) $(TREE_H) output.h $(C_COMMON_H) $(TARGET_H) -+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ -+ $(srcdir)/config/loongarch/loongarch-c.c -+ -+loongarch-builtins.o: $(srcdir)/config/loongarch/loongarch-builtins.c $(CONFIG_H) \ -+ $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \ -+ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/loongarch/loongarch-ftypes.def \ -+ $(srcdir)/config/loongarch/loongarch-modes.def -+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ -+ $(srcdir)/config/loongarch/loongarch-builtins.c -+loongarch-d.o: $(srcdir)/config/loongarch/loongarch-d.c -+ $(COMPILE) $< -+ $(POSTCOMPILE) -+ -+comma=, -+MULTILIB_OPTIONS = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG)))) -+MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG)) -diff --git a/gcc/config/loongarch/x-native b/gcc/config/loongarch/x-native -new file mode 100644 -index 000000000..827d21f1a ---- /dev/null -+++ b/gcc/config/loongarch/x-native -@@ -0,0 +1,3 @@ -+driver-native.o : $(srcdir)/config/loongarch/driver-native.c \ -+ $(CONFIG_H) $(SYSTEM_H) -+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -diff --git a/gcc/configure.ac b/gcc/configure.ac -index a6eb3526b..d9677f0c5 100644 ---- 
a/gcc/configure.ac -+++ b/gcc/configure.ac -@@ -845,6 +845,9 @@ AC_ARG_ENABLE(fixed-point, - mips*-*-*) - enable_fixed_point=yes - ;; -+ loongarch*-*-*) -+ enable_fixed_point=yes -+ ;; - *) - AC_MSG_WARN([fixed-point is not supported for this target, ignored]) - enable_fixed_point=no -@@ -3323,6 +3326,17 @@ x: - tls_first_minor=16 - tls_as_opt='-32 --fatal-warnings' - ;; -+ loongarch*-*-*) -+ conftest_s=' -+ .section .tdata,"awT",@progbits -+x: .word 2 -+ .text -+ la.tls.gd $a0,x -+ bl __tls_get_addr' -+ tls_first_major=0 -+ tls_first_minor=0 -+ tls_as_opt='--fatal-warnings' -+ ;; - m68k-*-*) - conftest_s=' - .section .tdata,"awT",@progbits -@@ -4859,6 +4873,17 @@ pointers into PC-relative form.]) - [Requesting --with-nan= requires assembler support for -mnan=]) - fi - ;; -+ loongarch*-*-*) -+ gcc_GAS_CHECK_FEATURE([.dtprelword support], -+ gcc_cv_as_loongarch_dtprelword, [2,18,0],, -+ [.section .tdata,"awT",@progbits -+x: -+ .word 2 -+ .text -+ .dtprelword x+0x8000],, -+ [AC_DEFINE(HAVE_AS_DTPRELWORD, 1, -+ [Define if your assembler supports .dtprelword.])]) -+ ;; - s390*-*-*) - gcc_GAS_CHECK_FEATURE([.gnu_attribute support], - gcc_cv_as_s390_gnu_attribute, [2,18,0],, -@@ -4892,11 +4917,11 @@ pointers into PC-relative form.]) - ;; - esac - --# Mips and HP-UX need the GNU assembler. -+# Mips, LoongArch and HP-UX need the GNU assembler. - # Linux on IA64 might be able to use the Intel assembler. - - case "$target" in -- mips*-*-* | *-*-hpux* ) -+ mips*-*-* | loongarch*-*-* | *-*-hpux* ) - if test x$gas_flag = xyes \ - || test x"$host" != x"$build" \ - || test ! -x "$gcc_cv_as" \ -@@ -4916,9 +4941,9 @@ esac - # ??? Once 2.11 is released, probably need to add first known working - # version to the per-target configury. - case "$cpu_type" in -- aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | m32c | m68k \ -- | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu \ -- | tilegx | tilepro | visium | xstormy16 | xtensa) -+ aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | loongarch | m32c \ -+ | m68k | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc \ -+ | spu | tilegx | tilepro | visium | xstormy16 | xtensa) - insn="nop" - ;; - ia64 | s390) -diff --git a/gcc/targhooks.c b/gcc/targhooks.c -index fafcc6c51..9a6baaf4b 100644 ---- a/gcc/targhooks.c -+++ b/gcc/targhooks.c -@@ -1806,7 +1806,7 @@ default_print_patchable_function_entry (FILE *file, - - unsigned i; - for (i = 0; i < patch_area_size; ++i) -- fprintf (file, "\t%s\n", nop_templ); -+ output_asm_insn (nop_templ, NULL); - } - - bool -diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C -index 2e0ef685f..424979a60 100644 ---- a/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C -+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C -@@ -1,6 +1,6 @@ - // PR c++/49673: check that test_data goes into .rodata - // { dg-do compile { target c++11 } } --// { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } } -+// { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* loongarch*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } } - // { dg-final { scan-assembler "\\.rdata" { target mips*-*-* } } } - // { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! 
{ mips*-*-* riscv*-*-* } } } } } } - -diff --git a/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C b/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C -index 341735879..141182b0d 100644 ---- a/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C -+++ b/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C -@@ -7,7 +7,7 @@ - function. However, some platforms use all bits to encode a - function pointer. Such platforms use the lowest bit of the delta, - that is shifted left by one bit. */ --#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__ -+#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__ || defined __loongarch__ - #define ADJUST_PTRFN(func, virt) ((void (*)())(func)) - #define ADJUST_DELTA(delta, virt) (((delta) << 1) + !!(virt)) - #else -diff --git a/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C b/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C -index 9f4bbe43f..8f8f7017a 100644 ---- a/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C -+++ b/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C -@@ -25,7 +25,7 @@ int main() { - h<&B::j>(); // { dg-error "" } - g<(void (A::*)()) &A::f>(); // { dg-error "" "" { xfail c++11 } } - h<(int A::*) &A::i>(); // { dg-error "" "" { xfail c++11 } } -- g<(void (A::*)()) &B::f>(); // { dg-error "" "" { xfail { c++11 && { aarch64*-*-* arm*-*-* mips*-*-* } } } } -+ g<(void (A::*)()) &B::f>(); // { dg-error "" "" { xfail { c++11 && { aarch64*-*-* arm*-*-* mips*-*-* loongarch*-*-* } } } } - h<(int A::*) &B::j>(); // { dg-error "" } - g<(void (A::*)()) 0>(); // { dg-error "" "" { target { ! c++11 } } } - h<(int A::*) 0>(); // { dg-error "" "" { target { ! c++11 } } } -diff --git a/gcc/testsuite/gcc.dg/20020312-2.c b/gcc/testsuite/gcc.dg/20020312-2.c -index f5929e0b0..9bbbdf617 100644 ---- a/gcc/testsuite/gcc.dg/20020312-2.c -+++ b/gcc/testsuite/gcc.dg/20020312-2.c -@@ -35,6 +35,8 @@ extern void abort (void); - /* PIC register is r1, but is used even without -fpic. */ - #elif defined(__lm32__) - /* No pic register. */ -+#elif defined(__loongarch__) -+/* No pic register. */ - #elif defined(__M32R__) - /* No pic register. */ - #elif defined(__m68k__) -diff --git a/gcc/testsuite/gcc.dg/loop-8.c b/gcc/testsuite/gcc.dg/loop-8.c -index 842c0e773..95ec8d8d8 100644 ---- a/gcc/testsuite/gcc.dg/loop-8.c -+++ b/gcc/testsuite/gcc.dg/loop-8.c -@@ -1,6 +1,6 @@ - /* { dg-do compile } */ - /* { dg-options "-O1 -fdump-rtl-loop2_invariant" } */ --/* { dg-skip-if "unexpected IV" { "hppa*-*-* mips*-*-* visium-*-* powerpc*-*-* riscv*-*-*" } } */ -+/* { dg-skip-if "unexpected IV" { "hppa*-*-* mips*-*-* loongarch*-*-* visium-*-* powerpc*-*-* riscv*-*-*" } } */ - - void - f (int *a, int *b) -diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c -index eda711822..00f8fcb4f 100644 ---- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c -+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c -@@ -5,7 +5,7 @@ - - When the condition is true, we distribute "(int) (a + b)" as - "(int) a + (int) b", otherwise we keep the original. */ --/* { dg-do compile { target { { ! mips64 } && { ! spu-*-* } } } } */ -+/* { dg-do compile { target { { ! mips64 } && { ! spu-*-* } && { ! loongarch-*-* } } } } */ - /* { dg-options "-O -fno-tree-forwprop -fno-tree-ccp -fwrapv -fdump-tree-fre1-details" } */ - - /* From PR14844. 
*/ -diff --git a/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c b/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c -new file mode 100644 -index 000000000..fa24ed4dd ---- /dev/null -+++ b/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c -@@ -0,0 +1,159432 @@ -+/* { dg-do run } */ -+/* { dg-options "-mlsx -mlasx -w" } */ -+/* { dg-timeout 500 } */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define ASSERTEQ_64(line, ref, res) \ -+do{ \ -+ int fail = 0; \ -+ for(size_t i = 0; i < sizeof(res)/sizeof(res[0]); ++i){ \ -+ long *temp_ref = &ref[i], *temp_res = &res[i]; \ -+ if(abs(*temp_ref - *temp_res) > 0){ \ -+ printf(" error: %s at line %ld , expected "#ref"[%ld]:0x%lx, got: 0x%lx\n", \ -+ __FILE__, line, i, *temp_ref, *temp_res); \ -+ fail = 1; \ -+ } \ -+ } \ -+ if(fail == 1) abort(); \ -+}while(0) -+ -+#define ASSERTEQ_32(line, ref, res) \ -+do{ \ -+ int fail = 0; \ -+ for(size_t i = 0; i < sizeof(res)/sizeof(res[0]); ++i){ \ -+ int *temp_ref = &ref[i], *temp_res = &res[i]; \ -+ if(abs(*temp_ref - *temp_res) > 0){ \ -+ printf(" error: %s at line %ld , expected "#ref"[%ld]:0x%x, got: 0x%x\n", \ -+ __FILE__, line, i, *temp_ref, *temp_res); \ -+ fail = 1; \ -+ } \ -+ } \ -+ if(fail == 1) abort(); \ -+}while(0) -+ -+#define ASSERTEQ_int(line, ref, res) \ -+do{ \ -+ if (ref != res){ \ -+ printf(" error: %s at line %ld , expected %d, got %d\n", \ -+ __FILE__, line, ref, res); \ -+ } \ -+}while(0) -+ -+int main() { -+ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; -+ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; -+ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; -+ -+ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; -+ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; -+ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; -+ -+ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; -+ long int long_op0, long_op1, long_op2, lont_out, lont_result; -+ long int long_int_out, long_int_result; -+ unsigned int unsigned_int_out, unsigned_int_result; -+ unsigned long int unsigned_long_int_out, unsigned_long_int_result; -+ -+ *((int*)& __m128_op0[3]) = 0x0000c77c; -+ *((int*)& __m128_op0[2]) = 0x000047cd; -+ *((int*)& __m128_op0[1]) = 0x0000c0f1; -+ *((int*)& __m128_op0[0]) = 0x00006549; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_op1[3]) = 0x34ec5670cd4b5ec0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4f111e4b8e0d7291; -+ *((unsigned long*)& __m256i_op1[1]) = 0xeaa81f47dc3bdd09; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0e0d5fde5df99830; -+ *((unsigned long*)& __m256i_op2[3]) = 0x80c72fcd40fb3bc0; -+ *((unsigned long*)& __m256i_op2[2]) = 0x84bd087966d4ace0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x26aa68b274dc1322; -+ *((unsigned long*)& __m256i_op2[0]) = 0xe072db2bb9d4cd40; -+ *((unsigned long*)& __m256i_result[3]) = 0x044819410d87e69a; -+ *((unsigned long*)& __m256i_result[2]) = 0x21d3905ae3e93be0; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x5125883a30da0f20; -+ *((unsigned long*)& __m256i_result[0]) = 0x6d7b2d3ac2777aeb; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0xb9884ab93b0b80a0; -+ *((unsigned long*)& __m128i_result[0]) = 0xf11e970c68000000; -+ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1b71a083b3dec3cd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x373a13323b4cdbc1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0802010808400820; -+ *((unsigned long*)& __m128i_result[0]) = 0x8004080408100802; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000c77c000047cd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000c0f100006549; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa486083e6536d81d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x58bc43853ea123ed; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000a486083e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000058bc4385; -+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; -+ *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x34ec5670cd4b5ec0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4f111e4b8e0d7291; -+ *((unsigned long*)& __m256i_op0[1]) = 0xeaa81f47dc3bdd09; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0e0d5fde5df99830; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x67390c19e4b17547; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbacda0f96d2cec01; -+ *((unsigned long*)& __m256i_op1[1]) = 0xee20ad1adae2cc16; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5a2003c6a406fe53; -+ *((unsigned long*)& __m256i_op2[3]) = 0x80c72fcd40fb3bc0; -+ *((unsigned long*)& __m256i_op2[2]) = 0x84bd087966d4ace0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x26aa68b274dc1322; -+ *((unsigned long*)& __m256i_op2[0]) = 0xe072db2bb9d4cd40; -+ *((unsigned long*)& __m256i_result[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_result[2]) = 0x5464fbfc416b9f71; -+ *((unsigned long*)& __m256i_result[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_result[0]) = 0x0d8264202b8ea3f0; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa486c90f6537b8d7; -+ *((unsigned long*)& __m128i_op0[0]) = 0x58bcc2013ea1cc1e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffa486c90f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000058bcc201; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xf3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5464fbfc416b9f71; -+ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0d8264202b8ea3f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x80c72fcd40fb3bc0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x84bd087966d4ace0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x26aa68b274dc1322; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe072db2bb9d4cd40; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffcd42ffffecc0; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000475ffff4c51; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000740dffffad17; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003f4bffff7130; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffa486c90f; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000058bcc201; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffa486c90f; -+ *((unsigned long*)& __m128d_result[0]) = 0x1f52d710bf295626; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x81f7f2599f0509c2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x51136d3c78388916; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffc0fcffffcf83; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000288a00003c1c; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256d_op0[3]) = 0x053531f7c6334908; -+ *((unsigned long*)& __m256d_op0[2]) = 0x8e41dcbff87e7900; -+ *((unsigned long*)& __m256d_op0[1]) = 0x12eb8332e3e15093; -+ *((unsigned long*)& __m256d_op0[0]) = 0x9a7491f9e016ccd4; -+ *((unsigned long*)& __m256d_op1[3]) = 0x345947dcd192b5c4; -+ *((unsigned long*)& __m256d_op1[2]) = 0x182100c72280e687; -+ *((unsigned long*)& __m256d_op1[1]) = 0x4a1c80bb8e892e00; -+ *((unsigned long*)& __m256d_op1[0]) = 0x063ecfbd58abc4b7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x34598d0fd19314cb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1820939b2280fa86; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4a1c269b8e892a3a; -+ *((unsigned long*)& __m256i_op0[0]) = 0x063f2bb758abc664; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffc0fcffffcf83; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000288a00003c1c; -+ *((unsigned long*)& __m256i_result[3]) = 0x3459730f2f6d1435; -+ *((unsigned long*)& __m256i_result[2]) = 0x19212d61237f2b03; -+ *((unsigned long*)& __m256i_result[1]) = 0x4a1c266572772a3a; -+ *((unsigned long*)& __m256i_result[0]) = 0x063f032d58557648; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3133c6409eecf8b0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xddf50db3c617a115; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa432ea5a0913dc8e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x29d403af367b4545; -+ *((unsigned long*)& __m256i_op1[3]) = 0x38a966b31be83ee9; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5f6108dc25b8e028; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf41a56e8a20878d7; -+ *((unsigned long*)& __m256i_op1[0]) = 0x683b8b67e20c8ee5; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x81f7f2599f0509c2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x51136d3c78388916; -+ *((unsigned long*)& __m256i_op1[3]) = 0x044819410d87e69a; -+ *((unsigned long*)& __m256i_op1[2]) = 0x21d3905ae3e93be0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x5125883a30da0f20; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6d7b2d3ac2777aeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x000019410000e69a; -+ *((unsigned long*)& __m256i_result[2]) = 0xf259905a09c23be0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000883a00000f20; -+ *((unsigned long*)& __m256i_result[0]) = 0x6d3c2d3a89167aeb; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xa486c90f; -+ *((int*)& __m128_op0[2]) = 0x157ca12e; -+ *((int*)& 
__m128_op0[1]) = 0x58bcc201; -+ *((int*)& __m128_op0[0]) = 0x2e635d65; -+ *((int*)& __m128_op1[3]) = 0x6d564875; -+ *((int*)& __m128_op1[2]) = 0xf8760005; -+ *((int*)& __m128_op1[1]) = 0x8dc5a4d1; -+ *((int*)& __m128_op1[0]) = 0x79ffa22f; -+ *((int*)& __m128_op2[3]) = 0xffffffff; -+ *((int*)& __m128_op2[2]) = 0xd2436487; -+ *((int*)& __m128_op2[1]) = 0x0fa96b88; -+ *((int*)& __m128_op2[0]) = 0x5f94ab13; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xd24271c4; -+ *((int*)& __m128_result[1]) = 0x2711bad1; -+ *((int*)& __m128_result[0]) = 0xe8e309ed; -+ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; -+ *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf259905a09c23be0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3a89167aeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000090100008492; -+ *((unsigned long*)& __m256i_result[2]) = 0xf000104808420300; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000e20; -+ *((unsigned long*)& __m256i_result[0]) = 0x04082d108006284b; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x04481940fbb7e6bf; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf2781966e6991966; -+ *((unsigned long*)& __m256i_op0[1]) = 0x51258839aeda77c6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xcf25f0e00f1ff0e0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0501030100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001030100000301; -+ *((unsigned long*)& __m256i_result[1]) = 0x0102000200000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0002000004030000; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffd24271c4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2711bad1e8e309ed; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0020002000200020; -+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x38a966b31be83ee9; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5f6108dc25b8e028; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf41a56e8a20878d7; -+ *((unsigned long*)& __m256i_op0[0]) = 0x683b8b67e20c8ee5; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffcd42ffffecc0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000475ffff4c51; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000740dffffad17; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f4bffff7130; -+ *((unsigned long*)& __m256i_result[3]) = 0x38a966b31be83ee9; -+ *((unsigned long*)& __m256i_result[2]) = 0x5f6108dc25b80001; -+ *((unsigned long*)& __m256i_result[1]) = 0xf41a56e8a20878d7; -+ *((unsigned long*)& __m256i_result[0]) = 0x683b8b67e20c0001; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000001b3c4c0a5c; -+ *((unsigned long*)& __m256i_result[3]) = 0x3c4c0a5c3c4c0a5c; -+ *((unsigned long*)& __m256i_result[2]) = 0x3c4c0a5c3c4c0a5c; -+ *((unsigned long*)& __m256i_result[1]) = 0x3c4c0a5c3c4c0a5c; -+ *((unsigned long*)& __m256i_result[0]) = 0x3c4c0a5c3c4c0a5c; -+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffa486c90f; -+ *((unsigned long*)& __m128i_op2[0]) = 0x1f52d710bf295626; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0501030102141923; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffd5020738b43ddb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x010200023b8e4174; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff4ff4e11410b40; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01fa022a01a401e5; -+ *((unsigned long*)& __m256i_op1[2]) = 0x030d03aa0079029b; -+ *((unsigned long*)& __m256i_op1[1]) = 0x024c01f901950261; -+ *((unsigned long*)& __m256i_op1[0]) = 0x008102c2008a029f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101070102041903; -+ *((unsigned long*)& __m256i_result[2]) = 0xdfd506073ab435db; -+ *((unsigned long*)& __m256i_result[1]) = 0x110202023bae4176; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff6ff4a15418b40; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0501030102141923; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffd5020738b43ddb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x010200023b8e4174; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff4ff4e11410b40; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; -+ *((unsigned long*)& __m256i_op1[2]) = 
0xf259905a09c23be0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3a89167aeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000501e99b; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000109973de7; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001020f22; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001890b7a39; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; -+ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000501e99b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000109973de7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001020f22; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001890b7a39; -+ *((unsigned long*)& __m256i_result[3]) = 0x1b974ebaf6d64d4e; -+ *((unsigned long*)& __m256i_result[2]) = 0x62e0429c1b48fed1; -+ *((unsigned long*)& __m256i_result[1]) = 0x18b985adf63f548c; -+ *((unsigned long*)& __m256i_result[0]) = 0x032c796ecbdecc3b; -+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x8a228acac14e440a; -+ *((unsigned long*)& __m128d_op1[0]) = 0xc77c47cdc0f16549; -+ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffd24271c4; -+ *((unsigned long*)& __m128d_op2[0]) = 0x2711bad1e8e309ed; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffd24271c4; -+ *((unsigned long*)& __m128d_result[0]) = 0x2711bad1e8e309ed; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x38a966b31be83ee9; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5f6108dc25b80001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf41a56e8a20878d7; -+ *((unsigned long*)& __m256i_op0[0]) = 0x683b8b67e20c0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000501e99b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000109973de7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001020f22; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001890b7a39; -+ *((unsigned long*)& __m256i_result[3]) = 0x38a966b301f41ffd; -+ *((unsigned long*)& __m256i_result[2]) = 0x5f6108ee13ff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf41a56e8d10201f6; -+ *((unsigned long*)& __m256i_result[0]) = 0x683b8b34f1020001; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x38a966b301f41ffd; -+ *((unsigned long*)& __m256d_op0[2]) = 0x5f6108ee13ff0000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xf41a56e8d10201f6; -+ *((unsigned long*)& __m256d_op0[0]) = 0x683b8b34f1020001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x35); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; -+ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf259905a0c126604; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3aa1c82947; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000f647000007d6; -+ *((unsigned long*)& __m256i_result[2]) = 0x031b358c021ee663; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000faaf0000f9f8; -+ *((unsigned long*)& __m256i_result[0]) = 0x02b4fdadfa9704df; -+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000015d050192cb; -+ *((unsigned long*)& __m256d_op0[2]) = 0x028e509508b16ee9; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000033ff01020e23; -+ *((unsigned long*)& __m256d_op0[0]) = 0x151196b58fd1114d; -+ *((unsigned long*)& __m256d_op1[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256d_op1[2]) = 0xc5c085372cfabfba; -+ *((unsigned long*)& __m256d_op1[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0658f2dc0eb21e3c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000019410000e69a; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf259905a0c126604; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000883a00000f20; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6d3c2d3aa1c82947; -+ *((unsigned long*)& __m256i_result[3]) = 0x000019410000e6aa; -+ *((unsigned long*)& __m256i_result[2]) = 0xf259905a0c126614; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000883a00000f30; -+ *((unsigned long*)& __m256i_result[0]) = 0x6d3c2d3aa1c82957; -+ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x0001ffaa; -+ *((int*)& __m256_op1[6]) = 0x0000040e; -+ *((int*)& __m256_op1[5]) = 0x00007168; -+ *((int*)& __m256_op1[4]) = 0x00007bb6; -+ *((int*)& __m256_op1[3]) = 0x0001ffe8; -+ *((int*)& __m256_op1[2]) = 0x0001fe9c; -+ *((int*)& __m256_op1[1]) = 0x00002282; -+ *((int*)& __m256_op1[0]) = 0x00001680; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff60090958; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0fa96b88d9944d42; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001802041b0013; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; -+ *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0d41c9a7bdd239a7; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0b025d0ef8fdf987; -+ *((unsigned long*)& __m256i_op1[1]) = 0x002944f92da5a708; -+ *((unsigned long*)& __m256i_op1[0]) = 0x038cf4ea999922ef; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff0000ffff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xff000000ffffff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffff00ff; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000015d050192cb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x028e509508b16ee9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000033ff01020e23; 
-+ *((unsigned long*)& __m256i_op0[0]) = 0x151196b58fd1114d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff0000ffff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff000000ffffff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffaff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffd7200fffff74f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000702f; -+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xc0c00000c0c00000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xc0c00c01c2cd0009; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0003ff540000081c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0003ffd00003fd38; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001ffaa0000040e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000716800007bb6; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001ffe80001fe9c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000228200001680; -+ *((unsigned long*)& __m256i_op2[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_op2[2]) = 0xc5c085372cfabfba; -+ *((unsigned long*)& __m256i_op2[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0658f2dc0eb21e3c; -+ *((unsigned long*)& __m256i_result[3]) = 0x002e4db200000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000315ac0000d658; -+ *((unsigned long*)& __m256i_result[1]) = 0x00735278007cf94c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0003ed8800031b38; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0001ffff0001ffff; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000015d050192cb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x028e509508b16ee9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000033ff01020e23; -+ *((unsigned long*)& __m256i_op0[0]) = 0x151196b58fd1114d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001ffaa0000040e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000716800007bb6; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001ffe80001fe9c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000228200001680; -+ *((unsigned long*)& __m256i_result[3]) = 0x000100ab000500a0; -+ *((unsigned long*)& __m256i_result[2]) = 0x000200b800080124; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001011b000200aa; -+ *((unsigned long*)& __m256i_result[0]) = 0x00150118008f0091; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000019410000e69a; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf259905a0c126604; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000883a00000f20; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6d3c2d3aa1c82947; -+ *((unsigned long*)& __m256i_op1[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc5c085372cfabfba; -+ *((unsigned long*)& __m256i_op1[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0658f2dc0eb21e3c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000b6b60001979a; -+ *((unsigned long*)& __m256i_result[2]) = 0x00011591000125be; -+ *((unsigned long*)& __m256i_result[1]) = 0x000093950000a915; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001201600004783; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; -+ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; -+ *((int*)& __m256_result[7]) = 0x4e5cba76; -+ *((int*)& __m256_result[6]) = 0xcdbaaa78; -+ *((int*)& __m256_result[5]) = 0xce68fdeb; -+ *((int*)& __m256_result[4]) = 0x4e33eaff; -+ *((int*)& __m256_result[3]) = 0x4e45cc2d; -+ *((int*)& __m256_result[2]) = 0xcda41b30; -+ *((int*)& __m256_result[1]) = 0x4ccb1e5c; -+ *((int*)& __m256_result[0]) = 0x4d6b21e4; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00001802041b0013; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc0c00000c0c00000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc0c00c01c2cd0009; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0e2d5626ff75cdbc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5db4b156e2002a78; -+ *((unsigned long*)& __m256i_op0[1]) = 0xeeffbeb03ba3e6b0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0c16e25eb28d27ea; -+ *((unsigned long*)& __m256d_result[3]) = 0x41ac5aac4c000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xc161464880000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xc1b1004150000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x41cdd1f358000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00001802041b0013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000007f7f02; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x1828f0e09bad7249; -+ *((unsigned long*)& __m256d_op0[2]) = 0x07ffc1b723953cec; -+ *((unsigned long*)& __m256d_op0[1]) = 0x61f2e9b333aab104; -+ *((unsigned long*)& __m256d_op0[0]) = 0x6bf742aa0d7856a0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4e5cba76cdbaaa78; -+ *((unsigned long*)& __m256i_op0[2]) = 0xce68fdeb4e33eaff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4e45cc2dcda41b30; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4ccb1e5c4d6b21e4; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x44bb2cd3a35c2fd0; -+ *((unsigned long*)& __m256i_result[0]) = 0xca355ba46a95e31c; -+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x002e4db200000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000315ac0000d658; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00735278007cf94c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0003ed8800031b38; -+ *((unsigned long*)& __m256i_result[3]) = 0xffd1b24e00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffcea54ffff29a8; -+ *((unsigned long*)& __m256i_result[1]) = 0xff8cad88ff8306b4; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffc1278fffce4c8; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffd1b24e00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcea54ffff29a8; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff8cad88ff8306b4; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffc1278fffce4c8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0e2d5626ff75cdbc; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5db4b156e2002a78; -+ *((unsigned long*)& __m256i_op1[1]) = 0xeeffbeb03ba3e6b0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0c16e25eb28d27ea; -+ *((unsigned long*)& __m256i_result[3]) = 0xf96d674800000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x44a4330e2c7116c0; -+ *((unsigned long*)& __m256i_result[1]) = 0x14187a7822b653c0; -+ *((unsigned long*)& __m256i_result[0]) = 0xfbe0b866962b96d0; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242071db; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa578; -+ *((unsigned long*)& __m128i_result[1]) = 0xe0dee7779210b8ed; -+ *((unsigned long*)& __m128i_result[0]) = 0xf463dbabebb5d2bc; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x44bb2cd3a35c2fd0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xca355ba46a95e31c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000100ab000500a0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000200b800080124; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001011b000200aa; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00150118008f0091; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f057f0b7f5b007f; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff0000ffff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff000000ffffff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffffff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01fa022a01a401e5; -+ *((unsigned long*)& __m256i_op1[2]) = 0x030d03aa0079029b; -+ *((unsigned long*)& __m256i_op1[1]) = 0x024c01f901950261; -+ *((unsigned long*)& __m256i_op1[0]) = 0x008102c2008a029f; -+ *((unsigned long*)& __m256i_op2[3]) = 
0x002e4db200000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000315ac0000d658; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00735278007cf94c; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0003ed8800031b38; -+ *((unsigned long*)& __m256i_result[3]) = 0x01a72334ffff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xff4f6838ff937648; -+ *((unsigned long*)& __m256i_result[1]) = 0x00a2afb7fff00ecb; -+ *((unsigned long*)& __m256i_result[0]) = 0xffce110f004658c7; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001802041b0014; -+ __m128i_out = __lsx_vsub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffd1b24e00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcea54ffff29a8; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff8cad88ff8306b4; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffc1278fffce4c8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0802010000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0806030008080001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0801010108010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0806000008060302; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x44bb2cd3a35c2fd0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xca355ba46a95e31c; -+ *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_result[1]) = 0x61d849f0c0794ced; -+ *((unsigned long*)& __m256i_result[0]) = 0xe75278c187b20039; -+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf96d674800000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x44a4330e2c7116c0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x14187a7822b653c0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfbe0b866962b96d0; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffd1b24e00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffcea54ffff29a8; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff8cad88ff8306b4; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffc1278fffce4c8; -+ *((unsigned long*)& __m256i_result[3]) = 0xebfd15f000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x01700498ff8f1600; -+ *((unsigned long*)& __m256i_result[1]) = 0xf520c7c024221300; -+ *((unsigned long*)& __m256i_result[0]) = 0x00802fd0ff540a80; -+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xebfd15f000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01700498ff8f1600; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf520c7c024221300; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00802fd0ff540a80; -+ *((unsigned long*)& __m256i_op1[3]) = 0xebfd15f000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01700498ff8f1600; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0xf520c7c024221300; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00802fd0ff540a80; -+ *((unsigned long*)& __m256i_op2[3]) = 0xf96d674800000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x44a4330e2c7116c0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x14187a7822b653c0; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfbe0b866962b96d0; -+ *((unsigned long*)& __m256i_result[3]) = 0xebfd15f000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x015c6a7facc39600; -+ *((unsigned long*)& __m256i_result[1]) = 0xfa070a51cbd95300; -+ *((unsigned long*)& __m256i_result[0]) = 0x00c7463075439280; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x1b976395; -+ *((int*)& __m256_op0[6]) = 0x2fc4c101; -+ *((int*)& __m256_op0[5]) = 0xe37affb4; -+ *((int*)& __m256_op0[4]) = 0x2fc05f69; -+ *((int*)& __m256_op0[3]) = 0x18b988e6; -+ *((int*)& __m256_op0[2]) = 0x4facb558; -+ *((int*)& __m256_op0[1]) = 0xe5fb66c8; -+ *((int*)& __m256_op0[0]) = 0x1da8e5bb; -+ *((int*)& __m256_op1[7]) = 0x01a72334; -+ *((int*)& __m256_op1[6]) = 0xffff00ff; -+ *((int*)& __m256_op1[5]) = 0xff4f6838; -+ *((int*)& __m256_op1[4]) = 0xff937648; -+ *((int*)& __m256_op1[3]) = 0x00a2afb7; -+ *((int*)& __m256_op1[2]) = 0xfff00ecb; -+ *((int*)& __m256_op1[1]) = 0xffce110f; -+ *((int*)& __m256_op1[0]) = 0x004658c7; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f057f0b7f5b007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7fff7fff7fff00; -+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf96d674800000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x44a4330e2c7116c0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x14187a7822b653c0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfbe0b866962b96d0; -+ *((unsigned long*)& __m256i_result[3]) = 0xf90c0c0c00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0ca40c0c0c0c0cc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0c0c0c0c0cb60cc0; -+ *((unsigned long*)& __m256i_result[0]) = 0xfbe0b80c960c96d0; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xf90c0c0c00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0ca40c0c0c0c0cc0; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0c0c0c0c0cb60cc0; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfbe0b80c960c96d0; -+ *((unsigned long*)& __m256d_op1[3]) = 
0x1b9763952fc4c101; -+ *((unsigned long*)& __m256d_op1[2]) = 0xe37affb42fc05f69; -+ *((unsigned long*)& __m256d_op1[1]) = 0x18b988e64facb558; -+ *((unsigned long*)& __m256d_op1[0]) = 0xe5fb66c81da8e5bb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f057f0b7f5b007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000007f007f5; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1b9763952fc4c101; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe37affb42fc05f69; -+ *((unsigned long*)& __m256i_op1[1]) = 0x18b988e64facb558; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe5fb66c81da8e5bb; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xe37affb42fc05f69; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x65fb66c81da8e5ba; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x61d849f0c0794ced; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe75278c187b20039; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf90c0c0c00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0ca40c0c0c0c0cc0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0c0c0c0c0cb60cc0; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfbe0b80c960c96d0; -+ *((unsigned long*)& __m256i_result[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_result[2]) = 0x146014141414146e; -+ *((unsigned long*)& __m256i_result[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_result[0]) = 0xf19998668e5f4b84; -+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000007f007f5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x002e4db200000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000315ac0000d658; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x00735278007cf94c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003ed8800031b38; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x146014141414146e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf19998668e5f4b84; -+ long_op1 = 0x0000007942652524; -+ *((unsigned long*)& __m256i_result[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000007942652524; -+ *((unsigned long*)& __m256i_result[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_result[0]) = 0xf19998668e5f4b84; -+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00001802041b0014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003004; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256i_op1[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe3aebaf4df958004; -+ *((unsigned long*)& __m256i_result[3]) = 0xc58a0a0a07070706; -+ *((unsigned long*)& __m256i_result[2]) = 0x006b60e4180b0023; -+ *((unsigned long*)& __m256i_result[1]) = 0x1b39153f334b966a; -+ *((unsigned long*)& __m256i_result[0]) = 0xf1d75d79efcac002; -+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f02; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00003f803f800100; -+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xe37affb42fc05f69; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x65fb66c81da8e5ba; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256d_op2[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256d_op2[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256d_op2[0]) = 0xe3aebaf4df958004; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256d_result[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256d_result[0]) = 0xe3aebaf4df958004; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0014; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000c01020d8009; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x006be0e4180b0024; -+ *((unsigned long*)& __m256i_result[1]) = 0x1b39153f334b166b; -+ *((unsigned long*)& __m256i_result[0]) = 0xf1d7dd7aefcac002; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f800000976801fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x837c1ae57f8012ed; -+ *((unsigned long*)& __m128i_result[1]) = 0x976801fd6897fe02; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8012ec807fed13; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f801fa06451ef11; -+ *((unsigned long*)& __m128i_op0[0]) = 0x68bcf93435ed25ed; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffb64c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000003900; -+ *((unsigned long*)& __m128i_result[0]) = 0x68bcf93435ed25ed; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003900; -+ *((unsigned long*)& __m128i_op0[0]) = 0x68bcf93435ed25ed; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00003f803f800100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000c01020d8009; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000003004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000c01020d5005; -+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f801fa06451ef11; -+ *((unsigned long*)& __m128i_op1[0]) = 0x68bcf93435ed25ed; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fa022a01a401e5; -+ *((unsigned long*)& __m256i_op0[2]) = 0x030d03aa0079029b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x024c01f901950261; -+ *((unsigned long*)& __m256i_op0[0]) = 0x008102c2008a029f; -+ *((unsigned long*)& __m256i_result[3]) = 0x54000000ca000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x5400000036000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf2000000c2000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x840000003e000000; -+ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000400000007004; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xc58a0a0a; -+ *((int*)& __m256_op0[6]) = 0x07070706; -+ *((int*)& __m256_op0[5]) = 0x006b60e4; -+ *((int*)& __m256_op0[4]) = 0x180b0023; -+ *((int*)& __m256_op0[3]) = 0x1b39153f; -+ *((int*)& __m256_op0[2]) = 0x334b966a; -+ *((int*)& __m256_op0[1]) = 0xf1d75d79; -+ *((int*)& __m256_op0[0]) = 0xefcac002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00003004; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xc3080000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0xc58a0a0a07070706; -+ *((unsigned long*)& __m256i_op0[2]) = 0x006b60e4180b0023; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1b39153f334b966a; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf1d75d79efcac002; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000400000007004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 0x00a300a300a300a3; -+ *((unsigned long*)& __m128i_result[0]) = 0x00a300a300a300a3; -+ __m128i_out = __lsx_vldi(1187); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100020001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fffffffffffe; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0x80000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00a300a3; -+ *((int*)& __m128_op1[2]) = 0x00a300a3; -+ *((int*)& __m128_op1[1]) = 0x00a300a3; -+ *((int*)& __m128_op1[0]) = 0x00a300a3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100020001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256i_op1[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe3aebaf4df958004; -+ *((unsigned long*)& __m256i_result[3]) = 0xc5890a0a07070707; -+ *((unsigned long*)& __m256i_result[2]) = 0x006be0e4180b8024; -+ *((unsigned long*)& __m256i_result[1]) = 0x1b399540334c966c; -+ *((unsigned long*)& __m256i_result[0]) = 0x71d7dd7aefcac001; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc5890a0a07070707; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x006be0e4180b8024; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1b399540334c966c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x71d7dd7aefcac001; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; -+ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x36722a7e66972cd6; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff00000000; -+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x2f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00001802; -+ *((int*)& __m128_op0[0]) = 0x041b0013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00001802; -+ *((int*)& __m128_op0[0]) = 0x041b0013; -+ *((int*)& __m128_op1[3]) = 0xff800000; -+ *((int*)& __m128_op1[2]) = 0xff800000; -+ *((int*)& __m128_op1[1]) = 0xff800000; -+ *((int*)& __m128_op1[0]) = 0xc3080000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x8b1414140e0e0e0e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x36722a7e66972cd6; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc58a0a0a07070706; -+ *((unsigned long*)& __m256i_op1[2]) = 0x006b60e4180b0023; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1b39153f334b966a; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf1d75d79efcac002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x006b60e40e0e0e0e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x36722a7e66972cd6; -+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0edf8d7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8bc70f; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe0edf8d7; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8bc70f; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe06df8d7; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffbe8b470f; -+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x8b141414; -+ *((int*)& __m256_op0[4]) = 0x0e0e0e0e; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x36722a7e; -+ *((int*)& __m256_op0[0]) = 0x66972cd6; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff800000c3080000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff81ffffc3080000; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0xbf800000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0xcf000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x00000045eef14fe8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffc00; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000020000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000020000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000020000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000003ffffffff; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xbf80000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1040400000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0961000100000001; -+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x10404000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x09610001; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff0001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003a099512; -+ *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313763f5; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe032c738adcc6bbf; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000ffff00010000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0001000100020001; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000fffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0001; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000003a099512; -+ *((unsigned long*)& __m256i_result[1]) = 0x280ac9da313763f5; -+ *((unsigned long*)& __m256i_result[0]) = 0xe032c738adcc6bbf; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100020001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000010000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000095120000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc9da000063f50000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc7387fff6bbfffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0xffffffffffc81aca; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003a0a9512; -+ *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313863f4; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe032c739adcc6bbd; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100020001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffdffffffc81aca; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff3a0b9512; -+ *((unsigned long*)& __m256i_result[1]) = 0x280bc9db313a63f5; -+ *((unsigned long*)& __m256i_result[0]) = 0xe032c738adcb6bbb; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc81aca; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003a0a9512; -+ *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313863f4; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe032c739adcc6bbd; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x006b58e20e1e0e0f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3672227c66a72cd7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000003594; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000082fb80e; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000c7e8; -+ *((unsigned long*)& __m256i_result[0]) = 0x1ad6119c12def7bb; -+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x074132a240000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000010000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000095120000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc9da000063f50000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc7387fff6bbfffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xcf00000000000000; -+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffff000000010000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000095120000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xc9da000063f50000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xc7387fff6bbfffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00010000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x4001000100020000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000010000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000095120000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc9da000063f50000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc7387fff6bbfffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffdffffffc81aca; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff3a0b9512; -+ *((unsigned long*)& __m256i_op1[1]) = 0x280bc9db313a63f5; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe032c738adcb6bbb; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff800001010400; -+ *((unsigned long*)& __m256i_result[2]) = 0x000180009d120004; -+ *((unsigned long*)& __m256i_result[1]) = 0xc9da080067f50020; -+ *((unsigned long*)& __m256i_result[0]) = 0xc73c7fff6bbfffff; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df8d7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8b470f; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe06df0d7; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffbe8b470f; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x003f00000000003f; -+ *((unsigned long*)& __m128i_result[0]) = 0x003f000000000000; -+ __m128i_out = __lsx_vsat_hu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff800000c3080002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x639c3fffb5dffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb8c7800094400001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0063009c003f00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00b500df00ff00fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x00b800c700800000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0094004000000001; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000001000000010; -+ __m256i_out = __lasx_xvldi(-4080); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x639c3fffb5dffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb8c7800094400001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008000e000c000f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0009000100040001; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffe06df0d7; -+ *((unsigned long*)& __m256d_op1[1]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffbe8b470f; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffbe8b470f; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000800080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc9d8080067f50020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc70000020000c000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf000f00000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000f000f0000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf0f008000ff5000f; -+ *((unsigned long*)& __m256i_result[0]) = 0xf00000020000f000; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xc090c40000000000; -+ __m128d_out = __lsx_vflogb_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x92); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000800080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc9d8080067f50020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc70000020000c000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe06df0d7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8b470f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007ffffffff7ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x49d8080067f4f81f; -+ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8001000080000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000800080000728; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8001800080008000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x800080008000b8f1; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000ffff8000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff80008000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x800080008000b8f1; -+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00007ffffffff7ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x49d8080067f4f81f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007f00fffff7ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xd8490849f467f867; -+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xb7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x003f00000000003f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00007ffffffff7ff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x49d8080067f4f81f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7ffff7ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x080008000800f81f; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xa8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff80008000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x074132a240000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000ffff8000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x06f880008000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x800080008000b8f1; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xbf800000; -+ *((int*)& __m128_op0[2]) = 0x0000ffff; -+ *((int*)& __m128_op0[1]) = 0xcf000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x003f0000; -+ *((int*)& __m128_op1[2]) = 0x0000003f; -+ *((int*)& __m128_op1[1]) = 0x003f0000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df8d7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8b470f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe06df0d7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8b470f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffe7fffeffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffd84900000849; -+ *((unsigned long*)& __m256i_op0[0]) = 0x07fffc670800f086; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x311d9b643ec1fe01; -+ *((unsigned long*)& __m256i_op1[0]) = 0x344ade20fe00fd01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007f00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x311d73ad3ec2064a; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007f00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x311d73ad3ec2064a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000001fc000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000c475ceb40000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fb0819280000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ffffffffffff7ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df0d7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x988eb37e000fb33d; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffed95be394b1e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000ffff8000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x06f880008000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x800080008000b8f1; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x06f880008000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000010180000101; -+ *((unsigned long*)& __m256i_result[2]) = 0xfa08800080000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x800080008000480f; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fc000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000c475ceb40000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fb0819280000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x074132a240000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000003a0200; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x0000ffff; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 
0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x0000ffff; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00ff00ff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0040000000ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0040000000000000; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x36); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffe7fffeffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffd84900000849; -+ *((unsigned long*)& __m256i_op0[0]) = 0x07fffc670800f086; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3922d40000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000c85221c0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf7ebfab800000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f20; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000009f0; -+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x8000ffff8000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x06f880008000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000010180000101; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfa08800080000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x800080008000480f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001010000010100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101000000010100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000000010100; -+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f20; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000009f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000001000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000800080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc9d8080067f50020; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc70000020000c000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000001000100; -+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000800000000000; -+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0040000000ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0040000000000000; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0040000000ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0040000000000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvflogb_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0040000000ff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0040000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0020000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0020c00000000000; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000001000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffbf7f7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffe651bfff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000001000100; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f20; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000009f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00010101; -+ *((int*)& __m256_op1[6]) = 0x01010101; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00010100; -+ *((int*)& __m256_op1[1]) = 0x00010000; -+ *((int*)& __m256_op1[0]) = 0x01000100; -+ *((int*)& __m256_result[7]) = 0x00010101; -+ *((int*)& __m256_result[6]) = 0x01010101; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00010100; -+ *((int*)& __m256_result[1]) = 0x00010000; -+ *((int*)& __m256_result[0]) = 0x01000100; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00010101; -+ *((int*)& __m256_op0[6]) = 0x01010101; -+ *((int*)& __m256_op0[5]) = 
0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00010100; -+ *((int*)& __m256_op0[1]) = 0x00010000; -+ *((int*)& __m256_op0[0]) = 0x01000100; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xbf7f7fff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xe651bfff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0001010101010101; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000010100; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0001000001000100; -+ *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffbf7f7fff; -+ *((unsigned long*)& __m256d_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffe651bfff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffbf7f7fff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffe651bfff; -+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0cc08723ff900001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xcc9b89f2f6cef440; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0cc08723006fffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x3364760e09310bc0; -+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0cc08723ff900001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xcc9b89f2f6cef440; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x7); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_w(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000020202; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000002020202; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000020200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ 
*((int*)& __m256_op0[4]) = 0xbf7f7fff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xe651bfff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0xffffffff; -+ *((int*)& __m256_op2[2]) = 0xf328dfff; -+ *((int*)& __m256_op2[1]) = 0x6651bfff; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6651bfff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffe0001c3fe4001; -+ *((unsigned long*)& __m256i_result[0]) = 0x8ffe800100000000; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffbf7f7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe651bfff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffbf7f7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe651bfff; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0cc08723ff900001; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xcc9b89f2f6cef440; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xfffffff8; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xff800000; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xfffffff8; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x07070707; -+ *((int*)& __m256_op0[5]) = 0x01020400; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00020100; -+ *((int*)& __m256_op0[1]) = 0x07030200; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffff80; -+ *((int*)& __m256_op1[6]) = 0xfefeff00; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x01000400; -+ *((int*)& __m256_op1[3]) = 0xffffff80; -+ *((int*)& __m256_op1[2]) = 0xfeff0000; -+ *((int*)& __m256_op1[1]) = 0x02020080; -+ *((int*)& __m256_op1[0]) = 0x5c800400; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0xffffffff; -+ *((int*)& __m256_op2[2]) = 0xf328dfff; -+ *((int*)& __m256_op2[1]) = 0x6651bfff; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffff80; -+ *((int*)& __m256_result[6]) = 0x46867f79; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xf328dfff; -+ *((int*)& __m256_result[1]) = 0x6651bfff; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffff8046867f79; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff80000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffff8046867f79; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_result[0]) = 0x6651bfff80000000; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffff8046867f79; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000f3280000dfff; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffff8046867f79; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6651bfff80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00010001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00010001; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffbf7f7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe651bfff; -+ *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1ddd9d9d1d; -+ *((unsigned long*)& __m256i_result[1]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_result[0]) = 0x1d1d1d1d046fdd1d; -+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffbf7f00007fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffe651ffffbfff; -+ *((int*)& __m256_result[7]) = 0x4f800000; -+ *((int*)& __m256_result[6]) = 0x4f800000; -+ *((int*)& __m256_result[5]) = 0x4f7fffbf; -+ *((int*)& __m256_result[4]) = 0x46fffe00; -+ *((int*)& __m256_result[3]) = 0x4f800000; -+ *((int*)& __m256_result[2]) = 0x4f800000; -+ *((int*)& __m256_result[1]) = 0x4f7fffe6; -+ *((int*)& __m256_result[0]) = 0x4f7fffc0; -+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000007070707; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0102040000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000020100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0703020000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000707; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000070300000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000707; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010200000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000070300000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1d1d1d1ddd9d9d1d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1d1d1d1d1d1d1d1d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1d1d1d1d046fdd1d; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001d1d00001d1d; -+ *((unsigned long*)& __m256i_result[2]) = 0x00001d1d00007f79; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001d1d00001d1d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00001d1d0000dd1d; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe02fe02fee5fe22; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff49fe4200000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffff800000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff00fe81; -+ *((unsigned long*)& __m256i_result[0]) = 0xfe808d00eefffff8; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = 
__lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000007070707; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0102040000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0703020000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe02fe02fee5fe22; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff49fe4200000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0003f8040002f607; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002728b00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6651bfff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003f8040002f607; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffff328dfff; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ 
__m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202020201010000; -+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4f7fffbf0000fe00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000004f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4f7fffe64f7fffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe02fe02fee5fe22; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff49fe4200000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffbf0000fe000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fe020000fe22; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe6fe42ffc00000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000200000022; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0049004200000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000022; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xf328dfff; -+ *((int*)& __m256_op1[1]) = 0x6651bfff; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x0000ffff; -+ *((int*)& __m256_op2[6]) = 0x0000ff80; -+ *((int*)& __m256_op2[5]) = 0x00004686; -+ *((int*)& __m256_op2[4]) = 0x00007f79; -+ *((int*)& __m256_op2[3]) = 0x0000ffff; -+ *((int*)& __m256_op2[2]) = 0x0000ffff; -+ *((int*)& __m256_op2[1]) = 0x0000f328; -+ *((int*)& __m256_op2[0]) = 0x0000dfff; -+ *((int*)& __m256_result[7]) = 0x0000ffff; -+ *((int*)& __m256_result[6]) = 0x0000ff80; -+ *((int*)& __m256_result[5]) = 0x00004686; -+ *((int*)& __m256_result[4]) = 0x00007f79; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0x0000ffff; -+ *((int*)& __m256_result[1]) = 0x0000f328; -+ *((int*)& __m256_result[0]) = 0x0000dfff; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xfe02fe02; -+ *((int*)& __m256_op0[2]) = 0xfee5fe22; -+ *((int*)& __m256_op0[1]) = 0xff49fe42; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x0000ffff; -+ *((int*)& __m256_op1[6]) = 0x0000ff80; -+ *((int*)& __m256_op1[5]) = 0x00004686; -+ *((int*)& __m256_op1[4]) = 0x00007f79; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0x0000ffff; -+ *((int*)& __m256_op1[1]) = 0x0000f328; -+ *((int*)& __m256_op1[0]) = 0x0000dfff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000f3280000dfff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfe02fe02fee5fe22; -+ *((unsigned long*)& __m256d_op1[0]) = 0xff49fe4200000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x00020001ffb6ffe0; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0049004200000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xbf28b0686066be60; -+ __m256d_out = 
__lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202020201010000; -+ int_op1 = 0x00000045eef14fe8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000eef14fe8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202020201010000; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000eef14fe8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202020201010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000eef14fe8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202020201010000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfe02fe02fee5fe22; -+ *((unsigned long*)& __m256i_op2[0]) = 0xff49fe4200000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000eef14fe8; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffe928f1313c9cc; -+ *((unsigned long*)& __m256i_result[0]) = 0x4244020201010000; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe02fe02fee5fe22; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff49fe4200000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbf28b0686066be60; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xff49fe4200000000; -+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xbf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000007f00000022; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000022; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000; -+ __m256i_out = 
__lasx_xvsadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_wu(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00020001ffb6ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0049004200000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000f3280000dfff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffb7; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004c00000000; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbf28b0686066be60; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x40d74f979f99419f; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000007f00000022; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000003f00000011; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000460086; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f0079; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f30028; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000df00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbf28b0686066be60; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffff00ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff00ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffffff00ff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000460086; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f0079; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f30028; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000df00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000022; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000022; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000468600008078; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffff328ffffe021; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f00007f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3f28306860663e60; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x40d74f979f99419f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; -+ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xbf28b0686066be60; -+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000022; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ 
__m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_w(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000468600007f79; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000f3280000dfff; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ int_result = 0x000000000000ffff; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ 
__m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((int*)& __m128_result[3]) = 0x4b7f00ff; -+ *((int*)& __m128_result[2]) = 0x4b7f00ff; -+ *((int*)& __m128_result[1]) = 0x4b7f00ff; -+ *((int*)& __m128_result[0]) = 0x4b7f00ff; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007ffe81fdfe03; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x40d74f979f99419f; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff01fd7fff7fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vmskltz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffffffff; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xc9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00007ffe81fdfe03; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = 
__lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000ff80; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000ffff; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x60b53246; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x60b5054d; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffff0002; -+ __m128i_out = __lsx_vadd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x72); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffff0002; -+ *((unsigned long*)& __m128i_op2[1]) = 
0x54beed87bc3f2be1; -+ *((unsigned long*)& __m128i_op2[0]) = 0x8024d8f6a494afcb; -+ *((unsigned long*)& __m128i_result[1]) = 0xa8beed87bc3f2be1; -+ *((unsigned long*)& __m128i_result[0]) = 0x0024d8f6a494006a; -+ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x5641127843c0d41e; -+ *((unsigned long*)& __m128i_result[0]) = 0xfedb27095b6bff95; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5641127843c0d41e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfedb27095b6bff95; -+ *((unsigned long*)& __m128i_op1[1]) = 0xa8beed87bc3f2be1; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0024d8f6a494006a; -+ *((unsigned long*)& __m128i_result[1]) = 0xff7fffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xff7fffffffffffff; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff7fffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff7fffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffff7ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x64); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00001f41ffffbf00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001f41ffffbf00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00001f41ffffbf00; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000000; -+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x2b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; -+ *((unsigned long*)& __m128i_result[1]) = 0xa8beed87bc3f2bd3; -+ *((unsigned long*)& __m128i_result[0]) = 0x0024d8f6a494005c; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001a8beed86; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000010024d8f5; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x54beed87bc3f2be1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8024d8f6a494afcb; -+ *((unsigned long*)& __m128i_result[1]) = 0x54feed87bc3f2be1; -+ *((unsigned long*)& __m128i_result[0]) = 0x8064d8f6a494afcb; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x36); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x54feed87; -+ *((int*)& __m128_op0[2]) = 0xbc3f2be1; -+ *((int*)& __m128_op0[1]) = 0x8064d8f6; -+ *((int*)& __m128_op0[0]) = 0xa494afcb; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = 
__lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x54feed87bc3f2be1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8064d8f6a494afcb; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00001f41ffffbf00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000040000fff8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfedb27095b6bff95; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x56411278; -+ *((int*)& __m128_op0[2]) = 0x43c0d41e; -+ *((int*)& __m128_op0[1]) = 0x0124d8f6; -+ *((int*)& __m128_op0[0]) = 0xa494006b; -+ *((int*)& __m128_op1[3]) = 0x7f800000; -+ *((int*)& __m128_op1[2]) = 0xff800000; -+ *((int*)& __m128_op1[1]) = 0xff800000; -+ *((int*)& __m128_op1[0]) = 0xff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc2409edab019323f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x460f3b393ef4be3a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x460f3b393ef4be3a; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100000100010001; -+ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x56411278; -+ *((int*)& __m128_op0[2]) = 0x43c0d41e; -+ *((int*)& __m128_op0[1]) = 0x0124d8f6; -+ *((int*)& __m128_op0[0]) = 0xa494006b; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& 
__m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x80000000;
-+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x460f3b393ef4be3a;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xc2409eda;
-+ *((int*)& __m128_op1[2]) = 0xb019323f;
-+ *((int*)& __m128_op1[1]) = 0x460f3b39;
-+ *((int*)& __m128_op1[0]) = 0x3ef4be3a;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x460f3b39;
-+ *((int*)& __m128_result[0]) = 0x3ef4be3a;
-+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xc2409edab019323f;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x460f3b393ef4be3a;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0100000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00001f41ffffbf00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x010180068080fff9;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
-+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000300;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000303;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff01fd7fff7fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff7fff7fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff;
-+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x7a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000040000fff8;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000;
-+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x010180068080fff9;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
-+ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x7fff7fff;
-+ *((int*)& __m256_op0[4]) = 0x7fff7fff;
-+ *((int*)& __m256_op0[3]) = 0x7fff01fd;
-+ *((int*)& __m256_op0[2]) = 0x7fff7fff;
-+ *((int*)& __m256_op0[1]) = 0x00007fff;
-+ *((int*)& __m256_op0[0]) = 0x7fff7fff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfc2f3183ef7ffff7;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_w(__m256i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
-+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[0]) = 0xc5c5c5c5c5c5c5c5;
-+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
-+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1515151515151515;
-+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1515151515151515;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
-+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_b(__m256i_op0,7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000010000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00007ffe81fdfe03;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7ffe800000000000;
-+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
-+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff3cff3cff3cff3c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_op2[3]) = 0x00000000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x00000000;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x00007ffe81fdfe03;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x80007ffe81fdfe03;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fff;
-+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x7fff7fff;
-+ *((int*)& __m256_op0[4]) = 0x7fff7fff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x7fff7fff;
-+ *((int*)& __m256_op0[0]) = 0x7fff7fff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000001b3c4c0a5c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000010000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x2a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_b(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
-+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1000000000000000;
-+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x17);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x4f);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000011;
-+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0010000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0010000000000000;
-+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x33);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0feff00000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0feff00000000000;
-+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000001;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000001;
-+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000001;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vandi_b(__m128i_op0,0x36);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0feff00000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0feff00000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff;
-+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff1001100100000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff1001100100000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfcc4004400400000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0040400000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfcc4004400400000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0040400000000000;
-+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000001;
-+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x36);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_h(__m128i_op0,3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
-+ __m128i_out = __lsx_vfclass_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& 
__m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000010000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000010000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000010000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000010000000; -+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000a0000000a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000a0000000a; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffeb; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffeb; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000001; -+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; -+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 
0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; -+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x3d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff00ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff00ff00; -+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xbff0000000000000; -+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xdff8000000000000; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256d_op1[3]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xdff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff00ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff00ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x01010101fe01fe01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x01010101fe01fe01; -+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vilvh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000040100000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040100000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000040100000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040100000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0080200000802000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0080200000802000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = 
__lsx_vslti_wu(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xbff0000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0080200000802000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0080200000802000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0080200000802000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0080200000802000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x20); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x5d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1e18000000000000; -+ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x1e180000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x1e180000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x1e180000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x1e180000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00802000; -+ *((int*)& __m256_op1[6]) = 0x00802000; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x00802000; -+ *((int*)& __m256_op1[2]) = 0x00802000; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1e18000000000000; -+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xfe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xbff0800000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xbff0800000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x2f03988e2052463e; -+ *((unsigned long*)& __m256d_result[2]) = 0x2f03988e1409212e; -+ *((unsigned long*)& __m256d_result[1]) = 0x2f03988e2052463e; -+ *((unsigned long*)& __m256d_result[0]) = 0x2f03988e1409212e; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0200020002000200; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000397541c58; -+ *((unsigned long*)& __m256i_result[3]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_result[2]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_result[1]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_result[0]) = 0x97541c5897541c58; -+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0080200000802000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0080200000802000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00200020ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x1e0000001e000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00200020ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x1e0000001e000000; -+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe0ffe0ffe0ffe0; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe0ffe0ffe0ffe0; 
-+ *((unsigned long*)& __m256i_result[1]) = 0xffe0ffe0ffe0ffe0; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe0ffe0ffe0ffe0; -+ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0080200000802000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0080200000802000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00800080ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00800080ffffffff; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffe0ffe0ffe0ffe0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe0ffe0ffe0ffe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffe0ffe0ffe0ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe0ffe0ffe0ffe0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1e1800001e180000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1e18000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001e18; -+ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x70); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00800080ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00800080ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007fe268; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007fe268; -+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1e17ffffd0fc6772; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1e17ffffebf6ded2; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1e17ffffd0fc6772; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1e17ffffebf6ded2; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xe1e800002f03988d; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xe1e800002f03988d; -+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffff001f; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x007fe268; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffff001f; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x007fe268; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0xffff001f; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x007fe268; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0xffff001f; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x007fe268; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0xffff001f; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0xffff001f; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7a7cad6eca32ccc1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7a7cad6efe69abd1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7a7cad6eca32ccc1; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7a7cad6efe69abd1; -+ *((unsigned long*)& __m256i_result[3]) = 0xff86005300360034; -+ *((unsigned long*)& __m256i_result[2]) = 0xff86005300020055; -+ *((unsigned long*)& __m256i_result[1]) = 0xff86005300360034; -+ *((unsigned long*)& __m256i_result[0]) = 0xff86005300020055; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op0[2]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op0[1]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op0[0]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffc00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffc00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffc00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffc00000000; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x22); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000010000ffe1; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000101001e18; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000010000ffe1; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000101001e18; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff001f; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000000000ffe0; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000001e18; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff1f; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff1f; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffeff; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffdfe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffdfe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe1e800002f03988d; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe1e800002f03988d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff0f400001781cc4; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff0f400001781cc4; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; -+ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op0[2]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op0[1]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op0[0]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op1[3]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op1[2]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op1[1]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256d_op1[0]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op0[2]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op0[1]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op0[0]) = 0x97541c5897541c58; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffffff88; -+ *((unsigned long*)& __m256i_op0[2]) = 0x61e0000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffffff88; -+ *((unsigned long*)& __m256i_op0[0]) = 0x61e0000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01fe02; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01fe02; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; -+ __m128d_out = __lsx_vfrecip_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x7ff80000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7fffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff8000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffff7fffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffff8000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000003fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ff8010000000001; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0005252800052528; -+ *((unsigned long*)& __m128i_result[0]) = 0x0005252800052528; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_d(__m128i_op0,7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_result[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_result[0]) = 0x52527d7d52527d7d; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010101; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x3f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000ffe1; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000101001e18; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000ffe1; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000101001e18; -+ *((unsigned long*)& __m256i_op1[3]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[2]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[1]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[0]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000010000ffe1; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000101001e18; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000010000ffe1; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000101001e18; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000101001e18; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000101001e18; -+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op0[2]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op0[1]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op0[0]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffeffffff88; -+ *((unsigned long*)& __m256i_op1[2]) = 0x61e0000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffeffffff88; -+ *((unsigned long*)& __m256i_op1[0]) = 0x61e0000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010ffc80010ff52; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff1ffca0011ffcb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010ffc80010ff52; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1ffca0011ffcb; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffefb; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffefb; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x00000000000000fe; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x67eee33567eee435; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x67eee33567eee435; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00e0000000e00000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010ffc80010ff52; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff1ffca0011ffcb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010ffc80010ff52; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff1ffca0011ffcb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010bfc80010bf52; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff1bfca0011bfcb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010bfc80010bf52; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1bfca0011bfcb; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffff7fffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffff8000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000808081; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000808081; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000808081; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000808081; -+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op0[2]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op0[1]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op0[0]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[3]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[2]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[1]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_op1[0]) = 0x98111cca98111cca; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000399400003994; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00e0000000e00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000e0000000e0; -+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00e0000000e00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000e0000000e0; 
-+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x01010101; -+ *((int*)& __m256_op0[5]) = 0x55555501; -+ *((int*)& __m256_op0[4]) = 0xfefefeab; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x01010101; -+ *((int*)& __m256_op0[1]) = 0x55555501; -+ *((int*)& __m256_op0[0]) = 0xfefefeab; -+ *((int*)& __m256_op1[7]) = 0x00000105; -+ *((int*)& __m256_op1[6]) = 0xfffffefb; -+ *((int*)& __m256_op1[5]) = 0xffffff02; -+ *((int*)& __m256_op1[4]) = 0x000000fe; -+ *((int*)& __m256_op1[3]) = 0x00000105; -+ *((int*)& __m256_op1[2]) = 0xfffffefb; -+ *((int*)& __m256_op1[1]) = 0xffffff02; -+ *((int*)& __m256_op1[0]) = 0x000000fe; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fc00; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fc00; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x01010101; -+ *((int*)& __m256_op0[5]) = 0x55555501; -+ *((int*)& __m256_op0[4]) = 0xfefefeab; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x01010101; -+ *((int*)& __m256_op0[1]) = 0x55555501; -+ *((int*)& __m256_op0[0]) = 0xfefefeab; -+ *((int*)& __m256_op1[7]) = 0x0010bfc8; -+ *((int*)& __m256_op1[6]) = 0x0010bf52; -+ *((int*)& __m256_op1[5]) = 0xfff1bfca; -+ *((int*)& __m256_op1[4]) = 0x0011bfcb; -+ *((int*)& __m256_op1[3]) = 0x0010bfc8; -+ *((int*)& __m256_op1[2]) = 0x0010bf52; -+ *((int*)& __m256_op1[1]) = 0xfff1bfca; -+ *((int*)& __m256_op1[0]) = 0x0011bfcb; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x1414141414141415; -+ *((unsigned long*)& __m128i_result[0]) = 0x1414141414141415; -+ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010ffc80010ff52; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff1ffca0011ffcb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010ffc80010ff52; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff1ffca0011ffcb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff1ffca0011feca; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1ffca0011feca; -+ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010bfc80010bf52; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff1bfca0011bfcb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010bfc80010bf52; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff1bfca0011bfcb; -+ *((unsigned long*)& __m256i_result[3]) = 0xf5f5bfc8f5f5bff5; -+ *((unsigned long*)& __m256i_result[2]) = 0xf5f1bfcaf5f5bfcb; -+ *((unsigned long*)& __m256i_result[1]) = 0xf5f5bfc8f5f5bff5; -+ *((unsigned long*)& __m256i_result[0]) = 0xf5f1bfcaf5f5bfcb; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefb; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; -+ int_op1 = 0x0000000059815d00; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; -+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000399400003994; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000fff00000fff; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128d_op1[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000052527d7d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000052527d7d; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000fc00; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000fc00; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf5f5bfbaf5f5bfbe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf5f0bfb8f5d8bfe8; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf5f5bfbaf5f5bfbe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf5f0bfb8f5d8bfe8; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf5f5bfbaf5f5bfbe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf5f0bfb8f5d8bfe8; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf5f5bfbaf5f5bfbe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf5f0bfb8f5d8bfe8; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff5f5c; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x6c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_op2[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff5f5c; -+ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff5f5c; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000105fffffefb; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffff02000000fe; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000105fffffefb; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffff02000000fe; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000105fffffefb; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff02000000fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000105fffffefb; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff02000000fe; -+ *((unsigned long*)& __m256i_result[3]) = 0xf7ffffffffffff1f; -+ *((unsigned long*)& __m256i_result[2]) = 0xbffffffffffffeff; -+ *((unsigned long*)& __m256i_result[1]) = 0xf7ffffffffffff1f; -+ *((unsigned long*)& __m256i_result[0]) = 0xbffffffffffffeff; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf7ffffffffffff1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbffffffffffffeff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf7ffffffffffff1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbffffffffffffeff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff6fffefffe005b; -+ *((unsigned long*)& __m256i_result[2]) = 0xffbefffefffe005a; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff6fffefffe005b; -+ *((unsigned long*)& __m256i_result[0]) = 0xffbefffefffe005a; -+ __m256i_out = 
__lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000e0000000e0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000c400; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00217f19ffde80e6; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00037f94fffc806b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00217f19ffde80e6; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00037f94fffc806b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000fff00000fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ff0fff005f0f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ff0fff005f0f; -+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf7ffffffffffff1f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbffffffffffffeff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf7ffffffffffff1f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbffffffffffffeff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x7); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101000000000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffff5f5c; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffff5f5c; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffff5f5c; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffff5f5c; -+ *((int*)& __m256_op2[7]) = 0x0000000f; -+ *((int*)& __m256_op2[6]) = 0x0000000f; -+ *((int*)& __m256_op2[5]) = 0xff00ff0f; -+ *((int*)& __m256_op2[4]) = 0xff005f0f; -+ *((int*)& 
__m256_op2[3]) = 0x0000000f; -+ *((int*)& __m256_op2[2]) = 0x0000000f; -+ *((int*)& __m256_op2[1]) = 0xff00ff0f; -+ *((int*)& __m256_op2[0]) = 0xff005f0f; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffff5f5c; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffff5f5c; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffff5f5c; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffff5f5c; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000c400; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x001000100010c410; -+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001000100010c410; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fffffff; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; -+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xfe); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffebeb8; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffebeb8; -+ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffff5f5c; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffff605a; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffff5f5c; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffff605a; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffff5f5c; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffff605a; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffff5f5c; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffff605a; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256d_op0[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffebeb8; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffebeb8; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001000100010c410; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00007fff7fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x37); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff605a; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff5f5c; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff605a; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x2d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0x0060005a; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0x0060005a; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0x5f13ccf5; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0x5f13ccf5; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; -+ unsigned_long_int_result 
= 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); -+ *((int*)& __m256_op0[7]) = 0xfffffff8; -+ *((int*)& __m256_op0[6]) = 0xffffff08; -+ *((int*)& __m256_op0[5]) = 0x00ff00f8; -+ *((int*)& __m256_op0[4]) = 0x00ffcff8; -+ *((int*)& __m256_op0[3]) = 0xfffffff8; -+ *((int*)& __m256_op0[2]) = 0xffffff08; -+ *((int*)& __m256_op0[1]) = 0x00ff00f8; -+ *((int*)& __m256_op0[0]) = 0x00ffcff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; -+ __m256i_out = __lasx_xvfclass_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffff605a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffff605a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffff605a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffff605a; -+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000000000; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000000000000; -+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101008000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101008000000080; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_w(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x3a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op2[0]) = 0x001000100010c410; -+ *((unsigned 
long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x64); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; -+ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xc5c5c5c4; -+ *((int*)& __m256_op0[6]) = 0xc5c5c5c4; -+ *((int*)& __m256_op0[5]) = 0x45c5c5c5; -+ *((int*)& __m256_op0[4]) = 0x45c5c5c5; -+ *((int*)& __m256_op0[3]) = 0xc5c5c5c4; -+ *((int*)& __m256_op0[2]) = 0xc5c5c5c4; -+ *((int*)& __m256_op0[1]) = 0x45c5c5c5; -+ *((int*)& __m256_op0[0]) = 0x45c5c5c5; -+ *((int*)& __m256_result[7]) = 0xc5c5c800; -+ *((int*)& __m256_result[6]) = 0xc5c5c800; -+ *((int*)& __m256_result[5]) = 0x45c5c800; -+ *((int*)& __m256_result[4]) = 0x45c5c800; -+ *((int*)& __m256_result[3]) = 0xc5c5c800; -+ *((int*)& __m256_result[2]) = 0xc5c5c800; -+ *((int*)& __m256_result[1]) = 0x45c5c800; -+ *((int*)& __m256_result[0]) = 0x45c5c800; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x44); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x4370100000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x4370100000000000; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000008000000080; -+ *((unsigned long*)& __m256d_op1[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256d_op1[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256d_op1[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256d_op1[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256d_result[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256d_result[0]) = 0x45c5c5c545c5c5c5; -+ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_result[3]) = 0xc5c4c5c5c5c5c5c5; -+ *((unsigned long*)& __m256i_result[2]) = 0xc5c545c545c545c5; -+ *((unsigned long*)& __m256i_result[1]) = 0xc5c4c5c5c5c5c5c5; -+ *((unsigned long*)& __m256i_result[0]) = 0xc5c545c545c545c5; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x3d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; -+ __m128d_out = __lsx_vfrecip_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffff01ffffff08; -+ *((unsigned long*)& __m256i_op1[2]) = 0x43700f0100003008; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffff01ffffff08; -+ *((unsigned long*)& __m256i_op1[0]) = 0x43700f0100003008; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000f8; -+ *((unsigned long*)& __m256i_result[2]) = 0xbc8ff0ffffffcff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000f8; -+ *((unsigned long*)& __m256i_result[0]) = 0xbc8ff0ffffffcff8; -+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c4c5c5c5c5c5c5; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc5c545c545c545c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c4c5c5c5c5c5c5; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc5c545c545c545c5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000f8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbc8ff0ffffffcff8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000f8; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbc8ff0ffffffcff8; -+ *((unsigned long*)& __m256i_result[3]) = 0xfcfcfcfcfc040404; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fbfffffc; -+ *((unsigned long*)& __m256i_result[1]) = 0xfcfcfcfcfc040404; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fbfffffc; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000059815d00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000007942652524; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4265252400000000; -+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x000000ff; -+ *((int*)& __m256_op0[6]) = 0x000000f8; -+ *((int*)& __m256_op0[5]) = 0xbc8ff0ff; -+ *((int*)& __m256_op0[4]) = 0xffffcff8; -+ *((int*)& __m256_op0[3]) = 0x000000ff; -+ *((int*)& __m256_op0[2]) = 0x000000f8; -+ *((int*)& __m256_op0[1]) = 0xbc8ff0ff; -+ *((int*)& __m256_op0[0]) = 0xffffcff8; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff8ffffff08; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00f800ffcff8; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff8ffffff08; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00f800ffcff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256i_result[2]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256i_result[0]) = 0x0045b8ae81bce1d8; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x21); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& 
__m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4ee85545068f3133; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x870968c1f56bb3cd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x004e005500060031; -+ *((unsigned long*)& __m128i_result[0]) = 0xff870068fff5ffb3; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x42652524; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000003900000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004e005500060031; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff870068fff5ffb3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c5c5c5c5c5; -+ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c645c5c5c6; -+ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c5c5c5c5c5; -+ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c645c5c5c6; -+ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000003868686a20; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000900000000; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0027002a00030018; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f4300177f7a7f59; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0027002a00080018; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f4300177f7a7f59; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; -+ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; -+ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; -+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xb0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x47); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000003868686a20; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0045b8ae81bce1d8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00386a20b8aee1d8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00386a20b8aee1d8; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004e005500060031; 
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff870068fff5ffb3; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004e005500060031; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff870068fff5ffb3; -+ *((unsigned long*)& __m128i_result[1]) = 0x04e00060ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x04e00060ffffffff; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x454c2996; -+ *((int*)& __m128_op0[2]) = 0x0ffe354e; -+ *((int*)& __m128_op0[1]) = 0x9e063f80; -+ *((int*)& __m128_op0[0]) = 0x2742ba3e; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x42652524; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; -+ __m256i_out = __lasx_xvclz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x04e00060ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x04e00060ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x04e00060ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x04e00060ffffffff; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4ee85545ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x870968c1f56bb3cd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x870968c1f56bb3cd; -+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x870968c1f56bb3cd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001a; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000007fff800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = 
__lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000007fff800000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2e2b34ca59fa4c88; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2e2b34ca59fa4c88; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966; -+ *((unsigned long*)& __m128i_result[1]) = 0x3e2b34ca59fa4c88; -+ *((unsigned long*)& __m128i_result[0]) = 0x3b2c8aefd44be966; -+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001a00000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000900000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x04e00060ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x04e00060ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x007fffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x007fffffffffffff; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe01fe01; -+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ 
*((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3e2b34ca59fa4c88; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966; -+ *((unsigned long*)& __m128i_result[1]) = 0x0007658000115de0; -+ *((unsigned long*)& __m128i_result[0]) = 0x001a8960001d2cc0; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x4); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[2]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[1]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[0]) = 0xe9e9e9e9e9e9e9e9; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808000008080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080000080800000; -+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808000008080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080000080800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fe01fe01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[3]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[2]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[1]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[0]) = 0xe9e9e9e9e9e9e9e9; -+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xf7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x8080808000008080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080000080800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001010100010100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x2f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0007658000115de0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001a8960001d2cc0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4000400040004000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4000400040004000; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00007fffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00007fffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff8001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000017ffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000017ffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000017ffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000017ffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff8001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff0ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff0ffff0000; -+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0007658000115de0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001a8960001d2cc0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff; -+ __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfe07e5fe; -+ *((int*)& __m128_op0[2]) = 0xfefdddfe; -+ *((int*)& __m128_op0[1]) = 0x00020100; -+ *((int*)& __m128_op0[0]) = 0xfedd0c00; -+ *((int*)& __m128_result[3]) = 0x7fc00000; -+ *((int*)& __m128_result[2]) = 0x7fc00000; -+ *((int*)& __m128_result[1]) = 0x1e801ffc; -+ *((int*)& __m128_result[0]) = 0x7fc00000; -+ __m128_out = __lsx_vfsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xef0179a47c793879; -+ *((unsigned long*)& __m128d_op0[0]) = 0x9f9e7e3e9ea3ff41; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x1e801ffc7fc00000; -+ *((unsigned long*)& __m128d_result[1]) = 0xffc000007fc00000; -+ *((unsigned long*)& __m128d_result[0]) = 0x9e801ffc7fc00000; -+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff80fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xd52aaaaa555555ab; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff80fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xd52aaaaa555555ab; -+ *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab; -+ *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab; -+ *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab; -+ *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab; -+ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfe07e5fefefdddfe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00020100fedd0c00;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff02ff1bff02ff23;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffff02fff4;
-+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffc0008001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffffc0008001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffc0008001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffc0008001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffc0007fe9;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffc0007fe9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffc0007fe9;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffc0007fe9;
-+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00003fe00ffe3fe0;
-+ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab;
-+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0007658000115de0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x001a8960001d2cc0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffc000007fc00000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x9e801ffc7fc00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000ffff0000ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff00ff0000ff;
-+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff0ffff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff0ffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
-+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000017ffffffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000;
-+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xff02ff1bff02ff23;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffffff02fff4;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff02ff1bff02ff23;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00003fe00ffe3fe0;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff02ff1bff02ff23;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffff02fff4;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004;
-+ __m256i_out = __lasx_xvmini_w(__m256i_op0,4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010000;
-+ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
-+ int_op1 = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab;
-+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[3]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_result[2]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_result[1]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_result[0]) = 0x005500550055ffab;
-+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
-+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000080008000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000080008000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
-+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002;
-+ __m128i_out = __lsx_vmaxi_d(__m128i_op0,2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7f0000007f000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f0000007f000000;
-+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffff0ffff0000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffff0ffff0000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff02ff1bff02ff23;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffff02fff4;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff02ff1bff02ff23;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffff02fff4;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x7fc000007fc00000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x1e801ffc7fc00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7e44bde9b842ff23;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00011e80007edff8;
-+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1555156a1555156a;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslti_h(__m128i_op0,6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff020000fff4;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00001ee100000000;
-+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f0000007f000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f0000007f000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x1555156a1555156a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7e44bde9b842ff23;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00011e80007edff8;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001ffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001ffffff;
-+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xe);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1e801ffc7fc00000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00ed0008005e00a2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x007a007600150077;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0003000000010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0007007f03fe0000;
-+ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000;
-+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff020000fff4;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1f);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ed0008005e00a2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x007a007600150077;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00ed0008005e00a2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x007a007600150077;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vclz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fc0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1e801ffc00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff020000fff4;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fc0000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1e801ffc00000000;
-+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002;
-+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x0000ffff;
-+ *((int*)& __m256_op0[6]) = 0xc0008001;
-+ *((int*)& __m256_op0[5]) = 0x0000ffff;
-+ *((int*)& __m256_op0[4]) = 0xc0008001;
-+ *((int*)& __m256_op0[3]) = 0x0000ffff;
-+ *((int*)& __m256_op0[2]) = 0xc0008001;
-+ *((int*)& __m256_op0[1]) = 0x0000ffff;
-+ *((int*)& __m256_op0[0]) = 0xc0008001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe;
-+ __m256i_out = __lasx_xvftint_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ int_op1 = 0x0000007942652524;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff2524ffffffff;
-+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0004000400040805;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0004000400040805;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0004000400040805;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040805;
-+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0ff8010000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0ff8010000000000;
-+ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff020000fff4;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fc0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000080007f80800;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000;
-+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0004007c00fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3ff1808001020101;
-+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0004007c00fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x047c0404fc00fcfc;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x8a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xe17fe003;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0004007c00fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fc0000;
-+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfe07e5fefefdddfe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00020100fedd0c00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b;
-+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002010000fc000b;
-+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b;
-+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000080007f80800;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000001000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x000000ff;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x000000ff;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x0000ffff;
-+ *((int*)& __m256_op1[6]) = 0xc0008001;
-+ *((int*)& __m256_op1[5]) = 0x0000ffff;
-+ *((int*)& __m256_op1[4]) = 0xc0008001;
-+ *((int*)& __m256_op1[3]) = 0x0000ffff;
-+ *((int*)& __m256_op1[2]) = 0xc0008001;
-+ *((int*)& __m256_op1[1]) = 0x0000ffff;
-+ *((int*)& __m256_op1[0]) = 0xc0008001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000080007f80800;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00047fff00007fff;
-+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclo_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0005000501800005;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000ff7f1080ef8;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0100000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000ff7f1080ef8;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0100000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x003ff18080010201;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0100000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x003ff18080010201;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0100000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x005500550055ffab;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x003ff18080010201;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0100000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x003ff18080010201;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0100000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000f18080010000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000f18080010000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000b0000000b;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b;
-+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fffe00010001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fffe00010001;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_d(__m128i_op0,15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0555550000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0555550000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0555550000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0555550000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00550000ffab0001;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00550000ffab0001;
-+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000f18080010000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000f18080010000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000078c0c0008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000078c0c0008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8493941335f5cc0c;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x625a7312befcb21e;
-+ *((unsigned long*)& __m128d_result[1]) = 0x43e092728266beba;
-+ *((unsigned long*)& __m128d_result[0]) = 0x43d8969cc4afbf2d;
-+ __m128d_out = __lsx_vffint_d_lu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00fc0000;
-+ *((int*)& __m128_op1[3]) = 0xfe07e5fe;
-+ *((int*)& __m128_op1[2]) = 0xfefdddfe;
-+ *((int*)& __m128_op1[1]) = 0x00020100;
-+ *((int*)& __m128_op1[0]) = 0xfedd0c00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ long_int_result = 0x0000000000000000;
-+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000;
-+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfe07e5fefefdddfe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00020100fedd0c00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0005000501800005;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfe07e5fefefdddfe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00020100fedd0008;
-+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8493941335f5cc0c;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x625a7312befcb21e;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8493941300000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000002befcb21e;
-+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x3ff1808001020101;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0ff80100ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0ff80100ffffffff;
-+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000020100;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000fc0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000007fff8000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001008100000005;
-+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x84939413;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000002;
-+ *((int*)& __m128_op0[0]) = 0xbefcb21e;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000017000000080;
-+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000007fff8000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001008100000005;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0800080077ff8800;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0801088108000805;
-+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x7fff8000;
-+ *((int*)& __m128_op0[1]) = 0x00010081;
-+ *((int*)& __m128_op0[0]) = 0x00000005;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100;
-+ __m128i_out = __lsx_vfclass_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x01000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x01000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000f18080010000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000f18080010000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x43e092728266beba;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x43d8969cc4afbf2d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_h(__m128i_op0,-11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001e;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001e;
-+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001700080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001700080;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000020000000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000100;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000100000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000080000;
-+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084;
-+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001e1f;
-+ __m128i_out = __lsx_vmsknz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x3d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000100000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0x0000ffff;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0ff80100ffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0ff80100ffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000017000000080;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xc06500550055ffab;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000170;
-+ *((int*)& __m256_op0[6]) = 0x00000080;
-+ *((int*)& __m256_op0[5]) = 0xc0650055;
-+ *((int*)& __m256_op0[4]) = 0x0055ffab;
-+ *((int*)& __m256_op0[3]) = 0x00000170;
-+ *((int*)& __m256_op0[2]) = 0x00000080;
-+ *((int*)& __m256_op0[1]) = 0xc0650055;
-+ *((int*)& __m256_op0[0]) = 0x0055ffab;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000;
-+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x7f800000;
-+ *((int*)& __m256_result[6]) = 0x7f800000;
-+ *((int*)& __m256_result[5]) = 0x7f800000;
-+ *((int*)& __m256_result[4]) = 0x7f800000;
-+ *((int*)& __m256_result[3]) = 0x7f800000;
-+ *((int*)& __m256_result[2]) = 0x7f800000;
-+ *((int*)& __m256_result[1]) = 0x7f800000;
-+ *((int*)& __m256_result[0]) = 0x7f800000;
-+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001700080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001700080;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x4177000800000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x4177000800000000;
-+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
-+ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x000000017fff9000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000210011084;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001700080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001700080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001700080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001700080; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe90ffffff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe90ffffff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff90ffffff80; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ long_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; -+ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff90ffffff80; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& 
__m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff70ff01ff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff70ff01ff80; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000006f0000007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000006f0000007f; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001001; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000feff01; -+ *((unsigned long*)& __m128i_result[0]) = 0x00feff0100000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffff90; -+ *((int*)& __m256_op0[4]) = 0xffffff80; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffff90; -+ *((int*)& __m256_op0[0]) = 0xffffff80; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff6fff6fff6fff6; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfrint_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0001ffff00000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x5ff6a0a40ea8f47c; -+ *((unsigned long*)& __m128d_result[0]) = 0x5ff6a0a40e9da42a; -+ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5ff6a0a40ea8f47c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5ff6a0a40e9da42a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0001ffff00000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; -+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrml_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xdb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000006f0000007f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000006f0000007f; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff90ff81; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff90ff81; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f; -+ __m256i_out = 
__lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff90ff81; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff90ff81; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff000000ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op2[1]) = 0x0000ff000000ff00; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x01fc020000fe0100; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op2[2]) = 
0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5ff6a0a40ea8f47c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5ff6a0a40e9da42a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001ffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000ff0000; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrml_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001ffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; -+ __m128d_out = __lsx_vflogb_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x7); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff90ff81; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff90ff81; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x000000000000007f; -+ int_result = 0x000000000000007f; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x4); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x0000ffff; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x0000ffff; -+ *((int*)& __m128_op1[0]) = 0x0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0000ffff; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x0000ffff; -+ *((int*)& __m128_op0[0]) = 0x0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffff6ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffff6ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01fc020000fe0100; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffff6ff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffff6ff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000900ffffffff; -+ *((unsigned long*)& 
__m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000900ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_d(__m256i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000003fc0003; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x56); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffff6ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffff6ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f4012ceb; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x00000000f4012ceb; -+ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000017fda829; -+ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000017f0a82; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000fb8000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000fb8000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000017f0a82; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000003f; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000017f0a82; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000f6ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000f6ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x007fffff00000000; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0040004017fda869; -+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x17fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x14131211100f0e0d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0c0b0a0908070605; -+ *((unsigned long*)& __m256i_op0[1]) = 0x14131211100f0e0d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0c0b0a0908070605; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0a09080706050403; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0a09080706050403; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000017fda829; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000001e5; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x5000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000001; -+ *((int*)& __m128_op0[2]) = 0xfffffffe; -+ *((int*)& __m128_op0[1]) = 0x00000001; -+ *((int*)& __m128_op0[0]) = 0xfffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0a09080706050403; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0a09080706050403; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0504840303028201; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0504840303028201; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0001fffe; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000003ffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000003ffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000003ffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000003ffffffffff; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x29); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffe6ffffffe6; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffe6ffffffe6; -+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0a09080706050403; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0a09080706050403; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0003000200000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003000200000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x5c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000055555501; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000005555555554; -+ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff00ff7f00000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001000f000e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fff1000ffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000f000e; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000ffffe; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001000f000e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fff1000ffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002a55005501; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002a55000001; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002a55005501; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002a55000001; 
-+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x36280000; -+ *((int*)& __m128_result[1]) = 0x42a00000; -+ *((int*)& __m128_result[0]) = 0x42a02000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff00ff7f; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000f000e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000ffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x003fffff00070007; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000007ffff; -+ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000036280000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x42a0000042a02000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x9f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = 
__lsx_vshuf4i_h(__m128i_op0,0x2c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff7fffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0040000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x80808080; -+ *((int*)& __m256_op0[6]) = 0x80808080; -+ *((int*)& __m256_op0[5]) = 0x80808080; -+ *((int*)& __m256_op0[4]) = 0x80808080; -+ *((int*)& __m256_op0[3]) = 0x80808080; -+ *((int*)& __m256_op0[2]) = 0x80808080; -+ *((int*)& __m256_op0[1]) = 0x80808080; -+ *((int*)& __m256_op0[0]) = 0x80808080; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x80000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xe2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvsigncov_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000005555555554; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000036280001; -+ *((unsigned long*)& __m128i_result[0]) = 0x42a0000042a02001; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02001; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000036280001; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xe0000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xe0000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xe0000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xe0000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x80000000; -+ *((int*)& __m256_op1[4]) = 0x80000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x80000000; -+ *((int*)& __m256_op1[0]) = 0x80000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x004200a000200001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x004200a000200001; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001c; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001c; -+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0000000; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001c; -+ *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x004200a000200000; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x004200a000200000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x004200a000200000; -+ *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x004200a000200000; -+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff7fff; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x004200a000200001; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7fff00007fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x004200a000200000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffffff; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x004200a0; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x004200a0; -+ *((int*)& __m128_op0[0]) = 0x00200001; -+ *((int*)& __m128_op1[3]) = 0x004200a0; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x004200a0; -+ *((int*)& __m128_op1[0]) = 0x00200000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff000000; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffe003c1f0077; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffff0074230438; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000000438; -+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; -+ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000efffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffeffffffff; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[3]) = 0x1fffffff1fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0383634303836343; -+ *((unsigned long*)& __m256i_result[1]) = 0x1fffffff1fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0383634303836343; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[0]) = 0x1c1b1a191c1b1a19; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffeffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffeffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffeffffffff; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001000000; -+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x28); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xefffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000efffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xfffffffe; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000efffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& 
__m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000002; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000002; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x51); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1fffffff1fffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0383634303836343; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1fffffff1fffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0383634303836343; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001000000; -+ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffeff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffeff00000000; -+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000401000000; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe4ffe6ffe5ffe6; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe4ffe6ffe5ffe6; -+ *((unsigned long*)& __m256i_result[1]) = 0xffe4ffe6ffe5ffe6; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe4ffe6ffe5ffe6; -+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x68); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1fffffff1fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0383634303836343; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x1fffffff1fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0383634303836343; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002ffff0002ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002ffff0002ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000000; -+ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff1fffffff1; -+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x6c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffe4ffe6; -+ *((int*)& __m256_op0[6]) = 0xffe5ffe6; -+ *((int*)& __m256_op0[5]) = 0xffe4ffe6; -+ *((int*)& __m256_op0[4]) = 0xffe5ffe6; -+ *((int*)& __m256_op0[3]) = 0xffe4ffe6; -+ *((int*)& __m256_op0[2]) = 0xffe5ffe6; -+ *((int*)& __m256_op0[1]) = 0xffe4ffe6; -+ *((int*)& __m256_op0[0]) = 0xffe5ffe6; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000401000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000402000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000402000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000402000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000402000000; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[3]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_result[2]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_result[1]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_result[0]) = 0x0036003200360032; -+ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xc4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0036003200360032; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((int*)& __m128_result[3]) = 0xffffe000; -+ *((int*)& __m128_result[2]) = 0xffffe000; -+ *((int*)& __m128_result[1]) = 0xffffe000; -+ *((int*)& __m128_result[0]) = 0xffffe000; -+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0002fffeffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0002fffeffff; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x99); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00007fff; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffff0002fffeffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffff0002fffeffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_result[0]) = 0x1c1b1a191c1b1a19; -+ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xd2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ int_result = 0xffffffffffffffff; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffffe000; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe001ffffe001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe001ffffe001; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x1d1a1b181d1a1b18; -+ *((unsigned long*)& __m256i_result[2]) = 0x9c9b9a999c9b9a99; -+ *((unsigned long*)& __m256i_result[1]) = 0x1d1a1b181d1a1b18; -+ *((unsigned long*)& __m256i_result[0]) = 0x9c9b9a999c9b9a99; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x03ff03ff03ff03ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 
0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03ff03ff03ff03ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x438ff81ff81ff820; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffint_d_l(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x438ff81ff81ff820; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x03ff03ff03ff03ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000043; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x78); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m128i_result[0]) = 0x0202020202020202; -+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe001ffffe001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe001ffffe001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; -+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_result[1]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_result[0]) = 0x2000200020002000; -+ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0200020002000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0200020002000200; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x73); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0200020002000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0200020002000200; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff02000200; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff02000200; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe00001ffe200; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff02000200; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffdfff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffdfff; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffdfff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffdfff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe00001ffe200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffdfff; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffe001; -+ *((int*)& __m128_op0[2]) = 0xffffe001; -+ *((int*)& __m128_op0[1]) = 0xffffe001; -+ *((int*)& __m128_op0[0]) = 0xffffe001; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffe000; -+ *((int*)& __m128_op1[0]) = 0x01ffe200; -+ *((int*)& __m128_op2[3]) = 0x04040383; -+ *((int*)& __m128_op2[2]) = 0x83838404; -+ *((int*)& __m128_op2[1]) = 0x04040383; -+ *((int*)& __m128_op2[0]) = 0x83838404; -+ *((int*)& __m128_result[3]) = 0xffffe001; -+ *((int*)& __m128_result[2]) = 0xffffe001; -+ *((int*)& __m128_result[1]) = 0xffffe001; -+ *((int*)& __m128_result[0]) = 0xffffe001; -+ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x30); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffff00000000; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op2[1]) = 0x03ff03ff03ff03ff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2000200020002000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007f000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff0000; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000003fb000003fb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000003fb000003fb; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff4fffffff4; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff4fffffff4; -+ 
__m128i_out = __lsx_vmini_w(__m128i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007f000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffdfff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffdfff; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffe000; -+ *((int*)& __m128_op1[0]) = 0x01ffe200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff80007fff; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_d(__m256i_op0,5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000cb4a; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000cb4a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f909; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf000e001bf84df83; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff8e001ff84e703; -+ *((unsigned long*)& __m128i_result[1]) = 0x14042382c3ffa481; -+ *((unsigned long*)& __m128i_result[0]) = 0x040c238283ff9d01; -+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0403cfcf01c1595e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x837cd5db43fc55d4; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, 
__m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf000e001bf84df83; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff8e001ff84e703; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ca354688; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff35cab978; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff35cab978; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff35cab978; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010035; -+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ca354688; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_result[1]) = 0x00040003ff83ff84; -+ *((unsigned long*)& __m128i_result[0]) = 0x00040003ff4dffca; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f909; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; -+ *((unsigned long*)& __m128i_result[1]) = 0x0007005200440062; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080005e007f00d8; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff8383ffff7d0d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe000ffff1fff; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; -+ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be5579ebe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f909; -+ *((unsigned long*)& __m128i_result[1]) = 0x0c03e17edd781b11; -+ *((unsigned long*)& __m128i_result[0]) = 0x342caf9be55700b5; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ca354688; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00040003ff83ff84; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00040003ff4dffca; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000383ffff1fff; -+ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000383ffff1fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ca354688; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000038335ca2777; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000404; -+ *((int*)& __m128_op1[2]) = 0x00000383; -+ *((int*)& __m128_op1[1]) = 0xffffe000; -+ *((int*)& __m128_op1[0]) = 0xffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x7f800000; -+ *((int*)& __m256_op1[6]) = 0x7f800000; -+ *((int*)& __m256_op1[5]) = 0x7f800000; -+ *((int*)& __m256_op1[4]) = 0x7f800000; -+ *((int*)& __m256_op1[3]) = 0x7f800000; -+ *((int*)& __m256_op1[2]) = 0x7f800000; -+ *((int*)& __m256_op1[1]) = 0x7f800000; -+ *((int*)& __m256_op1[0]) = 0x7f800000; -+ *((int*)& __m256_result[7]) = 0xff800000; -+ *((int*)& __m256_result[6]) = 0xff800000; -+ *((int*)& __m256_result[5]) = 0xff800000; -+ *((int*)& __m256_result[4]) = 0xff800000; -+ *((int*)& __m256_result[3]) = 0xff800000; -+ *((int*)& __m256_result[2]) = 0xff800000; -+ *((int*)& __m256_result[1]) = 0xff800000; -+ *((int*)& __m256_result[0]) = 0xff800000; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000038335ca2777; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000800800000; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000463fd2902d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5ccd54bbfcac806c; -+ unsigned_int_result = 0x00000000000000ac; -+ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x2); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000800800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000004000000000; -+ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007ff000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007ff000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007ff000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; -+ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be55700b5; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00040003ff83ff84; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00040003ff4dffca; -+ *((unsigned long*)& __m128i_result[1]) = 0x0c07e181ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x3430af9effffffff; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000004000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000007; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffc0ffff003f; -+ __m128i_out = __lsx_vsrai_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c07e181ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x3430af9effffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000040000000400; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000040000000400; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; -+ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be55700b5; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0c03e17edd781b11; -+ *((unsigned long*)& __m128i_result[0]) = 0x342caf9bffff1fff; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xcc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000001; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; -+ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9bffff1fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000040000000400; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0c037fff342c7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000004000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffc000000000; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x34); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffe000ffff1fff; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0028e0a1; -+ *((int*)& __m128_op0[2]) = 0xa000a041; -+ *((int*)& __m128_op0[1]) = 0x01000041; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x01000001; -+ *((int*)& __m128_op1[1]) = 0x00010001; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x01000001; -+ *((int*)& __m128_op2[1]) = 0xffffe000; -+ *((int*)& __m128_op2[0]) = 0xffff1fff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x01000001; -+ *((int*)& __m128_result[1]) = 0xffffe000; -+ *((int*)& __m128_result[0]) = 0xffff1fff; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000004; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffc000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; -+ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; -+ *((int*)& __m128_result[3]) = 0x40800000; -+ *((int*)& __m128_result[2]) = 0x4b800000; -+ *((int*)& __m128_result[1]) = 0x47800080; -+ *((int*)& __m128_result[0]) = 0x40800000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000383; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000383; -+ *((unsigned long*)& __m128i_result[0]) = 0xe400000003ffc001; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000001; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000383; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe400000003ffc001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe000ffff2382; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[2]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[0]) = 0x0005000500050005; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000090100000a; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe009ffff2008; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040000000400; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000040100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff2382; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ 
*((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[2]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[0]) = 0x0005000500050005; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00040100; -+ *((int*)& __m128_op0[1]) = 0x00010001; -+ *((int*)& __m128_op0[0]) = 0x00010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000040100; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0001000100010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000384; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe3f0200004003ffd; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff00ff00ff00; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000007f00000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00000000007f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m128i_result[0]) = 0x0404040404000404; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000007f00000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000401000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000110000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000004; -+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000007f0000; -+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000007f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; 
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000501000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000008; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040100; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010400100203; -+ *((unsigned long*)& __m128i_result[0]) = 0x0103010301020109; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007f00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000050005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000007f00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007f00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000; -+ __m128i_out = __lsx_vmaxi_d(__m128i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00007f00; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x01000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ 
*((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffffffc; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000050005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010400100203; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0103010301020109; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000110000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000007f00000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0202000402020202; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000200000010000; -+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0001000100000004; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000501000002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000008; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000050005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000505; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000505; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000505; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x02020004; -+ *((int*)& __m128_op0[2]) = 0x02020202; -+ *((int*)& __m128_op0[1]) = 0x00002000; -+ *((int*)& __m128_op0[0]) = 0x00010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000505; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2001240128032403; -+ *((unsigned long*)& __m128i_op1[0]) = 0x288b248c00010401; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffdfffefffff7ffe; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0008; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0008; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0800000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xfffefffe; -+ *((int*)& __m128_op0[0]) = 0xfffffffc; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xfffefffe; -+ *((int*)& __m128_op1[0]) = 0xfffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000505; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; -+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; -+ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000505; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffef000004ea; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffefffffffef; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffef000004ea; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffef000004ea; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffefffffffef; 
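-+ /* Pattern of these generated checks: operand and expected-result lanes
-+    are written through type-punned pointers (unsigned long is 64-bit on
-+    the LP64 LoongArch64 target) into __m128i/__m256i, __m128/__m256 and
-+    __m128d/__m256d variables, the LSX or LASX builtin under test is then
-+    invoked, and ASSERTEQ_64/ASSERTEQ_32 (macros presumably defined near
-+    the top of this generated file) compare the expected and actual
-+    vectors 64 or 32 bits at a time, keyed by __LINE__.  */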
-+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffef000004ea; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffefffffffef; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000002020202; -+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xef); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffeffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000001; -+ *((int*)& __m256_op0[4]) = 0x00010102; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000101; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0018796d; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& 
__m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffffffc; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001010300010102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000410041; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002020202; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x5b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000081; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffefffffffeff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffcff; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000102; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010103; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffefffffffeff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffcff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfffffeff; -+ *((int*)& __m128_op0[2]) = 0xfffffeff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xfffffcff; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00fffefe; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128d_result[1]) = 0x800000ff000000ff; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x800000ff000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x800000ff080000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000fffffffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fffefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000fffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000808080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfe02fe02; -+ *((int*)& __m128_op0[2]) = 0xfe02fe02; -+ *((int*)& __m128_op0[1]) = 0xfe02fe02; -+ *((int*)& __m128_op0[0]) = 0xfe02fe02; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; -+ __m128i_out = __lsx_vfclass_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000000020000; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff8000000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000800000000ffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x697eba2bedfa9c82; -+ *((unsigned long*)& __m128i_op2[0]) = 0xd705c77a7025c899; -+ *((unsigned long*)& __m128i_result[1]) = 0xffcb410000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffeb827ffffffff; -+ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000808080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000808080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080404040; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000020000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fffefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000fffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a0a0a0a0a; -+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x697eba2bedfa9c82; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd705c77a7025c899; -+ unsigned_int_result = 0x000000000000edfa; -+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffefd; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2700000000002727; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000002727; -+ *((unsigned long*)& __m128i_op1[1]) = 0x697eba2bedfa9c82; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd705c77a7025c899; -+ *((unsigned long*)& __m128i_result[1]) = 0xc9c00000000009c9; -+ *((unsigned long*)& __m128i_result[0]) = 0x0013938000000000; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffcb410000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffeb827ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x697eba2bedfa9c82; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd705c77a7025c899; -+ *((unsigned long*)& __m128i_result[1]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128i_result[0]) = 0xedfaedfaedfaedfa; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff7fffdefd; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; -+ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x800000ff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ffffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 
0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000009; -+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000009; -+ *((unsigned long*)& __m128i_op1[1]) = 0x697eba2bedfa9c82; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd705c77a7025c899; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x03fdfffcfefe03fe; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010000800100008; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 
0xfffffffefffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffbf4; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffc; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff01; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_result[2]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_result[1]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_result[0]) = 0x6c6c6c6c6c6c6c6c; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x6c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffbf4; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000308; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6c6c6c6c6c6c6c6c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x0); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffbf4; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800c000; -+ *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800a000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800e000; -+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0002fffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0002ff7e8286; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff0002fffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0002ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0202000002020202; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202000002010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0202000002020202; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202000002020000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000fff08; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000fff09; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80ff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff80000000ffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff80ff0010ff06; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007f01000eff0a; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff80000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); 
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000808; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128i_op0[0]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80ff0010ff06; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007f01000eff0a; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff80ff0010ff06; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000804000004141; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00017fff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf800f800f800c000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf800f800f800a000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; -+ __m256i_out = 
__lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0010ff06; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xf6fd377cf705f680; -+ *((unsigned long*)& __m128i_result[0]) = 0xc0000000bfff8000; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff80000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000001fffe; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffff80ff0010ff06; -+ *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128d_op1[0]) = 0xedfaedfaedfaedfa; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000300000003; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202000002020202; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202000002010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202000002020202; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202000002020000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fe000000ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe000001fe0000; -+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x01fe000000ff00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x01fe000001fe0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xcccccccc0000cccc; -+ *((unsigned long*)& __m128i_result[0]) = 0xcccccccc0000cccc; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0x33); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000040000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000000010000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000040000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000040000010; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001fffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f00107f04; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f0000fd7f0000fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x7e7e7e7eff0f7f04; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f0000fd7f01fffb; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000808; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& 
__m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffe0000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000fefc0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffe0000; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000fffffefc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000fffffffe0; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000fffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000fffffffff; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; -+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x7b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff8080000004000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff8080000000000; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000022666621; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffdd9999da; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f00107f04; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f0000fd7f0000fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000066621; -+ *((unsigned long*)& __m128i_result[0]) = 0x01ff00085e9900ab; -+ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0001000100010000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x020afefb08140000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf800f7fff8ffc0ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xf8fff7fff7ffa000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800e000; -+ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800e000; -+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000300000003; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000002; -+ *((int*)& __m128_op0[2]) = 0x00000002; -+ *((int*)& __m128_op0[1]) = 0x00000003; -+ *((int*)& __m128_op0[0]) = 0x00000003; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrecip_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3fc000003fc00000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3fc000003fc00000; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3fc000003fc00000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3fc000003fc00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_w(__m128i_op0,1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x5eff0000; -+ *((int*)& __m128_result[2]) = 0x5eff0000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3fc000003fc00000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3fc000003fc00000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3fc000003fc00000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3fc000003fc00000; -+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f00107f04; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f0000fd7f0000fd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_d(__m256i_op0,-5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffff00ffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffff00ffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7f800000; -+ *((int*)& __m128_op0[2]) = 0x7f800000; -+ *((int*)& __m128_op0[1]) = 0x7f800000; -+ *((int*)& __m128_op0[0]) = 0x7f800000; -+ *((int*)& __m128_op1[3]) = 0x00000002; -+ *((int*)& __m128_op1[2]) = 0x00000002; -+ *((int*)& __m128_op1[1]) = 0x00000003; -+ *((int*)& __m128_op1[0]) = 0x00000003; -+ *((int*)& __m128_op2[3]) = 0x3fc00000; -+ *((int*)& __m128_op2[2]) = 0x3fc00000; -+ *((int*)& __m128_op2[1]) = 0x3fc00000; -+ *((int*)& __m128_op2[0]) = 0x3fc00000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; -+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000020afefb1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f350104f7ebffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003fffc1; -+ *((unsigned long*)& __m256i_op1[0]) = 0x005c0003fff9ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000fe6a021; -+ *((unsigned long*)& __m256i_result[1]) = 0x2000000020000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000b8000; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00feff0000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00feff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ffff0000000000; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,0); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00010001; -+ *((int*)& __m256_op0[6]) = 0x00010000; -+ *((int*)& __m256_op0[5]) = 0x020afefb; -+ *((int*)& __m256_op0[4]) = 0x08140000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0x0003fffc; -+ *((int*)& __m256_op0[0]) = 0x00060000; -+ *((int*)& __m256_op1[7]) = 0x80000000; -+ *((int*)& __m256_op1[6]) = 0x40000000; -+ *((int*)& __m256_op1[5]) = 0x40000000; -+ *((int*)& __m256_op1[4]) = 0x10000010; -+ *((int*)& __m256_op1[3]) = 0x80000000; -+ *((int*)& __m256_op1[2]) = 0x40000000; -+ *((int*)& __m256_op1[1]) = 0x80000000; -+ *((int*)& __m256_op1[0]) = 0x40000010; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x000000ff; -+ *((int*)& __m256_op2[4]) = 0x0001ffff; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x0000ffff; -+ *((int*)& __m256_op2[0]) = 0x00010000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80020000; -+ *((int*)& __m256_result[5]) = 0x828aff0b; -+ *((int*)& __m256_result[4]) = 0x8001ffff; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000002; -+ *((int*)& __m256_result[1]) = 0x8000ffff; -+ *((int*)& __m256_result[0]) = 0x800d0002; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000300000003; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffc0003fffa0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fb010201f900ff; -+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x020afefb08140000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0001ff02; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff020afefc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000003fefd; -+ __m256i_out = 
__lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xfffffffe; -+ *((int*)& __m256_op0[5]) = 0xfffffffe; -+ *((int*)& __m256_op0[4]) = 0xfffffefc; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xfffffffe; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xfffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; -+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0209fefb08140000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256d_result[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256d_result[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x5ff00007fff9fff3; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0001ff02; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff020afefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000003fefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefff7fff7; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7ffffffbfffb; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7f800000; -+ *((int*)& __m128_op0[2]) = 0x7f800000; -+ *((int*)& __m128_op0[1]) = 0x7f800000; -+ *((int*)& __m128_op0[0]) = 0x7f800000; -+ *((int*)& __m128_op1[3]) = 0x7f800000; -+ *((int*)& __m128_op1[2]) = 0x7f800000; -+ *((int*)& __m128_op1[1]) = 0x7f800000; -+ *((int*)& __m128_op1[0]) = 0x7f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00ffff0000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00ffff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007f7f80807f7f80; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff0000000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x00ffff0000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f80000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f80000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0701000007010000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0701000000000000; -+ __m128i_out = __lsx_vpcnt_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00ffff0000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00ffff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000020afefb1; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7f350104f7ebffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000003fffc1; -+ *((unsigned long*)& __m256d_op0[0]) = 0x005c0003fff9ffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0209fefb08140000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffff80cb; -+ *((int*)& __m256_op1[6]) = 0xfffffdf8; -+ *((int*)& __m256_op1[5]) = 0x00000815; -+ *((int*)& __m256_op1[4]) = 0x00000104; -+ *((int*)& __m256_op1[3]) = 0xffffffa4; -+ *((int*)& __m256_op1[2]) = 0xfffffffd; -+ *((int*)& __m256_op1[1]) = 0x00000007; -+ *((int*)& __m256_op1[0]) = 0x00000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff80cbfffffdf8; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000081500000104; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffa4fffffffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000002; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff80cbfffffdf8; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffa4fffffffd; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00080000000cc916; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000006fff3; -+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ffff00ff000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ 
__m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff8080000004000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff8080000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; -+ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000005f000000f0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000f9; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000f3; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0001ff02; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff020afefc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000003fefd; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0001ff04; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff02a0fefc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000cfefd; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x807f7f8000ffff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00feff00; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; -+ 
__m128i_out = __lsx_vslti_h(__m128i_op0,4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0001ff04; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff02a0fefc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000cfefd; -+ *((unsigned long*)& __m256i_op1[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5ff00007fff9fff3; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffbfefa; -+ *((unsigned long*)& __m256i_result[2]) = 0xff1eff1902a0fea4; -+ *((unsigned long*)& __m256i_result[1]) = 0xff10000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff10fff9ff13fd17; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00080000000cc916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000006fff3; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00f8000000f41bfb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fa0106; -+ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x56); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x807f7f8000ffff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00feff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0107070100080800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080800070800; -+ __m128i_out = __lsx_vpcnt_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ffff00ff000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00080005c073c916; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000100000007fff3; -+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000000010000; -+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000bf6e0000c916; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000030000fff3; -+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f78787f00f7f700; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000f7f700f7f700; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000bf6e0000c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000030000fff3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000bf6e0000c916; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000030000fff3; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xbea2e127; -+ *((int*)& __m256_op1[6]) = 0xc046721f; -+ *((int*)& __m256_op1[5]) = 0x1729c073; -+ *((int*)& __m256_op1[4]) = 0x816edebe; -+ *((int*)& __m256_op1[3]) = 0xde91f010; -+ *((int*)& __m256_op1[2]) = 
0x000006f9; -+ *((int*)& __m256_op1[1]) = 0x5ef1f90e; -+ *((int*)& __m256_op1[0]) = 0xfefaf30d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010102; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010201010204; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010102; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010102; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbea2e127c046721f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1729c073816edebe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xde91f010000006f9; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ef1f90efefaf30d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000060000108; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001060005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fef0001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xbfa3e127c147721f; -+ *((unsigned long*)& __m256i_result[2]) = 0x1729c173836edfbe; -+ *((unsigned long*)& __m256i_result[1]) = 0xdf91f111808007fb; -+ *((unsigned long*)& __m256i_result[0]) = 0x5ff1f90ffffbf30f; -+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffc500000002d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000034; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbfa3e127c147721f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1729c173836edfbe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdf91f111808007fb; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5ff1f90ffffbf30f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff280016; -+ *((unsigned long*)& __m256i_result[2]) = 0xd193a30f94b9b7df; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x000000000001001a; -+ *((unsigned long*)& __m256i_result[0]) = 0xc88840fdf887fd87; -+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000bea20000e127; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000c0460000721f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000de910000f010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000006f9; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000bea20; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000c0460; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000de910; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x37); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000bf6e0000c916; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000030000fff3; 
-+ *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000e00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01480000052801a2; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffdcff64; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbea2e127c046721f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1729c073816edebe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xde91f010000006f9; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5ef1f90efefaf30d; -+ *((unsigned long*)& __m256i_result[3]) = 0x515f93f023600fb9; -+ *((unsigned long*)& __m256i_result[2]) = 0x948b39e0b7405f6f; -+ *((unsigned long*)& __m256i_result[1]) = 0x48ef087800007c83; -+ *((unsigned long*)& __m256i_result[0]) = 0x78af877c7d7f86f9; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010102; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0101010201010204; -+ 
*((unsigned long*)& __m256d_op0[1]) = 0x0101010101010102; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010102; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000e00ff00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010201010204; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010102; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x515f93f0; -+ *((int*)& __m256_op0[6]) = 0x23600fb9; -+ *((int*)& __m256_op0[5]) = 0x948b39e0; -+ *((int*)& __m256_op0[4]) = 0xb7405f6f; -+ *((int*)& __m256_op0[3]) = 0x48ef0878; -+ *((int*)& __m256_op0[2]) = 0x00007c83; -+ *((int*)& __m256_op0[1]) = 0x78af877c; -+ *((int*)& __m256_op0[0]) = 0x7d7f86f9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000df93f0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000077843; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256d_op0[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256d_op1[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256d_op1[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01480000052801a2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffdcff64; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbea2e127c046721f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1729c073816edebe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xde91f010000006f9; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5ef1f90efefaf30d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00170000028500de; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fd02f20d; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010203; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x01480000052801a2; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffdcff64; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000060000108; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001060005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fef0001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256i_result[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256i_result[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffff8d9ffa7103d; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x39); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256i_result[3]) = 0x001151510a431048; -+ *((unsigned long*)& __m256i_result[2]) = 0x5b0b08425b09011a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x5b5b58595b031019; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x5b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01480000052801a2; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffdcff64; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000df93f0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000077843; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000003800000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010203; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; -+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fff80000; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128i_result[1]) = 
0xffffffff00070007; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0007ffff; -+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff0000; -+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128d_result[1]) = 0x80000000fff8fff8; -+ *((unsigned long*)& __m128d_result[0]) = 0x80000000fff80000; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff8fffffff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff8fc000000; -+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x80000000fff8fff8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x60000108; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x01060005; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x7fef0001; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xfffffff8; -+ *((int*)& __m256_op1[4]) = 0xfffffff8; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xfffffff8; -+ *((int*)& __m256_op1[0]) = 0xfc000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xfff8fff8; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xfff80000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0xfff8fff8; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0xfff80000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x6d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffe2ffe2ffe2ffe2; -+ *((unsigned long*)& __m128i_result[0]) = 0xffe2ffe2ffe2ffe2; -+ __m128i_out = 
__lsx_vsubi_hu(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000102; -+ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfffffff8fffffff8; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfffffff8fc000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfafafafafafafafa; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fefefe; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; -+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
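-+ /* Cross-check sketch (a hedged scalar model, not part of the generated
-+    vectors): each case in this suite pokes operand lanes through 64-bit
-+    stores, runs one LSX/LASX intrinsic, and lets ASSERTEQ_64 compare the
-+    output against precomputed lanes.  Assuming vsubi.hu subtracts the
-+    immediate from every unsigned 16-bit lane with modular wraparound,
-+    the __lsx_vsubi_hu case above can be reproduced by:
-+
-+      static uint64_t vsubi_hu_lane64 (uint64_t v, unsigned imm)
-+      {
-+        uint64_t r = 0;
-+        for (int i = 0; i < 4; i++)   /* four halfwords per 64 bits */
-+          r |= (uint64_t) (uint16_t) (((v >> (16 * i)) & 0xffff) - imm)
-+               << (16 * i);
-+        return r;
-+      }
-+
-+    vsubi_hu_lane64 (0x0000000000000000, 0x1e) yields 0xffe2ffe2ffe2ffe2
-+    in each doubleword (0x0000 - 0x1e mod 2^16 = 0xffe2), matching the
-+    expected __m128i_result checked above.  */
-+ 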
*((unsigned long*)& __m128i_op0[1]) = 0x80000000fff8fff8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f800000fff8fff8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f800000fff80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x80000000fff80000; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff7f0000ff7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff7f0000ff7f; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfafafafafafafafa; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000fefefe; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010100000102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010100000102; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffefd; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x80000000fff80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000004000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff8004000000000; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xc08f780000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256d_result[1]) = 0xc08f780000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvflogb_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfafafafafafafafa; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fefefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8282828282828282; -+ *((unsigned long*)& __m128i_result[0]) = 0x8282828282828282; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x82); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfafafafafafafafa; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fefefe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xf9fbf9fbf9fbf9fb; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0xfdfffdfffdfffdff; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01fffffdff; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xc08f7800; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xfffffefd; -+ *((int*)& __m256_op0[3]) = 0xc08f7800; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000101; -+ *((int*)& __m256_op1[4]) = 0x00000102; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000101; -+ *((int*)& __m256_op1[0]) = 0x00000102; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = 
__lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000a0a08000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5350a08000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fd; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f017f807f017d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f017f807f017f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000017f0000017d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000017f0000017f; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) 
= 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00007dfd; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00007dfd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f0000017d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000017f0000017f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000017f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000017f; -+ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000017f0000017d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000017f0000017f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000017f0000017d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000017f0000017f; -+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000; -+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x2e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000017f0000017d; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000017f0000017f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000017f; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffc002000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000; -+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[2]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[0]) = 0x1717171717171717; -+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfffc002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff000607f7; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010017e7d1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff000607f7; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001001807f1; -+ *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[2]) = 0x000607f700000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[0]) = 0x000607f700000001; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffc001fffffffff; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff000607f7; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010017e7d1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff000607f7; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001001807f1; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; -+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f0000017d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000017f0000017f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000002e0000002e; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000002e0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000002e0000002e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002e0000fffe; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000002e0000002e; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000002e0000ffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000002e0000002e; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000002e0000fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000f7bc0001f7bd; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000f93b0000017c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000f7bc0001f7bd; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000f93b0000017b; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffc001fffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200000; -+ *((unsigned long*)& __m128i_result[0]) = 0x001fff8004000000; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefdfffffefd; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((int*)& __m256_result[7]) = 0x4b808080; -+ *((int*)& __m256_result[6]) = 0x4b808080; -+ *((int*)& __m256_result[5]) = 0x4f800000; -+ *((int*)& __m256_result[4]) = 0x4f7fffff; -+ *((int*)& __m256_result[3]) = 0x4b808080; -+ *((int*)& __m256_result[2]) = 0x4b808080; -+ *((int*)& __m256_result[1]) = 0x4f800000; -+ *((int*)& __m256_result[0]) = 0x4f800000; -+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0000007f; -+ *((int*)& __m128_op0[2]) = 0x0000007f; -+ *((int*)& __m128_op0[1]) = 0x0000007f; -+ *((int*)& __m128_op0[0]) = 0x0000007f; -+ *((int*)& __m128_op1[3]) = 0x3ff00000; -+ *((int*)& 
__m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xfffc0020; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffc001f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010202050120; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010102020202; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000f7bc0001f7bd; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000f93b0000017c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000f7bc0001f7bd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f93b0000017b; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff2f7bcfff2f7bd; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff2f93bfff2fff2; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff2f7bcfff2f7bd; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff2f93bfff2fff2; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010202050120; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010102020202; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000607f700000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000607f700000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffe81; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010000200020002; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0xfff2f7bcfff2f7bd; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff2f93bfff2fff2; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff2f7bcfff2f7bd; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff2f93bfff2fff2; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcfffc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffcfffc; -+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefdfffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ int_result = 0xfffffffffffffefd; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x4); -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000100; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfffffefdfffffefd; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffff7d80000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000100; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffe81; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe81; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0001ffff8002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0010000400020004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff20ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc0020ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x07fff80000008000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000007ffe001; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00cf01fe01fe01fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000301de01fe01fe; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f00000000000000; -+ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x17171717; -+ *((int*)& __m256_op0[6]) = 0x17171717; -+ *((int*)& __m256_op0[5]) = 0x000607f7; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x17171717; -+ *((int*)& __m256_op0[2]) = 0x17171717; -+ *((int*)& __m256_op0[1]) = 0x000607f7; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; -+ __m128i_out = __lsx_vclo_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00003ff000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000fffc00000000; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000080000000800; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcf800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000080000000800; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; -+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000607f700000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000607f700000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000002e0000002e; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000002e0000ffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000002e0000002e; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000002e0000fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[2]) = 0x000607f700000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717; -+ *((unsigned long*)& __m256i_result[0]) = 0x000607f700000001; -+ __m256i_out = 
__lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00003ff000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffc00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002e0000002e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000002e0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000002e0000002e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000002e0000fffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000002e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000002e;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000002e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe;
-+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00001ff800000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7ffe800e80000000;
-+ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000307fffe72e800;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00001ff800000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7ffe800e80000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00005555;
-+ *((int*)& __m256_op1[6]) = 0x00005555;
-+ *((int*)& __m256_op1[5]) = 0x000307ff;
-+ *((int*)& __m256_op1[4]) = 0xfe72e815;
-+ *((int*)& __m256_op1[3]) = 0x00005555;
-+ *((int*)& __m256_op1[2]) = 0x00005555;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000015;
-+ *((int*)& __m256_result[7]) = 0x00005555;
-+ *((int*)& __m256_result[6]) = 0x00005555;
-+ *((int*)& __m256_result[5]) = 0x000307ff;
-+ *((int*)& __m256_result[4]) = 0xfe72e815;
-+ *((int*)& __m256_result[3]) = 0x00005555;
-+ *((int*)& __m256_result[2]) = 0x00005555;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000015;
-+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000fffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x0000ffff;
-+ *((int*)& __m128_op0[2]) = 0x0000ffff;
-+ *((int*)& __m128_op0[1]) = 0x0000ffff;
-+ *((int*)& __m128_op0[0]) = 0x0000fffe;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
-+ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
-+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00003fee;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000004;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000002;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff100fffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffdf100fffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff100fffc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000;
-+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x21);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001ffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001ffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
-+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x30);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslei_d(__m256i_op0,-11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffcf800fffcf800;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcf800;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
-+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x7fffffff;
-+ *((int*)& __m128_op0[2]) = 0x7fffffff;
-+ *((int*)& __m128_op0[1]) = 0x7fffffff;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0008000800000003;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0806050008060500;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0008000800000003;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100;
-+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
-+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001fffe00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001ffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001ffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x0000ffff;
-+ *((int*)& __m128_op0[2]) = 0x0000ffff;
-+ *((int*)& __m128_op0[1]) = 0x0000ffff;
-+ *((int*)& __m128_op0[0]) = 0x0000fffe;
-+ *((int*)& __m128_op1[3]) = 0x0000ffff;
-+ *((int*)& __m128_op1[2]) = 0x0000ffff;
-+ *((int*)& __m128_op1[1]) = 0x0000ffff;
-+ *((int*)& __m128_op1[0]) = 0x0000fffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x16161616a16316b0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x16161616a16316b0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x7c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf9f8f9f8f9f9f900;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9e0;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf9f8f9f8f9f9f900;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f900;
-+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000a16316b0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000063636363;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000a1630000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080000;
-+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff010000fff9;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff19;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff02ff020001fffa;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000100010001fffa;
-+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000080000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000a16316b0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000063636363;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x16161616a16316b0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000a16316b0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x16161616a16316b0;
-+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xa7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfe82fe0200000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe82fe0200000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xc177d01fe0000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff010000fff9;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff19;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff02ff020001fffa;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000100010001fffa;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00fe01ff0006ffcf;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000e62f8f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00fe02fe0006ffd6;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000006ffd6;
-+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff100fffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff100fffc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff010000fff9;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff19;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff02ff020001fffa;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000100010001fffa;
-+ *((unsigned long*)& __m256i_result[3]) = 0x807f807f00000380;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc03fc03f000001c0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001c0;
-+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000a16316b0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x16161616a16316b0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ffffa10016;
-+ *((unsigned long*)& __m128i_result[0]) = 0x01150115ffa10016;
-+ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x27);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000a1630000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000a1630000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0001ffff0001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000a163000016b0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0303000103030001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000030300000303;
-+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff7100fffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ffffa10016;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x01150115ffa10016;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000100fe000070a1;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000115ffffffa1;
-+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000f9f900;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x79f9f9f900f9f9e0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f9f900;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x79f9f9f900f9f900;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00f9f90079f9f9f9;
-+ *((unsigned long*)& __m256i_result[2]) = 0x79f9f9f900000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00f9f90079f9f9f9;
-+ *((unsigned long*)& __m256i_result[0]) = 0x79f9f9f900000000;
-+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x97);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x000100fe000070a1;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000115ffffffa1;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d;
-+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe86ce7eb5e9ce950;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404;
-+ *((unsigned long*)& __m128i_result[0]) = 0xec68e3ef5a98ed54;
-+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000008;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00080000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffe40;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00f9f90079f9f9f9;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x79f9f9f900000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00f9f90079f9f9f9;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x79f9f9f900000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8c80;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe40;
-+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xf436f3f5;
-+ *((int*)& __m128_op0[0]) = 0x2f4ef4a8;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfrint_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffe40;
-+ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_w(__m128i_op0,1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vffint_d_lu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0004000000040000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0004000000040000;
-+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfff6fff6fff6fff6;
-+ __m128i_out = __lsx_vmini_h(__m128i_op0,-10);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002;
-+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0004000000040000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0004000000040000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0004000000040000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0004000000040000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0100010001000000;
-+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0100010001000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004;
-+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x16);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x807f807f00000380;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007380;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc03fc03f000001c0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001c0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_result[3]) = 0x807f807f00000380;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc03fc03f000001c0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001c0;
-+ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0100010001000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xf436f3f52f4ef4a8;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000;
-+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00f9f90079f9f9f9;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x79f9f9f900000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00f9f90079f9f9f9;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x79f9f9f900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2000200020002000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff80000000000000;
-+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffc0;
-+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffe40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000040004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400;
-+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff8000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffff8000000000;
-+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xf436f3f52f4ef4a8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xf4b6f3f52f4ef4a8;
-+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xf436f3f5;
-+ *((int*)& __m128_op0[0]) = 0x2f4ef4a8;
-+ *((int*)& __m128_op1[3]) = 0xff800000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0xff800000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0xff800000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0xff800000;
-+ *((int*)& __m128_result[0]) = 0x2f4ef4a8;
-+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004;
-+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff8000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff8000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffff8000000000;
-+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000001600000016;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001600000016;
-+ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00800000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xf4b6f3f5;
-+ *((int*)& __m128_op0[0]) = 0x2f4ef4a8;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1c00;
-+ __m256i_out = __lasx_xvneg_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffbfffc;
-+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001600000016;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001600000016;
-+ *((int*)& __m128_result[3]) = 0x41b00000;
-+ *((int*)& __m128_result[2]) = 0x41b00000;
-+ *((int*)& __m128_result[1]) = 0x41b00000;
-+ *((int*)& __m128_result[0]) = 0x41b00000;
-+ __m128_out = __lsx_vffint_s_wu(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400;
-+ __m256i_out = __lasx_xvmini_w(__m256i_op0,12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff8000002f4ef4a8;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f4a8;
-+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff;
-+ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) =
0x00000000fff0e400; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000fff0e400; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000007380; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000f1c00; -+ *((unsigned long*)& __m256d_op2[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op2[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256d_op2[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op2[0]) = 0x00000000fff0e400; -+ *((unsigned long*)& __m256d_result[3]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0x80000000ffff8c80; -+ *((unsigned long*)& __m256d_result[1]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0x80000000fff0e400; -+ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffe40; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x000000ff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000ff00; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000; -+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242070db; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa478; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; -+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; -+ *((unsigned long*)& __m256i_op1[3]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ffff8c80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x80000000fff0e400; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1a40; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; -+ *((unsigned long*)& __m128i_result[1]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf4b6f3f52f4ef4a8; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xf4b6f3f52f4ef4a8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; -+ *((unsigned long*)& __m256i_op1[3]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ffff8c80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x80000000fff0e400; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff01ff01; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff01c000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff01ff01; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000f1000000; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x00000000ff00fff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f007f78; -+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002ff5; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc2cf2471e9b7d7a4; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000027f5; -+ *((unsigned long*)& __m128i_result[0]) = 0xc2cf2471e9b7d7a4; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff01ff01; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff01c000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff01ff01; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f1000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000001341c4000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001000310000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000033e87ef1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000002e2100; -+ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff1739ffff48aa; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff2896ffff5b88; -+ *((unsigned long*)& __m128i_result[1]) = 0x3f3f17393f3f3f3f; -+ *((unsigned long*)& __m128i_result[0]) = 0x3f3f283f3f3f3f3f; -+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000033e87ef1; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000033007e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000021; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f007f78; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000033007e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000021; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x00007f7f00007fff; -+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff800000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xff800000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffff1739; -+ *((int*)& __m128_op1[2]) = 0xffff48aa; -+ *((int*)& __m128_op1[1]) = 0xffff2896; -+ *((int*)& __m128_op1[0]) = 0xffff5b88; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f007f78; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f00007f7f0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f00fffb7f78fffc; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001341c4000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000310000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000033e87ef1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000002e2100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000011c00; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000e8f1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000103100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000002e00; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00007f7f00000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000f1a40; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, 
__m256d_result, __m256d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x2757de72; -+ *((int*)& __m128_op0[2]) = 0x33d771a3; -+ *((int*)& __m128_op0[1]) = 0x166891d5; -+ *((int*)& __m128_op0[0]) = 0x1e8b7eff; -+ *((int*)& __m128_op1[3]) = 0x2757de72; -+ *((int*)& __m128_op1[2]) = 0x33d771a3; -+ *((int*)& __m128_op1[1]) = 0x166891d5; -+ *((int*)& __m128_op1[0]) = 0x1e8b7eff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001341c4000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000310000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00007f7f00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000007f00340040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000007f000000ff; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000033e87ef1; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x80008000b3e8fef1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x80008000802ea100; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[2]) = 
0x7fff7ff77fff7ff7; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7ff77fff7ff7; -+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6a1a3fbb3c90260e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x195f307a5d04acbb; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x195f307a5d04acbb; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6a1a3fbb3c90260e; -+ *((unsigned long*)& __m128i_result[1]) = 0x19df307a5d04acbb; -+ *((unsigned long*)& __m128i_result[0]) = 0x5ed032b06bde1ab6; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff5fff4002ffff5; -+ __m128i_out = __lsx_vsrari_h(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ff77fff7ff7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7ff77fff7ff7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000022; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000022; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x3e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x19df307a5d04acbb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5ed032b06bde1ab6; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x19de307a5d04acba; -+ *((unsigned long*)& __m128i_result[0]) = 0x5ed032b06bde1ab6; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; 
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000012e2110; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff5fff4002ffff5; -+ *((unsigned long*)& __m128i_op1[1]) = 0xaa858644fb8b3d49; -+ *((unsigned long*)& __m128i_op1[0]) = 0x18499e2cee2cc251; -+ *((unsigned long*)& __m128i_result[1]) = 0x8644000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xaed495f03343a685; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000012e2110; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x80008000b3e8fef1; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x80008000802ea100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000012e2110; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x012e2110012e2110; -+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffbfffc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xfffffffe; -+ *((int*)& __m128_op0[0]) = 0xbe6ed565; -+ *((int*)& __m128_op1[3]) = 0x195f307a; -+ *((int*)& __m128_op1[2]) = 0x5d04acbb; -+ *((int*)& __m128_op1[1]) = 0x6a1a3fbb; -+ *((int*)& __m128_op1[0]) = 0x3c90260e; -+ *((int*)& __m128_op2[3]) = 0xffffffff; -+ *((int*)& __m128_op2[2]) = 0xffffffff; -+ *((int*)& __m128_op2[1]) = 0xfffffffe; -+ *((int*)& __m128_op2[0]) = 0xbe6ed565; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0xfffffffe; -+ *((int*)& __m128_result[0]) = 0x3e730941; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x012e2110012e2110; -+ int_op1 = 0x00000000000000ac; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ac; -+ *((unsigned long*)& __m256i_result[0]) = 0x012e2110012e2110; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6a1a3fbb3c90260e; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xe6a0cf86a2fb5345; -+ *((unsigned long*)& __m128i_result[0]) = 0x95e5c045c36fd9f2; -+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffebe6ed565; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffebe6ed565; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffbe6ed563; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x00007f7f00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000007f00340040; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000007f000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020200008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00007f7f00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000040000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000040000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007fff; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x2a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x002e2100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0d1bffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd915e98e2d8df4d1; -+ *((unsigned long*)& __m128i_result[1]) = 0xd0b1ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x9d519ee8d2d84f1d; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020200008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0008010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0101000001010000; -+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8644000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xaed495f03343a685; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbe6ed563; -+ *((unsigned long*)& __m128i_result[1]) = 0x8644ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000fffe; -+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0080000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f00340040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000007f000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbe6ed563; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd0b1ffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9d519ee8d2d84f1d; -+ *((unsigned long*)& __m128i_result[1]) = 0xfefd7f7f7f7f7f7e; -+ *((unsigned long*)& __m128i_result[0]) = 0xdffdbffeba6f5543; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbe6ed563; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd0b1ffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9d519ee8d2d84f1d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8644ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4a6d0000ffff0000; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000040002; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfefd7f7e7f7f7f7f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9d519ee8d2d84f1d; -+ *((unsigned long*)& __m128i_op2[1]) = 0x8644ffff0000ffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff8080; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfefd7f7f7f7f7f7e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xdffdbffeba6f5543; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ffffff000000ff; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0101000001010000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ffffff000000ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ffffff000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff000000ff00; -+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000001010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000; -+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000001010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000; -+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffff000000ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000040002; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000000000000007f; -+ *((unsigned 
long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd83c8081ffff8080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; -+ __m128i_out = __lsx_vmskltz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000000000000007f; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000020001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000040002; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000040002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xefefefefefefefef; -+ *((unsigned long*)& __m256i_result[2]) = 0xefefefefefefefef; -+ *((unsigned long*)& __m256i_result[1]) = 0xefefefefefefef6e; -+ *((unsigned long*)& __m256i_result[0]) = 0xeeeeeeeeeeeeeeee; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; -+ __m128d_out = __lsx_vflogb_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0xefefefef; -+ *((int*)& __m256_op0[6]) = 0xefefefef; -+ *((int*)& __m256_op0[5]) = 0xefefefef; -+ *((int*)& __m256_op0[4]) = 0xefefefef; -+ *((int*)& __m256_op0[3]) = 0xefefefef; -+ *((int*)& __m256_op0[2]) = 0xefefef6e; -+ *((int*)& __m256_op0[1]) = 0xeeeeeeee; -+ *((int*)& __m256_op0[0]) = 0xeeeeeeee; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x85bd6b0e94d89998; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0xd83c8081ffff8080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff8080; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020000020200000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0008000001010000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101000001010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff8080; -+ *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff808f; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0018001800180018; -+ *((unsigned long*)& __m128i_result[0]) = 0x0018001800180018; -+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xefefefefefefefef; -+ *((unsigned long*)& __m256i_op0[2]) = 0xefefefefefefefef; -+ *((unsigned long*)& __m256i_op0[1]) = 0xefefefefefefef6e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xeeeeeeeeeeeeeeee; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101012; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x1010101010101012; -+ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101093; -+ *((unsigned long*)& __m256i_result[0]) = 0x1111111111111113; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; -+ *((unsigned long*)& __m128d_op0[0]) = 0xdffdbffeba6f5543; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfefd7f7f7f7f7f7e; -+ *((unsigned long*)& __m128d_op1[0]) = 0xdffdbffeba6f5543; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff00fff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff00fffffff0; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xfefd7f7f; -+ *((int*)& __m128_op1[2]) = 0x7f7f7f7e; -+ *((int*)& __m128_op1[1]) = 0xdffdbffe; -+ *((int*)& __m128_op1[0]) = 0xba6f5543; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x7f7f7f7e; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0x10101010; -+ *((int*)& __m256_op0[6]) = 0x10101012; -+ *((int*)& __m256_op0[5]) = 0x10101010; -+ *((int*)& __m256_op0[4]) = 0x10101012; -+ *((int*)& __m256_op0[3]) = 0x10101010; -+ *((int*)& __m256_op0[2]) = 0x10101093; -+ *((int*)& __m256_op0[1]) = 0x11111111; -+ *((int*)& __m256_op0[0]) = 0x11111113; -+ *((int*)& __m256_result[7]) = 0xc2be0000; -+ *((int*)& __m256_result[6]) = 0xc2be0000; -+ *((int*)& __m256_result[5]) = 0xc2be0000; -+ *((int*)& __m256_result[4]) = 0xc2be0000; -+ *((int*)& __m256_result[3]) = 0xc2be0000; -+ *((int*)& __m256_result[2]) = 0xc2be0000; -+ *((int*)& __m256_result[1]) = 0xc2ba0000; -+ *((int*)& __m256_result[0]) = 0xc2ba0000; -+ __m256_out = __lasx_xvflogb_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xe2ecd48adedc7c82; -+ *((unsigned long*)& __m128i_op0[0]) = 0x25d666472b01d18d; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0303020102020001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000000000201; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0018001800180018; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0018001800180018; -+ *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff808f; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff489b693120950; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffc45a851c40c18; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0018001800180018; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0018001800180018; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; -+ *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff808f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xd82480697f678077; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0303020102020001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000201; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd82480697f678077; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0301020100000004; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1010101010101012; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1010101010101012; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1010101010101093; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1111111111111113; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1010101110101011; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1111111211111112; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020001; -+ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010121011; -+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x10101011; -+ *((int*)& __m256_op1[4]) = 0x10101011; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& 
__m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x11111112; -+ *((int*)& __m256_op1[0]) = 0x11111112; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; -+ *((unsigned long*)& __m128d_op0[0]) = 0xdffdbffeba6f5543; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; -+ *((unsigned long*)& __m128i_result[1]) = 0xe0d56a9774f3ea31; -+ *((unsigned long*)& __m128i_result[0]) = 0xe0dd268932a5edf9; -+ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd82480697f678077; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00fffffff0; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808081; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808081; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1010101110101011; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1111111211111112; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004444; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x2e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xe0d56a9774f3ea31; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe0dd268932a5edf9; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe0d56a9774f3ea31; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe0dd268932a5edf9; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xd8248069ffe78077; -+ *((unsigned long*)& __m128i_result[1]) = 0xe0d56a9774f3ea31; -+ *((unsigned long*)& __m128i_result[0]) = 0xbddaa86803e33c2a; -+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd82480697f678077; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808081; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8080808080808081; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000808000008080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000808000008081; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x9f9f9f9f9f9f9f9f; -+ *((unsigned long*)& __m256i_result[2]) = 0x9f9f9f9fffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x9f9f9f9f9f9f9f9f; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff9fffffffff; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x9f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xe0d56a9774f3ea31; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbddaa86803e33c2a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe0d56a9774f3ea31; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbddaa86803e33c2a; -+ *((unsigned long*)& __m128i_result[1]) = 0xff0600d50e9ef518; -+ *((unsigned long*)& __m128i_result[0]) = 0xffefffa8007c000f; -+ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000a; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000a; -+ __m128i_out = __lsx_vmaxi_d(__m128i_op0,10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff489b693120950; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc45a851c40c18; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4811fda96793b23a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8f10624016be82fd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfda9b23a624082fd; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = 
__lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0d0d0d0d0d0d0d0d; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xd8248069ffe78077; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xe31c86e90cda86f7; -+ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe31c86e90cda86f7; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000e3; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff489b693120950; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc45a851c40c18; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffc45a851c40c18; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x48); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff0600d50e9ef518; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffefffa8007c000f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c00000000; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c63636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000e3; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((int*)& __m128_result[3]) = 0x43630000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0xdc159371; -+ *((int*)& __m128_result[0]) = 0x4f7fff00; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvmax_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000808000008080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000808000008081; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000808000008080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000808000008081; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000081; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x68); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xd8248069; -+ *((int*)& __m128_op0[0]) = 0x7f678077; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xd8248069; -+ *((int*)& __m128_op1[0]) = 0x7f678077; -+ *((int*)& __m128_result[3]) = 0x7fc00000; -+ *((int*)& __m128_result[2]) = 0x7fc00000; -+ *((int*)& __m128_result[1]) = 0x3f800000; -+ *((int*)& __m128_result[0]) = 0x3f800000; -+ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x2002040404010420; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010180800101; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2002040404010420; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101010180800101; -+ *((unsigned long*)& __m128i_result[1]) = 0x2002040404010420; -+ *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c80800101; -+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a03574e3a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, 
__m256d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x9c9c9c9c00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x03574e3a; -+ *((int*)& __m128_op1[2]) = 0x03574e3a; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffff00; -+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_result[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202020202020202; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x03574e39e496cbc9; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; -+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7da9b23a624082fd; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0505050505050505; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000005050000; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e39e496cbc9; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x03574e38e496cbc9; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x05050505; -+ *((int*)& __m128_op0[2]) = 0x05050505; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x05050000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x03574e38; -+ *((int*)& __m128_op1[0]) = 0xe496cbc9; -+ *((int*)& __m128_result[3]) = 0x05050505; -+ *((int*)& __m128_result[2]) = 0x05050505; -+ *((int*)& __m128_result[1]) = 0x03574e38; -+ *((int*)& __m128_result[0]) = 0xe496cbc9; -+ 
__m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x3e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; -+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005050000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0505000005050505; -+ *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0028280000282800; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3b94f2ca31; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000001f807b89; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000005050000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0505000005050505; -+ *((unsigned long*)& __m128i_result[1]) = 0x000d02540000007e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001400140014; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005050000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0505000005050505; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000d02540000007e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001400140014; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0505050505050505; -+ *((unsigned long*)& __m128i_op2[0]) = 0x03574e38e496cbc9; -+ *((unsigned long*)& __m128i_result[1]) = 0x0005000400000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0400001001150404; -+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000006597cc3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; -+ *((unsigned long*)& __m128i_result[1]) = 
0x000000006595cc1d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0505050505050505; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000005050000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0028280000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0028280000282800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000282800; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0005000400000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0400001001150404; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0005000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0400001001150404; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000000; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0028280000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0028280000282800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x7505853d654185f5; -+ *((unsigned long*)& __m128i_op2[0]) = 0x01010000fefe0101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x012927ffff272800; -+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcfb799f1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0282800002828282; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5555001400005111; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffabbeab55110140; -+ *((unsigned long*)& __m128i_result[1]) = 0xaaaaffebcfb748e0; -+ *((unsigned long*)& __m128i_result[0]) = 0xfd293eab528e7ebe; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5555001400005111; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffabbeab55110140; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5555001400005111; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffabbeab55110140; -+ *((unsigned long*)& __m128i_result[1]) = 0xaaaa00280000a222; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe567c56aa220280; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0028280000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x012927ffff272800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0028280000000000; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505853d654185f5; -+ *((unsigned long*)& __m128i_op0[0]) = 0x01010000fefe0101; -+ *((unsigned long*)& __m128i_result[1]) = 0x7545c57d6541c5f5; -+ *((unsigned long*)& __m128i_result[0]) = 0x41414040fefe4141; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x40); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x000d0254; -+ *((int*)& __m128_op0[2]) = 0x0000007e; -+ *((int*)& __m128_op0[1]) = 0x00000014; -+ *((int*)& __m128_op0[0]) = 0x00140014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xaaaaffebcfb748e0; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfd293eab528e7ebe; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffeb48e03eab7ebe; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xaaaaffebcfb748e0; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfd293eab528e7ebe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xaaaaffebcfb748e0; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfd293eab528e7ebe; -+ *((unsigned long*)& __m128i_result[1]) = 0xf6e91c0000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x51cfd7c000000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffeb48e03eab7ebe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc0fac01200f800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f80eac01f80ef80; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000001fc1a568; -+ *((unsigned long*)& __m128i_op0[0]) = 0x02693fe0e7beb077; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_d(__m128i_op0,-6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000001fc1a568; -+ *((unsigned long*)& __m128i_op0[0]) = 0x02693fe0e7beb077; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcfb799f1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0282800002828282; -+ *((int*)& __m128_result[3]) = 0xffffe000; -+ *((int*)& __m128_result[2]) = 0xffffe000; -+ *((int*)& __m128_result[1]) = 0xc1f6e000; -+ *((int*)& __m128_result[0]) = 0xbb3e2000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0xf6e91c00; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x51cfd7c0; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x880c91b8; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x2d1da85b; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrecip_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x80008000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x80008000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& 
__m256_op1[3]) = 0x80008000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x80008000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_result[1]) = 0x7404443064403aec; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000d6eefefc0498; -+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf6e91c0000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x51cfd7c000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffd000700000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0014fff500000000; -+ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0x7f800000; -+ *((int*)& __m128_op0[1]) = 0x2d1da85b; -+ *((int*)& __m128_op0[0]) = 0x7f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fffffff; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7404443064403aec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000d6eefefc0498; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff7f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2d1da85b7f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x002d001dd6a8ee5b; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe7ffc8004009800; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_result[1]) = 0xb71289fdfbea3f69; -+ *((unsigned long*)& __m128i_result[0]) = 0x4e17c2ffb4851a40; -+ __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xda4643d5301c4000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc1fc0d3bf55c4000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100010000; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff7f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2d1da85b7f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000013d; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x40); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000700000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0014fff500000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f03000780000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f15000a7f010101; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000013d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0006000200000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; -+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; -+ __m256d_out = __lasx_xvflogb_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00100010; -+ *((int*)& __m128_op0[2]) = 0x00030000; -+ *((int*)& __m128_op0[1]) = 0x00060002; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& 
__m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfrint_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100010000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100010080; -+ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000750500006541; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000100fffffefd; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0110000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0110000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0110000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0110000000000080; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0110000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0110000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0110000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0110000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0110000000000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0110000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0110000000000004; -+ *((unsigned long*)& __m256i_result[0]) = 0x0110000000000080; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0006000200000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7505445465593af1; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff0000000000080; -+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0000001a; -+ *((int*)& __m128_op0[2]) = 0xfffffff7; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001afffffff7; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000750500006541; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000100fffffefd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0xfff0000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000080; -+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002400180004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000024; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x7fffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff00000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7fffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7e00fe0000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00060001fffe8003; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00001000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00001000; -+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7505445465593af1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xf000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0x00001000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0x00001000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x1000000000000000; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; -+ *((int*)& __m256_result[7]) = 0xc6000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0xc6000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000024; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000024; -+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc0ff81000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000600000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffc0ff81000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc600000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc600000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x6300000000000000; -+ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xc600000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xc600000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000750500006541; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000100fffffefd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ 
*((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1c80780000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1c80780000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f80780000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x22); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x6b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc0ff81000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff0ffe04000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1090918800000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1090918800000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1c80780000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1c80780000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1c80780000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1c80780000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000400000204010; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x1fe01e0100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf000f00000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf000f00000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xf000f00000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xf000f00000000001; -+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f0000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffffff0ffe04000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001fc0000; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1fe01e0100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1fe01e0100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000400000204010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000020000010200; -+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff0ffe04000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x3f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf000f00000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf000f00000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x6300000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xf000f00000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x6300000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xf000f00000000001; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x41); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x00000001; -+ *((int*)& __m256_op0[5]) = 0x00000001; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0x00000001; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x39); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ 
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffff0ffe04000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_w(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000040004000100; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001fc0000; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffc00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffc00; -+ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffc00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xce7ffffffffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xce7ffffffffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x6300000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002010; -+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; -+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000002010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001fbdff0; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; -+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xce7ffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xce7ffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x327f010101010102; -+ *((unsigned long*)& __m256i_result[2]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x327f010101010102; -+ *((unsigned long*)& __m256i_result[0]) = 0x6300000000000000; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x22); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7fc00000; -+ *((int*)& __m128_result[2]) = 0x7fc00000; -+ *((int*)& __m128_result[1]) = 0x7fc00000; -+ *((int*)& __m128_result[0]) = 0x7fc00000; -+ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0x327f0101; -+ *((int*)& __m256_op0[6]) = 0x01010102; -+ *((int*)& __m256_op0[5]) = 0x63000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x327f0101; -+ *((int*)& __m256_op0[2]) = 0x01010102; -+ *((int*)& __m256_op0[1]) = 0x63000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xce7fffff; -+ *((int*)& __m256_op1[6]) = 0xfffffffe; -+ *((int*)& __m256_op1[5]) = 0x63000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xce7fffff; -+ *((int*)& __m256_op1[2]) = 0xfffffffe; -+ *((int*)& __m256_op1[1]) = 0x63000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x327f010101010102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x327f010101010102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff4; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff4; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff4; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff4; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xce7ffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xce7ffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff39ffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff39ffffff; -+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x5e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0400040004000400; -+ *((unsigned long*)& __m128d_result[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m128d_result[0]) = 0x0400040004000400; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000040004000100; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0x39ffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0x39ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7f800000; -+ *((int*)& __m128_op0[2]) = 0x7f800000; -+ *((int*)& __m128_op0[1]) = 0x7f800000; -+ *((int*)& __m128_op0[0]) = 0x7f800000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400;
-+ unsigned_int_result = 0x0000000000000400;
-+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
-+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b;
-+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
-+ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_hu(__m128i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6300000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x6300000000000001;
-+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x45d5555545d55555;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x74555555e8aaaaaa;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x45d5555545d55555;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x74555555e8aaaaaa;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff39ffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff39ffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010;
-+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0404040404040404;
-+ __m128i_out = __lsx_vxori_b(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_result[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_result[0]) = 0x55555555aaaaaaac;
-+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff39ffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff39ffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010;
-+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x5555555536aaaaac;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff39ffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff39ffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x2b2b2b2b1bd5d5d6;
-+ *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2af2d5d5d6;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2b2b2b2b1bd5d5d6;
-+ *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2af2d5d5d6;
-+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256d_op2[3]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256d_op2[2]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256d_op2[1]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256d_op2[0]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000aaaa0000aaaa;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000aaaa00008bfe;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000aaaa0000aaaa;
-+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x2b2b2b2b1bd5d5d6;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x2a2a2a2af2d5d5d6;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x2b2b2b2b1bd5d5d6;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x2a2a2a2af2d5d5d6;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002a0000002a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002a0000002a;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff2ffffffd5;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffd5ffffffd6;
-+ __m256i_out = __lasx_vext2xv_w_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x55555555;
-+ *((int*)& __m256_op0[6]) = 0x36aaaaac;
-+ *((int*)& __m256_op0[5]) = 0x55555555;
-+ *((int*)& __m256_op0[4]) = 0xaaaaaaac;
-+ *((int*)& __m256_op0[3]) = 0x55555555;
-+ *((int*)& __m256_op0[2]) = 0x36aaaaac;
-+ *((int*)& __m256_op0[1]) = 0x55555555;
-+ *((int*)& __m256_op0[0]) = 0xaaaaaaac;
-+ *((unsigned long*)& __m256i_result[3]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x5555555580000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x5555555580000000;
-+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000010;
-+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000001fffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000001fffe;
-+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x5555555580000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x5555555580000000;
-+ int_result = 0x0000000055555555;
-+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x5);
-+ *((int*)& __m128_op0[3]) = 0xc1bdceee;
-+ *((int*)& __m128_op0[2]) = 0x242070db;
-+ *((int*)& __m128_op0[1]) = 0xe8c7b756;
-+ *((int*)& __m128_op0[0]) = 0xd76aa478;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_op2[3]) = 0x00000000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x00000000;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000010;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff800000000000;
-+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x5555555580000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x5555555580000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x555555553f800000;
-+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x55555555;
-+ *((int*)& __m256_op0[6]) = 0x3f800000;
-+ *((int*)& __m256_op0[5]) = 0x55555555;
-+ *((int*)& __m256_op0[4]) = 0x80000000;
-+ *((int*)& __m256_op0[3]) = 0x55555555;
-+ *((int*)& __m256_op0[2]) = 0x3f800000;
-+ *((int*)& __m256_op0[1]) = 0x55555555;
-+ *((int*)& __m256_op0[0]) = 0x80000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x0001fffe;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x0001fffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x0000aaaa;
-+ *((int*)& __m256_op0[6]) = 0x00008bfe;
-+ *((int*)& __m256_op0[5]) = 0x0000aaaa;
-+ *((int*)& __m256_op0[4]) = 0x0000aaaa;
-+ *((int*)& __m256_op0[3]) = 0x0000aaaa;
-+ *((int*)& __m256_op0[2]) = 0x00008bfe;
-+ *((int*)& __m256_op0[1]) = 0x0000aaaa;
-+ *((int*)& __m256_op0[0]) = 0x0000aaaa;
-+ *((unsigned long*)& __m256d_result[3]) = 0x3795554000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x37917fc000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x3795554000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x37917fc000000000;
-+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff5556aaaa;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff5556aaaa;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0006ffff0004ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0006ffff0004ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0006ffff0004ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00020000aaa95556;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0006ffff0004ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00020000aaa95556;
-+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc;
-+ __m128i_out = __lsx_vslli_h(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x2b2b2b2b1bd68080;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x2a2ad4d4f2d8807e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x2b2b2b2b1bd68080;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x2a2ad4d4f2d8807e;
-+ *((unsigned long*)& __m256i_result[3]) = 0xd4d5d4d5e42a7f80;
-+ *((unsigned long*)& __m256i_result[2]) = 0xd5d62b2c0d287f82;
-+ *((unsigned long*)& __m256i_result[1]) = 0xd4d5d4d5e42a7f80;
-+ *((unsigned long*)& __m256i_result[0]) = 0xd5d62b2c0d287f82;
-+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xfffffffc;
-+ *((int*)& __m256_op0[4]) = 0x5556aaa8;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xfffffffc;
-+ *((int*)& __m256_op0[0]) = 0x5556aaa8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x7f800000;
-+ *((int*)& __m128_result[2]) = 0x7f800000;
-+ *((int*)& __m128_result[1]) = 0x7f800000;
-+ *((int*)& __m128_result[0]) = 0x7f800000;
-+ __m128_out = __lsx_vfrecip_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000055;
-+ *((int*)& __m256_op0[6]) = 0x36aaaaac;
-+ *((int*)& __m256_op0[5]) = 0x55555555;
-+ *((int*)& __m256_op0[4]) = 0xaaaaaaac;
-+ *((int*)& __m256_op0[3]) = 0x00000055;
-+ *((int*)& __m256_op0[2]) = 0x36aaaaac;
-+ *((int*)& __m256_op0[1]) = 0x55555555;
-+ *((int*)& __m256_op0[0]) = 0xaaaaaaac;
-+ *((int*)& __m256_op1[7]) = 0x00060000;
-+ *((int*)& __m256_op1[6]) = 0x00040000;
-+ *((int*)& __m256_op1[5]) = 0x00025555;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00060000;
-+ *((int*)& __m256_op1[2]) = 0x00040000;
-+ *((int*)& __m256_op1[1]) = 0x00025555;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffc5556aaa8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffc5556aaa8;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000007070205;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002020100;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000007070205;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002020100;
-+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0002555500000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0002555500000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffdaaaaffffffff;
-+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0002555500000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0002555500000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0002555400000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0002555400000000;
-+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000005536aaaaac;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000005536aaaaac;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0002555400000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0002555400000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000005554;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xaaaa0000aaacfffe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000005554;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xaaaa0000aaacfffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000054;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00aa000000ac00fe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000054;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00aa000000ac00fe;
-+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000005536aaaaac;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000005536aaaaac;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000060102150101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000060102150101;
-+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_w(__m128i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x6664666466646664;
-+ *((unsigned long*)& __m128i_result[0]) = 0x6664666466646664;
-+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x66);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000054;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00aa000000ac00fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000054;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00aa000000ac00fe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0002a80000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0002b0000003f800;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0002a80000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0002b0000003f800;
-+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_w(__m128i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000005536aaaaac;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000005536aaaaac;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000005136aaaaa8;
-+ *((unsigned long*)& __m256i_result[2]) = 0x55515551aaaaaaa8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000005136aaaaa8;
-+ *((unsigned long*)& __m256i_result[0]) = 0x55515551aaaaaaa8;
-+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00aa000000ac00fe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00aa000000ac00fe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff00000000000000;
-+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x17);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xff00000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008;
-+ __m128i_out = __lsx_vfclass_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3f2c678e38d1104c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
-+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff5556aaaa;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff5556aaaa;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrne_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001;
-+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
-+ __m256i_out = __lasx_xvclz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfe7ffffffeffffc0;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfe7ffffffeffffc0;
-+ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
-+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0002555500000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0002555500000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1818181818181818;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1818181818181818;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1818181818181818;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1818181818181818;
-+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
-+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) =
0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffe0000fffe0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffe0000fffe0000; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0002555500000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002555500000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x3b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0007000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0007000000000000; -+ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x40000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x40000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; -+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000060000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000060000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000ff00fe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00ff; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextl_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00ff; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe1; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x5980000000000000; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000016600000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000016600000000; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000016600000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000016600000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000016600000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000016600000000; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000060000; -+ *((unsigned long*)& __m256i_op1[2]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000060000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000060000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000060000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x0000fffe; -+ *((int*)& __m128_op0[0]) = 0x0000ffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x5980000000000000; -+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00060000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00060000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000166; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000166; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x555555553f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x555555553f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00555555553f8000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00555555553f8000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe0000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001fffe; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x59800000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x59800000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x59800000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x59800000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2c27000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x2c27000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ 
*((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00fe00ff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x5900000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x5900000000000000; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x59800000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x59800000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x0eb7aaaa; -+ *((int*)& __m256_op1[6]) = 0xa6e6ac80; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x0eb7aaaa; -+ *((int*)& __m256_op1[2]) = 0xa6e6ac80; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x353bb67af686ad9b; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x353bb67af686ad9b; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_result[2]) = 0x5982000200020002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_result[0]) = 0x5982000200020002; -+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x353bb67af686ad9b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x353bb67af686ad9b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0200000200000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2c27000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0200000200000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2c27000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1cfd000000000000; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0200000200000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2c27000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0200000200000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2c27000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x41d6600000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x41d6600000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1cfd000000000000; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x59800000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x59800000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x41d66000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x41d66000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x41d6600000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x41d6600000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x41d6600000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x41d6600000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7fffffffffffffff; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1cfd000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x56a09e662ab46b31; -+ *((unsigned long*)& __m128i_op0[0]) = 0xb4b8122ef4054bb3; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x56a09e662ab46b31; -+ *((unsigned long*)& __m128i_result[0]) = 0xb4b8122ef4054bb3; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000400000001; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000400000001; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x56a09e662ab46b31; -+ *((unsigned long*)& __m128i_op1[0]) = 0xb4b8122ef4054bb3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x02b504f305a5c091; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[2]) = 0x6aeaeaeaeaeaeaea; -+ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[0]) = 0x6aeaeaeaeaeaeaea; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x02b504f305a5c091; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x02b504f305a5c091; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000005602d2; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x00000000000000ac; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6aeaeaeaeaeaeaea; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6aeaeaeaeaeaeaea; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_d(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x3c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000fe00ff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010;
-+ __m256i_out = __lasx_xvclo_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
-+ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x0000ffff;
-+ *((int*)& __m256_op0[4]) = 0x0000ffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x0000ffff;
-+ *((int*)& __m256_op0[0]) = 0x0000ffff;
-+ *((int*)& __m256_op1[7]) = 0x0eb7aaaa;
-+ *((int*)& __m256_op1[6]) = 0xa6e6ac80;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x0eb7aaaa;
-+ *((int*)& __m256_op1[2]) = 0xa6e6ac80;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff01ff01;
-+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
-+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000101fd01fe;
-+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000101fd01fe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
-+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0001000101fd01fe;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0020000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0020000000000000;
-+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x4b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x73);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0020000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0020000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffff01ff01;
-+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0020000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
-+ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe;
-+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff02;
-+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff02;
-+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000101fd01fe;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff80ff80ff80ff80;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff80ff8080008000;
-+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000101fd01fe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff80ff80ff80ff80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff80ff8080008000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000101fd01fe;
-+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe;
-+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000001fe;
-+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_result[3]) = 0xff800000;
-+ *((int*)& __m128_result[2]) = 0xff800000;
-+ *((int*)& __m128_result[1]) = 0xffffffff;
-+ *((int*)& __m128_result[0]) = 0xffffffff;
-+ __m128_out = __lsx_vflogb_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001400000014;
-+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001fe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x3c600000ff800000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xfffffffffffffffe;
-+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001400000014;
-+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xfffffffe;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0xffffff02;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0011001100110011;
-+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vneg_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000014;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x04000400fbfffb02;
-+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x3c600000ff800000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000;
-+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3c600000ff800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0f180000ffe00000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0f180000ffe00000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3c5fffffff7fffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffeff00feff;
-+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xff01ff01;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_op2[3]) = 0xffffffff;
-+ *((int*)& __m128_op2[2]) = 0xffffffff;
-+ *((int*)& __m128_op2[1]) = 0xffffffff;
-+ *((int*)& __m128_op2[0]) = 0xff01ff01;
-+ *((int*)& __m128_result[3]) = 0xffffffff;
-+ *((int*)& __m128_result[2]) = 0xffffffff;
-+ *((int*)& __m128_result[1]) = 0xffffffff;
-+ *((int*)& __m128_result[0]) = 0x7f01ff01;
-+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0xc39fffff007fffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00fd;
-+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x7f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3c5fffffff7fffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffeff00feff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x03fc03fc03f803f8;
-+ *((unsigned long*)& __m256i_result[2]) = 0x03fc03fc03f803f8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x03fc03fc03f803f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000013ffffffec;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000013ffffebd8;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000013ffffffec;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000013ffffebd8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x03fc03fc03f803f8;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x03fc03fc03f803f8;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x03fc03fc03f803f8;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xc39fffff007fffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00fd;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffff0e700000000;
-+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x32);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xc39fffff007fffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00fd;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0e7ffffc01fffffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000003f803f4;
-+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000;
-+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0e7ffffc01fffffc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000003f803f4;
-+ *((unsigned long*)& __m128i_result[1]) = 0x1000000010000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100100000;
-+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0e7ffffc01fffffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000003f803f4;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0e7ffffc01fffffc;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000003f803f4;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0e7ffffc01fffffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001003f803f4;
-+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7be2468acf15f39c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
-+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xc39fffff007fffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00fd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x61cf003f0000007f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000003c607f80;
-+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000013ffffffec;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000013ffffebd8;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000013ffffffec;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000013ffffebd8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffec;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffebd8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffec;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffebd8;
-+ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff7f01ff01;
-+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x36);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x85);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffec;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffebd8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffec;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffebd8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffec;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffec;
-+ __m256i_out = __lasx_xvexth_w_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x78c00000ff000000;
-+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffec;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffec;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100100000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff1;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff1;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,-15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff1;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000;
-+ int_op1 = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff000000ff000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff000000ff000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff000000ff000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff000000ff000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000;
-+ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xfffffefefffffefe;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xfffffefe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000003ff000003ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d;
-+ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400;
-+ __m128i_out = __lsx_vreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000;
-+ *((unsigned long*)& __m128i_op1[1]) =
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000078c00000; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffec; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffec; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010000000100000; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff7f01ff01; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe03; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe03; -+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x0000000d; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xfffffe03; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xfffffe03; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100100000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x2000000020000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200200000; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x00000000b5207f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2000000020000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200200000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x6a57a30ff0000000; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x37); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 
0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x4f800000; -+ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000078c00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000078c00000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000078c00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xf7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000b5207f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x00000000b5207f80; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x00000000b5207f80; -+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000180100100000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000b5207f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001801b5307f80; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ 
*((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000078c00000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6a57a30ff0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000f0000000; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff01fffffffeff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff01fffffffeff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff01fffffffeff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff01fffffffeff; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000f0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000b5207f80; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000400; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128d_op0[0]) = 0x6a57a30ff0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x6a57a30ff0000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00001801f0307f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001801f0307f80; -+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000f0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6a57a30ff0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe01fe01fe; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ 
*((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; -+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000d000d000d000d; -+ *((unsigned long*)& __m128i_result[0]) = 0x000d000d000d000d; -+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000400; -+ *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000d000d000d000d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000d000d000d000d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000680000006800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x2d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe00000000; -+ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m128i_result[0]) = 0x040004000400040d; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m128i_op0[0]) = 0x040004000400040d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0501050105010501; -+ *((unsigned long*)& __m128i_result[0]) = 0x050105010501050c; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m128i_op0[0]) 
= 0x040004000400040d;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x040004000400040d;
-+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x05010501;
-+ *((int*)& __m128_op1[2]) = 0x05010501;
-+ *((int*)& __m128_op1[1]) = 0x05010501;
-+ *((int*)& __m128_op1[0]) = 0x0501050c;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffc0000fffc0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0002000200020002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002;
-+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000400;
-+ __m128i_out = __lsx_vreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x6a57a30ff0000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000400;
-+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010;
-+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1410141014101410;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1410141014101410;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1410141014101410;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1410141014101410;
-+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000040d;
-+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000040d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000040d;
-+ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000040d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000400;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000040d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004;
-+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xcc);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000040d;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x25);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x33);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fff3;
-+ __m128i_out = __lsx_vneg_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000006;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006;
-+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x21f32eafa486fd38;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x407c2ca3d3430357;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x21f32eaf5b7a02c8;
-+ *((unsigned long*)& __m128i_result[0]) = 0x407c2ca32cbd0357;
-+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00010001;
-+ *((int*)& __m256_op1[6]) = 0x00010001;
-+ *((int*)& __m256_op1[5]) = 0x00010001;
-+ *((int*)& __m256_op1[4]) = 0x00010001;
-+ *((int*)& __m256_op1[3]) = 0x00010001;
-+ *((int*)& __m256_op1[2]) = 0x00010001;
-+ *((int*)& __m256_op1[1]) = 0x00010001;
-+ *((int*)& __m256_op1[0]) = 0x00010001;
-+ *((int*)& __m256_result[7]) = 0x00010001;
-+ *((int*)& __m256_result[6]) = 0x00010001;
-+ *((int*)& __m256_result[5]) = 0x00010001;
-+ *((int*)& __m256_result[4]) = 0x00010001;
-+ *((int*)& __m256_result[3]) = 0x00010001;
-+ *((int*)& __m256_result[2]) = 0x00010001;
-+ *((int*)& __m256_result[1]) = 0x00010001;
-+ *((int*)& __m256_result[0]) = 0x00010001;
-+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x21f32eaf5b7a02c8;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x407c2ca32cbd0357;
-+ *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b;
-+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fff3;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000040d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010400;
-+ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00010400;
-+ *((int*)& __m128_op1[3]) = 0x10f917d7;
-+ *((int*)& __m128_op1[2]) = 0x2d3d01e4;
-+ *((int*)& __m128_op1[1]) = 0x203e16d1;
-+ *((int*)& __m128_op1[0]) = 0x16de012b;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x21f32eaf;
-+ *((int*)& __m128_op0[2]) = 0x5b7a02c8;
-+ *((int*)& __m128_op0[1]) = 0x407c2ca3;
-+ *((int*)& __m128_op0[0]) = 0x2cbd0357;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00010400;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x887c8beb969e00f2;
-+ *((unsigned long*)& __m128i_result[0]) = 0x101f8b680b6f8095;
-+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000040d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff0008ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff0008ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000017d7000001e4;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000016d10000012b;
-+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x887c8beb;
-+ *((int*)& __m128_op0[2]) = 0x969e00f2;
-+ *((int*)& __m128_op0[1]) = 0x101f8b68;
-+ *((int*)& __m128_op0[0]) = 0x0b6f8095;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b;
-+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000800080008000;
-+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfa31dfa21672e711;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1304db85e468073a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x80008000;
-+ *((int*)& __m256_op0[6]) = 0x80008000;
-+ *((int*)& __m256_op0[5]) = 0x80008000;
-+ *((int*)& __m256_op0[4]) = 0x80008000;
-+ *((int*)& __m256_op0[3]) = 0x80008000;
-+ *((int*)& __m256_op0[2]) = 0x80008000;
-+ *((int*)& __m256_op0[1]) = 0x80008000;
-+ *((int*)& __m256_op0[0]) = 0x80008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfa31dfa21672e711;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1304db85e468073a;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x887c8beb969e00f2;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x101f8b680b6f8095;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7582ed22cb1c6e12;
-+ *((unsigned long*)& __m128i_result[0]) = 0x35aaa61c944f34c2;
-+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b;
-+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fff;
-+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vclo_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff51cf8da;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffd6040188;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000101fffff8b68;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000b6fffff8095;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffff51cffffd604;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
-+ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_w(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x0000101f;
-+ *((int*)& __m128_op0[2]) = 0xffff8b68;
-+ *((int*)& __m128_op0[1]) = 0x00000b6f;
-+ *((int*)& __m128_op0[0]) = 0xffff8095;
-+ *((int*)& __m128_op1[3]) = 0x10f917d7;
-+ *((int*)& __m128_op1[2]) = 0x2d3d01e4;
-+ *((int*)& __m128_op1[1]) = 0x203e16d1;
-+ *((int*)& __m128_op1[0]) = 0x16de012b;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x7f800000;
-+ *((int*)& __m128_result[2]) = 0x7f800000;
-+ *((int*)& __m128_result[1]) = 0x7f800000;
-+ *((int*)& __m128_result[0]) = 0x7f800000;
-+ __m128_out = __lsx_vfrecip_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00f900d7003d00e4;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003e00d100de002b;
-+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87;
-+ *((unsigned long*)& __m128i_result[1]) = 0xf51cf8dad6040188;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0982eadaf234ed87;
-+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00f900d7003d00e4;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003e00d100de002b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f4000007f040000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f0200007f020000;
-+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000101fffff8b68;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000b6fffff8095;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000b6fffff8095;
-+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff51cf8da;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffd6040188;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff01018888;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x50);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff51cf8da;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffd6040188;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00020002000d0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000020f2300ee;
-+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00020002000d0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000020f2300ee;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x7f4000007f040000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x7f0200007f020000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffff01018888;
-+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
-+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x7f400000;
-+ *((int*)& __m128_op0[2]) = 0x7f040000;
-+ *((int*)& __m128_op0[1]) = 0x7f020000;
-+ *((int*)& __m128_op0[0]) = 0x7f020000;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0x0014002c;
-+ *((int*)& __m128_op1[1]) = 0xfffefffe;
-+ *((int*)& __m128_op1[0]) = 0x003b0013;
-+ *((int*)& __m128_op2[3]) = 0x00000000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x00000000;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0xffffffff;
-+ *((int*)& __m128_result[2]) = 0x3ea5016b;
-+ *((int*)& __m128_result[1]) = 0xfffefffe;
-+ *((int*)& __m128_result[0]) = 0x3f6fb04d;
-+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff01018888;
-+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3ea5016b;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffe3f6fb04d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000d96f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffd83b;
-+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000002aaad555;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000002aaad555;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff00000000;
-+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000017e;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000017e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000017e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000017e;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101010108082626;
-+ *((unsigned long*)& __m128i_result[0]) = 0x01010101ffff7878;
-+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000145ad;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000300003e6e;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8da00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffff00ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00ffff00;
-+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x73);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xf51cf8dad6040188;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0982e2daf234ed87;
-+ *((unsigned long*)& __m128i_result[1]) = 0xf51cf8dad6040188;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0982e2daf234ed87;
-+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00007fff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0202810102020202;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0202020202020202;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0202810102020202;
-+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7;
-+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffff8f8da00;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffff01018888;
-+ *((unsigned long*)& __m128d_op1[1])
= 0x000000003ea5016c; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfffefefd3f7027c5; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0ae3072529fbfe78; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0202020202020202; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0202810102020202; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0202810102020202; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x00007fff00000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x00007fff00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x00007fff00000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x00007fff00000000; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202810102020202; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202810102020202; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; -+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ 
__m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; -+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xf51df8dbd6050189; -+ *((unsigned long*)& __m128i_result[0]) = 0x0983e2dbf235ed87; -+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; -+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xf51df8db; -+ *((int*)& __m128_op0[2]) = 0xd6050189; -+ *((int*)& __m128_op0[1]) = 0x0983e2db; -+ *((int*)& __m128_op0[0]) = 0xf235ed87; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0x3ea5016b; -+ *((int*)& __m128_op1[1]) = 0xfffefffe; -+ *((int*)& __m128_op1[0]) = 0x3f6fb04d; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x4000400000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000040004000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffff00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00ffff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffe000000f6; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe000000f6; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x01010101ffffff00; -+ *((unsigned long*)& __m128i_result[0]) = 0x01010101000000f6; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202810102020202; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202810102020202; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fefe0000fefe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff0000fefe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fefe0000fefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff0000fefe; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfe00fe00fe00fd01; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fffefe0100f6; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0100010000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100010000010000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000003f0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000003f0000; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0100010000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0100010000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffff0000010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfe00fe00fe00fd01; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fffefe0100f6; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff0001ffffff0a; -+ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff700000009; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ int_op1 = 0x0000000000000400; -+ *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[2]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[0]) = 0x003f003f003f003f; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000003f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000003f0; -+ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f003f; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[2]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[0]) = 0x003f003f003f003f; -+ __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xfffffff7; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d5d55; -+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x5d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op2[2]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op2[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op2[0]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000017e; -+ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000008; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x8f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01; -+ *((unsigned long*)& __m128i_result[1]) = 0xfc01fd13fc02fe0c; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe00fd14fe01fd16; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000003f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000003f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x30); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd13fc02fe0c; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd14fe01fd16; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe00fd1400010000; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; -+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff0001ffffff0a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000101; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100ff010101f6; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003f0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003f0000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd1400010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d5d55; -+ *((unsigned long*)& __m128i_result[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe00fcfffe21fd01; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfe00fcfffe01fd01; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfe00fd1400010000; -+ *((unsigned long*)& __m128d_op2[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128d_op2[0]) = 0xfe00fcfffe01fd01; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; -+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; 
-+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffff0000010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xabff54f1ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa5f7458b000802ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fff7fc01; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5d5d5d5d5d5d5d55; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d005d5d5d55; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000017e; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000005e02; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000005e02; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000005e02; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000005e02; -+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffff700000009; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfffffff700000009; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = 
__lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xabff54e911f71b07; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa9ec4882f216ea11; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fcfffe01fd01; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xaa0051e90ff91808; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd1400010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff7fc01; -+ *((unsigned long*)& __m128i_result[1]) = 0xfe00fe8980000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff007e8a7ffc7e00; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) 
= 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x01ff000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x01ff000000000000; -+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x80000000fff7fc01; -+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003effe1; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003effe1; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000003effe1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000003effe1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000005e02; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000005e02; -+ *((unsigned long*)& __m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[2]) = 0xc2c2c2c2c2c29cc0; -+ *((unsigned long*)& __m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[0]) = 0xc2c2c2c2c2c29cc0; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xc2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000005e02; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000005e02; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c29cc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c29cc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000; -+ *((unsigned long*)& __m128i_result[1]) = 0xc72ef153fc02fdf7; -+ *((unsigned long*)& __m128i_result[0]) = 0xca31bf15fd010000; -+ __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f0000007f000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080000180800100; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff7fc01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x80000000fff6fc00; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000080000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x80000000fff6fc00; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000080000000; -+ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff6fc00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f0000007f000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080000180800100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff00ffff; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffc01; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffc01; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256d_op0[2]) = 0xc2c2c2c2c2c29cc0; -+ *((unsigned long*)& __m256d_op0[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256d_op0[0]) = 0xc2c2c2c2c2c29cc0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op2[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op2[1]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op2[0]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op1[1]) = 0xb55ccf30f52a6a68; -+ *((unsigned long*)& __m128d_op1[0]) = 0x4e0018eceb82c53a; -+ *((unsigned long*)& __m128d_result[1]) = 0x355ccf30f52a6a68; -+ *((unsigned long*)& __m128d_result[0]) = 0xce0018eceb82c53a; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff000000fefb0000; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x82c53a0000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc72ef153fc02fdf7; -+ *((unsigned long*)& __m128i_result[1]) = 0x007d00c500ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0038000e0003ff03; -+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80000000; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000000; -+ *((int*)& __m256_result[1]) = 0x80000000; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x82c53a0000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc72ef153fc02fdf7; -+ *((unsigned long*)& __m128i_result[1]) = 0x82c539ffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xc72df14afbfafdf9; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1716151416151413; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1514131214131211; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff3fff3fff3fff3; -+ __m128i_out = __lsx_vmini_h(__m128i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_result[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_result[1]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_result[0]) = 0xe161616161614e60; -+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000aaaaaaaa; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000aaab555b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000aaaaaaaa; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000aaab555b; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007d00c50177ac5b; -+ *((unsigned long*)& __m128i_op0[0]) = 0xac82aa88a972a36a; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000c5ac01015b; -+ *((unsigned long*)& __m128i_result[0]) = 0xaaacac88a3a9a96a; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x7c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslli_h(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7d3ac60000000000; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op1[1]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000061; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000061; -+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffff800fffff800; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffff800fffff800; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffff800fffff800; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffff800fffff800; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x82c539ffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc72df14afbfafdf9; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x82c539ffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fbf83468; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fbf83468; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7d3ac60000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007d3ac600; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 
0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xfffefff6; -+ *((int*)& __m128_op0[0]) = 0xfff80002; -+ *((int*)& __m128_op1[3]) = 0x000000c5; -+ *((int*)& __m128_op1[2]) = 0xac01015b; -+ *((int*)& __m128_op1[1]) = 0xaaacac88; -+ *((int*)& __m128_op1[0]) = 0xa3a9a96a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256d_result[3]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256d_result[1]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x41d8585858400000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256d_op0[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256d_op0[1]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256d_op0[0]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007d3ac600; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x7); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffefff6fff80002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x08fdc221bfdb1927; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4303c67e9b7fb213; -+ *((unsigned long*)& __m128i_op1[1]) = 0x08fdc221bfdb1927; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4303c67e9b7fb213; -+ *((unsigned long*)& __m128i_result[1]) = 0x00100184017e0032; -+ *((unsigned long*)& __m128i_result[0]) = 0x0086018c01360164; -+ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; -+ *((unsigned 
long*)& __m128i_op1[1]) = 0x000000c5ac01015b; -+ *((unsigned long*)& __m128i_op1[0]) = 0xaaacac88a3a9a96a; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; -+ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00100184017e0032; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0086018c01360164; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffff33c4b1e67; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000800c0004300c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x08fdc221; -+ *((int*)& __m128_op0[2]) = 0xbfdb1927; -+ *((int*)& __m128_op0[1]) = 0x4303c67e; -+ *((int*)& __m128_op0[0]) = 0x9b7fb213; -+ *((int*)& __m128_op1[3]) = 0x0000800c; -+ *((int*)& __m128_op1[2]) = 0x0004300c; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x009500b10113009c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x009500b10113009c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000005d5d; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000005d5d; -+ *((unsigned long*)& __m128d_op1[1]) = 0x08fdc221bfdb1927; -+ *((unsigned long*)& __m128d_op1[0]) = 0x4303c67e9b7fb213; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffefff6fff80002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc1be9e9e9f000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x41d8585858400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1076000016160000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1610000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1076000016160000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1610000000000000; -+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000000000000; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x31); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000005d5d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000005d5d; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x014200c200c200ae; -+ *((unsigned long*)& __m256i_op0[2]) = 0x014200c200c200ae; -+ *((unsigned long*)& __m256i_op0[1]) = 0x014200c200c200ae; -+ *((unsigned long*)& __m256i_op0[0]) = 0x014200c200c200ae; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_result[2]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_result[1]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_result[0]) = 0xe161616161614f61; -+ __m256i_out = 
__lasx_xvbitset_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000005d5d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x41); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00ff00ff00ff00fe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0xffffffff; -+ *((int*)& __m128_result[0]) = 0xffffffff; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op1[1]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000616100004f61; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x1086658a18ba3594; -+ *((unsigned long*)& __m256i_op1[2]) = 0x160fe9f000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1086658a18ba3594; -+ *((unsigned long*)& __m256i_op1[0]) = 0x160fe9f000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x07a232640bfc1a73; -+ *((unsigned long*)& __m256i_result[2]) = 0x0a66f497ff9effa9; -+ *((unsigned long*)& __m256i_result[1]) = 0x07a232640bfc1a73; -+ *((unsigned long*)& __m256i_result[0]) = 0x0a66f497ff9effa9; -+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_result[3]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_result[2]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_result[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_result[0]) = 0xffc0ffc0ffc0ffc0; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1086658a18ba3594; -+ *((unsigned long*)& __m256i_op0[2]) = 0x160fe9f000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1086658a18ba3594; -+ *((unsigned long*)& __m256i_op0[0]) = 0x160fe9f000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op1[1]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614f61; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000616100004f61; -+ *((unsigned long*)& __m256i_result[3]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_result[2]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_result[1]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_result[0]) = 0x4df5b1a3ed5e02c1; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; -+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000c5ac01015b; -+ *((unsigned long*)& __m128i_op0[0]) = 0xaaacac88a3a9a96a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ffffff1e9e9e9e; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff9e9eb09e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ffffff1e9e9e9e; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff9e9eb09e; -+ *((unsigned long*)& __m256i_result[3]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_result[2]) = 0xffc00000ffc0ffc0; -+ *((unsigned long*)& __m256i_result[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_result[0]) = 0xffc00000ffc0ffc0; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256d_op1[2]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256d_op1[1]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256d_op1[0]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffc00000ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffc00000ffc0ffc0; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff90000fff9fff9; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; -+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000001300000013; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff0004ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff0004ff; -+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000e13; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000e13; -+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00ffffff1e9e9e9e; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff9e9eb09e; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00ffffff1e9e9e9e; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff9e9eb09e; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x66); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfrint_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffc00000ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffc00000ffc0ffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffcfee0fe00ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffcfee0fe00ffe0; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffffffffff; -+ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_b(__m128i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001fff9fff8; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; -+ *((unsigned 
long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; -+ *((unsigned long*)& __m256i_result[3]) = 0x081abb9d36ee1037; -+ *((unsigned long*)& __m256i_result[2]) = 0x1617eb17129bfd38; -+ *((unsigned long*)& __m256i_result[1]) = 0x081abb9d36ee1037; -+ *((unsigned long*)& __m256i_result[0]) = 0x1617eb17129bfd38; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fff9fff8; -+ *((unsigned long*)& __m256i_op1[3]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op1[1]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcfee0fe00ffe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffcfee0fe00ffe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffc0000fee0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fe000000ffe0; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0080001300000013; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0080001300000013; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0080001300000013; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080001300000013; -+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001300000013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff900000003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff900000003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x3f3f3f3900000003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x3f3f3f3900000003; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffc0000fee0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe000000ffe0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff900000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ffe00007f000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303; -+ __m128i_out = __lsx_vpcnt_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0052005200520052; -+ *((unsigned long*)& __m128i_result[0]) = 0x0052005200520052; -+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff900000003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff900000003; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffff0000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ 
*((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffff0000; -+ *((int*)& __m256_op1[7]) = 0xfffefffe; -+ *((int*)& __m256_op1[6]) = 0xfffefffe; -+ *((int*)& __m256_op1[5]) = 0xfffefffe; -+ *((int*)& __m256_op1[4]) = 0xfffefffe; -+ *((int*)& __m256_op1[3]) = 0xfffefffe; -+ *((int*)& __m256_op1[2]) = 0xfffefffe; -+ *((int*)& __m256_op1[1]) = 0xfffefffe; -+ *((int*)& __m256_op1[0]) = 0xfffefffe; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0xffff0000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0xffff0000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ffe00007f000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x1616161616161616; -+ *((unsigned long*)& __m256i_result[2]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ffe16167f161616; -+ *((unsigned long*)& __m256i_result[0]) = 0x161616167fffffff; -+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000000; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff7fff00007f00; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000100007fff; -+ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xcd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x79); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff017e01fe; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7fff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f007f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7fff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfff807f; -+ *((unsigned long*)& __m256i_result[1]) = 0xbf803fbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfff807f; -+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_result[0]) = 0x5252525252525252; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; -+ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3a2a3a2a3a2a3a2a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3a2a3a2a3aaa45aa; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3a553f7f7a2a3a2a; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3a2a3a2a3aaa45aa; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x1d949d949d949d95; -+ *((unsigned long*)& __m256i_result[2]) = 0x1d949d949e1423d4; -+ *((unsigned long*)& __m256i_result[1]) = 0x1de9a03f3dd41d95; -+ *((unsigned long*)& __m256i_result[0]) = 0x1d949d949e1423d4; -+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000003fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000003fffffff; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x1f3d2101; -+ *((int*)& __m256_op0[6]) = 0x1f3d2101; -+ *((int*)& __m256_op0[5]) = 0x1f3d2101; -+ *((int*)& __m256_op0[4]) = 0xd07dbf01; -+ *((int*)& __m256_op0[3]) = 0x9f1fd080; -+ *((int*)& __m256_op0[2]) = 0x1f3d2101; -+ *((int*)& __m256_op0[1]) = 0x1f3d2101; -+ *((int*)& __m256_op0[0]) = 
0xd07dbf01; -+ *((int*)& __m256_op1[7]) = 0x1d949d94; -+ *((int*)& __m256_op1[6]) = 0x9d949d95; -+ *((int*)& __m256_op1[5]) = 0x1d949d94; -+ *((int*)& __m256_op1[4]) = 0x9e1423d4; -+ *((int*)& __m256_op1[3]) = 0x1de9a03f; -+ *((int*)& __m256_op1[2]) = 0x3dd41d95; -+ *((int*)& __m256_op1[1]) = 0x1d949d94; -+ *((int*)& __m256_op1[0]) = 0x9e1423d4; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x8001b72e; -+ *((int*)& __m256_result[6]) = 0x0001b72e; -+ *((int*)& __m256_result[5]) = 0x8001b72e; -+ *((int*)& __m256_result[4]) = 0xaf12d5f0; -+ *((int*)& __m256_result[3]) = 0x00024763; -+ *((int*)& __m256_result[2]) = 0x9d9cb530; -+ *((int*)& __m256_result[1]) = 0x8001b72e; -+ *((int*)& __m256_result[0]) = 0xaf12d5f0; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffefffe; -+ *((int*)& __m256_op0[6]) = 0xfffefffe; -+ *((int*)& __m256_op0[5]) = 0xfffefffe; -+ *((int*)& __m256_op0[4]) = 0xfffefffe; -+ *((int*)& __m256_op0[3]) = 0xfffefffe; -+ *((int*)& __m256_op0[2]) = 0xfffefffe; -+ *((int*)& __m256_op0[1]) = 0xfffefffe; -+ *((int*)& __m256_op0[0]) = 0xfffefffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x017e017e01dd61de; -+ *((unsigned long*)& __m256d_op0[2]) = 0x5d637d043bc4fc43; -+ *((unsigned long*)& __m256d_op0[1]) = 0x01dcc2dce31bc35d; -+ *((unsigned long*)& __m256d_op0[0]) = 0x5e041d245b85fc43; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x5d637d043bc4fc43; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x5e041d245b85fc43; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op1[1]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_result[3]) = 0x1f9d9f9d1f9db29f; -+ *((unsigned long*)& __m256i_result[2]) = 0x1f9d9f9d201cb39e; -+ *((unsigned long*)& __m256i_result[1]) = 0x201c9f9d201cb29f; -+ *((unsigned long*)& __m256i_result[0]) = 0x1f9d9f9d201cb39e; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1616161616161616; -+ *((unsigned long*)& __m256i_op0[2]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe16167f161616; -+ *((unsigned long*)& __m256i_op0[0]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; -+ *((unsigned long*)& __m256i_result[2]) = 0x2c2c2c2cfefefefe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefc2c2cfe2c2c2c; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x2c2c2c2cfefefefe; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1616161616161616; -+ *((unsigned long*)& __m256i_op0[2]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe16167f161616; -+ *((unsigned long*)& __m256i_op0[0]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xc7c7c7c7c7c7c7c7; -+ *((unsigned long*)& __m256i_result[2]) = 0xc7c7c7c7ae2e2e2e; -+ *((unsigned long*)& __m256i_result[1]) = 0xae2fc7c7aec7c7c7; -+ *((unsigned long*)& __m256i_result[0]) = 0xc7c7c7c7ae2e2e2e; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xd1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1f9d9f9d1f9db29f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1f9d9f9d201cb39e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x201c9f9d201cb29f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1f9d9f9d201cb39e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007773; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003373; -+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; -+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dffbfff00000001; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0200400000000001; -+ unsigned_int_result = 0x0000000000000001; -+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007773; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003373; -+ *((unsigned long*)& __m256i_result[3]) = 0xbbbbbbbbbbbbbbbb; -+ *((unsigned long*)& __m256i_result[2]) = 0xbbbbbbbbbbbb8888; -+ *((unsigned long*)& __m256i_result[1]) = 0xbbbbbbbbbbbbbbbb; -+ *((unsigned long*)& __m256i_result[0]) = 0xbbbbbbbbbbbb8888; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x44); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000007773; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000003373; -+ *((unsigned long*)& __m256d_op1[3]) = 0x1616161616161616; -+ *((unsigned long*)& __m256d_op1[2]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x7ffe16167f161616; -+ *((unsigned long*)& __m256d_op1[0]) = 0x161616167fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x2c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007773; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003373; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0800000008000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0800000008000000; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100010001fffe; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffffff; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x1); -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff2fffffff2; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff2fffffff2; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff2fffffff2; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff2fffffff2; -+ __m256i_out = __lasx_xvmini_w(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) 
= 0x8000000080000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; -+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000020002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000020002; -+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe1616161e1614e60; -+ *((unsigned long*)& __m256i_result[3]) = 0x0703030307030203; -+ *((unsigned long*)& __m256i_result[2]) = 0x0703030307030203; -+ *((unsigned long*)& __m256i_result[1]) = 0x0703030307030203; -+ *((unsigned long*)& __m256i_result[0]) = 0x0703030307030203; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x003fffffff000000; -+ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000020002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303; -+ __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00005555aaabfffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffffff000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ab; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0003000300030003; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000700020005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0003000300030003; -+ *((unsigned long*)& __m128i_result[0]) = 0x0003000700020005; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000100000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0003000300030003; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0003000700020005; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfrint_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000017e007ffe02; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100010001fffd; -+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0080000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf6f6f6f6f6f6f6f6; -+ *((unsigned long*)& __m256i_result[2]) = 0xf6f6f6f6f6f6f6f6; -+ *((unsigned long*)& __m256i_result[1]) = 0xf6f6f6f6f6f6f6f6; -+ *((unsigned long*)& __m256i_result[0]) = 0xf6f6f6f6f6f6f6f6; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00ff00ff; -+ *((int*)& __m256_op0[6]) = 0x00ff00ff; -+ *((int*)& __m256_op0[5]) = 0x00ff00ff; -+ *((int*)& __m256_op0[4]) = 0x017e01fe; -+ *((int*)& __m256_op0[3]) = 0x017e00ff; -+ *((int*)& __m256_op0[2]) = 0x017e00ff; -+ *((int*)& __m256_op0[1]) = 0x00ff00ff; -+ *((int*)& __m256_op0[0]) = 0x017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); 
-+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe8001b72e0001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xb72e8001b72eaf12; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe000247639d9c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb5308001b72eaf12; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffb7ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_result[1]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_result[0]) = 0xffb5ff80ffd0ffd8; -+ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vexth_du_wu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8001b72e0001b72e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8001b72eaf12d5f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000247639d9cb530; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8001b72eaf12d5f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0xff81ffe50001ffe5; -+ *((unsigned long*)& __m256i_result[2]) = 0xff81ffe5ffa6ffc6; -+ *((unsigned long*)& __m256i_result[1]) = 0x000200aafe9affe5; -+ *((unsigned long*)& __m256i_result[0]) = 0xff81ffe5ffa6ffc6; -+ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01fe8001b72e0001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xb72e8001b72eaf12; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01fe000247639d9c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xb5308001b72eaf12; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff017e00ff; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x017e00ff017e01fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff017e00ff; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffff000100000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff000100000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000100000000; -+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e01fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xb70012c4b714fc1e; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff017e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fe02b71c199d; -+ *((unsigned long*)& __m256i_result[0]) = 0x017e017e00ff017e; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe8001b72e0001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xb72e8001b72eaf12; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe000247639d9c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb5308001b72eaf12; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x26); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007fffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007fffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000100010001fffd; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000100010; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000100010; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((int*)& __m128_op2[3]) = 0x00307028; -+ *((int*)& __m128_op2[2]) = 0x003f80b0; -+ *((int*)& __m128_op2[1]) = 0x0040007f; -+ *((int*)& __m128_op2[0]) = 0xff800000; -+ *((int*)& __m128_result[3]) = 0x80307028; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0x8040007f; -+ *((int*)& __m128_result[0]) = 0xffffffff; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& 
__m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; -+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xb70036db; -+ *((int*)& __m256_op1[6]) = 0x12c4007e; -+ *((int*)& __m256_op1[5]) = 0xb7146213; -+ *((int*)& __m256_op1[4]) = 0xfc1e0049; -+ *((int*)& __m256_op1[3]) = 0x000000fe; -+ *((int*)& __m256_op1[2]) = 0xfe02fffe; -+ *((int*)& __m256_op1[1]) = 0xb71c413b; -+ *((int*)& __m256_op1[0]) = 0x199d04b5; -+ *((int*)& __m256_op2[7]) = 0xb70036db; -+ *((int*)& __m256_op2[6]) = 0x12c4007e; -+ *((int*)& __m256_op2[5]) = 0xb7146213; -+ *((int*)& __m256_op2[4]) = 0xfc1e0049; -+ *((int*)& __m256_op2[3]) = 0x000000fe; -+ *((int*)& __m256_op2[2]) = 0xfe02fffe; -+ *((int*)& __m256_op2[1]) = 0xb71c413b; -+ *((int*)& __m256_op2[0]) = 0x199d04b5; -+ *((int*)& __m256_result[7]) = 0x370036db; -+ *((int*)& __m256_result[6]) = 0x92c4007e; -+ *((int*)& __m256_result[5]) = 0x37146213; -+ *((int*)& __m256_result[4]) = 0x7c1e0049; -+ *((int*)& __m256_result[3]) = 0x800000fe; -+ *((int*)& __m256_result[2]) = 0x7e02fffe; -+ *((int*)& __m256_result[1]) = 0x371c413b; -+ *((int*)& __m256_result[0]) = 0x999d04b5; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x80307028; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x8040007f; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0048007f002f0028; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 
0x004a007f002f0028; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0049ffd2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01620133004b0032; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_result[3]) = 0xc080ffff0049ffd2; -+ *((unsigned long*)& __m256i_result[2]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffeffb9ff9d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00010000002fff9e; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000020302030; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000020302030; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x43); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000020302030; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000020302030; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000100010; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0049ffd2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00630064004bffd0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x80307028ffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x8040007fffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00307028003f80b0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0040007fff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000003f80b0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; -+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000003f80b0; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff00ff00ffff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff000000ff00ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff00ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000ff00ff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xb327b9363c99d32e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa1e7b475d925730f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003f80b0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m128i_result[1]) = 0xb327b9363c992b2e; -+ *((unsigned long*)& __m128i_result[0]) = 0xa1e7b475d925730f; -+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8001b72e0001b72e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8001b72eaf12d5f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000247639d9cb530; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8001b72eaf12d5f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe056fd9d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffceba70; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003f80b0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xb327b9363c992b2e; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa1e7b475d925730f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000001ff00; -+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x370036db92c4007e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x371462137c1e0049; -+ *((unsigned long*)& __m256i_op0[1]) = 0x800000fe7e02fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x371c413b999d04b5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; -+ *((unsigned long*)& 
__m256i_op2[3]) = 0xffff00ff00ffff00; -+ *((unsigned long*)& __m256i_op2[2]) = 0xff000000ff00ff00; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffff00ffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xff00000000ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x37fe365b920d007e; -+ *((unsigned long*)& __m256i_result[2]) = 0x381462137d1e0149; -+ *((unsigned long*)& __m256i_result[1]) = 0x80ff00fe7e020060; -+ *((unsigned long*)& __m256i_result[0]) = 0x381c413b99cd04dd; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xb327b9363c992b2e; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa1e7b475d925730f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff3c992b2e; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x80307028ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8040007fffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0101ff010101; -+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ff00ffff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff000000ff00ff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffff00ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000180000000; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe5; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00307028003f80b0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0040007fff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffc0ffffff81; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff008000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0060e050007f0160; -+ *((unsigned long*)& __m128i_result[0]) = 0x0040007fff800000; -+ __m128i_out = 
__lsx_vsll_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007f8000007f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000003fc; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000003fc; -+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffff81ffffeb2f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f6ee0570b4e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000018de; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffb4ffcec0f1; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffff81ffffeb2f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f6ee0570b4e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000018de; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffb4ffcec0f1; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000001ffffeab0; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000e0574abc; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000018de; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001ffcec0a5; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0060e050007f0160; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0040007fff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00010000002fff9e; -+ *((int*)& __m256_result[7]) = 0x34000000; -+ *((int*)& __m256_result[6]) = 0xfff00000; -+ *((int*)& __m256_result[5]) = 0xfff6e000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x33800000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ 
*((int*)& __m256_result[1]) = 0x363c0000; -+ *((int*)& __m256_result[0]) = 0xfff3c000; -+ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003fc; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000003fc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0x3c992b2e; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffff730f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; -+ __m128i_out = __lsx_vfrintrz_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101017f0101017f; -+ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffb7ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffb5ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffb7ff80ffd0ffd8; -+ *((unsigned long*)& __m256i_result[1]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_result[0]) = 0xffb5ff80ffd0ffd8; -+ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000180000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc080ffff0049ffd2; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0002ff80ffb70000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000fffeffb9ff9d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00010000002fff9e; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffd2; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff8000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000080000000; -+ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x34000000fff00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff6e00000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3380000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x363c0000fff3c000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffb7146213; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffc1e0049; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffb71c413b; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf3317da580000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x34000000fff00000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff6e00000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3380000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x363c0000fff3c000; -+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x34000000fff00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff6e00000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3380000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x363c0000fff3c000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000030000000c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001100000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000500000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000010; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x1268f057137a0267; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0048137ef886fae0; -+ *((unsigned long*)& __m128i_result[1]) = 0xff000000ff00ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xff00ff0000000000; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff946c; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff946b; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3c992b2e; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff730f; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffff946c; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffff946b; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff946c; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffdffff946c; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1268f057137a0267; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0048137ef886fae0; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffffe2; -+ __m128i_out = __lsx_vsrai_w(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000f3; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000f3; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_op2[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op2[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op2[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_result[3]) = 0xd100645944100004; -+ *((unsigned long*)& __m256i_result[2]) = 0xd1908469108400d1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000404040104; -+ *((unsigned long*)& __m256i_result[0]) = 0xd1108199714910f9; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3c992b2e; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff730f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff3c992b2e; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; -+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x34000000fff00000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfff6e00000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x3380000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x363c0000fff3c000; -+ *((unsigned 
long*)& __m256d_op1[3]) = 0x000000030000000c; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000001100000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000500000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000800000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff000000ff00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0100000001000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100010000000000; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xd100645944100004; -+ *((unsigned long*)& __m256i_op0[2]) = 0xd1908469108400d1; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000404040104; -+ *((unsigned long*)& __m256i_op0[0]) = 0xd1108199714910f9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000004040104; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffd1108199; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000714910f9; -+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff732a; -+ *((unsigned long*)& __m128i_result[1]) = 0x807f7fff807f807f; -+ *((unsigned long*)& __m128i_result[0]) = 0x807f807f7fff3995; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; -+ long_int_result = 0x00000001ffffffff; -+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000004040104; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffd1108199; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000714910f9; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000000030000000c; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000001100000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000500000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000800000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff2; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff2; -+ __m128i_out = __lsx_vavgr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff000000ff00ff00; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000049ffffff4d; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff01ffffffff; -+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000001faea9ec; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000003; -+ *((int*)& __m256_op1[6]) = 0x0000000c; -+ *((int*)& __m256_op1[5]) = 0x00000011; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000005; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000008; -+ *((int*)& __m256_op1[0]) = 0x00000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000004040104; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffd1108199; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000714910f9; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffd10000006459; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000441000000004; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000040400000104; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffd10000000004; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffd1108199; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000104; -+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffe5; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffe5; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff732a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0100000001000100; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0100010000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ffffffffff; -+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1268f057137a0267; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0048137ef886fae0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; -+ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ffffffffff; -+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000002a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000015; -+ __m128i_out = __lsx_vavgr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffd10000006459; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000441000000004; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000040400000104; -+ *((unsigned long*)& __m256i_result[3]) = 0x0f0f0f0f0f0f6459; -+ *((unsigned long*)& __m256i_result[2]) = 0x0f0f44100f0f0f0f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0f0f0f0f0f0f0f0f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0f0f0f0f0f0f0f0f; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010001000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff00ff00ffffff; -+ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000049; -+ *((int*)& __m128_op0[2]) = 0x0000004d; -+ *((int*)& __m128_op0[1]) = 0x00000001; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& 
__m128_op1[1]) = 0x00000001; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000001; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x80000001; -+ *((int*)& __m128_result[0]) = 0xffffffff; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffff9; -+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000006; -+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000073; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000002a; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffff29; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff7f00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff007f0101017f; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000006; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffd10000006459; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000441000000004; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000040400000104; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000007fff01ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xdb8e209d0cce025a; -+ *((unsigned long*)& __m256i_result[3]) = 0x88888a6d0962002e; -+ *((unsigned long*)& __m256i_result[2]) = 0xdb8a3109fe0f0020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000007fff01fffb; -+ *((unsigned long*)& __m256i_result[0]) = 0xdb8e20990cce025a; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x88); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000002a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00ffffff00ff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdb801b6d0962003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0024; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000007fff01ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xdb8e209d0cce025a; -+ *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffcc8000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000000007dfdff4b; -+ *((unsigned long*)& __m256i_result[3]) = 0xdb801b6d0962003f; -+ *((unsigned long*)& __m256i_result[2]) = 0xdb8a3109fe0f0024; -+ *((unsigned long*)& __m256i_result[1]) = 0x9a7f997fff01ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xbe632a4f1c3c5653; -+ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00b7003600120000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00b7006200fc0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000fe00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00b7004100190004; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00b7003600120000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00b7006200fc0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000fe00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00b7004100190004; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024; -+ *((unsigned long*)& __m256i_op1[1]) = 0x9a7f997fff01ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbe632a4f1c3c5653; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffe54affffffd3; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffcfae000000d8; -+ *((unsigned long*)& __m256i_result[1]) = 0x00006681000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffd668ffffa9c6; -+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffcc8000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007dfdff4b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x003ffff300000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000001f7f7f; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000015; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff29; -+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000049000000c0; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffff29; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000100000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000c0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000020000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000183fffffe5; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff7f00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff007f0101017f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000020000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000183fffffe5; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000073; -+ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000002a; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff7f00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff007f0101017f; -+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000002b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000400000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007dfdff4b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff3400000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff83ff01; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff3400000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff83ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000183fffffe5; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000400000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000400000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[1]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[0]) = 0xbabababababababa; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xba); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x88888a6d0962002e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000007fff01fffb; -+ *((unsigned long*)& __m256i_op0[0]) = 0xdb8e20990cce025a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff3400000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff83ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0962002efe0f0020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01fffb8667012d; -+ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_op1[1]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xbabababababababa; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000c0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffff29; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000000000c0; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00000001ffffff29; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff2900000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdb801b6d0962003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0024; -+ *((unsigned long*)& __m256i_op0[1]) = 0x9a7f997fff01ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbe632a4f1c3c5653; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffff00ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xbabababababababa; -+ *((unsigned long*)& __m256d_op0[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256d_op0[1]) = 0xbabababababababa; -+ *((unsigned long*)& __m256d_op0[0]) = 0xbabababababababa; -+ *((unsigned long*)& __m256d_op1[3]) = 0x88888a6d0962002e; -+ *((unsigned long*)& __m256d_op1[2]) = 0xdb8a3109fe0f0020; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000007fff01fffb; -+ *((unsigned long*)& __m256d_op1[0]) = 0xdb8e20990cce025a; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000400000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff2900000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000401000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff2900000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xa41aa42e; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& 
__m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xa41aa42e; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffcc80; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x7dfdff4b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000005be55bd2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000401000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000800; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000800; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000800; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000800; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000401000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000401000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0080200000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000401000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000080000000000; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024; -+ *((unsigned long*)& __m256i_op1[1]) = 0x9a7f997fff01ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbe632a4f1c3c5653; -+ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_result[2]) = 0x2475cef801f0ffdd; -+ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_result[0]) = 0x419cd5b11c3c5654; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000401000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; -+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2475cef801f0ffdd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x419cd5b11c3c5654; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffcc8000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000007dfdff4b; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xbabababababababa; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xbabababababababa; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000005be55bd2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007dfdff4b; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2475cef801f0ffdd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x419cd5b11c3c5654; -+ *((unsigned long*)& __m256i_op1[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2475cef801f0ffdd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x419cd5b11c3c5654; -+ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_result[2]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_result[0]) = 0x6580668200fe0002; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = 
__lasx_xvpackev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff6; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2475cef801f0ffdd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x419cd5b11c3c5654; -+ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_result[2]) = 0x2475cef801f0ffdd; -+ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_result[0]) = 0x419cd5b11c3c5654; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff82037dfd0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; -+ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xbf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff6; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_result[2]) = 0x247fe49409620040; -+ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; -+ *((unsigned long*)& __m256i_result[0]) = 0x6580668200fe0002; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3f3f3f3f3f3f3f3f; -+ *((unsigned long*)& __m256i_result[2]) = 0x3f3f3f3f3f3f3f3f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000003f3f3f3f; -+ *((unsigned long*)& __m256i_result[0]) = 0x3f3f3f3f00000000; -+ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffff6; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffff6; -+ *((unsigned long*)& __m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f; -+ *((unsigned long*)& __m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000003f3f3f3f; -+ *((unsigned long*)& __m256i_op2[0]) = 0x3f3f3f3f00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_result[2]) = 0xc6c6c6c68787878a; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_result[0]) = 0x8787878a00000000; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0019081900190019; -+ *((unsigned long*)& __m128i_result[0]) = 0x0019081900190019; -+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffff0000; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x00007f7f7f7f0000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ int_op1 = 0x00000000000000ac; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; -+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe17cec8fe08008ac; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe0801f41e0800168; -+ *((unsigned long*)& __m256i_op1[3]) = 0x9240f24a84b18025; -+ *((unsigned long*)& __m256i_op1[2]) = 0x9240f24a84b18025; -+ *((unsigned long*)& __m256i_op1[1]) = 0xb2c0b341807f8006; -+ *((unsigned long*)& __m256i_op1[0]) = 0xb2c0b341807f8006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000012481e4950; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001658166830; -+ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x5b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f7f7f0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xf6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c0; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00190819; -+ *((int*)& __m128_op1[2]) = 0x00190019; -+ *((int*)& __m128_op1[1]) = 0x00190819; -+ *((int*)& __m128_op1[0]) = 0x00190019; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000c0; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000012481e4950; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000001658166830; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; -+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000080; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000080; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& 
__m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandi_b(__m128i_op0,0x39); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x3f3f3f3c; -+ *((int*)& __m256_op0[5]) = 0xc6c6c6c6; -+ *((int*)& __m256_op0[4]) = 0x8787878a; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x3f3f3f3c; -+ *((int*)& __m256_op0[1]) = 0x8787878a; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff9c9d00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff7fff7fff7fff7; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff7fff7fff7fff7; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff7fff7fff7fff7; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff7fff7fff7fff7; -+ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x9240f24a84b18025; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x9240f24a84b18025; -+ *((unsigned long*)& __m256i_op0[1]) = 0xb2c0b341807f8006; -+ *((unsigned long*)& __m256i_op0[0]) = 0xb2c0b341807f8006; -+ *((unsigned long*)& __m256i_result[3]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_result[2]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_result[1]) = 0x00b200b300800080; -+ *((unsigned long*)& __m256i_result[0]) = 0x00b200b300800080; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc6c6c6c68787878a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8787878a00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; -+ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffff6; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffff6; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_op2[2]) = 0xc6c6c6c68787878a; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_op2[0]) = 0x8787878a00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe3; -+ *((unsigned long*)& __m256i_result[2]) = 0x63636344c3c3c4f6; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffc3; -+ *((unsigned long*)& __m256i_result[0]) = 0xc3c3c500fffffff6; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00b200b300800080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00b200b300800080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_result[2]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_result[1]) = 0x00b200b300800080; -+ *((unsigned long*)& __m256i_result[0]) = 0x00b200b300800080; -+ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc6c6c6c68787878a; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000003f3f3f3c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8787878a00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003f3fc6c68787; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f87870000; -+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffff0000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00003f3f; -+ *((int*)& __m256_op1[4]) = 0xc6c68787; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00003f3f; -+ *((int*)& __m256_op1[0]) = 0x87870000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffff0000; -+ *((int*)& __m128_op0[1]) = 0x00ff0000; -+ *((int*)& __m128_op0[0]) = 0x00ff0000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000800; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0xffffffff; -+ *((int*)& __m128_op2[2]) = 0xfffff800; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xfffff800; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe15; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe15; -+ __m128i_out = __lsx_vldi(3605); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x9240000000008025; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffff24affff8025; -+ *((unsigned long*)& __m256i_op0[1]) = 0xb2c0000000008006; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffb341ffff8006; -+ *((unsigned long*)& __m256i_op1[3]) = 0x9240000000008025; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffff24affff8025; -+ *((unsigned long*)& __m256i_op1[1]) = 0xb2c0000000008006; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffb341ffff8006; -+ *((unsigned long*)& __m256i_result[3]) = 0xff2400000000ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffeffe4fffeff00; -+ *((unsigned long*)& __m256i_result[1]) = 0xff6400000000ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffeff66fffeff00; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffe15; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffe15; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 
0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x009200f200840080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00b200b300800080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00b200b300800080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc0000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000000; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x83); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000e00000080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000e00000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000e00000080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000e00000080; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xff240000; -+ *((int*)& __m256_op0[6]) = 0x0000ff00; -+ *((int*)& __m256_op0[5]) = 0xfffeffe4; -+ *((int*)& __m256_op0[4]) = 0xfffeff00; -+ *((int*)& __m256_op0[3]) = 0xff640000; -+ *((int*)& __m256_op0[2]) = 0x0000ff00; -+ *((int*)& __m256_op0[1]) = 0xfffeff66; -+ *((int*)& __m256_op0[0]) = 0xfffeff00; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ 
*((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff0000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000080; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe; -+ __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x001fffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x4b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3fc6c68787; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f87870000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003f3fc6c68787; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f87870000; -+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3fc6c68787; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f87870000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffefffffffeff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003e3ec6c68686; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffeff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003e3e87870000; -+ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff2400000000ff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffeffe4fffeff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff6400000000ff00; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffeff66fffeff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0080808080808080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0080808080808080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0080808100808080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0080808000808080; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffffffff800; -+ *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& 
__m128d_op1[1]) = 0xffffffff00000080; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01fe04; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01fe04; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x01010101; -+ *((int*)& __m128_op0[2]) = 0x01010101; -+ *((int*)& __m128_op0[1]) = 0x01010101; -+ *((int*)& __m128_op0[0]) = 0x01010101; -+ *((int*)& __m128_result[3]) = 0xc2fa0000; -+ *((int*)& __m128_result[2]) = 0xc2fa0000; -+ *((int*)& __m128_result[1]) = 0xc2fa0000; -+ *((int*)& __m128_result[0]) = 0xc2fa0000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = 
__lasx_xvsat_d(__m256i_op0,0x21); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000edff00fffd; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fff10000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000cdff00fffd; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff320000ffff; -+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x47000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; 
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x01010101010000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; -+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vclz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffef800; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080807; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080807; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; -+ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x5f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01010101010000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0100feff0100eeef; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0100feff00feef11; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001010; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0404ffff00000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0404040800000010; -+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffefffe; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xfffefffe; -+ *((int*)& __m256_op0[2]) = 0xfffefffd; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffdfffffffe0; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffdfffffffe0; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000100da000100fd; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0001ffe20001fefd; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0001009a000100fd; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0001ff640001fefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000c2f90000bafa; -+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000002020000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000201eff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000002020000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fef010; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000000000; -+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0002000400000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020006; -+ unsigned_int_result = 0x0000000000020006; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x0); -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01010101010000ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x8080808280808082; -+ *((unsigned long*)& __m256i_result[2]) = 0x8080808280808082; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808280808080; -+ *((unsigned long*)& __m256i_result[0]) = 0x8080808280808082; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000c2f90000bafa; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x80808082; -+ *((int*)& __m256_op0[6]) = 0x80808082; -+ *((int*)& __m256_op0[5]) = 0x80808082; -+ *((int*)& __m256_op0[4]) = 0x80808082; -+ *((int*)& __m256_op0[3]) = 0x80808082; -+ *((int*)& __m256_op0[2]) = 0x80808080; -+ *((int*)& __m256_op0[1]) = 0x80808082; -+ *((int*)& __m256_op0[0]) = 0x80808082; -+ *((int*)& __m256_op1[7]) = 0x55555555; -+ *((int*)& __m256_op1[6]) = 0x55555555; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x55555555; -+ *((int*)& __m256_op1[2]) = 0x55555555; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000100da000100fd; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001ffe20001fefd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001009a000100fd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001ff640001fefd; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x000000007ff90000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000001ff60000; -+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x8080808280808082; -+ *((unsigned long*)& __m256d_op0[2]) = 0x8080808280808082; -+ *((unsigned long*)& __m256d_op0[1]) = 0x8080808280808080; -+ *((unsigned long*)& __m256d_op0[0]) = 0x8080808280808082; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; -+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000c2f90000bafa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000c2f90000bafa; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000c2fa8000c2fa; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff3d06ffff4506; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ffffffe7ffff800; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffff3d06ffff4506; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7ffffffe7ffff800; -+ *((unsigned long*)& 
__m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffd; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffff800; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffff800; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x8a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff3d06ffff4506; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe7ffff800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xc2f9bafac2fac2fa; -+ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x7ff90000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x1ff60000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xfffffffe; -+ *((int*)& __m256_op1[4]) = 0x00000001; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xfffffffe; -+ *((int*)& __m256_op1[0]) = 0x00000001; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000001; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000001; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfffebd06fffe820c; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7fff7ffe7fff3506; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000003f7e3f; -+ *((unsigned long*)& 
__m256i_result[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; -+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc2f9bafac2fac2fa; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbdf077eee7e20468; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe3b1cc6953e7db29; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000e7e20468; -+ *((unsigned long*)& __m128i_result[0]) = 0xc2fac2fa53e7db29; -+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256d_op0[0]) = 0xff874dc687870000; -+ *((unsigned long*)& __m256d_result[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff8001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; -+ long_int_result = 0x1f0fdf7f3e3b31d4; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000e7e20468; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc2fac2fa53e7db29; -+ *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_result[0]) = 0x00a6ffceffb60052; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffebd06fffe820c; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x7fff7ffe7fff3506; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffebd06fffe820c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7ffe7fff3506; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0cffffff18; -+ *((unsigned long*)& __m128i_result[0]) = 0xfefffefffeff6a0c; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00a6ffceffb60052; -+ unsigned_int_result = 0x0000000000000084; -+ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xa); -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff0cffffff18; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfefffefffeff6a0c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc2f9bafac2fac2fa; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffefefe6a; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x61f1000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0108000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x61f1a18100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0108000000000000; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fdf000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x7fdf000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fdf7fff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fdf7fff00000000; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x35); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; -+ *((unsigned long*)& __m128i_result[1]) = 0x7474f6fd7474fefe; -+ *((unsigned long*)& __m128i_result[0]) = 0xf474f6fef474f6fe; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x74); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7474f6fd7474fefe; -+ *((unsigned long*)& __m128d_op0[0]) = 0xf474f6fef474f6fe; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x01fc03e000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x01fc03e000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00a6ffceffb60052; -+ *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffefe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffc2ba; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff84fff4; -+ *((int*)& __m128_op0[2]) = 0xff84fff4; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xfffffff0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x41dfffc000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x41dfffdfffc00000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffff0c8000c212; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfefffeff7f002d06; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc2f9bafac2fac2fa; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fc03e000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01fc03e000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00fffb0402fddf20; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00fffb0402fddf20; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001fbf9fbe29f52; -+ *((unsigned long*)& __m256i_result[2]) = 0x5b409c0000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001fbf9fbe29f52; -+ *((unsigned long*)& __m256i_result[0]) = 0x5b409c0000000000; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x41dfffc000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x41dfffdfffc00000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0001fbf9fbe29f52; -+ *((unsigned long*)& __m256i_op2[2]) = 0x5b409c0000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0001fbf9fbe29f52; -+ *((unsigned long*)& __m256i_op2[0]) = 0x5b409c0000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfbba01c0003f7e3f; -+ *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_result[1]) = 0xfbd884e7003f7e3f; -+ *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; -+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00a6ffceffb60052; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xff84fff4ff84fff4; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xfffffffffefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfbba01c0003f7e3f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfbd884e7003f7e3f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff874dc687870000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfbba01c0003f7e3f; -+ *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_result[1]) = 0xfbd884e7003f7e3f; -+ *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00fffb04; -+ *((int*)& __m256_op0[6]) = 0x02fddf20; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00fffb04; -+ *((int*)& __m256_op0[2]) = 0x02fddf20; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x41dfffc0; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x41dfffdf; -+ *((int*)& __m256_op1[2]) = 0xffc00000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; -+ *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; -+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff84fff4; -+ *((int*)& __m128_op0[2]) = 0xff84fff4; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xfffffff0; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff84fff4; -+ *((int*)& __m128_op0[2]) = 0xff84fff4; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ 
*((int*)& __m128_op0[0]) = 0xfffffff0; -+ *((int*)& __m128_op1[3]) = 0xff84fff4; -+ *((int*)& __m128_op1[2]) = 0xff84fff4; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xfffffff0; -+ *((int*)& __m128_result[3]) = 0xffc4fff4; -+ *((int*)& __m128_result[2]) = 0xffc4fff4; -+ *((int*)& __m128_result[1]) = 0xffffffff; -+ *((int*)& __m128_result[0]) = 0xfffffff0; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_d(__m128i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfbba01c0003f7e3f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffc6cc05c64d960e; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfbd884e7003f7e3f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff874dc687870000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe367cc82f8989a; -+ *((unsigned long*)& __m256i_result[2]) = 0x4f90000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffc3aaa8d58f43c8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffe367cc82f8989a; -+ *((unsigned long*)& __m256d_op0[2]) = 0x4f90000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffc3aaa8d58f43c8; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256d_op1[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& 
__m256d_op1[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x1f0fdf7f; -+ *((int*)& __m256_op0[6]) = 0x3e3b31d4; -+ *((int*)& __m256_op0[5]) = 0x7ff80000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x1f0fdf7f; -+ *((int*)& __m256_op0[2]) = 0x3e3b31d4; -+ *((int*)& __m256_op0[1]) = 0x7ff80000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80000000; -+ *((int*)& __m256_result[5]) = 0x7ff80000; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000000; -+ *((int*)& __m256_result[1]) = 0x7ff80000; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x30); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x00000000fefefe68; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000001; -+ *((int*)& __m128_op0[2]) = 0xfffffffe; -+ *((int*)& __m128_op0[1]) = 0x00000001; -+ *((int*)& __m128_op0[0]) = 0xfffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x2a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1f0fdf7f3e3b31d4; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe0f02081c1c4ce2c; -+ *((unsigned long*)& __m256i_result[2]) = 0x8008000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xe0f02081c1c4ce2c; -+ *((unsigned long*)& __m256i_result[0]) = 0x8008000000000000; -+ __m256i_out = 
__lasx_xvsub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffe367cc82f8989a; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4f90000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffc3aaa8d58f43c8; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000082f8989a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000d58f43c8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x1f0fdf7f; -+ *((int*)& __m256_op0[6]) = 0x3e3b31d4; -+ *((int*)& __m256_op0[5]) = 0x7ff80000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x1f0fdf7f; -+ *((int*)& __m256_op0[2]) = 0x3e3b31d4; -+ *((int*)& __m256_op0[1]) = 0x7ff80000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x002a5429; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x002a5429; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc7418a023680; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff8845bb954b00; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffc7418a023680; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff8845bb954b00; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000002a5429; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000082f8989a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000d58f43c8; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010183f9999b; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x01010101d58f43c9; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffc741; -+ *((int*)& __m256_op0[6]) = 0x8a023680; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffff8845; -+ *((int*)& __m256_op0[2]) = 0xbb954b00; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffc74180000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff884580000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc74180000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff884580000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0xbf800000; -+ *((int*)& __m256_result[6]) = 0xbf800000; -+ *((int*)& __m256_result[5]) = 0xd662fa00; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0xbf800000; -+ *((int*)& __m256_result[2]) = 0xbf800000; -+ *((int*)& __m256_result[1]) = 0xd6ef7500; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; 
-+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbf800000bf800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xd662fa0000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xbf800000bf800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xd6ef750000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x417e01f040800000; -+ *((unsigned long*)& __m256i_result[2]) = 0x299d060000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x417e01f040800000; -+ *((unsigned long*)& __m256i_result[0]) = 0x29108b0000000000; -+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe0f02081c1c4ce2c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8008000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe0f02081c1c4ce2c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8008000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000b8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000b8; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x60f02081c1c4ce2c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8008000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x60f02081c1c4ce2c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8008000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010183f9999b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01010101d58f43c9; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010183f9999b; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x01010101d58f43c9; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002a54290; -+ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010183f9999b; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[1]) = 0x01010101d58f43c9; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op1[3]) = 0x417e01f040800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x299d060000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x417e01f040800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x29108b0000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000001000000010; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x7c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fefefe6a; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fbf9; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fbf9; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000000; -+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x5a5a5a5a5b5a5b5a; -+ *((unsigned long*)& __m128i_result[0]) = 0x5a5a5a5a5b5a5b5a; -+ __m128i_out = __lsx_vxori_b(__m128i_op0,0x5a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5a5a5a5a5b5a5b5a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5a5a5a5a5b5a5b5a; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001494b494a; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001494b494a; -+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000007070700; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000002010202; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000055; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000055; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x002a542a; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x002a542a; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = 
__lasx_xvfdiv_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe; -+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000007070700; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002010202; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000007070700; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000002010202; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010183f95466; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01010101d58efe94; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010183f95466; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x01010101d58efe94; -+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xa7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000055; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000055; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff01010101; -+ *((unsigned long*)& 
__m256i_result[2]) = 0xffffffff00d6acd7; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff01010101; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00d6acd7; -+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010183f95466; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01010101d58efe94; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000101000083f95; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001010000d58f0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000007f00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ffffffeffffffff; -+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_result[3]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x001f001f02c442af; -+ *((unsigned long*)& __m256i_result[1]) = 0x001f001f001f001f; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x001f001f02c442af; -+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x7ffffffe; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); -+ *((unsigned long*)& __m128i_op0[1]) = 0x00005a5a00005a5a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00005b5a00005b5a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5a5a5a5a5b5a5b5a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5a5a5a5a5b5a5b5a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000005400; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000005400; -+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe6a; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; -+ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00fe01f000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00fe01f000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x2d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x001f001f02c442af; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x001f001f02c442af; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_result[3]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_result[1]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000c40086; -+ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7ffffffeffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_du_wu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000002a542a; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x7ffffffe; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrml_l_s(__m128_op0); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xfefefeff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xff295329; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xfefefeff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xff295329; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004290; -+ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000002a96ba; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000004290; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000002a96ba; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vclz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4080808080808080; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffe00f7ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff629d7; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffe00f7ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff629d7; -+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x001e001ea1bfa1bf; -+ *((unsigned long*)& __m256d_op0[2]) = 0x001e001e83e5422e; -+ *((unsigned long*)& __m256d_op0[1]) = 0x001e001ea1bfa1bf; -+ *((unsigned long*)& __m256d_op0[0]) = 0x011f011f0244420e; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffe00f7ffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffff629d7; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfffe00f7ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffff629d7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x01ff01ff; -+ *((int*)& __m128_op0[2]) = 0x01ff01ff; -+ *((int*)& __m128_op0[1]) = 0x01ff01ff; -+ *((int*)& __m128_op0[0]) = 0x01ff01ff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x01ff01ff; -+ *((int*)& __m128_op0[2]) = 0x01ff01ff; -+ *((int*)& __m128_op0[1]) = 0x01ff01ff; -+ *((int*)& __m128_op0[0]) = 0x01ff01ff; -+ *((int*)& __m128_result[3]) = 0xc2f80000; -+ *((int*)& __m128_result[2]) = 0xc2f80000; -+ *((int*)& __m128_result[1]) = 0xc2f80000; -+ *((int*)& __m128_result[0]) = 0xc2f80000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ffffffeffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0xff80ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ffffffeffffffff; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xe6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 
0xff80ffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x7ffffffe; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001e001ea1bfa1bf; -+ *((unsigned long*)& __m256i_op1[2]) = 0x001e001e83e5422e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001e001ea1bfa1bf; -+ *((unsigned long*)& __m256i_op1[0]) = 0x011f011f0244420e; -+ *((unsigned long*)& __m256i_result[3]) = 0x000f000fd0dfd0df; -+ *((unsigned long*)& __m256i_result[2]) = 0x000f000f83ef4b4a; -+ *((unsigned long*)& __m256i_result[1]) = 0x000f000fd0dfd0df; -+ *((unsigned long*)& __m256i_result[0]) = 0x0110011001224b07; -+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x83f95466; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x01010101; -+ *((int*)& __m256_op0[0]) = 0x00005400; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xfefefeff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xff295329; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xfefefeff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xff295329; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000004290; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000002a96ba; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000004290; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000002a96ba; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000083f95466; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101010100005400; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000004290; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000083f95466; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000004290; -+ *((unsigned long*)& __m256d_result[0]) = 0x0101010100005400; -+ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000002a5; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000002a5; -+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefeff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff295329; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; -+ *((unsigned long*)& __m256i_result[3]) = 0x007f00f8ff7fff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff6a9d8; -+ *((unsigned long*)& __m256i_result[1]) = 0x007f00f8ff7fff80; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff6a9d8; -+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m128d_result[0]) = 0x01ff01ff01ff01ff; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; -+ *((unsigned long*)& __m256i_op1[3]) = 0x007f00f8ff7fff80; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff6a9d8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x007f00f8ff7fff80; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff6a9d8; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x007f00f8ff7fff80; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff6a9d8; 
-+ *((unsigned long*)& __m256i_op1[1]) = 0x007f00f8ff7fff80; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff6a9d8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089; -+ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x02a54290; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x02a54290; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x02a54290; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x0154dc84; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x02a54290; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000089; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x82a54290; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x028aa700; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x82a54290; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x02a54287; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000001ff000001ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000001ff000001ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000001ff000001ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000001ff000001ff; -+ *((unsigned long*)& __m128i_op2[1]) = 0xff80ffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7ffffffeffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000002fe800000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ffffe0100000000; -+ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000002a54290; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a;
-+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00fe01f0;
-+ *((int*)& __m256_op0[6]) = 0x00010000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00c40086;
-+ *((int*)& __m256_op0[3]) = 0x00fe01f0;
-+ *((int*)& __m256_op0[2]) = 0x00010000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00c40086;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x82a54290;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x028aa700;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x82a54290;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x02a54287;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00010000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00c40086;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00010000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00c40086;
-+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x19);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000082a54290;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000028aa700;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000082a54290;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54287;
-+ *((unsigned long*)& __m256i_result[3]) = 0x007f00f841532148;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001a753c3;
-+ *((unsigned long*)& __m256i_result[1]) = 0x007f00f841532148;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001b52187;
-+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a;
-+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00fe01f0;
-+ *((int*)& __m256_op0[6]) = 0x00010000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00c40086;
-+ *((int*)& __m256_op0[3]) = 0x00fe01f0;
-+ *((int*)& __m256_op0[2]) = 0x00010000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00c40086;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x98ff98ff220e220d;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xa2e1a2601ff01ff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
-+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000082a54290;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000028aa700;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000082a54290;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54287;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a542a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a542a;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x803f800080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe0404041c0404040;
-+ int_op1 = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe0404041e0404041;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000400;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a542a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a542a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000242;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000242;
-+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000c40086;
-+ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x803f800080000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xe0404041c0404040;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_result[0]) = 0x803f800080000000;
-+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe0404041e0404041;
-+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002a54290;
-+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((int*)& __m128_result[3]) = 0x35200000;
-+ *((int*)& __m128_result[2]) = 0x35200000;
-+ *((int*)& __m128_result[1]) = 0x35200000;
-+ *((int*)& __m128_result[0]) = 0x35200000;
-+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000089;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x803f800080000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000e;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000009;
-+ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x803f800080000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xff80ffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x7ffffffe;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xe0404041e0404041;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x803f800080000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x02a54290;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x0154dc84;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x02a54290;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000089;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x02a54290;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x0154dc84;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x02a54290;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000089;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x02a54290;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x0154dc84;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x02a54290;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000089;
-+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x59);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0a00000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff00000089;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010;
-+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000;
-+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x000000004c7f4c7f;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xe0c0c0c0d1c7d1c6;
-+ *((unsigned long*)& __m128i_result[1]) = 0x061006100613030c;
-+ *((unsigned long*)& __m128i_result[0]) = 0x4d6814ef9c77ce46;
-+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0xfebcfebcfebcfebc;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfebcfebcfebcfebc;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfebcfebcfebcfebc;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfebcfebcfebcfebc;
-+ __m256i_out = __lasx_xvldi(1724);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x061006100613030c;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4d6814ef9c77ce46;
-+ *((unsigned long*)& __m128i_result[1]) = 0x010f010f0112010b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x016701ee01760145;
-+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001fe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fe;
-+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000fd0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fd0000;
-+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000;
-+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x29);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0a000a000a000a00;
-+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfff6fff6fff6fff6;
-+ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x010f00000111fffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x016700dc0176003a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0a000a000a000a00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xa5a5a5a5a5a5a5ff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xa5a5a5a5a5a5a5ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x36);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a;
-+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020;
-+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fd0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fd0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001b00fd0000;
-+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf8f8f8f8f8f8f8f8;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf8f8f8f8f8f8f8f8;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf8f8f8f8f8f8f8f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf8f8f8f8f8f8f8f8;
-+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fd0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fd0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007f0000;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe0100000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe0100000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000a0000000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000a00000009;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000a000a0000000a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a;
-+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xaf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xa5a5a5a5a5a99e03;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xa5a5a5a5a5a99e03;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000a0000000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000a00000009;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a000a0a0a00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0009090900;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe0100000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe0100000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff;
-+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000a000a00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000a000a00000000;
-+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000feb60000b7d0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000feb60000c7eb;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000feb60000b7d0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000feb60000c7eb;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0a0a0a000a0a0a00;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0a0a0a0009090900;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000feb60000b7d0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000feb60000c7eb;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000feb60000b7d0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000feb60000c7eb;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0707feb60707c7eb;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0707feb60707c7eb;
-+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff;
-+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0040000000400000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0040000000400000;
-+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001900000019;
-+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000fe0100000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000fe0100000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0040000000400000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0040000000400000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0141010101410101;
-+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfebffefffebffeff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfebffefffebffeff;
-+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_result[2]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x45baa7ef6a95a985;
-+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6420e0208400c4c4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x20c4e0c4e0da647a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x6420e0208400c4e3;
-+ *((unsigned long*)& __m128i_result[0]) = 0x20c4e0c4e0da6499;
-+ __m128i_out = __lsx_vaddi_du(__m128i_op0,0x1f);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x6420e020;
-+ *((int*)& __m128_op0[2]) = 0x8400c4e3;
-+ *((int*)& __m128_op0[1]) = 0x20c4e0c4;
-+ *((int*)& __m128_op0[0]) = 0xe0da6499;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfebffefffebffeff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfebffefffebffeff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000019;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000019;
-+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
-+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0707feb608c9328b;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc237bd65fc892985;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0707feb608c9328b;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc237bd65fc892985;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00150015003a402f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x333568ce26dcd055;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00150015003a402f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x333568ce26dcd055;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0e0f1192846ff912;
-+ *((unsigned long*)& __m256i_result[2]) = 0x002a0074666a4db9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0e0f1192846ff912;
-+ *((unsigned long*)& __m256i_result[0]) = 0x002a0074666a4db9;
-+ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101;
-+ *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00150015003a402f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x333568ce26dcd055;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00150015003a402f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x333568ce26dcd055;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000007d0d0d0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000007d0d0d0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
-+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000007d0d0d0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000007d0d0d0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000007d0d0d00000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000007d0d0d00000;
-+ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001b00fd0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001b;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001b;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000fd00000000;
-+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000be00be;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x1f1b917c9f3d5e05;
-+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0e0f1192846ff912;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x002a0074666a4db9;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0e0f1192846ff912;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x002a0074666a4db9;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000018;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000018;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000100000018;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000100000018;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x1f60000000c00000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x1f60000000c00000;
-+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x1f1b917c;
-+ *((int*)& __m128_op0[0]) = 0x9f3d5e05;
-+ *((int*)& __m128_result[3]) = 0x7f800000;
-+ *((int*)& __m128_result[2]) = 0x7f800000;
-+ *((int*)& __m128_result[1]) = 0x4fa432d6;
-+ *((int*)& __m128_result[0]) = 0x7fc00000;
-+ __m128_out = __lsx_vfrsqrt_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1f60000000c00000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1f60000000c00000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x60000000c0000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x60000000c0000000;
-+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff003f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff003f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000627;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000627;
-+ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x400040003abf4000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x400040003abf4000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff;
-+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000627;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000627;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff;
-+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) =
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff003f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fff; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000627; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000627; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1f60000000c00000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1f60000000c00000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x7fff7fff05407fff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x7fff7fff05407fff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000627; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000627; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4180418041804180; -+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x4fa432d67fc00000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0141010101410101; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0141010101410101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& 
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0408040800000004;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0408040800000004;
-+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000000000084;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0084008400840084;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0084008400840084;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0084008400840084;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0084008400840084;
-+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff05407fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00001fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00001fff;
-+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000800;
-+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100001;
-+ __m256i_out = __lasx_xvclz_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3e1f321529232736;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x161d0c373c200826;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000082020201;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000820200000201;
-+ __m128i_out = __lsx_vexth_wu_hu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
-+ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x65b780a3ae3bf8cb;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x161d0c363c200826;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x65b780a2ae3bf8ca;
-+ *((unsigned long*)& __m128i_result[0]) = 0x161d0c373c200827;
-+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x3fff3fff;
-+ *((int*)& __m256_op0[6]) = 0x3fff3fff;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x3fff3fff;
-+ *((int*)& __m256_op0[3]) = 0x3fff3fff;
-+ *((int*)& __m256_op0[2]) = 0x3fff3fff;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x3fff3fff;
-+ *((int*)& __m256_op1[7]) = 0x017e01fe;
-+ *((int*)& __m256_op1[6]) = 0x01fe01fe;
-+ *((int*)& __m256_op1[5]) = 0x05860606;
-+ *((int*)& __m256_op1[4]) = 0x01fe0202;
-+ *((int*)& __m256_op1[3]) = 0x017e01fe;
-+ *((int*)& __m256_op1[2]) = 0x01fe0000;
-+ *((int*)& __m256_op1[1]) = 0x05860606;
-+ *((int*)& __m256_op1[0]) = 0x01fe0004;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x017e01fe01fe01fe;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0586060601fe0202;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x017e01fe01fe0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0586060601fe0004;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffbfffafffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffbfffaffff0000;
-+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x65b780a2ae3bf8ca;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x161d0c373c200827;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000001ff;
-+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x017e01fe01fe01fe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0586060601fe0202;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x017e01fe01fe0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0586060601fe0004;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x017f01fe01ff01fe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x05960616020e0203;
-+ *((unsigned long*)& __m256i_result[1]) = 0x017f01fe01ff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x05960616020e0005;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x017f01fe01ff01fe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x05960616020e0203;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x017f01fe01ff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x05960616020e0005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x017f01fe01ff01fe;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x05960616020e0203;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x017f01fe01ff0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x05960616020e0005;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00fe01fc01fe01fc;
-+ *((unsigned long*)& __m256i_result[2]) = 0x012c002c001c0006;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00fe01fc01fe0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x012c002c001c000a;
-+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000800;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000800;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff4000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000403f3fff;
-+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffbfffafffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffbfffaffff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe01fc01fe01fc;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x012c002c001c0006;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe01fc01fe0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x012c002c001c000a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x807e80fd80fe80fd;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80938013800d8002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x807e80fd80fe0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80938013800d0005;
-+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01fc01fe01fc;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x012c002c001c0006;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01fc01fe0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x012c002c001c000a;
-+ long_int_result = 0x00fe01fc01fe0000;
-+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x807e80fd80fe80fd;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80938013800d8002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x807e80fd80fe0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80938013800d0005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00001fff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00001fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x807e80fd80fe80fd;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80938013800d8002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x807e80fd80fe0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80938013800d0005;
-+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_result[3]) = 0x38f7414938f7882f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x38f7414938f78830;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x64b680a2ae3af8ca;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x161c0c363c200826;
-+ *((unsigned long*)& __m128i_result[1]) = 0x64b680a2ae3af8c8;
-+ *((unsigned long*)& __m128i_result[0]) = 0x161c0c363c200824;
-+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x64b680a2ae3af8c8;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x161c0c363c200824;
-+ *((unsigned long*)& __m128i_result[1]) = 0x23b57fa16d39f7c8;
-+ *((unsigned long*)& __m128i_result[0]) = 0x161c0c363c200824;
-+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000001ff;
-+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x807e80fd80fe80fd;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80938013800d8002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x807e80fd80fe0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80938013800d0005;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8091811081118110;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80a6802680208015;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8091811081110013;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80a6802680200018;
-+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x0707feb6;
-+ *((int*)& __m256_op0[6]) = 0x0707b7d0;
-+ *((int*)& __m256_op0[5]) = 0x45baa7ef;
-+ *((int*)& __m256_op0[4]) = 0x6a95a985;
-+ *((int*)& __m256_op0[3]) = 0x0707feb6;
-+ *((int*)& __m256_op0[2]) = 0x0707b7d0;
-+ *((int*)& __m256_op0[1]) = 0x45baa7ef;
-+ *((int*)& __m256_op0[0]) = 0x6a95a985;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000017547fffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000017547fffffff;
-+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0408040800008003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0408040800008002;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfbf7fbf7ffff7ffd;
-+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x23b57fa16d39f7c8;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x161c0c363c200824;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000;
-+ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x34);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000017547fffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000017547fffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x807e80fd80fe80fd;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80938013800d8002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x807e80fd80fe0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80938013800d0005;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000801380f380fe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000801380f300fb;
-+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0020808100000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x29);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x38f7414938f7882f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x38f7414938f78830;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000801380f380fe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000801380f300fb;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008;
-+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x2c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0408040800008003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff80800;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0408040800008003;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x04080408fff87803;
-+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000800;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff7fedffffff05;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x0000fffd;
-+ *((int*)& __m128_op1[3]) = 0x7fffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00;
-+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0707b7cff8f84830;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000354ad4c28;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0707b7cff8f84830;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000354ad4c28;
-+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff82bb9784;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc6bb97ac;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000007ffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x04080408fff87803;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0707b7cff8f84830;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000354ad4c28;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0707b7cff8f84830;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000354ad4c28;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffd5a98;
-+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffd5a98;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffd5a98;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007f3a40;
-+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0020808100000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x3fff3fff3fff4000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000403f3fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007ffe7ffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7ffe7ffe7ffe8000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000807e7ffe;
-+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fff;
-+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffd5a98;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000101ff01;
-+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fffd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff000000ff;
-+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8091811081118110;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80a6802680208015;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8091811081110013;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80a6802680200018;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8091811081118110;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80a6802680208015;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8091811081110013;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80a6802680200018;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000101ff01;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_b(__m256i_op0,9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff6fffffff6;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff6fffffff6;
-+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff00000000000001;
-+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
-+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff00000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe80000000000001;
-+ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000101ff01;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001;
-+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8091811081118110;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80a6802680208015;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8091811081110013;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80a6802680200018;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffe0000feff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffeff0000007e7f;
-+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007ffe7ffe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7ffe7ffe7ffe8000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000807e7ffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007ffe7ffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7ffe7ffe7ffe8000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000807e7ffe;
-+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008013;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000080f3;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fb;
-+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0707feb60707b7d0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x45baa7ef6a95a985;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7ffe7ffd7ffe7fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ffe7ffd7ffe8001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0707feb70707b7d1;
-+ *((unsigned long*)& __m256i_result[2]) = 0x65baa7efea95a985;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0707feb70707b7d1;
-+ *((unsigned long*)& __m256i_result[0]) = 0x65baa7ef6a95a987;
-+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff000000ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000000;
-+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f3a40;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff82bb9784;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffc6bb97ac;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff82bb9784;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc6bb97ac;
-+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000004000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000004000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000004000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000004000000;
-+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff82bb9784;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc6bb97ac;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x7fffffff82bb9784;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x7fffffffc6bb97ac;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff82bb9784;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc6bb97ac;
-+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xfe800000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((int*)& __m128_op1[3]) = 0x7fffffff;
-+ *((int*)& __m128_op1[2]) = 0x82bb9784;
-+ *((int*)& __m128_op1[1]) = 0x7fffffff;
-+ *((int*)& __m128_op1[0]) = 0xc6bb97ac;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe80000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe80000000000001;
-+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000027f000000fe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000018000000000;
-+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslei_d(__m128i_op0,5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f3a40;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f3a40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000d24;
-+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f3a40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x42);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00fe000000000000;
-+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7f7e7f7e7f7e7f7e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f7e7f7e;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7f7e7f7e7f7e0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007e7f7e;
-+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00fe000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x027e0000000000ff;
-+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00fdffffffffff02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe80ffffffffff02;
-+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe80;
-+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x30);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x027e0000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe80ffffffffff02;
-+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fffffff;
-+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000d24;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000d24;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe80ff80ffff0000;
-+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000013; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000013; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001000000fb; -+ *((unsigned long*)& __m256i_result[3]) = 0x8080808180808093; -+ *((unsigned long*)& __m256i_result[2]) = 0x80808081808080fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808180808093; -+ *((unsigned long*)& __m256i_result[0]) = 0x80808081808080fb; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000d24; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8080808180808093; -+ *((unsigned long*)& __m256i_op0[2]) = 0x80808081808080fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8080808180808093; -+ *((unsigned long*)& __m256i_op0[0]) = 0x80808081808080fb; -+ *((unsigned long*)& __m256i_result[3]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256i_result[2]) = 0xf5f5f5f5f5f5f5fe; -+ *((unsigned long*)& __m256i_result[1]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256i_result[0]) = 0xf5f5f5f5f5f5f5fb; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; -+ __m128i_out = __lsx_vslli_h(__m128i_op0,0x6); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0505050505050505; -+ *((unsigned long*)& __m128i_result[0]) = 0x0505050504040404; -+ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00010013000100fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00010013000100fb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; -+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f3f018000000000; -+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 
0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000004000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff04ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff04ff00ff00ff00; -+ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x7f3f0180; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000800000098; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000040000ffca; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000800000098; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000040000ff79; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x04000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x04000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ 
*((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000098; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000040000ffca; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800000098; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000040000ff79; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff04ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff04ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000008000000a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000008000000a; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x44); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f007f007f007f00; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f; -+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xf9f5f9f5f9f5f9f5; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xf9f5f9f5f9f5f9f5; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xf5f5f5f5f5f5f5f5; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x8000000a; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x8000000a; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128d_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff04ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff04ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f; -+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7fff7fff; -+ *((int*)& __m128_op0[2]) = 0x7fff7fff; -+ *((int*)& __m128_op0[1]) = 0x00000001; -+ *((int*)& __m128_op0[0]) = 0x0000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000010000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f; -+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f007f007f007f00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0003003f; -+ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xfffffffe; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x6a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000002; -+ *((int*)& __m256_op0[6]) = 0x00000002; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000002; -+ *((int*)& __m256_op0[2]) = 0x00000002; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000003; -+ *((int*)& __m128_op0[0]) = 0x0000003f; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000003; -+ *((int*)& __m128_op1[0]) = 0x0000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000400; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000400; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe00000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff01010105; -+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0000fffe;
-+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff00;
-+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000400;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000400;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009;
-+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe0000fffe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff00ffffff00;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffffff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0000fffe;
-+ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
-+ __m256i_out = __lasx_xvclz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242070db;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa478;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001800390049ffaa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0029ff96005cff88;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001800390049ffaa;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0029ff96005cff88;
-+ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x001800390049ffaa;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0029ff96005cff88;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff88;
-+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080;
-+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000f0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000f0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000f0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001800000039;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000049ffffffaa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000060000000e;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000127fffffea;
-+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x22);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001800390049ffaa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0029ff96005cff88;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00060012000e002b;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000049ffffffaa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000e002b;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffaa;
-+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff88;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe5e5e5e5e5e5e5e5;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe5e5e5e5e4e4e46d;
-+ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
-+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00060012000e002b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000049ffffffaa;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000127fffffea;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000060000000e;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001201fe01e9;
-+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000060000000e;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001201fe01e9;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001201fe01e9;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000c0000001c;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000002403fc03d2;
-+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000127fffffea;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f0101070101010f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000127f010116;
-+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe5e5e5e5e5e5e5e5;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe5e5e5e5e4e4e46d;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe5e5e5e5e5e5e5e5;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe5e5e5e5e4e4e46d;
-+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfff8fff8fff8fff8;
-+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf2f2e5e5e5e5e5e5;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xf2f2e5e5e5e5e5dc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7;
-+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xf2f2e5e5;
-+ *((int*)& __m128_op0[2]) = 0xe5e5e5e5;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xe5e5e5e5;
-+ *((int*)& __m128_op1[2]) = 0xe5e5e5e5;
-+ *((int*)& __m128_op1[1]) = 0xe5e5e5e5;
-+ *((int*)& __m128_op1[0]) = 0xe4e4e46d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf2f2e5e5e5e5e5dc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff80ff80ff80ff80;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff80ff80ff80ff80;
-+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7f0101070101010f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000127f010116;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffffff;
-+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x120e120dedf1edf2;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x120e120dedf1edf2;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfff8fff8fff8fff8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8001800180018001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8001800180018001;
-+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff80000001;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x120e120dedf1edf2;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x120e120dedf1edf2;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000120e120d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000120e120d;
-+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000000;
-+ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x29);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
-+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
-+ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000120e120d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000120e120d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x67);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000200;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000200;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000200;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000200;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x89);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ unsigned_long_int_result = 0xffffffffffffffff;
-+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x007e007e007e007e;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
-+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3fffffffc0000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe;
-+ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xbfffbfffbfffbffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
-+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000907;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x3fffffffc0000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
-+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000200;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000200;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000200;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000200;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000009;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000009;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000009;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000009;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
-+ int_result = 0xffffffffffffffff;
-+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0xc);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xbfffbfffbfffbffe;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x4000400040004002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
-+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ unsigned_int_result = 0x00000000ffffffff;
-+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000;
-+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ long_int_result = 0xffffffffffffffff;
-+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xbfffbfffbfffbffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xbfffbfffbfffbffe;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xbfffbfffbfffbffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff;
-+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00;
-+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000045f3fb;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000045f3fb;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
-+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000008080809;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000008080809;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000008080809;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000008080809;
-+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5353535353535353;
-+ *((unsigned long*)& __m256i_result[2]) = 0x5353535353535353;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5353535353535353;
-+ *((unsigned long*)& __m256i_result[0]) = 0x5353535353535353;
-+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x53);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000045f3fb;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000045f3fb;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
-+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800;
-+ *((unsigned long*)& __m256i_op0[2]) =
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128d_op1[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256d_op0[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256d_op0[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256d_op0[0]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffba0c05; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x37); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe01fe01fe01fe01; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe01fe01fe01fe01; -+ *((unsigned long*)& __m128i_op2[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& 
__m128i_op2[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0xf10cf508f904fd01; -+ *((unsigned long*)& __m128i_result[0]) = 0xf10cf508f904fd01; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffba0c05; -+ *((unsigned long*)& __m256i_op1[3]) = 0x5353535353535353; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5353535353535353; -+ *((unsigned long*)& __m256i_op1[1]) = 0x5353535353535353; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5353535353535353; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0303030303020000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0303030303020000; -+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0xff807f807f807f80; -+ *((unsigned long*)& __m128i_result[0]) = 0xff807f807f807f80; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff807f807f807f80; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff807f807f807f80; -+ *((unsigned long*)& __m128i_result[1]) = 0xfb807b807b807b80; -+ *((unsigned long*)& __m128i_result[0]) = 0xfb807b807b807b80; -+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf10cf508f904fd01; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf10cf508f904fd01; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf10cf508f904fd01; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf10cf508f904fd01; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; -+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[2]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[0]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op2[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op2[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op2[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op2[0]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0303030303020000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0303030303020000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007f017f01; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007f017f01; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x000000007f017f01; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007f017f01; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f017f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f017f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[2]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[0]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[2]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[0]) = 0x03f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[3]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[2]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[1]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[0]) = 0x07efefefefefefee; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000045f3fb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000045f3fb; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004500f300fb; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_d(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff9; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff9; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001fbfbfc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001fbfbfc; -+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x62); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010000000100000; -+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x5959595959595959; -+ *((unsigned long*)& __m128i_result[0]) = 0x5959595959595959; -+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x59); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffb80000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffb80000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffff208fffffa02; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffb80000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffb80000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x07efefefefefefee; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x07efefefefefefee; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf8f8e018f8f8e810; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf8f8f008f8f8f800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000045000d0005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000045000d0005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf8f8e018f8f8e810; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf8f8f008f8f8f800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000e0180000e810; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000f0080000f800; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1010000010100000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1010000010100000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1010000010100000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1010000010100000; -+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000004800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000004800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000004800000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000004800000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000004500f300fb; -+ *((unsigned long*)& __m256i_result[3]) = 0x7b7b7b7b80000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xcacacb1011040500; -+ *((unsigned long*)& __m256i_result[1]) = 0x7b7b7b7b80000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xcacacb1011040500; -+ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0010000000100000; -+ 
*((unsigned long*)& __m256d_op0[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x41cc5bb8a95fd1eb; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x41cc5bb8a95fd1eb; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x41cc5bb8a95fd1eb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x41cc5bb8a95fd1eb; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7b7b7b7b80000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xcacacb1011040500; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7b7b7b7b80000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xcacacb1011040500; -+ *((unsigned long*)& __m256i_result[3]) = 0x49cc5bb8a95fd1eb; -+ *((unsigned long*)& __m256i_result[2]) = 0x7ff4080102102001; -+ *((unsigned long*)& __m256i_result[1]) = 0x49cc5bb8a95fd1eb; -+ *((unsigned long*)& __m256i_result[0]) = 0x7ff4080102102001; -+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000e0180000e810; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000f0080000f800; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000e0180000e810; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000f0080000f800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010146; -+ *((unsigned long*)& __m256i_result[2]) = 0x01010101010e0106; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010146; -+ *((unsigned long*)& __m256i_result[0]) = 0x01010101010e0106; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000e0180000e810; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000f0080000f800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000e0180000e810; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000f0080000f800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000f0f800; -+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010000000100000; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000000; -+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfrint_s(__m128_op0); -+ 
ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00100000; -+ *((int*)& __m256_op0[6]) = 0x00100000; -+ *((int*)& __m256_op0[5]) = 0x00100000; -+ *((int*)& __m256_op0[4]) = 0x00100000; -+ *((int*)& __m256_op0[3]) = 0x00100000; -+ *((int*)& __m256_op0[2]) = 0x00100000; -+ *((int*)& __m256_op0[1]) = 0x00100000; -+ *((int*)& __m256_op0[0]) = 0x00100000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vclz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00080000002c0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0008000000080000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00080000002c0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0008000000080000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00080000002c0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00080000002c0000; -+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0005; -+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; -+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x50); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1211100f11100f0e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x100f0e0d0f0e0d0c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000483800; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000583800; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000583800; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100000; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0f000d200e000c20; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x11000f200f000d20; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; -+ *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20; -+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x11000f20; -+ *((int*)& __m128_op0[2]) = 0x10000e20; -+ *((int*)& __m128_op0[1]) = 0x0f000d20; -+ *((int*)& __m128_op0[0]) = 0x0e000c20; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000d000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000d000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000583800; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000583800; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0000; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vslei_b(__m128i_op0,-6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000013b13380; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000013b13380; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x11000f20; -+ *((int*)& __m128_op0[2]) = 0x10000e20; -+ *((int*)& __m128_op0[1]) = 0x0f000d20; -+ *((int*)& __m128_op0[0]) = 0x0e000c20; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x11000f2000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f000d2000000000; -+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xe3e3e3e3e3e3e3e3; -+ *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; -+ __m128i_out = __lsx_vxori_b(__m128i_op0,0xe3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ffffff00ffff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ffffff00ffff; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0000; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000404040004040; -+ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[0]) = 0x4000404040004040; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x40); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0008000000080000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0008000000080000; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((int*)& __m128_result[3]) = 0x4f800000; -+ *((int*)& __m128_result[2]) = 0x4f800000; -+ *((int*)& __m128_result[1]) = 0x4f800000; -+ *((int*)& __m128_result[0]) = 0x4f800000; -+ __m128_out = __lsx_vffint_s_wu(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe3e3e3e3e3e3e3e3; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe01fe01f; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe01fe01f; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe01fe01f; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe01fe01f; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xff00ffff; -+ *((int*)& __m256_op0[6]) = 0xff00ffff; -+ *((int*)& __m256_op0[5]) = 0xff00ffff; -+ *((int*)& __m256_op0[4]) = 0xff00ffff; -+ *((int*)& __m256_op0[3]) = 0xff00ffff; -+ *((int*)& __m256_op0[2]) = 0xff00ffff; -+ *((int*)& __m256_op0[1]) = 0xff00ffff; -+ *((int*)& __m256_op0[0]) = 0xff00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fe01020b0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fe01020b0001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f8000004f800000; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x64); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; -+ *((unsigned long*)& __m128d_result[1]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128d_result[0]) = 0x43d3e0000013e000; -+ __m128d_out = __lsx_vffint_d_l(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xff00d5007f00ffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xff00d5007f00ffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x7f00d5007f00ffff; -+ *((unsigned long*)& __m256d_result[2]) = 0x7f00ffffff00ffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x7f00d5007f00ffff; -+ *((unsigned long*)& __m256d_result[0]) = 0x7f00ffffff00ffff; -+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffb080ffffb080; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffb080ffffb080; -+ *((unsigned long*)& __m128i_op2[1]) = 0x004fcfcfd01f9f9f; -+ *((unsigned long*)& __m128i_op2[0]) = 0x9f4fcfcfcf800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3504b5fd2dee1f80; -+ *((unsigned long*)& __m128i_result[0]) = 0x4676f70fc0000000; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fe01020b0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fe01020b0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fff0fff00000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0fff0fff00000020; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0fff0fff00000020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0fff0fff00000020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00d5007f00ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00d5007f00ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_result[0]) = 0x43d3e0000013e000; -+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff3fffffff3; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff3fffffff3; -+ __m128i_out = __lsx_vmini_w(__m128i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004fcfcfd01f9f9f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9f4fcfcfcf800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004fcfcfd01f9f9f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9f4fcfcfcf800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x004f1fcfd01f9f9f; -+ *((unsigned long*)& __m128i_result[0]) = 0x9f4fcfcfcf800000; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xda); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ 
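As a reading aid for the vmini_w case a few lines above (all-ones lanes against the immediate -13), here is a minimal scalar model of the per-lane signed minimum that case checks; the helper name min_s32 is illustrative only and is not part of the deleted test file.

#include <stdio.h>
#include <stdint.h>

/* Scalar model of the per-lane arithmetic of the vmini_w case above:
   each 32-bit lane becomes min(lane, imm).  Every lane of op0 is
   0xffffffff (-1) and imm is -13, so every result lane is -13 =
   0xfffffff3, i.e. 0xfffffff3fffffff3 per 64-bit half, as expected. */
static int32_t min_s32(int32_t a, int32_t b) { return a < b ? a : b; }

int main(void) {
    uint32_t lane = (uint32_t)min_s32((int32_t)0xffffffff, -13);
    uint64_t half = ((uint64_t)lane << 32) | lane;
    printf("0x%016llx\n", (unsigned long long)half); /* 0xfffffff3fffffff3 */
    return 0;
}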
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc14eef7fc14ea000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000ea000010fa101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x43d3e0000013e000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffd3000000130000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffd3000000130000; -+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffff3fffffff3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff3fffffff3; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff3fffffff4; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff3fffffff4; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffd3000000130000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd3000000130000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffd3000000130000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffd3000000130000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffd3000000130000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffd3000000130000; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x004f1fcfd01f9f9f; -+ *((unsigned long*)& __m128d_op0[0]) = 0x9f4fcfcfcf800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000004f804f80; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000004f804f80; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffd30000; -+ *((int*)& __m128_op0[2]) = 0x00130000; -+ *((int*)& __m128_op0[1]) = 0xffd30000; -+ *((int*)& __m128_op0[0]) = 0x00130000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00d5007f00ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00d5007f00ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000055ff01f90ab5; -+ *((unsigned long*)& __m256i_op0[2]) = 0xaa95eafffec6e01f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000055ff01f90ab5; -+ *((unsigned long*)& __m256i_op0[0]) = 0xaa95eafffec6e01f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfec6e01ffec6e01f; -+ *((unsigned long*)& __m256i_result[2]) = 0xfec6e01ffec6e01f; -+ *((unsigned long*)& __m256i_result[1]) = 0xfec6e01ffec6e01f; -+ *((unsigned long*)& __m256i_result[0]) = 0xfec6e01ffec6e01f; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
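The vssrlni_h_w case above pairs a logical 32-bit shift with signed 16-bit saturation. The sketch below models one lane under that reading of the semantics, which is inferred from the expected vectors rather than taken from a specification; ssrlni_lane is an invented name. The negative clamp is omitted because a logical shift by 13 can never yield a negative signed value here.

#include <stdio.h>
#include <stdint.h>

/* One lane of the vssrlni_h_w case above: shift the 32-bit lane right
   logically, then saturate the signed result to 16 bits.  With an
   all-ones lane and shift 13, 0xffffffff >> 13 = 0x7ffff, which
   exceeds 0x7fff and saturates -- matching the 0x7fff7fff7fff7fff
   half produced from op0, while the zero op1 lanes stay 0x0000. */
static uint16_t ssrlni_lane(uint32_t x, unsigned sh) {
    uint32_t v = x >> sh;
    if (v > 0x7fff) return 0x7fff;  /* positive saturation */
    return (uint16_t)v;
}

int main(void) {
    printf("0x%04x 0x%04x\n", ssrlni_lane(0xffffffffu, 13),
           ssrlni_lane(0x00000000u, 13)); /* 0x7fff 0x0000 */
    return 0;
}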
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000003f00390035; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8015003f0006001f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000003f00390035; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8015003f0006001f; -+ *((unsigned long*)& __m256i_result[3]) = 0x000b004a00440040; -+ *((unsigned long*)& __m256i_result[2]) = 0x8020004a0011002a; -+ *((unsigned long*)& __m256i_result[1]) = 0x000b004a00440040; -+ *((unsigned long*)& __m256i_result[0]) = 0x8020004a0011002a; -+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x80000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x80000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x80000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x80000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfrint_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0000003f; -+ *((int*)& __m256_op0[6]) = 0x00390035; -+ *((int*)& __m256_op0[5]) = 0x8015003f; -+ *((int*)& __m256_op0[4]) = 0x0006001f; -+ *((int*)& __m256_op0[3]) = 0x0000003f; -+ *((int*)& __m256_op0[2]) = 0x00390035; -+ *((int*)& __m256_op0[1]) = 0x8015003f; -+ *((int*)& __m256_op0[0]) = 0x0006001f; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000b004a00440040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8020004a0011002a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000b004a00440040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8020004a0011002a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000004a00000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000004a0000002a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000004a00000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000004a0000002a;
-+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x000b004a00440040;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x8020004a0011002a;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x000b004a00440040;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x8020004a0011002a;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0fff0fff00000020;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0fff0fff00000020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x000055ff;
-+ *((int*)& __m256_op0[6]) = 0x01f90ab5;
-+ *((int*)& __m256_op0[5]) = 0xaa95eaff;
-+ *((int*)& __m256_op0[4]) = 0xfec6e01f;
-+ *((int*)& __m256_op0[3]) = 0x000055ff;
-+ *((int*)& __m256_op0[2]) = 0x01f90ab5;
-+ *((int*)& __m256_op0[1]) = 0xaa95eaff;
-+ *((int*)& __m256_op0[0]) = 0xfec6e01f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
-+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ long_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000003f00390035;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8015003f0006001f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000003f00390035;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8015003f0006001f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x80000000001529c1;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80007073cadc3779;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x80000000001529c1;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80007073cadc3779;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00008000003f0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00390015003529c1;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00008000003f0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00390015003529c1;
-+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x004f0080004f0080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x004f0080004f0080;
-+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x80000000001529c1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80007073cadc3779;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x80000000001529c1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80007073cadc3779;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_d(__m256i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x9f009f009f009f00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x9f009f009f009f00;
-+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff8001ffff8001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff;
-+ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff;
-+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0x0000ffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0x0000ffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000fffffffefffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff7fffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000fffffffefffe;
-+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x004f0080;
-+ *((int*)& __m128_op0[2]) = 0x004f0080;
-+ *((int*)& __m128_op0[1]) = 0x004f0080;
-+ *((int*)& __m128_op0[0]) = 0x004f0080;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_op2[3]) = 0x7fff7fff;
-+ *((int*)& __m128_op2[2]) = 0x7fff7fff;
-+ *((int*)& __m128_op2[1]) = 0x00000000;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x7fff7fff;
-+ *((int*)& __m128_result[2]) = 0x7fff7fff;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff8001ffff8001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef;
-+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x4b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000004a00000040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000004a0000002a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000004a00000040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000004a0000002a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fffffffefffe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff7fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fffffffefffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002500000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00008024ffff8014;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffc0002500000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00008024ffff8014;
-+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000000;
-+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff00000000;
-+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_result[2]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_result[0]) = 0x556caad9aabbaa88;
-+ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_result[2]) = 0x556caad9aabbaa88;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000004a557baac4;
-+ *((unsigned long*)& __m256i_result[0]) = 0x556caad9aabbaa88;
-+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve0_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000010100000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000010100000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x00008000003f0000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x00390015003529c1;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x00008000003f0000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x00390015003529c1;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x32);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0909090909090909;
-+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010003;
-+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x0000004a;
-+ *((int*)& __m256_op0[6]) = 0x557baac4;
-+ *((int*)& __m256_op0[5]) = 0x556caad9;
-+ *((int*)& __m256_op0[4]) = 0xaabbaa88;
-+ *((int*)& __m256_op0[3]) = 0x0000004a;
-+ *((int*)& __m256_op0[2]) = 0x557baac4;
-+ *((int*)& __m256_op0[1]) = 0x556caad9;
-+ *((int*)& __m256_op0[0]) = 0xaabbaa88;
-+ *((int*)& __m256_op1[7]) = 0x09090909;
-+ *((int*)& __m256_op1[6]) = 0x09090909;
-+ *((int*)& __m256_op1[5]) = 0x09090909;
-+ *((int*)& __m256_op1[4]) = 0x09090909;
-+ *((int*)& __m256_op1[3]) = 0x09090909;
-+ *((int*)& __m256_op1[2]) = 0x09090909;
-+ *((int*)& __m256_op1[1]) = 0x09090909;
-+ *((int*)& __m256_op1[0]) = 0x09090909;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8009700478185812;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe009f00ee7fb0800;
-+ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80;
-+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ff00ff00;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ffffff00;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ff00ff00;
-+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ffe7ffe7ffe7ffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00007ffe00007ffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000003;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0080000200000003;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010002;
-+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00009f0000009f00;
-+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
-+ __m256i_out = __lasx_xvclz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x4f804f80;
-+ *((int*)& __m128_op0[0]) = 0x4f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef;
-+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00007fff;
-+ *((int*)& __m128_op1[2]) = 0x00007fff;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00007fff;
-+ *((int*)& __m128_result[2]) = 0x00007fff;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010002;
-+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000200000003;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0080000200000003;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x75b043c4d17db125;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xeef8227b596117b1;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x75b043c4d17db125;
-+ *((unsigned long*)& __m128i_result[0]) = 0xeef8227b4f8017b1;
-+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffefffef;
-+ __m128i_out = __lsx_vmini_w(__m128i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x75b043c4d17db125;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xeef8227b4f8017b1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0;
-+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000de32400;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0;
-+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x77);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00000000;
-+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef;
-+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000070700000707;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000009091b1b1212;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000070700000707;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000009091b1b1212;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000fffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x028c026bfff027af;
-+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x028c026bfff027af;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000003fc03fc00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffc00a3009b000;
-+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x55555555;
-+ *((int*)& __m256_op0[5]) = 0x00000001;
-+ *((int*)& __m256_op0[4]) = 0x00000004;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x55555555;
-+ *((int*)& __m256_op0[1]) = 0x00000001;
-+ *((int*)& __m256_op0[0]) = 0x00000004;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00007fff00000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0040000100000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff00000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000055555555;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000004;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000055555555;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000004;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x2aaaaaaa2aaaaaab;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2aaaaaaa2aaaaaab;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1111111111111111;
-+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0;
-+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0888888888888888;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0888888888888888;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0888888888888888;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0888888888888888;
-+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80;
-+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x75b043c4d17db125;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xeef8227b4f8017b1;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x75b043c4007db125;
-+ *((unsigned long*)& __m128i_result[0]) = 0xeef8227b4f8017b1;
-+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x004f0080004f0080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x004f0080004f0080;
-+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001000fbff9;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002ff9afef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000004f804f81;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000004f804f80;
-+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001020202;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001020202;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0;
-+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1111111111111111;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1111111111111111;
-+ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_result[2]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_result[1]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_result[0]) = 0xdededededededede;
-+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x21);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000004f804f81;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000004f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001400000014;
-+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63996399;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6363797c63996399;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x171f0a1f6376441f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x4f804f81;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x4f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000004f804f81;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000004f804f80;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000004fc04f81;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000004fc04f80;
-+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xdededededededede;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8888888808888888;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0888888888888888;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8888888808888888;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0888888888888888;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x77777777f7777777;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf777777777777777;
-+ *((unsigned long*)& __m256i_result[1]) = 0x77777777f7777777;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf777777777777777;
-+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xdededede;
-+ *((int*)& __m256_op0[6]) = 0xdededede;
-+ *((int*)& __m256_op0[5]) = 0xdededede;
-+ *((int*)& __m256_op0[4]) = 0xdededede;
-+ *((int*)& __m256_op0[3]) = 0xdededede;
-+ *((int*)& __m256_op0[2]) = 0xdededede;
-+ *((int*)& __m256_op0[1]) = 0xdededede;
-+ *((int*)& __m256_op0[0]) = 0xdededede;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63996399;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x6363797c63990099;
-+ *((unsigned long*)& __m128i_result[0]) = 0x171f0a1f6376441f;
-+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x94);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63990099;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6363797c63990099;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x171f0a1f6376441f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x181e180005021811;
-+ *((unsigned long*)& __m128i_result[0]) = 0x181e180005021811;
-+ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x77777777;
-+ *((int*)& __m256_op0[6]) = 0xf7777777;
-+ *((int*)& __m256_op0[5]) = 0xf7777777;
-+ *((int*)& __m256_op0[4]) = 0x77777777;
-+ *((int*)& __m256_op0[3]) = 0x77777777;
-+ *((int*)& __m256_op0[2]) = 0xf7777777;
-+ *((int*)& __m256_op0[1]) = 0xf7777777;
-+ *((int*)& __m256_op0[0]) = 0x77777777;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
-+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010;
-+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)&
__m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x80000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x77777777f7777777; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf777777777777777; -+ *((unsigned long*)& __m256i_op0[1]) = 0x77777777f7777777; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf777777777777777; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000004fc04f81; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000004fc04f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000004fc04f81; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000004fc04f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000004fc04f81; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000004fc04f80; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; -+ *((unsigned long*)& __m128i_result[1]) = 0x9292929292929292; -+ *((unsigned long*)& __m128i_result[0]) = 0x8090808280909002; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0x6d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ 
*((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000010; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000010; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000002b902b3e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000002b902b3e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000002a102a3a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000002a102a3a; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_result[3]) = 0x1000100054445443; -+ *((unsigned long*)& __m256i_result[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_result[1]) = 0x1000100054445443; -+ *((unsigned long*)& __m256i_result[0]) = 0x7bbbbbbbf7777778; -+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000004fc04f81; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000004fc04f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f7f; -+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3e035e51522f0799; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3e035e51522f0799; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000004fc04f81; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000004fc04f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001c00ffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x000001000f00fe00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000017fff00fe7f; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10; -+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007bbbbbbb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007bbbbbbb; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x8d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007bbbbbbb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007bbbbbbb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000073333333; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000073333333; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000001000f00fe00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000017fff00fe7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001c00ffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010201808040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010280808040; -+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000073333333; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000073333333; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000073333333; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000073333333; -+ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x000001000f00fe00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000017fff00fe7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000f00; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff00; -+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000100; -+ *((int*)& __m128_op0[2]) = 0x0f00fe00; -+ *((int*)& __m128_op0[1]) = 0x0000017f; -+ *((int*)& __m128_op0[0]) = 0xff00fe7f; -+ *((unsigned long*)& __m128d_result[1]) = 0x3727f00000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xc7e01fcfe0000000; -+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf7fdd5ffebe1c9e3; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf7fdd5ffebe1c9e3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000002467db99; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000003e143852; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000002467db99; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000003e143852; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffdb982466; -+ *((unsigned long*)& __m256i_result[2]) = 0xf7fdd5ffadcd9191; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffdb982466; -+ *((unsigned long*)& __m256i_result[0]) = 0xf7fdd5ffadcd9191; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ffa7f8ff81; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000003f0080ffc0; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007fff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000a7f87fffff81; -+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128i_result[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461; -+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00003ff000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_w_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000002467db99; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003e143852; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000002467db99; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003e143852; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000246700003e14; -+ *((unsigned long*)& __m256i_result[2]) = 0x000044447bbbf777; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000246700003e14; -+ *((unsigned long*)& __m256i_result[0]) = 0x000044447bbbf777; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000073333333; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000073333333; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x56); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000007fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ffa7f8ff81; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000003f0080ffc0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000007fff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000a7f87fffff81; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000ffd400000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000004000000040; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000004444; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007bbb0000f777; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000004444; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007bbb0000f777; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004444; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00007bbb0000f777; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004444; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007bbb0000f777; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000002222; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003dde00007bbc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000002222; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003dde00007bbc; -+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000f00; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461; -+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3727f00000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc7e01fcfe0000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3727112c00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x39201f7120000040; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xe5b9012c00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xc7e01fcfe0000000; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022222221; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3dddddddfbbb3bbc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022222221; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3dddddddfbbb3bbc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00007fff; -+ *((int*)& __m128_op0[2]) = 0x00007fff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x2bfd9461; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x2bfd9461; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x1ff800000000477f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000015fec9b0; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00070000; -+ *((int*)& __m128_op0[2]) = 0x00040000; -+ *((int*)& __m128_op0[1]) = 0x00030000; -+ *((int*)& __m128_op0[0]) = 0x00010000; -+ *((int*)& __m128_op1[3]) = 0x00070000; -+ *((int*)& __m128_op1[2]) = 0x00040000; -+ *((int*)& 
__m128_op1[1]) = 0x00030000; -+ *((int*)& __m128_op1[0]) = 0x00010000; -+ *((int*)& __m128_result[3]) = 0x3f800000; -+ *((int*)& __m128_result[2]) = 0x3f800000; -+ *((int*)& __m128_result[1]) = 0x3f800000; -+ *((int*)& __m128_result[0]) = 0x3f800000; -+ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000400400004004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000015ff4a31; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004444; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00007bbb0000f777; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004444; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007bbb0000f777; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000002222;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00003ddd80007bbb;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000002222;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00003ddd80007bbb;
-+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800001;
-+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0007000000040000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000;
-+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x3f8000003f800001;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x3f8000003f800001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800000;
-+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000780000007800;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0007000000040000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000;
-+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000002222;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003ddd80007bbb;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000002222;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003ddd80007bbb;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001700170017;
-+ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x17);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000002222;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003ddd80007bbb;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000002222;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003ddd80007bbb;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x31);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017;
-+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0003000000010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002000000010000;
-+ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000017;
-+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000000010001;
-+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00030000;
-+ *((int*)& __m128_op0[2]) = 0x00010000;
-+ *((int*)& __m128_op0[1]) = 0x00020000;
-+ *((int*)& __m128_op0[0]) = 0x00010000;
-+ *((int*)& __m128_op1[3]) = 0x3f800000;
-+ *((int*)& __m128_op1[2]) = 0x3f800000;
-+ *((int*)& __m128_op1[1]) = 0x3f800000;
-+ *((int*)& __m128_op1[0]) = 0x3f800000;
-+ *((int*)& __m128_op2[3]) = 0x00030000;
-+ *((int*)& __m128_op2[2]) = 0x00010000;
-+ *((int*)& __m128_op2[1]) = 0x00020000;
-+ *((int*)& __m128_op2[0]) = 0x00010000;
-+ *((int*)& __m128_result[3]) = 0x80060000;
-+ *((int*)& __m128_result[2]) = 0x80020000;
-+ *((int*)& __m128_result[1]) = 0x80040000;
-+ *((int*)& __m128_result[0]) = 0x80020000;
-+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017;
-+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017;
-+ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0003000000010000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000010000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x000000ff;
-+ *((int*)& __m128_op0[2]) = 0x808000ff;
-+ *((int*)& __m128_op0[1]) = 0x000000ff;
-+ *((int*)& __m128_op0[0]) = 0x808000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ unsigned_int_result = 0x0000000000000000;
-+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8006000080020000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8004000080020000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff8fffffff8;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff8fffffff8;
-+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vclo_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001700170017;
-+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8006000080020000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8004000080020000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8006000080020000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8004000080020000;
-+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00003f8000000000;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
-+ unsigned_long_int_result = 0x3f8000003f800000;
-+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1fc000001fc00000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1fc000001fc00000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000;
-+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x8000ffff00000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x8000ffff00000000;
-+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x28);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
-+ __m128i_out = __lsx_vclz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000ffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000ffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000fefe00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000fefe00000000;
-+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000ffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8000ffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000080003f80ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x28);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
-+ __m128d_out = __lsx_vfrecip_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000d;
-+ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0010000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3ff0010000000000;
-+ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00003f8000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x003f800000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003f800000000000;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xd2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000080003f80ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x3ff0010000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x3ff0010000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000080003f80ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00008000;
-+ *((int*)& __m128_op1[2]) = 0x3f80ffff;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f800000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff0000ffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff0000ffffffffff;
-+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff;
-+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f800000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f800000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000080003f80ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000001fc00000000;
-+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0010000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0010000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3fffff0000000000;
-+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x27);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000001fc00000000;
-+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000;
-+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
-+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100007f01;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00020000;
-+ *((int*)& __m128_op0[2]) = 0x00020000;
-+ *((int*)& __m128_op0[1]) = 0x000001fc;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100007f01;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000;
-+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000001;
-+ *((int*)& __m128_op0[2]) = 0x00007f01;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x7ff8000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002000000020000;
-+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010100000000;
-+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800000;
-+ __m128i_out = __lsx_vreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x3c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x000000fe;
-+ *((int*)& __m128_op0[2]) = 0x808000ff;
-+ *((int*)& __m128_op0[1]) = 0x000000fe;
-+ *((int*)& __m128_op0[0]) = 0x808000fe;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffff00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00;
-+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x80000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x80000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x80000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x80000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x0000ffff;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x0000ffff;
-+ *((int*)& __m256_op2[7]) = 0x00000000;
-+ *((int*)& __m256_op2[6]) = 0x00000001;
-+ *((int*)& __m256_op2[5]) = 0x00000000;
-+ *((int*)& __m256_op2[4]) = 0x00000001;
-+ *((int*)& __m256_op2[3]) = 0x00000000;
-+ *((int*)& __m256_op2[2]) = 0x00000001;
-+ *((int*)& __m256_op2[1]) = 0x00000000;
-+ *((int*)& __m256_op2[0]) = 0x00000001;
-+ *((int*)& __m256_result[7]) = 0x80000000;
-+ *((int*)& __m256_result[6]) = 0x80000001;
-+ *((int*)& __m256_result[5]) = 0x80000000;
-+ *((int*)& __m256_result[4]) = 0x80000001;
-+ *((int*)& __m256_result[3]) = 0x80000000;
-+ *((int*)& __m256_result[2]) = 0x80000001;
-+ *((int*)& __m256_result[1]) = 0x80000000;
-+ *((int*)& __m256_result[0]) = 0x80000001;
-+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ffff000000ff00;
-+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00;
-+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f7fff003f800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f7fff003f800000;
-+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f80000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3f80000000000000;
-+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3f80000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3f80000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x1fc0000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1fc07f8000007f80;
-+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00;
-+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0f0000000f000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000;
-+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffc1000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffcc000b000b000b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000b000b010a000b;
-+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
-+ __m256i_out = __lasx_xvclo_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffcc000b000b000b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000b000b010a000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f7f000b000b000b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000b000b010a000b;
-+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3fffff0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3ffffeffffffffe5;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3ffffeffffffffe5;
-+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffc1000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffc1000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fff000000007fff;
-+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x03c0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x03c0038000000380;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff0000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ffff000000ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ffff000000ff00;
-+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000001;
-+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x03c0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x03c0038000000380;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000003c000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslei_w(__m256i_op0,-2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000010a000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00ffff0000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00ffff000000ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000010a000b;
-+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f000b000b000b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000b000b010a000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101080408040804;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0804080407040804;
-+ __m128i_out = __lsx_vclz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000;
-+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0909090909090909;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0909090909090909;
-+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x66);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff000000007fff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_b(__m256i_op0,7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0101080408040804;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0804080407040804;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000010a000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101080408040804;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000100810080e081;
-+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000010a000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800;
-+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003fc0;
-+ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x22);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000800000000000;
-+ __m256i_out = __lasx_xvneg_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x0000ffff;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x0000ffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x0000ffff;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x0000ffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned
long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe50000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffe020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3fc00000010a000b; -+ *((unsigned long*)& __m128i_result[1]) = 0x00001b0000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; -+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x4d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0101080408040804; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0804080407040804; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0101080408040804; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0804080407040804; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0101080408040804; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0804080407040804; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; -+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[3]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_result[2]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_result[1]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_result[0]) = 0x00c200c200c200bb; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100089bde; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x80044def00000001; -+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80044def00000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x00007f8449a19084; -+ *((unsigned long*)& __m128i_result[0]) = 0x49a210000000ff00; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; -+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc2c2ffffc2c2; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffc2c2ffffc2c2; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc2c2ffffc2c2; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffc2c2ffffc2c2; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x003100310031002f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x003100310031002f; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_result[3]) = 0x007fffff007fffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x007fffff007fffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x007fffff007fffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x007fffff007fffff; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00070000; -+ *((int*)& __m128_op0[2]) = 0x00050000; -+ *((int*)& __m128_op0[1]) = 0x00030000; -+ *((int*)& __m128_op0[0]) = 0x00010000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xff81007c; -+ *((int*)& __m128_op1[1]) = 0xffb7005f; -+ *((int*)& __m128_op1[0]) = 0x0070007c; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000805; -+ *((unsigned long*)& __m128i_op0[0]) = 0x978d95ac768d8784; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000408; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; -+ *((unsigned long*)& __m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; -+ *((unsigned long*)& __m128i_result[0]) = 0xf7f7f7f7f7f7fbff; -+ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007fffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007fffff007fffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007fffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007fffff007fffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00c200c200c200c2; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00c200c200c200bb; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffbdff3cffbdff44; -+ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf7f7f7ff8e8c6d7e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf7f7f7f7f7f7fbff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; -+ *((unsigned long*)& __m128i_result[0]) = 0xf7f7f7f7f7f7fbff; -+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000408; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000100; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0003000000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001dc; -+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000008; -+ *((int*)& __m128_op0[2]) = 0x97957687; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000408; -+ *((int*)& __m128_op1[3]) = 0x00000008; -+ *((int*)& __m128_op1[2]) = 0x97957687; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000408; -+ *((int*)& __m128_op2[3]) = 0x00010001; -+ *((int*)& __m128_op2[2]) = 0x00010001; -+ *((int*)& __m128_op2[1]) = 0x00010001; -+ *((int*)& __m128_op2[0]) = 0x04000800; -+ *((int*)& __m128_result[3]) = 0x80010001; -+ *((int*)& __m128_result[2]) = 0x80010001; -+ *((int*)& __m128_result[1]) = 0x80010001; -+ *((int*)& __m128_result[0]) = 0x84000800; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8001000180010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8001000184000800; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff80007e028401; -+ *((unsigned long*)& __m128i_result[0]) = 0x9a10144000400000; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000007ae567a3e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000700ff00000000; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000040004000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010002000000000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff81007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffb7005f0070007c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000007c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000005f0003e000; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffff7effffff46; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff7effffff46; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x42); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x00000000000001dc; -+ *((unsigned long*)& 
__m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x80000000000001dc; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x80000000000001dc; -+ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffbdff3cffbdff44; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfe8bfe0efe8bfe12; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfe8bfe0efe8bfe12; -+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op1[3]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; -+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffe000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0xfffe000000000000; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x31); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffff7e; -+ *((int*)& __m256_op0[4]) = 0xffffff46; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffff7e; -+ *((int*)& __m256_op0[0]) = 0xffffff46; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001dc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff24; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff24; -+ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfe8bfe0efe8bfe12; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfe8bfe0efe8bfe12; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff81007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffb7005f0070007c; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80007e028401; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9a10144000400000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001ffff00010; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x5b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x80010001b57fc565; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8001000184000be0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x80010001b57fc565; -+ *((unsigned long*)& __m128i_result[0]) = 0x8001000184000be0; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000700ff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000040004000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000700ff00000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000005f0003e000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000408; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff24; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff24; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; -+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; -+ *((unsigned long*)& __m128i_op1[1]) = 0x80010001b57fc565; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8001000184000be0; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000080001fffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040004000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0010002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000ed0e0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004080; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; -+ __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff00ffff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfc00fcfc00fc; -+ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcfcfcfcfc00; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0007e215b122; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ffeffff7bfff828; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80010001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff80010001; -+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfefefefe; -+ *((int*)& __m256_op0[6]) = 0xfefefefe; -+ *((int*)& __m256_op0[5]) = 0xfe8bfe0e; -+ *((int*)& 
__m256_op0[4]) = 0xfe8bfe12; -+ *((int*)& __m256_op0[3]) = 0xfefefefe; -+ *((int*)& __m256_op0[2]) = 0xfefefefe; -+ *((int*)& __m256_op0[1]) = 0xfe8bfe0e; -+ *((int*)& __m256_op0[0]) = 0xfe8bfe12; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x80010009816ac5de; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8001000184000bd8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0bd80bd80bd80bd8; -+ *((unsigned long*)& __m128i_result[0]) = 0x0bd80bd80bd80bd8; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000007; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ed0e0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000004080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000ed0e0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004080; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x80000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x80000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x80000000; -+ *((int*)& 
__m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x80000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x80000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x80000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x80000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x80000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_result[3]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_result[2]) = 0x6161616100000018; -+ *((unsigned long*)& __m256i_result[1]) = 0x6161616161616161; -+ *((unsigned long*)& __m256i_result[0]) = 0x6161616100000018; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7fff0007; -+ *((int*)& __m128_op0[2]) = 0xe215b122; -+ *((int*)& __m128_op0[1]) = 0x7ffeffff; -+ *((int*)& __m128_op0[0]) = 0x7bfff828; -+ *((int*)& __m128_op1[3]) = 0x80010009; -+ *((int*)& __m128_op1[2]) = 0x816ac5de; -+ *((int*)& __m128_op1[1]) = 0x80010001; -+ *((int*)& __m128_op1[0]) = 0x84000bd8; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bd80bd8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80bd8; -+ unsigned_long_int_result = 0x0bd80bd80bd80bd8; -+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffa; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x59); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x80000000b57ec564; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000083ff0be0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0014000000140014; -+ *((unsigned long*)& __m128i_result[0]) = 0x0014000000140014; -+ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x80000000b57ec564; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000083ff0be0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001b57ec563; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000183ff0bdf; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5b35342c979955da; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m128i_result[0]) = 0x5b35342c970455da; -+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x0); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000003397dd140; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000004bd7cdd20; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0016ffb00016ffb0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0016ffb00016ffb0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000004a294b; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000006d04bc; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffffa; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x2a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000004a294b; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x00000000006d04bc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_result[2]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_result[1]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_result[0]) = 0x0016001600160016; -+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x7fff0007e215b122; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7ffeffff7bfff828; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ 
*((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bdfffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0bef0b880bd80bd8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000017b017b01; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x5b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0016001600160016; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x0bd80bd8; -+ *((int*)& __m128_op1[2]) = 0x0bdfffff; -+ *((int*)& __m128_op1[1]) = 0x0bd80bd8; -+ *((int*)& __m128_op1[0]) = 0x0bd80000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1ffffffff8001000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf0bd80bd80bd8000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1ffffffff8001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf0bd80bd80bd8000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7ffffffefffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xdfffdfffdffffffe; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xd9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1ffffffff8001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf0bd80bd80bd8000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1ffffffff8001000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf0bd80bd80bd8000; -+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfffe0001; -+ *((int*)& __m128_op0[2]) = 0xfffe0001; -+ *((int*)& __m128_op0[1]) = 0xfffe0001; -+ *((int*)& __m128_op0[0]) = 0xfffe0001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xfffe0001; -+ *((int*)& __m128_result[2]) = 0xfffe0001; -+ *((int*)& __m128_result[1]) = 0xfffe0001; -+ *((int*)& __m128_result[0]) = 0xfffe0001; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffdfffffffdff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffdfffffffdff; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x37); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001c; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001c; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001c; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001c; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x0bd80bd80bdfffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0bd80bd80bd80000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xf9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1ffffffff8001000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf0bd80bd80bd8000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0001fffe0001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x8); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3d3d3d3d3d3d3d3d; -+ *((unsigned long*)& __m128i_result[0]) = 0x3d3d3d3d3d3d3d3d; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x3d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010000000000000; -+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3d3d3d3d3d3d3d3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3d3d3d3d3d3d3d3d; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x3d3d3d3d; -+ *((int*)& __m128_op0[2]) = 0x3d3d3d3d; -+ *((int*)& __m128_op0[1]) = 0x3d3d3d3d; -+ *((int*)& __m128_op0[0]) = 0x3d3d3d3d; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00100000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x0000bd3d; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000c00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000c00; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00bd003d; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000bd3d00000000; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000bd3d00000000; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; -+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000020202020; -+ 
__m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x3a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202020; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; -+ __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000bd003d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0013001300130013; -+ *((unsigned long*)& __m128i_result[0]) = 0x0013001300130013; -+ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x2020202020202020; -+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00130013; -+ *((int*)& __m128_op0[2]) = 0x00130013; -+ *((int*)& __m128_op0[1]) = 0x00130013; -+ *((int*)& __m128_op0[0]) = 0x00130013; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffff0000000ad3d; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff000fffff000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffff0000000ad3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff000fffff000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xefffdffff0009d3d; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000bd3d00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000bd3d00000000; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x000000000000bd3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000c0000bd49; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000c7fff000c; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffff0000000ad3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff000fffff000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffff00010001000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff000fffff000; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000bd3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xefffdffff0009d3d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000bd3d; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; -+ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0020002000400040; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000bd3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000bd30; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d7fff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007a6d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000dfefe0000; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000005555; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000005555; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000c7fff000c; -+ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; -+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; -+ __m256i_out = __lasx_xvclz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0xb); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100010; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0008000800080008; -+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0020002000400040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000200020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000200020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000200020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000200020; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000bd3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000c7fff000c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000006ffef000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00060000; -+ *((int*)& __m256_op0[6]) = 0x00040000; -+ *((int*)& __m256_op0[5]) = 0x00020000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00060000; -+ *((int*)& __m256_op0[2]) = 0x00040000; -+ *((int*)& __m256_op0[1]) = 0x00020000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00060000; -+ *((int*)& __m256_op1[6]) = 0x00040000; -+ *((int*)& __m256_op1[5]) = 0x00020000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00060000; -+ *((int*)& __m256_op1[2]) = 0x00040000; -+ *((int*)& __m256_op1[1]) = 0x00020000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0000000c; -+ *((int*)& __m128_op0[2]) = 0x7fff000c; -+ *((int*)& __m128_op0[1]) = 0x10001000; -+ *((int*)& __m128_op0[0]) = 0x10001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; -+ __m128i_out = __lsx_vfclass_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00050008000e0010; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0007000800100010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00050008000e0010; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0007000800100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffff000f0008d3c; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff0016fff8d3d; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffff000f0008d3c; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff0016fff8d3d; -+ *((unsigned long*)& __m128i_result[1]) = 0xe10000004deb2610; -+ *((unsigned long*)& __m128i_result[0]) = 0xe101e0014dec4089; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xe1000000; -+ *((int*)& __m128_op0[2]) = 0x4deb2610; -+ *((int*)& __m128_op0[1]) = 0xe101e001; -+ *((int*)& __m128_op0[0]) = 0x4dec4089; -+ *((unsigned long*)& __m128i_result[1]) = 0x800000001d64c200; -+ *((unsigned long*)& __m128i_result[0]) = 0x800000001d881120; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0006000000020000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0006000000020000; -+ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000008000000080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000c7fff000c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xfffff000f0008d3c; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffff0016fff8d3d; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000100f8100002; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff0ff8006f0f950; -+ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000008000000080; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000008000000080; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x95); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000080; -+ *((int*)& __m256_op0[6]) = 0x00000080; -+ *((int*)& __m256_op0[5]) = 0x00000080; -+ *((int*)& __m256_op0[4]) = 0x00000080; -+ *((int*)& __m256_op0[3]) 
= 0x00000080; -+ *((int*)& __m256_op0[2]) = 0x00000080; -+ *((int*)& __m256_op0[1]) = 0x00000080; -+ *((int*)& __m256_op0[0]) = 0x00000080; -+ *((int*)& __m256_op1[7]) = 0x00000001; -+ *((int*)& __m256_op1[6]) = 0x00000001; -+ *((int*)& __m256_op1[5]) = 0x00000001; -+ *((int*)& __m256_op1[4]) = 0x00000001; -+ *((int*)& __m256_op1[3]) = 0x00000001; -+ *((int*)& __m256_op1[2]) = 0x00000001; -+ *((int*)& __m256_op1[1]) = 0x00000001; -+ *((int*)& __m256_op1[0]) = 0x00000001; -+ *((int*)& __m256_result[7]) = 0x00000001; -+ *((int*)& __m256_result[6]) = 0x00000001; -+ *((int*)& __m256_result[5]) = 0x00000001; -+ *((int*)& __m256_result[4]) = 0x00000001; -+ *((int*)& __m256_result[3]) = 0x00000001; -+ *((int*)& __m256_result[2]) = 0x00000001; -+ *((int*)& __m256_result[1]) = 0x00000001; -+ *((int*)& __m256_result[0]) = 0x00000001; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f0009d3c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000016fff9d3d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000bd0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000007f0; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000916c; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000010000954d; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000f0009d3c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000016fff9d3d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000c000000060003; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000100c6ffef00d; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f0009d3c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000016fff9d3d; -+ *((unsigned long*)& __m128i_op1[1]) = 
0xfffff000f0008d3c; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff0016fff8d3d; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000003c3c; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff0101ffff3d3d; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff00010000fff; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x800000001d64c200; -+ *((unsigned long*)& __m128d_op0[0]) = 0x800000001d881120; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000f0009d3c; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000016fff9dff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; -+ __m256d_out = __lasx_xvflogb_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_d(__m256i_op0,14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff01; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf0000000f0000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf0000000f0000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff07effffe; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0001000100010000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100110002; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff00010000fff; -+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& 
__m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000200; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000200; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000200; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000200; -+ *((int*)& __m256_op2[7]) = 0xffffffa0; -+ *((int*)& __m256_op2[6]) = 0x00000001; -+ *((int*)& __m256_op2[5]) = 0xffffffe0; -+ *((int*)& __m256_op2[4]) = 0x00000001; -+ *((int*)& __m256_op2[3]) = 0xffffffa0; -+ *((int*)& __m256_op2[2]) = 0x00000001; -+ *((int*)& __m256_op2[1]) = 0xffffffe0; -+ *((int*)& __m256_op2[0]) = 0x00000001; -+ *((int*)& __m256_result[7]) = 0xffffffa0; -+ *((int*)& __m256_result[6]) = 0x80000001; -+ *((int*)& __m256_result[5]) = 0xffffffe0; -+ *((int*)& __m256_result[4]) = 0x80000001; -+ *((int*)& __m256_result[3]) = 0xffffffa0; -+ *((int*)& __m256_result[2]) = 0x80000001; -+ *((int*)& __m256_result[1]) = 0xffffffe0; -+ *((int*)& __m256_result[0]) = 0x80000001; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffa080000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffe080000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffa080000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffe080000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 
0x0006000000040000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000010000f00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000010000f01; -+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000100f8100002; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff0ff8006f0f950; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x006f0efe258ca851; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffff00; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00002f0a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000958aefff895e; -+ *((unsigned long*)& __m128i_result[1]) = 0xfafafafafafafafa; -+ *((unsigned long*)& __m128i_result[0]) = 0xfafa958aeffa89fa; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,-6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x24); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ffff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000100c6ffef10c; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffff01; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffeff400000df4; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ 
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+  __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000ff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000100c6ffef10c;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff70;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffff9001a47e;
-+  __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000067400002685;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+  __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+  __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
-+  ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000100c6ffef10c;
-+  unsigned_int_result = 0x00000000000000ff;
-+  unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2);
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff01;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffeff400000df4;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000ff91fffffff5;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffff00650001ffb0;
-+  __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000ff91fffffff5;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff00650001ffb0;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000067400002685;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000ff91fffffff5;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffff00650000ff85;
-+  __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x24);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+  __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x15);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+  __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff01;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffeff400000df4;
-+  *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff03fe;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffe9df0000e81b;
-+  __m128i_out = __lsx_vrotri_h(__m128i_op0,0x7);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x006f0efe258ca851;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffff90ffffe0f5;
-+  *((unsigned long*)& __m128i_result[0]) = 0x006e7973258d0ef4;
-+  __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x7);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vseqi_b(__m128i_op0,12);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040;
-+  __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x6);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00060000;
-+  *((int*)& __m256_op0[6]) = 0x00040000;
-+  *((int*)& __m256_op0[5]) = 0x00020000;
-+  *((int*)& __m256_op0[4]) = 0x00000000;
-+  *((int*)& __m256_op0[3]) = 0x00060000;
-+  *((int*)& __m256_op0[2]) = 0x00040000;
-+  *((int*)& __m256_op0[1]) = 0x00020000;
-+  *((int*)& __m256_op0[0]) = 0x00000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvftinth_l_s(__m256_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0x00000000;
-+  *((int*)& __m128_op0[2]) = 0x00000000;
-+  *((int*)& __m128_op0[1]) = 0x00000000;
-+  *((int*)& __m128_op0[0]) = 0x00000000;
-+  *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+  __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_vext2xv_h_b(__m256i_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000c000ffffc000;
-+  __m128i_out = __lsx_vsat_h(__m128i_op0,0xe);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200;
-+  __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000ff91fffffff5;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff00650001ffb0;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffff0001;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffff0001;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  int_op1 = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128d_op0[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x0000c000ffffc000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x0000c000ffffc000;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128d_result[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128d_result[0]) = 0x0000958affff995d;
-+  __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+  __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000040404040;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d;
-+  __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000c000ffffc000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000006f00000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000c00000000000;
-+  __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffefffe;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x5);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0101010101010401;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0101010101010401;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0101010101010401;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0101010101010401;
-+  __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020;
-+  __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000fdfc0000fd03;
-+  __m128i_out = __lsx_vsrari_b(__m128i_op0,0x5);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000040404040;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020;
-+  __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000202020;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000404040;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000202020;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000404040;
-+  __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x68);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000202020;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000404040;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000202020;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000404040;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000202020;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000404040;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000202020;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000404040;
-+  __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000040404040;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404240;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000040404040;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404240;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000040404040;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404240;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000040404040;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404240;
-+  *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007f7f;
-+  __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x3);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0xc);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffefffe;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000095896a760000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x006f0efe258ca851;
-+  *((unsigned long*)& __m128i_op2[1]) = 0xffff7fc8ffff8000;
-+  *((unsigned long*)& __m128i_op2[0]) = 0xffff200000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000015516a768038;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffff9ed2e1c000;
-+  __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x23);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvclo_h(__m256i_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vseqi_b(__m128i_op0,13);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x13);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x40404040;
-+  *((int*)& __m256_op0[6]) = 0x40404040;
-+  *((int*)& __m256_op0[5]) = 0x40404040;
-+  *((int*)& __m256_op0[4]) = 0x40404040;
-+  *((int*)& __m256_op0[3]) = 0x40404040;
-+  *((int*)& __m256_op0[2]) = 0x40404040;
-+  *((int*)& __m256_op0[1]) = 0x40404040;
-+  *((int*)& __m256_op0[0]) = 0x40404040;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
-+  __m256i_out = __lasx_xvftintl_l_s(__m256_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d;
-+  __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vrotri_w(__m128i_op0,0x16);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000015516a768038;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffff9ed2e1c000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d;
-+  __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
-+  __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000c00000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000bfffffffe0f6;
-+  __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+  *((int*)& __m128_result[3]) = 0x00000000;
-+  *((int*)& __m128_result[2]) = 0x00000000;
-+  *((int*)& __m128_result[1]) = 0x00000000;
-+  *((int*)& __m128_result[0]) = 0x00000000;
-+  __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000bfffffffe0f6;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff7a53;
-+  __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f00007f7f;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000001;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000007f0000007f;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000007f0000007f;
-+  *((unsigned long*)& __m256i_result[1]) = 0xff01ff80ff01ff80;
-+  *((unsigned long*)& __m256i_result[0]) = 0xff01ff800000007e;
-+  __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00000000;
-+  *((int*)& __m256_op0[6]) = 0x00000001;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x00000001;
-+  *((int*)& __m256_op0[3]) = 0x00000000;
-+  *((int*)& __m256_op0[2]) = 0x00000001;
-+  *((int*)& __m256_op0[1]) = 0x80000000;
-+  *((int*)& __m256_op0[0]) = 0x00000000;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x00000000;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x00000000;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x00000000;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0xffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+  __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000007f0000007f;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000007f0000007f;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xff01ff80ff01ff80;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xff01ff800000007e;
-+  *((unsigned long*)& __m256i_result[3]) = 0x003f8000003f8000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x003f8000003f8000;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffc07f80ffc07f80;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffc07f80003f0000;
-+  __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x11);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((int*)& __m128_result[3]) = 0x00000000;
-+  *((int*)& __m128_result[2]) = 0x36de0000;
-+  *((int*)& __m128_result[1]) = 0x00000000;
-+  *((int*)& __m128_result[0]) = 0x3be14000;
-+  __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
-+  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+  *((int*)& __m256_op0[7]) = 0xffffffff;
-+  *((int*)& __m256_op0[6]) = 0xffffffff;
-+  *((int*)& __m256_op0[5]) = 0xffffffff;
-+  *((int*)& __m256_op0[4]) = 0xffffffff;
-+  *((int*)& __m256_op0[3]) = 0xffffffff;
-+  *((int*)& __m256_op0[2]) = 0xffffffff;
-+  *((int*)& __m256_op0[1]) = 0xffffffff;
-+  *((int*)& __m256_op0[0]) = 0x00000000;
-+  *((int*)& __m256_result[7]) = 0xffffffff;
-+  *((int*)& __m256_result[6]) = 0xffffffff;
-+  *((int*)& __m256_result[5]) = 0xffffffff;
-+  *((int*)& __m256_result[4]) = 0xffffffff;
-+  *((int*)& __m256_result[3]) = 0xffffffff;
-+  *((int*)& __m256_result[2]) = 0xffffffff;
-+  *((int*)& __m256_result[1]) = 0xffffffff;
-+  *((int*)& __m256_result[0]) = 0x00000000;
-+  __m256_out = __lasx_xvfsqrt_s(__m256_op0);
-+  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000030000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000030000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1a);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x24);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000036de0000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000000003be14000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x00000000007e8a60;
-+  *((unsigned long*)& __m128i_result[0]) = 0x000000000001edde;
-+  __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000007e8a60;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000000000001edde;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003;
-+  __m128i_out = __lsx_vclo_h(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001;
-+  __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+  __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000003;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000000036de0000;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x000000003be14000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvftint_l_d(__m256d_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000030000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000030000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000018002;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000018002;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+  __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff7a53;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+  __m128i_out = __lsx_vslei_hu(__m128i_op0,0xd);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00000000;
-+  *((int*)& __m256_op0[6]) = 0x00018002;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x00000002;
-+  *((int*)& __m256_op0[3]) = 0x00000000;
-+  *((int*)& __m256_op0[2]) = 0x00018002;
-+  *((int*)& __m256_op0[1]) = 0x00000000;
-+  *((int*)& __m256_op0[0]) = 0x00000002;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x00030000;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x00000000;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x00030000;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x00000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+  __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+  __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x16);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000007e8a60;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000000000001edde;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vslti_du(__m128i_op0,0x19);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+  __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000000de00003e14;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00012b15ffff32ba;
-+  __m128i_out = __lsx_vrotri_d(__m128i_op0,0x3f);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff7a53;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+  __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff7a53;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+  __m128i_out = __lsx_vslti_wu(__m128i_op0,0x16);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0xb);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
-+  __m256i_out = __lasx_xvneg_w(__m256i_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a;
-+  __m128i_out = __lsx_vmaxi_w(__m128i_op0,-12);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000036de0000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x000000003be14000;
-+  *((unsigned long*)& __m128i_op2[1]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffff7a53;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00000000001f0000;
-+  __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a;
-+  __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvseqi_b(__m256i_op0,-13);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000000;
-+  __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_result[3]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_result[2]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_result[1]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000000003be14000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x000000003bfb4000;
-+  __m128i_out = __lsx_vmaxi_b(__m128i_op0,-5);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+  __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x55);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0001000000000002;
-+  __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vexth_w_h(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00000000;
-+  *((int*)& __m256_op0[6]) = 0x00000000;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x00000000;
-+  *((int*)& __m256_op0[3]) = 0x00000000;
-+  *((int*)& __m256_op0[2]) = 0x00000000;
-+  *((int*)& __m256_op0[1]) = 0x00000000;
-+  *((int*)& __m256_op0[0]) = 0x00000000;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x00000000;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x00000000;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x00000000;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x00000000;
-+  *((int*)& __m256_op2[7]) = 0x00000000;
-+  *((int*)& __m256_op2[6]) = 0x00000000;
-+  *((int*)& __m256_op2[5]) = 0x00000000;
-+  *((int*)& __m256_op2[4]) = 0x00000000;
-+  *((int*)& __m256_op2[3]) = 0x00000000;
-+  *((int*)& __m256_op2[2]) = 0x00000000;
-+  *((int*)& __m256_op2[1]) = 0x00000000;
-+  *((int*)& __m256_op2[0]) = 0x00000000;
-+  *((int*)& __m256_result[7]) = 0x00000000;
-+  *((int*)& __m256_result[6]) = 0x00000000;
-+  *((int*)& __m256_result[5]) = 0x00000000;
-+  *((int*)& __m256_result[4]) = 0x00000000;
-+  *((int*)& __m256_result[3]) = 0x00000000;
-+  *((int*)& __m256_result[2]) = 0x00000000;
-+  *((int*)& __m256_result[1]) = 0x00000000;
-+  *((int*)& __m256_result[0]) = 0x00000000;
-+  __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000;
-+  __m256i_out = __lasx_xvabsd_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x36);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000bfffffffe0f6;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x000000010001000a;
-+  __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000;
-+  __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000000003bfb4000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x000000003bfb4000;
-+  __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x000000003bfb4000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
-+  __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x000000003bfb4000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000;
-+  __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1);
-+
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000de0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000006f00000000; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x0000006f; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000037; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x2f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000de0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000006f00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000037; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000036; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000002; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80000000; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000000; -+ *((int*)& __m256_result[1]) = 0x80000000; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000020000000200; -+ __m128i_out = __lsx_vfclass_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000050000007b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000500000005; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ int_op1 = 0x3f8000003f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff9fff9fff9fff9; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff9fff9fff9fff9; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; -+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x007b01ec007b3a9e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; -+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010000fe7c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100010000fe01; -+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000060; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000050000007b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000500000005; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffbffffff85; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffc0000fdfc; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff3fff3fff3fff3; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; -+ __m128d_out = __lsx_vflogb_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff1000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff1000100010001; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000070; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff5; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00010001; -+ *((int*)& __m128_op0[2]) = 0x00010001; -+ *((int*)& __m128_op0[1]) = 0x00010001; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010000fe7c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; -+ *((unsigned long*)& __m128i_result[1]) = 0x000f000f00100000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000f000f00100000; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00010002; -+ *((int*)& __m128_op0[2]) = 0x0000fe7d; -+ *((int*)& __m128_op0[1]) = 0x00010002; -+ *((int*)& __m128_op0[0]) = 0x0000fe02; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x0000007b; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00f300ff00f3; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00f300ff00f3; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00f300ff00f3; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00f300ff00f3; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffbffffff85; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffc0000fdfc; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff8fff8fff8fff8; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff8fff8fff8fff8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x0000007b; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x35); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff8fff8fff8fff8; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff8fff8fff8fff8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0003000300030004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0003000300030004; -+ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0204; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001007c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00010001; -+ *((int*)& __m128_op1[2]) = 0x0001007c; -+ *((int*)& __m128_op1[1]) = 0x00010001; -+ *((int*)& __m128_op1[0]) = 0x00010001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4429146a7b4c88b2; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe22b3595efa4aa0c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000442900007b4c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000e22b0000efa4; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0204; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000442900007b4c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000e22b0000efa4; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000442800007b50; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0204; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000442800007b50; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0204; -+ *((int*)& __m128_result[3]) = 0x46885000; -+ *((int*)& __m128_result[2]) = 0x46f6a000; -+ *((int*)& __m128_result[1]) = 0x4f800000; -+ *((int*)& __m128_result[0]) = 0x4f7fff02; -+ __m128_out = __lsx_vffint_s_wu(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4688500046f6a000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f7fff02; 
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff03ffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00013fff; -+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff03ffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00013fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000088500000f6a0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffd00000407; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000442900007b4c; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000e22b0000efa4; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff03ffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00013fff; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100010001007c; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000100000001007c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000000010000; -+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000020000007d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000001f400000; -+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128d_result[1]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x40f0001000000000; -+ __m128d_out = 
__lsx_vffintl_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000020000007d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000746400016388; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000586100015567; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0800000200000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000020000007d; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00800000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x1f400000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffa8ff9f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffabff99; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000100000002007d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000020001; -+ *((unsigned long*)& __m128i_result[1]) = 0x00010000ffab001c; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001ffffffadff9a; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& 
__m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xffa8ff9f; -+ *((int*)& __m128_op1[1]) = 0x0000ffff; -+ *((int*)& __m128_op1[0]) = 0xffabff99; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x6d6d6d6d6d6d6d6d; -+ *((unsigned long*)& __m256i_result[2]) = 0x6d6d6d6d6d6d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x6d6d6d6d6d6d6d6d; -+ *((unsigned long*)& __m256i_result[0]) = 0x6d6d6d6d6d6d6d6d; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x6d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00010001; -+ *((int*)& __m128_op1[2]) = 0x0001007c; -+ *((int*)& __m128_op1[1]) = 0x00010001; -+ *((int*)& __m128_op1[0]) = 0x00010001; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00010000ffab001c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffffffadff9a; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; -+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[6]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[5]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[4]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[3]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[2]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[1]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[0]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[7]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[6]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[5]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[4]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[3]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[2]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[1]) = 0x6d6d6d6d; -+ *((int*)& __m256_op1[0]) = 0x6d6d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; -+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[6]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[5]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[4]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[3]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[2]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[1]) = 0x6d6d6d6d; -+ *((int*)& __m256_op0[0]) = 0x6d6d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x40f0001000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100010001; -+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128d_result[0]) = 0xfffcfffcfffcfffc; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00003fff00003fff; -+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x000000000000ffff;
-+ __m128i_out = __lsx_vmsknz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[6]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[5]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[4]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[3]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[2]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[1]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op0[0]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[7]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[6]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[5]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[4]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[3]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[2]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[1]) = 0x6d6d6d6d;
-+ *((int*)& __m256_op1[0]) = 0x6d6d6d6d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00;
-+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x000000000000ffff;
-+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x1e0200001e020000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffd;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffdfffcfffd;
-+ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffd;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffdfffcfffd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff7f7f7fff7fffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff7f7f7fff7fffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3f7f7f7eff800000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3f7f7f7eff800000;
-+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1e0200001e020000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffd;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffdfffcfffd;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffcfffffffd;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffdfffffffd;
-+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffd;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffdfffcfffd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffdfffcfffd;
-+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x3f7f7f7e;
-+ *((int*)& __m256_op1[4]) = 0xff800000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x3f7f7f7e;
-+ *((int*)& __m256_op1[0]) = 0xff800000;
-+ *((int*)& __m256_op2[7]) = 0x00000000;
-+ *((int*)& __m256_op2[6]) = 0x00000000;
-+ *((int*)& __m256_op2[5]) = 0x7fffffff;
-+ *((int*)& __m256_op2[4]) = 0xff7fffff;
-+ *((int*)& __m256_op2[3]) = 0x00000000;
-+ *((int*)& __m256_op2[2]) = 0x00000000;
-+ *((int*)& __m256_op2[1]) = 0x7fffffff;
-+ *((int*)& __m256_op2[0]) = 0xff7fffff;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x7fffffff;
-+ *((int*)& __m256_result[4]) = 0x7fc00000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x7fffffff;
-+ *((int*)& __m256_result[0]) = 0x7fc00000;
-+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8080808000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8080808000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3f7f7f7eff800000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x3f7f7f7eff800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007efeff00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007efeff00;
-+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffff7fffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffff7fffff;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007efeff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007efeff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffd017d00;
-+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007efeff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007efeff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000008e7c00;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000067751500;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000008e7c00;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000067751500;
-+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fff9fff9;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fff9fffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x007ffe7ffe400000;
-+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x2a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x007ffe7ffe400000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x007ffd0001400840;
-+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000008e7c00;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000067751500;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000008e7c00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000067751500;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000007a00f8;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff01640092;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000007a00f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff01640092;
-+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000100640000ff92;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000100640000ff92;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00007c0100007c01;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00007c0100007c00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00007c0100007c01;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007c0100007c00;
-+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
-+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x30);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7aff7c0000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfd017d0000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7aff7c0000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfd017d0000000000;
-+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xb3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x007ffd0001400840;
-+ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000c7aff7c00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000c7aff7c00;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffd017d00;
-+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3fffffffff7f0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3fffffffff7f0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000c7aff7c00;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000c7aff7c00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffd017d00;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000002030000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x030303670101fd90;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000002030000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x030303670101fd90;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3ffffffffc7bfc99;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3ffffffffc7bfc99;
-+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000027d00f8;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x040204660265fe22;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000027d00f8;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x040204660265fe22;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x3a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_op2[3]) = 0x00000000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x007ffd00;
-+ *((int*)& __m128_op2[0]) = 0x01400840;
-+ *((int*)& __m128_result[3]) = 0x80000000;
-+ *((int*)& __m128_result[2]) = 0x80000000;
-+ *((int*)& __m128_result[1]) = 0x007ffd00;
-+ *((int*)& __m128_result[0]) = 0x01400840;
-+ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefa000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x007ffd0001400840;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3fffffff80000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00003ffd000a4000;
-+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fffffff80000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00003ffd000a4000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffcffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000fffd000a0000;
-+ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xfefa0000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefa000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefa000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_result[3]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc3f0c3f0c3f0c3f0;
-+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x3c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc3f0c3f0c3f0c3f0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc3f0c3f0c3f0c3f0;
-+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000007a00f8;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x00ff00ff01640092;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000007a00f8;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x00ff00ff01640092;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fffffff80000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00003ffd000a4000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffcffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffd000a0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xf000800080000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000a00028004000;
-+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfffcffff00000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000fffd000a0000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xf0fd800080000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000a00028004000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf0fd800080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000a00028004000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xf0fd800080000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000a00028004000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x6b9fe3649c9d6363;
-+ *((unsigned long*)& __m128i_result[0]) = 0x6363bc9e8b696363;
-+ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffe3636363;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000063692363;
-+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0007000000fb00ef;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ea005600f90090;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0007000000fb00ef;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ea005600f90090;
-+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000005c9c9c9c;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe3636363;
-+ __m128i_out = __lsx_vexth_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00;
-+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf0fd800080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000a00028004000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000f000800000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000f000000000000;
-+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffe4ffffffe4;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffe4ffffffe4;
-+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002ffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6b9fe3649c9d6363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x6363bc9e8b696363;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6b9fe3649c9d6363;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x6363bc9e8b696363;
-+ *((unsigned long*)& __m128i_result[1]) = 0xb9fe3640e4eb1b18;
-+ *((unsigned long*)& __m128i_result[0]) = 0x800000005b4b1b18;
-+ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffb9fe00003640;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffe4eb00001b18;
-+ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00;
-+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0002ffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x67b7cf643c9d636a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x39d70e366f547977;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0002ffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x66b34f643c9c626a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x38d60e366e547876;
-+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffb9fe00003640;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe4eb00001b18;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x80001b155b4b0000;
-+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe273e273e273e273;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe273e273e273e273;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xe273e273e273e273;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe273e273e273e273;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001c4e8ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001c4e8ffffffff;
-+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x01fb16ef98f97e90;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x01fb16ef98f97e90;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xdcfe1b20f2f60e0c;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc00000002e260e0c;
-+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x0001c4e8;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x0001c4e8;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001c4e8ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001c4e8ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0080000000800000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0081c4e8ff7fffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0080000000800000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0081c4e8ff7fffff;
-+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x17);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00002df900001700;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe05ffffe911;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00002df900001700;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe05ffffe911;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000300000003;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffcfffffffc;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000300000003;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffcfffffffc;
-+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x66b34f643c9c626a;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x38d60e366e547876;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x66b34f643c9c626a;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x38d60e366e547876;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x80008000b70fb810;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3c0f3c0f3911b910;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x80008000b70fb810;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3c0f3c0f3911b910;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff6f20;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000781e0000f221;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff6f20;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000781e0000f221;
-+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ff010000ff01;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff010000ff01;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff010000ff01;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff010000ff01;
-+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00006c82;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00009b140000917b;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffff00006c82;
-+ *((unsigned long*)& __m128d_result[0]) = 0x00009b140000917b;
-+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0xffff6f20;
-+ *((int*)& __m256_op0[5]) = 0x0000781e;
-+ *((int*)& __m256_op0[4]) = 0x0000f221;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0xffff6f20;
-+ *((int*)& __m256_op0[1]) = 0x0000781e;
-+ *((int*)& __m256_op0[0]) = 0x0000f221;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0xffff6f20;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0xffff6f20;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfrint_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff6f20;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000781e0000f221;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff6f20;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000781e0000f221;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x80001b155b4b0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00006c82;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00009b140000917b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x80000000fffffffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xb150000000000000;
-+ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe;
-+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x80001b155b4b0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x80001b155b4b0000;
-+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff994cb09c;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc3639d96;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
-+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff6f20;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff6f20;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xdbc8000000003fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xdbc8000000003fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1f);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x0000ffff;
-+ *((int*)& __m256_op0[6]) = 0x0000ffff;
-+ *((int*)& __m256_op0[5]) = 0x0000ffff;
-+ *((int*)& __m256_op0[4]) = 0x0000ffff;
-+ *((int*)& __m256_op0[3]) = 0x0000ffff;
-+ *((int*)& __m256_op0[2]) = 0x0000ffff;
-+ *((int*)& __m256_op0[1]) = 0x0000ffff;
-+ *((int*)& __m256_op0[0]) = 0x0000ffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x20);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff994cb09c;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc3639d96;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x20de27761210386d;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x34632935195a123c;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff994db09c;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc7639d96;
-+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0xdbc80000;
-+ *((int*)& __m256_op1[6]) = 0x00003fff;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0xdbc80000;
-+ *((int*)& __m256_op1[2]) = 0x00003fff;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) =
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdbc8000000003fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdbc8000000003fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdbc8000000003fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdbc8000000003fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff994db09c; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc7639d96; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xdbc8000000003fff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xdbc8000000003fff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0xff800000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0xff800000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ 
__m128i_out = __lsx_vbitclri_h(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandi_b(__m128i_op0,0x27); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000e0000000e; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 
0xecececececececec; -+ *((unsigned long*)& __m128i_result[0]) = 0xecececececececec; -+ __m128i_out = __lsx_vldi(1004); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x86); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff3e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff3e; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x70); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00c100c100c100c1; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00c100c100c100c1; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; -+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00c100c100c100c1; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00c100c100c100c1; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x003f003f003f003f; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000500000005; -+ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000500000005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000500000005; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000500000005; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; -+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff3eff3eff3eff3e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000500000005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000500000005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000500000005; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000500000005; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80000000; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000000; -+ *((int*)& __m256_result[1]) = 0x80000000; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[1]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0xbf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_op1[1]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffb79fb74; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffb79fb74; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000010486048c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000010486048c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000006; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 
0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[3]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0005000500050005; -+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000000010486048c; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000006; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000000010486048c; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000010486048c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000010486048c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x6f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; 
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00050005; -+ *((int*)& __m128_op0[2]) = 0x00050005; -+ *((int*)& __m128_op0[1]) = 0x00050005; -+ *((int*)& __m128_op0[0]) = 0x00050005; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffb79fb74; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffb79fb74; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m256i_result[3]) = 0x000100010485048a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0005ff870005ff86; -+ *((unsigned long*)& __m256i_result[1]) = 0x000100010485048a; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0005ff870005ff86; -+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000020006; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffb79fb74; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffb79fb74; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xc192181230000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xc192181230000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xd9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00050005; -+ *((int*)& __m128_op1[2]) = 0x00050005; -+ *((int*)& __m128_op1[1]) = 0x00050005; -+ *((int*)& __m128_op1[0]) = 0x00050005; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000100010485048a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0005ff870005ff86; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000100010485048a; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0005ff870005ff86; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffa0065fffa0066; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffa0065fffa0066; -+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffeffeb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fb7afb62; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffeffeb; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fb7afb62; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffeffebfb7afb62; -+ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc192181230000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc192181230000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff0; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff000000ff0000; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc192181230000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc192181230000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4010000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3e6ce7d9cb7afb62; -+ *((unsigned long*)& __m256i_result[1]) = 0x4010000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3e6ce7d9cb7afb62; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff0; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffa2078fffa2074; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffa2078fffa2074; -+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffa2078fffa2074; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffa2078fffa2074; -+ *((unsigned long*)& __m256i_result[3]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x01ff01ff01ff01ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x01ff01ff01ff01ff; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffeffebfb7afb62; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffeffebfb7afb62; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4010000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3e6ce7d9cb7afb62; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4010000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3e6ce7d9cb7afb62; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2008000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1f3673ece5bd7db1; -+ *((unsigned long*)& __m256i_result[1]) = 0x2008000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1f3673ece5bd7db1; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4010000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3e6ce7d9cb7afb62; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4010000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3e6ce7d9cb7afb62; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000401000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003e6c0000cb7a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000401000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003e6c0000cb7a; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000401000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003e6c0000cb7a; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000401000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003e6c0000cb7a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x40000000b000032d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x40000000b000032d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xeffc000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf064c6098d214127; -+ *((unsigned long*)& __m256i_op0[1]) = 0xeffc000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf064c6098d214127; -+ *((unsigned long*)& __m256i_result[3]) = 0xeffc001800180018; -+ *((unsigned long*)& __m256i_result[2]) = 0xf064c6098d214127; -+ *((unsigned long*)& __m256i_result[1]) = 0xeffc001800180018; -+ *((unsigned long*)& __m256i_result[0]) = 0xf064c6098d214127; -+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc192181230000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc192181230000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) 
= 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xff800000; -+ *((int*)& __m256_result[6]) = 0xff800000; -+ *((int*)& __m256_result[5]) = 0xff800000; -+ *((int*)& __m256_result[4]) = 0xff800000; -+ *((int*)& __m256_result[3]) = 0xff800000; -+ *((int*)& __m256_result[2]) = 0xff800000; -+ *((int*)& __m256_result[1]) = 0xff800000; -+ *((int*)& __m256_result[0]) = 0xff800000; -+ __m256_out = __lasx_xvflogb_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x29); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 
0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003030000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xff800000ff800000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xff800000; -+ *((int*)& __m256_result[4]) = 0xff800000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xff800000; -+ *((int*)& __m256_result[0]) = 0xff800000; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff820002ff820002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff820002ff820002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00020002ff820002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00020002ff820002; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff800000ff800000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff80000000000000; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00020002ff820002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00020002ff820002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff82; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000003ffda00f3; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000003ffda00f3; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff820002ff820002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff820002ff820002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; -+ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00020002ff820002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00020002ff820002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffint_d_l(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0800080008000800; -+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffecffffffec; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfe7fffecfe7fffec; -+ *((unsigned long*)& __m256i_result[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfe7fffecfe7fffec; -+ *((unsigned long*)& __m256i_result[0]) = 0xff80000000000000; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xf4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfe7fffecfe7fffec; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe7fffecfe7fffec; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808000800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808000000; -+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; -+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe7fffecfe7fffec; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe7fffecfe7fffec; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffdfffdfffdfffd; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0800080008000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0800080008000800; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffdfffdfffdfffd; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[3]) = 0x4343434343434343; -+ *((unsigned long*)& __m256i_result[2]) = 0x4343434343434343; -+ *((unsigned long*)& __m256i_result[1]) = 0x4343434343434343; -+ *((unsigned long*)& __m256i_result[0]) = 0x4343434343434343; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x38); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffdfffdfffdfffd; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0100010001000100; -+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001a0000001a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001a0000001a; -+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0800080008000800; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = 
__lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0040004000400040; -+ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0800080008000800; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_d(__m128i_op0,0x35); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0020002000200020; -+ __m128i_out = 
__lsx_vsrli_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ 
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_result[1]) = 0x9a9a9a9a9a9a9a9a; -+ *((unsigned long*)& __m128i_result[0]) = 0x9aba9aba9aba9aba; -+ __m128i_out = __lsx_vxori_b(__m128i_op0,0x9a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000020000; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0040004000400040; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x55555555; -+ *((int*)& __m256_op0[6]) = 0x55555555; -+ *((int*)& __m256_op0[5]) = 0x5d5d5d5d; -+ *((int*)& __m256_op0[4]) = 0x5d555d55; -+ *((int*)& __m256_op0[3]) = 0x55555555; -+ *((int*)& __m256_op0[2]) = 0x55555555; -+ *((int*)& __m256_op0[1]) = 0x5d5ca2a3; -+ *((int*)& __m256_op0[0]) = 0x5d54aaab; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 
0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0100000001000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; -+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffee; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fc03fc01fc03fc; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fc03fc01fc03fc; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x3e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256d_op0[2]) = 0x01fc03fc01fc03fc; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256d_op0[0]) = 0x01fc03fc01fc03fc; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100000001000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc0ffc0ffc0ffc0; -+ *((unsigned long*)& __m128i_result[0]) = 0xffc0ffc0ffc0ffc0; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffdffd; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffdffd; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffdffd; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffdffd; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffee; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffee; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffee; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffee; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fc03fc01fc03fc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fc03fc01fc03fc; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000200000001e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000200000001e; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0081000100810001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0081000100810001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0081000100810001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0081000100810001; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256d_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffffdd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffdc; -+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff80ff00ff80ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff80ff00ff80ff01; -+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff80ff00ff80ff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff80ff00ff80ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x007f00ff007f00fe; -+ *((unsigned long*)& __m256i_op2[2]) = 0xf711ee11f711ee91; -+ *((unsigned long*)& __m256i_op2[1]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xf711ee11f711ee11; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff80ff00ff80ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff80ff00ff80ff01; -+ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; -+ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000002affaa; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff002affaa; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000002affaa; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffd50055; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x002affaa00000000; -+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00820082ff81ff81; -+ *((unsigned long*)& __m128d_op0[0]) = 0xff81ff81ff81ff81; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000820000ff81; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff810000ff81; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000820000ff81; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff810000ff81; -+ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffeffffffdd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x002affaa00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffffdd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffdc; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x000000000000ffff; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ee; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ee; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f00ff007f00ff; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffffdd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; 
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m128i_result[0]) = 0x001f001f001f001f; -+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000808; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ long_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_d(long_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000808; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1010100fefefeff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0f8f0e8df676f778; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ef32; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ee; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ee; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffce; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fc7c; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffce; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fc7c; -+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x28); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xff81ff82ff810081; -+ *((unsigned long*)& __m128i_op2[0]) = 0xff82ff810081ff81; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; -+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op1[3]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_op1[1]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe7e7e7e7e7e7e7e7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xe6e8e6e8e6e8d719; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xe6e8e6e8e6e8d719; -+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x0000ffce; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000fc7c; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x0000ffce; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000fc7c; -+ *((int*)& __m256_op1[7]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[6]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[5]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[4]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[3]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[2]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[1]) = 0xe7e7e7e7; -+ *((int*)& __m256_op1[0]) = 0xe7e7e7e7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ffce20; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x00ff00ff00ffce20; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ee1100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000004560408; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ee1100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000004560408; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000004560420; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000004560420; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ffce20; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ffce20; -+ *((unsigned long*)& __m256i_result[3]) = 0x1514151415141514; -+ *((unsigned long*)& __m256i_result[2]) = 0x151415141514e335; -+ *((unsigned long*)& __m256i_result[1]) = 0x1514151415141514; -+ *((unsigned long*)& __m256i_result[0]) = 0x151415141514e335; -+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1514151415141514; -+ *((unsigned long*)& __m256i_op1[2]) = 0x151415141514e335; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1514151415141514; -+ *((unsigned long*)& __m256i_op1[0]) = 0x151415141514e335; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000e9ece9ec; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256d_op0[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256d_op0[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256d_op0[0]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00ff00ff00ef0120; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00ff00ff00ef0120; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffff007f00000000; -+ *((unsigned long*)& 
__m128d_op0[0]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xecec006c00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xecec006c00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ef0120; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ef0120; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000e9ece9ec; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff0120; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000e9ec0000e9ec; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff0120; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000e9ec0000e9ec; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff0000007f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00550f0000550f00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ef32; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000015c015c0; -+ *((unsigned long*)& __m256i_result[2]) = 0xc0c0c0cdc0c0c0cd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xc0c0c0cdc0c0c0cd; -+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000001f0000001f; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000004560420; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000004560420; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000004560420; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000004560420; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; -+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xd04752cdd5543b56; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6906e68064f3d78b; -+ *((unsigned long*)& __m256i_op0[1]) = 0xd04752cdd5543b56; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6906e68064f3d78b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000300000002; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff007f00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000007f00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000000; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xd04752cdd5543b56; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6906e68064f3d78b; -+ *((unsigned long*)& __m256i_op0[1]) = 0xd04752cdd5543b56; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x6906e68064f3d78b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000004560420; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff1100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000004560420; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000fff00004542; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000fff00004542; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000fff00004542; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000fff00004542; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000fff00004542; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000fff00004542; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000007f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000007f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001f; 
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0202020202020203; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0202020202020203; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001f0000ffff; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52525252525252cb; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52525252525252cb; -+ *((unsigned long*)& __m128i_result[1]) = 0xaeaeaeaeaeaeae35; -+ *((unsigned long*)& __m128i_result[0]) = 0xaeaeaeaeaeaeae35; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xaeaeaeaeaeaeae35; -+ *((unsigned long*)& __m128i_op0[0]) = 0xaeaeaeaeaeaeae35; -+ *((unsigned long*)& __m128i_op1[1]) = 0xaeaeaeaeaeaeae35; -+ *((unsigned long*)& __m128i_op1[0]) = 0xaeaeaeaeaeaeae35; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; -+ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x3e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004411; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000c005e000c0029; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0004005600040020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000300000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000060008; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000c005b; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffe0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000040053; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000fff00004542; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00ff00ffff00ff00; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000fff00004542; -+ *((unsigned long*)& 
__m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001f0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000005000000020; -+ *((unsigned long*)& 
__m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000005000000020; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000005000000020; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000005000000020; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000005000000020; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000005000000020; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000005000000020; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000005000000020; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0202020202020203; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0202020202020203; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000002020202; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000002020202; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001f0000ffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000060008; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000000c005b; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffffe0000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000040053; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0007fff7; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff005affa4; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffe100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000053ffac; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 
0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000001f0000ffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x60000007fffe0001; -+ *((unsigned long*)& __m256d_result[2]) = 0x60000007fffe0001; -+ *((unsigned long*)& __m256d_result[1]) = 0x6056fd4e7926d5c0; -+ *((unsigned long*)& __m256d_result[0]) = 0x6056fd4e1a4616c4; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; -+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00ff00ff; -+ *((int*)& __m256_op0[6]) = 0x00ff00ff; -+ *((int*)& __m256_op0[5]) = 0x00ff00ff; -+ *((int*)& __m256_op0[4]) = 0x000c0000; -+ *((int*)& __m256_op0[3]) = 0x00ff00ff; -+ *((int*)& __m256_op0[2]) = 0x00ff00ff; -+ *((int*)& __m256_op0[1]) = 0x00ff00ff; -+ *((int*)& __m256_op0[0]) = 0x00040000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00ff00ff; -+ *((int*)& 
__m256_result[6]) = 0x00ff00ff; -+ *((int*)& __m256_result[5]) = 0x00ff00ff; -+ *((int*)& __m256_result[4]) = 0x000c0000; -+ *((int*)& __m256_result[3]) = 0x00ff00ff; -+ *((int*)& __m256_result[2]) = 0x00ff00ff; -+ *((int*)& __m256_result[1]) = 0x00ff00ff; -+ *((int*)& __m256_result[0]) = 0x00040000; -+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000005000000020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000005000000020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; -+ *((int*)& __m256_result[7]) = 0xdf000000; -+ *((int*)& __m256_result[6]) = 0x52a00000; -+ *((int*)& __m256_result[5]) = 0x5b7f00ff; -+ *((int*)& __m256_result[4]) = 0x5b7f00ff; -+ *((int*)& __m256_result[3]) = 0xdf000000; -+ *((int*)& __m256_result[2]) = 0x52a00000; -+ *((int*)& __m256_result[1]) = 0x5b7f00ff; -+ *((int*)& __m256_result[0]) = 0x5b7f00ff; -+ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020206431; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_w(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000005000000020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000005000000020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000002800000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000002800000010; -+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; -+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00000083; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[1]) = 0xff01ff010000ff7d; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fffc; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x00010001000c4411; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100044411; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xff01ff01; -+ *((int*)& __m128_op1[2]) = 0x0000ff7d; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x0000fffc; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& 
__m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000002800000010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000002800000010; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff0127000c0010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff012700040010; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff01ff010000ff7d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00010001000c4411; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100044411; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000002800000010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002800000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002000200020018; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002000200020008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; -+ __m128d_out = __lsx_vfrecip_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000002; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000002; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002000200020018; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0002000200020008; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00c0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0040000000000000; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x35); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffefffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffefffff; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0xdf01010153a10101; -+ *((unsigned long*)& __m256i_result[2]) = 0x5b7f01ff5b7f10ff; -+ *((unsigned long*)& __m256i_result[1]) = 0xdf01010153a10101; -+ *((unsigned long*)& __m256i_result[0]) = 0x5b7f01ff5b7f10ff; -+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffefffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffefffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0007000700070007; -+ *((unsigned long*)& __m128i_result[0]) = 0x0007000700070007; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffefffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffefffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 
0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdf00000052a00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5b7f00ff5b7f00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00c0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000c0000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000040000000; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdf01010153a10101; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x5b7f01ff5b7f10ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdf01010153a10101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5b7f01ff5b7f10ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xcf01010143a10101; -+ *((unsigned long*)& __m256i_result[2]) = 0x4b6f01ef4b6f00ef; -+ *((unsigned long*)& __m256i_result[1]) = 0xcf01010143a10101; -+ *((unsigned long*)& __m256i_result[0]) = 0x4b6f01ef4b6f00ef; -+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004411; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004411; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[1]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x001f001f001f001f; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000c0000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0020001f001f001e; -+ *((unsigned long*)& __m256i_result[2]) = 0x001f001fc01f001f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0020001f001f001e; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x001f001f401f001f; -+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m128d_result[0]) = 0xbff0000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256d_result[3]) = 0x43c0101010101010; -+ *((unsigned long*)& __m256d_result[2]) = 0x43c0101010101032; -+ *((unsigned long*)& __m256d_result[1]) = 0x43c0101010101010; -+ *((unsigned long*)& __m256d_result[0]) = 0x43c0101010101032; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x1010101010101010; -+ *((unsigned long*)& __m128i_result[0]) = 0xefefefefefefefef; -+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000c0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000c0000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0003030300000300; -+ *((unsigned long*)& __m256i_result[2]) = 0x0003030300000300; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003030300000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0003030300000100; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x01010101; -+ *((int*)& __m128_op0[0]) = 0x01010101; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0039ffffffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffbeffffffffffff; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000083b00000000; -+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x33); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100000020; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000083b00000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x7e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; -+ __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; -+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdf01010153a10101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5b7f01ff5b7f10ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdf01010153a10101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5b7f01ff5b7f10ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0003030300000300; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0003030300000300; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0003030300000100; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0003030300000100; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x35); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0003030300000300; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0003030300000300; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0003030300000100; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0003030300000100; -+ *((unsigned long*)& __m256d_result[3]) = 0x1febc46085090ea0; -+ *((unsigned long*)& __m256d_result[2]) = 0x1febc46085090ea0; -+ *((unsigned long*)& __m256d_result[1]) = 0x1febc46085090567; -+ *((unsigned long*)& __m256d_result[0]) = 0x1febc46085090567; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe6; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe6; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0003030300000300; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0003030300000300; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0003030300000100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003030300000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x00f800f800f800f8; -+ *((unsigned long*)& __m256i_result[2]) = 0x0018181800181818; -+ *((unsigned long*)& __m256i_result[1]) = 0x00f800f800f800f8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0018181800181818; -+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0008; -+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0003030300000300; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0003030300000300; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0003030300000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0003030300000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0043030300400300; -+ *((unsigned long*)& __m256i_result[2]) = 0x0043030300400300; -+ *((unsigned long*)& __m256i_result[1]) = 0x0043030300400100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0043030300400100; -+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffff0008; -+ *((int*)& __m128_op1[3]) = 0xffc2ffe0; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x0000ffc1; -+ *((int*)& __m128_op1[0]) = 0x00010001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x04080c1014182d35; -+ *((unsigned long*)& __m256i_result[2]) = 0x716d696573765161; -+ *((unsigned long*)& __m256i_result[1]) = 0x04080c1014182d35; -+ *((unsigned long*)& __m256i_result[0]) = 0x716d696573765161; -+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00f800f800f800f8; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0018181800181818; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00f800f800f800f8; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0018181800181818; -+ *((unsigned long*)& __m256i_result[3]) = 0x001f1f3e3e1f1f00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0003060909060300; -+ *((unsigned long*)& __m256i_result[1]) = 0x001f1f3e3e1f1f00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0003060909060300; -+ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111113111111131; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111113111111131; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; 
-+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0043030300400300; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0043030300400300; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0043030300400100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0043030300400100; -+ *((unsigned long*)& __m256i_result[3]) = 0xffdd001dffe00020; -+ *((unsigned long*)& __m256i_result[2]) = 0xffdd001dffe00031; -+ *((unsigned long*)& __m256i_result[1]) = 0xffdd001dffe00020; -+ *((unsigned long*)& __m256i_result[0]) = 0xffdd001dffe00031; -+ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x001f1f3e; -+ *((int*)& __m256_op1[6]) = 0x3e1f1f00; -+ *((int*)& __m256_op1[5]) = 0x00030609; -+ *((int*)& __m256_op1[4]) = 0x09060300; -+ *((int*)& __m256_op1[3]) = 0x001f1f3e; -+ *((int*)& __m256_op1[2]) = 0x3e1f1f00; -+ *((int*)& __m256_op1[1]) = 0x00030609; -+ *((int*)& __m256_op1[0]) = 0x09060300; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff000200000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& 
__m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111131; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111131; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffff0008; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111113111111121; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f8000007f7fffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f8000007f7fffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f8000007f7fffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f8000007f7fffff; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,-5); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x41dfbe1f41e0ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x41dfbe1f41e0ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xec); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3f77aab500000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; -+ *((unsigned long*)& __m128i_op2[1]) = 0x3f77aab500000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffc100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0fbc1df53c1ae3f9; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff820f81; -+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000111111312; -+ *((unsigned long*)& __m128i_result[0]) = 0x2222272111111410; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0fbc1df53c1ae3f9; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff820f81; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xf144e32bc4e61d27; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000020017ef19f; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffdd001dffe00020; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffdd001dffe00031; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffdd001dffe00020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffdd001dffe00031; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3ff73ff83ff73ff8; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3ff73ff83ff73ff8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x3ff73ff83ff73ff8; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x3ff73ff83ff73ff8; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256d_op2[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256d_op2[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256d_op2[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256d_result[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256d_result[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256d_result[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256d_result[0]) = 0xa020202020206431; -+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x33); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe700000007; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; -+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fffffffe000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fffffffe000000; -+ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0100000008080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc2ffe700000007; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffc100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xbde2ffe800000007; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x403be000; -+ *((int*)& __m128_result[2]) = 0xffffe000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000020; -+ *((int*)& __m128_op0[2]) = 0x00000020; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x0000ffc1; -+ *((int*)& __m128_op1[0]) = 0x00010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000021ffffffdf; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000e60; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001ff85ffdc0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000332ae5d97330; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1ff85ffe2ae5d973; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvexth_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x01ffffff; -+ *((int*)& __m256_op1[4]) = 0xfe000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x01ffffff; -+ *((int*)& __m256_op1[0]) = 0xfe000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000021ffffffdf; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000e60; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1ff85ffe2ae5d973; -+ *((unsigned long*)& __m128i_result[1]) = 0x00010020fffeffde; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100400100200e68; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00010020fffeffde; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100400100200e68; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00010020fffeffde; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0100400100200e68; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x1ff85ffe2ae5d973; -+ *((unsigned long*)& __m128i_result[1]) = 0x00010020fffeffde; -+ *((unsigned long*)& __m128i_result[0]) = 0x011f57c100201a46; -+ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1ff85ffe2ae5d973; -+ *((unsigned long*)& __m128i_op1[1]) = 0x403be000ffffe000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000ffc2f; -+ *((unsigned long*)& __m128i_result[0]) = 0x00201df000000000; -+ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x29); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffc2ffe7; -+ *((int*)& __m128_op0[2]) = 0x00000007; -+ *((int*)& __m128_op0[1]) = 0x0000ffc1; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((int*)& __m128_op1[3]) = 0xffc2ffe7; -+ *((int*)& __m128_op1[2]) = 0x00000007; -+ *((int*)& __m128_op1[1]) = 0x0000ffc1; -+ *((int*)& __m128_op1[0]) = 0x00010001; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x000ffc2f; -+ *((int*)& __m128_op2[1]) = 0x00201df0; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xffc2ffe7; -+ *((int*)& __m128_result[2]) = 0x800ffc2f; -+ *((int*)& __m128_result[1]) = 0x80201df0; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3838383838300010; -+ *((unsigned long*)& __m128i_result[0]) = 0x3818200838383838; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0xc7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2222272011111410; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2222272011111410; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_result[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0xa020202020206431; -+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fffffffe000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fffffffe000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x01fffffffe000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x01fffffffe000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfe00000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000017f7f7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000017f7f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f00000000000000; -+ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202031; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202031; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe700000007; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00010020fffeffde; -+ *((unsigned long*)& __m128i_op2[0]) = 0x011f57c100201a46; -+ *((unsigned long*)& __m128i_result[1]) = 0x001ffce00016fb41; -+ *((unsigned long*)& __m128i_result[0]) = 0x57cb857100001a46; -+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000017f7f7f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000017f7f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000017f00007f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007f0000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x1111113111111141; -+ 
*((unsigned long*)& __m128d_op1[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffc2ffe7; -+ *((int*)& __m128_op0[2]) = 0x00000007; -+ *((int*)& __m128_op0[1]) = 0x0000ffc1; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0xfffff1a0; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfbffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfbffffffffffffff; -+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x3a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f00007f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00007f0000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fd; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff810000000000; -+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202031; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202031; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_result[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0xa020202020206431; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf0800320fff1fa20; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0032000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfbffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x7bffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0xfbffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x7bffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128d_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001ffce00016fb41; -+ *((unsigned long*)& __m128i_op0[0]) = 0x57cb857100001a46; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000150000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffeffff001effff; -+ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000150000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffeffff001effff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffff1a0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f00f; -+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfbffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x7bffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000020; -+ *((int*)& __m128_op0[0]) = 0x00000020; -+ *((unsigned long*)& __m128d_result[1]) = 0x36f0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x36f0000000000000; -+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xa020202020206431; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xd010101010101010; -+ *((unsigned long*)& __m256i_result[2]) = 0xd010101010103218; -+ *((unsigned long*)& __m256i_result[1]) = 0xd010101010101010; -+ *((unsigned long*)& __m256i_result[0]) = 0xd010101010103218; -+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f00f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f00f; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfbffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7bffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xf7ffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xf7feffffffffffff; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xd010101010101010; -+ *((unsigned long*)& __m256i_op0[2]) = 0xd010101010103218; -+ *((unsigned long*)& __m256i_op0[1]) = 0xd010101010101010; -+ *((unsigned long*)& __m256i_op0[0]) = 0xd010101010103218; -+ *((unsigned long*)& __m256i_op1[3]) = 0xd010101010101010; -+ *((unsigned long*)& __m256i_op1[2]) = 0xd010101010103218; -+ *((unsigned long*)& __m256i_op1[1]) = 0xd010101010101010; -+ *((unsigned long*)& __m256i_op1[0]) = 0xd010101010103218; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010002000100020; -+ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; -+ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff77777807777775; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe6eeef00eeeeeebf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f00f; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff007700070077; -+ *((unsigned long*)& __m128i_result[0]) = 0x00e600ef00ee01de; -+ __m128i_out 
= __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f00f; -+ *((unsigned long*)& __m128i_result[1]) = 0x111110ff11111141; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111113111111100; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x060808ff08080820; -+ *((unsigned long*)& __m128i_result[0]) = 0x4608081808080810; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f00f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000007fff; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010002000100020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe; -+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x3e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111100; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111311111114111; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111311111112111; -+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffe0000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x111110ff11111141; -+ *((unsigned long*)& __m128i_op1[0]) = 0x11111131111116a6; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff8000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff8000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111311111114111; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x1111311111112111; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x1111311111114111; -+ *((unsigned long*)& __m128i_result[0]) = 0x1111311111110000; -+ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00080008000801ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_result[1]) = 0x00080008000801ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0008000800080008; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff8000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff8000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000800000; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000007fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_result[0]) = 0x2020202020207fff; -+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_result[0]) = 0x2020202020207f7f; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00080008000801ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00080008000801ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0008000800080008; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00ff0077; -+ *((int*)& __m128_op0[2]) = 0x00070077; -+ *((int*)& __m128_op0[1]) = 0x00e600ef; -+ *((int*)& __m128_op0[0]) = 0x00ee01de; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00007fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020643100000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020643100000000; -+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000009c400000000; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 
0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op2[0]) = 0x2020202020207f7f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff0000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; -+ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1111311111114111; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1111311111110000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe0000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x20202020; -+ *((int*)& __m128_op0[2]) = 0x20202020; -+ *((int*)& __m128_op0[1]) = 0x20202020; -+ *((int*)& __m128_op0[0]) = 0x20207fff; -+ *((int*)& __m128_op1[3]) = 0x32d3f35e; -+ *((int*)& __m128_op1[2]) = 0xcd509d13; -+ *((int*)& __m128_op1[1]) = 0x3e081b3c; -+ *((int*)& __m128_op1[0]) = 0x93f6b356; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; -+ unsigned_int_result = 0x0000000020202020; -+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x1); -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2020202020207fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x01010101010101ff; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x20202020; -+ *((int*)& __m128_op0[2]) = 0x20202020; -+ *((int*)& __m128_op0[1]) = 0x20202020; -+ *((int*)& __m128_op0[0]) = 0x20207fff; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffff02; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x5d5d5d5d5d5d5d5d; -+ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d0000; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc000c000c000ff81; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; -+ *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0ba00ba00ba00ba0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0ba00ba00ba011eb; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000a0000000a; -+ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000a0000000d; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; -+ *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf1819b7c0732a6b6; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffb9917a6e7fffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_d(__m128i_op0,12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0020002000200020; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0ba00ba00ba00ba0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0ba00ba00ba011eb; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf1819b7c0732a6b6; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffb9917a6e7fffff; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x05d0ba0002e8802e; -+ *((unsigned long*)& __m128i_result[0]) = 0xd005e802174023d6; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f417f417f027e03; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m128i_result[0]) = 0x2020202020207e03; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x05d0ba0002e8802e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd005e802174023d6; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc000c000c000ff81; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0ba00ba00ba00ba0; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0ba00ba00ba011eb; -+ *((unsigned long*)& __m128i_result[1]) = 0x05d0ae6002e8748e; -+ *((unsigned long*)& __m128i_result[0]) = 0xcd1de80217374041; -+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000005151515; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000006302e00; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005151515; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000006302e00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f417f417f027e03; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001fd0; 
-+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x00000001; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7fc00000; -+ *((int*)& __m256_result[4]) = 0x7fc00000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7fc00000; -+ *((int*)& __m256_result[0]) = 0x7fc00000; -+ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0080010000800100; -+ *((unsigned long*)& __m256i_result[2]) = 0x00c0000000c00000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0080010000800100; -+ *((unsigned long*)& __m256i_result[0]) = 0x00c0000000c00000; -+ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 
0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x7f800000; -+ *((int*)& __m256_op2[6]) = 0x7f800000; -+ *((int*)& __m256_op2[5]) = 0x7fc00000; -+ *((int*)& __m256_op2[4]) = 0x7fc00000; -+ *((int*)& __m256_op2[3]) = 0x7f800000; -+ *((int*)& __m256_op2[2]) = 0x7f800000; -+ *((int*)& __m256_op2[1]) = 0x7fc00000; -+ *((int*)& __m256_op2[0]) = 0x7fc00000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7fc00000; -+ *((int*)& __m256_result[4]) = 0x7fc00000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7fc00000; -+ *((int*)& __m256_result[0]) = 0x7fc00000; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9795698585057dec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x87f82867431a1d08; -+ *((unsigned long*)& __m128i_result[1]) = 0x9780697084f07dd7; -+ *((unsigned long*)& __m128i_result[0]) = 0x87e3285243051cf3; -+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001fd0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001fd0; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f417f417f027e03; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9780697084f07dd7; -+ *((unsigned long*)& __m128i_op1[0]) = 0x87e3285243051cf3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x9780697084f07dd7; -+ *((unsigned long*)& __m128i_op0[0]) = 0x87e3285243051cf3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000cdc1; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9795698585057dec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x87f82867431a1d08; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1149a96eb1a08000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000cdc1; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; -+ *((unsigned long*)& __m128i_op2[1]) = 0x05d0ae6002e8748e; -+ *((unsigned long*)& __m128i_op2[0]) = 0xcd1de80217374041; -+ *((unsigned long*)& __m128i_result[1]) = 0xf490ee600180ce20; -+ *((unsigned long*)& __m128i_result[0]) = 0x063bff74fb46e356; -+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1149a96eb1a08000; -+ *((unsigned long*)& __m128i_result[1]) = 0xb1a08000b1a08000; -+ *((unsigned long*)& __m128i_result[0]) = 0xb1a08000b1a08000; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001fd0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001fd0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x05d0ae6002e8748e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcd1de80217374041; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000065a0; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x05d0ae6002e8748e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcd1de80217374041; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f417f417f027e03; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x60); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x00000001; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; -+ *((unsigned long*)& __m128i_result[1]) = 0x5237c1baffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x7d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000065a0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9941d1d5f4ba9d08; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x9941d155f43a9d08; -+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xdfffffffdfffffff; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xdfffffffdfffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9941d155f43a9d08; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00008bf700017052; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000f841000091aa; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe6d4572c8a5835bc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe5017c2ac9ca9fd0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000f8410000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff0c170; -+ *((unsigned long*)& __m128d_op0[0]) = 0x5237c1bac9eadf55; -+ *((unsigned long*)& __m128d_op1[1]) = 0xe6d4572c8a5835bc; -+ *((unsigned long*)& __m128d_op1[0]) = 0xe5017c2ac9ca9fd0; -+ *((unsigned long*)& __m128d_result[1]) = 0xe93d0bd19ff07013; -+ *((unsigned 
long*)& __m128d_result[0]) = 0x65017c2ac9ca9fd0; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fc00000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fc00000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fc00000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fc00000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xdfffffffdfffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xdfffffffdfffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xbff0000000000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff07013; -+ *((unsigned long*)& __m128d_op0[0]) = 0x65017c2ac9ca9fd0; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00008bf700017052; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000f841000091aa; -+ *((unsigned long*)& __m128d_result[1]) = 0xe93d0bd19ff07013; -+ *((unsigned long*)& __m128d_result[0]) = 0x65017c2ac9ca9fd0; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000080000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xe93d0bd19ff07013; -+ *((unsigned long*)& __m128d_op1[0]) = 0x65017c2ac9ca9fd0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; 
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001ca02f854; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_d(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff07013; -+ *((unsigned long*)& __m128d_op0[0]) = 0x65017c2ac9ca9fd0; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffcafff8ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000a0; -+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe6d4572c8a5835bc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe5017c2ac9ca9fd0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00d3012b015700bb; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001002affca0070; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012b015700bb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001002affca0070; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00d3012b015700bb; -+ *((unsigned long*)& __m128i_result[0]) = 0x00010000ffca0070; -+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffcafff8ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a0; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x00d3012b015700bb;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00010000ffca0070;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff2cfed4fea8ff44;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffeffff0035ff8f;
-+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff2cfed4fea8ff44;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffeffff0035ff8f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000a0;
-+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x9c9c9c9c;
-+ *((int*)& __m128_op1[2]) = 0x9c9c9c9c;
-+ *((int*)& __m128_op1[1]) = 0x9c9c9c9c;
-+ *((int*)& __m128_op1[0]) = 0x9c9c9c9c;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
-+ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000020202020;
-+ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x2020202020202020;
-+ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000900013fa0;
-+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x23);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x80000000;
-+ *((int*)& __m256_op0[6]) = 0x80000000;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x80000000;
-+ *((int*)& __m256_op0[2]) = 0x80000000;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000001;
-+ *((int*)& __m128_op0[2]) = 0xca02f854;
-+ *((int*)& __m128_op0[1]) = 0x00000001;
-+ *((int*)& __m128_op0[0]) = 0x00013fa0;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0xca02f854;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfrint_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ca02f854;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fea8ff44;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128d_result[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128d_result[0]) = 0x2020202020202020;
-+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ca02f854;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ca02f854;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ca0200000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ca0200000000;
-+ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xbff00000bff00000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xbff00000bff00000;
-+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xbff00000bff00000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xbff00000bff00000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffbff1ffffbff1;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffbff1ffffbff1;
-+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020202020;
-+ int_op1 = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x202020202020ff20;
-+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ca02f854;
-+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x202020202020ff20;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x2000200020002000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x2000200020002000;
-+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fea8ff44;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fea8ff44;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000008000;
-+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0xffffffff;
-+ *((int*)& __m256_op1[6]) = 0xffffffff;
-+ *((int*)& __m256_op1[5]) = 0xffffffff;
-+ *((int*)& __m256_op1[4]) = 0xffffffff;
-+ *((int*)& __m256_op1[3]) = 0xffffffff;
-+ *((int*)& __m256_op1[2]) = 0xffffffff;
-+ *((int*)& __m256_op1[1]) = 0xffffffff;
-+ *((int*)& __m256_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffbff1ffffbff1;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffbff1ffffbff1;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffeffc4000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffeffc4000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffeffc4000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffeffc4000000;
-+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
-+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x2000200020002000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x2000200020002000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
-+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000004b01;
-+ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000004b01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a0;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000004b01;
-+ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclo_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004b01;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffb4ff;
-+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffb4ff;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffb4ff;
-+ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001021;
-+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000401000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000401000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000401000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000401000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000003f;
-+ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000016;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffff98dea;
-+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001021;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0108020410400208;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010102;
-+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0xffffffff;
-+ *((int*)& __m256_op1[6]) = 0xffffffff;
-+ *((int*)& __m256_op1[5]) = 0xffffffff;
-+ *((int*)& __m256_op1[4]) = 0xffffffff;
-+ *((int*)& __m256_op1[3]) = 0xffffffff;
-+ *((int*)& __m256_op1[2]) = 0xffffffff;
-+ *((int*)& __m256_op1[1]) = 0xffffffff;
-+ *((int*)& __m256_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128d_result[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x40f3fa0000000000;
-+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvneg_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffff98dea;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x40f3fa0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xc00fffffffffb4ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xbf0c05fffff98dea;
-+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x2000200000013fa0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000013fa0;
-+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0020000000200000;
-+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffdfffffffdfffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffdfffffffdfffff;
-+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0606060606060606;
-+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0606060606060606;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9;
-+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x2000200000013fa0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000013fa0;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
-+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200020;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffdfffffffdfffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffdfffffffdfffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000001021;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x2000200000013fa0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000120002000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000100013fa0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffdfffffffdfffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffdfffffffdfffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200001;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0020000000200001;
-+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x5);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001021;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001021;
-+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffe000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffe000;
-+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x54);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_d(__m128i_op0,5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc;
-+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
-+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000;
-+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000006a9a5c;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000092444;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000006a9a5c;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000092444;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000d4ccb8;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000124888;
-+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00d4ccb8;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00124888;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffbd994889;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000a092444;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000890000000000;
-+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x58);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffe000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffe000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000e000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000e000;
-+ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe;
-+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffb4ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xc110000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc00d060000000000;
-+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xda);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xc110000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc00d060000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x40f3fa0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xf047ef0000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xf047ef0000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xbd994889;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x0a092444;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x3941248880000000;
-+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe;
-+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x39412488;
-+ *((int*)& __m128_op0[0]) = 0x80000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x18171615;
-+ *((int*)& __m128_op0[2]) = 0x17161514;
-+ *((int*)& __m128_op0[1]) = 0x16151413;
-+ *((int*)& __m128_op0[0]) = 0x151d3756;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x39412488;
-+ *((int*)& __m128_op1[0]) = 0x80000000;
-+ *((int*)& __m128_op2[3]) = 0x3ff00000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x40f3fa00;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0xbff00000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0xc0f3fa00;
-+ *((int*)& __m128_result[0]) = 0x80000000;
-+ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xc0f3fa0080000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffec060;
-+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0008000800080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa8000800080;
-+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result,
__m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3941248880000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3941248880000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x40f3fa0000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x76f4248880000000; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x76f42488; -+ *((int*)& __m128_op0[0]) = 0x80000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fff00003fff; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000; -+ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff0000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff0000ff; -+ int_op1 = 0x00000000000000ff; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff0000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff0000ff; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc485edbcc0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x003f000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x007c000d00400000; -+ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff0000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff0000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff00000000ff; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc110000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc00d060000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xc110000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 
0xffffffff7fffffff; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc485edbcc0000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000c485; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x30); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x003f000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x007c000d00400000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000003f00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000007c00000040; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x31); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fd; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x18171615; -+ *((int*)& __m128_op0[2]) = 0x17161514; -+ *((int*)& __m128_op0[1]) = 0x16151413; -+ *((int*)& __m128_op0[0]) = 0x15141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1817161517161514; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1615141315141312; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x76f424887fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000017161515; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000095141311; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0003000300030003; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0600060000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0600060000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff082f000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000f7d1000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x773324887fffffff; -+ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x059a35ef139a8e00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000017161515; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000095141311; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x34); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff0000ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff0000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007f0200007f02; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007f0200007f02; -+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffff00000002; -+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xa7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000002; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff082f000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; -+ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000f7d1000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x773324887fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff082efffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x88cbdb7780000001; -+ __m128i_out = __lsx_vsub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x1817161517161514; -+ *((unsigned long*)& __m128d_op1[0]) = 0x1615141315141312; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff082f000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x003f000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc04d600d3aded151; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x004cff8fffde0051; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000f7d1000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x773324887fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000017161515; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000095141311; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000017fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x1716151595141311; -+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004cff8fffde0051; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000100fe000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000100fe00010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x000100fe000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000100fe00010001; -+ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xb4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000001fdfffffe02; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff01fefffeff02; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000001fdfffffe02; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000001fefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff01fefffeff02; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff80fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff80fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff80007ffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff007fff80fe; -+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x18171615; -+ *((int*)& __m128_op0[2]) = 0x17161514; -+ *((int*)& __m128_op0[1]) = 0x16151413; -+ *((int*)& __m128_op0[0]) = 0x15141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x0c0c8b8a8b8b0b0a; -+ *((unsigned long*)& __m128i_result[0]) = 0x8b8a8a898a8a8909; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000017161515; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000095141311; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x76f424887fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000170014; -+ *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; -+ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000002; -+ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x3f800000; -+ *((int*)& __m128_result[1]) = 0x4eede849; -+ *((int*)& __m128_result[0]) = 0x4f000000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000170014; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; -+ __m128i_out = __lsx_vextl_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c0c8b8a8b8b0b0a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8b8a8a898a8a8909; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000007fff80fe; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000007fff80fe; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff80007ffe; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ff007fff80fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4eede8494f000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff8607db959f; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000008a0000008a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000008900000009; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000043c5ea7b6; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000008fc4ef7b4; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned 
long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000001fdfffffe02; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff01fefffeff02; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fd00ffff02ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001fffeff; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; -+ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fd00ffff02ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fffeff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff81ffffff00; -+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000043c5ea7b6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000008fc4ef7b4; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000fea0000fffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0001fffe0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000007fff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000007fff8; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffff8607db959f; -+ *((unsigned long*)& __m128d_op0[0]) = 0xff0cff78ff96ff14; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff900000800; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000008a0000008a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000008900000009; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000007fff8; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000007fff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0007fff8000ffff0; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000008a0000008a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000008900000009; -+ *((unsigned long*)& __m128i_op1[1]) = 0x63637687636316bb; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x6363771163631745; -+ *((unsigned long*)& __m128i_result[0]) = 0x636363ec6363636c; -+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363771163631745; -+ *((unsigned long*)& __m128i_op1[0]) = 0x636363ec6363636c; -+ *((unsigned long*)& __m128i_result[1]) = 0x006300fb00630143; -+ *((unsigned long*)& __m128i_result[0]) = 0x0063ffec0063006c; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff8607db959f; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff0cff78ff96ff14; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000fea0000fffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xc2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007f7f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x87); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fd00ffff02fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00007f7f00007f00; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0100; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000007fff8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0007fff8000ffff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000007fff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f00ff00000000; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000c6c6ee22; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6c62e8a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6ee22; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c62e8a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffc000400780087; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe80fffc0183; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffc000400f8ff87; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff80ff00ff7c0183; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffc00000078;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffc;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffc000000f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffff790000077c;
-+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fd00ffff02ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff02ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0100;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff;
-+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffff9cff05;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff9cfebd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002;
-+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xc0fffff000000000;
-+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffe0001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00003a247fff7fff;
-+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000500000005;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000005fffe0006;
-+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc0fffff000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc0fffff000000000;
-+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffe02;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000300000005fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffff02;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000300000005fe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0007fd00000f02ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ffffffff00;
-+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00080000fffe0001;
-+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ffffffff00;
-+ *((unsigned long*)& __m256d_result[3]) = 0x40efffe000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x40efffe000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc0fffff000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe00000;
-+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x63637687;
-+ *((int*)& __m128_op0[2]) = 0x636316bb;
-+ *((int*)& __m128_op0[1]) = 0x63636363;
-+ *((int*)& __m128_op0[0]) = 0x63636363;
-+ *((unsigned long*)& __m128d_result[1]) = 0x446c6ed0e0000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x446c62d760000000;
-+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc0fffff000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ long_int_result = 0x00000000ffff0100;
-+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1;
-+ int_result = 0x0000000000003a24;
-+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0007fff8000ffff0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0007fff8000ffff0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000030007;
-+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x40cd120000000000;
-+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff7fff7f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff7f027f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff7f0100;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00fe7f027f;
-+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fefffeff02ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0100;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000100;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00feff00000000;
-+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0007fd00000f02ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fffeff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff;
-+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff;
-+ __m128i_out = __lsx_vreplgr2vr_w(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff00fe00feff02fe;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff00fe00feff027f;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff00fe00feff02fe;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff027f;
-+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01fe0400000006;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005fffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00fe01fc0005fff4;
-+ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfbfbfb17fbfb38ea;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfbfb47fbfbfb0404;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005fffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
-+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfbfbfb17fbfb38ea;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfbfb47fbfbfb0404;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000029;
-+ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fefffeff02ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00030006fa05f20e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00030081bd80f90e;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f7f7f7f7f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00010003fc827a86;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f7f7f0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f017fc0ddbf7d86;
-+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000029;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00;
-+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01fe0400000006;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000500000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff01fe0400000005;
-+ __m128i_out = __lsx_vmini_w(__m128i_op0,5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00fe01fc0005fff4;
-+ int_op1 = 0x0000000020202020;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000820202020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00fe01fc0005fff4;
-+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000820202020;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00fe01fc0005fff4;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000820205a44;
-+ *((unsigned long*)& __m128i_result[0]) = 0x013bc084078278b5;
-+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfbfbfb17fbfb38ea;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfbfb47fbfbfb0404;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfbfbfb17fbfb3919;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfbfb47fbfbfb042d;
-+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000029;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40effc0000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40effc0000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f7f7f7f7f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00010003fc827a86;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f7f7f0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f017fc0ddbf7d86;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x00153f1594ea02ff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffffffff0100;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xff15c1ea95ea02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xc06e7c817f7e8081;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000bd3f016f177a;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc06e7c8100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x60c485800178147a;
-+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffbe20fc;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000001cc7ee87;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000010bb83239;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000c409ed87;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0100020001bf1efd;
-+ *((unsigned long*)& __m256i_result[2]) = 0x010002001ec8ec88;
-+ *((unsigned long*)& __m256i_result[1]) = 0x010002010db9303a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x01000200c60aeb88;
-+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00153f1594ea02ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff15c1ea95ea02ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00030006fa05f20e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00030081bd80f90e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x2d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x010101010101012f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010129;
-+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
-+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffff00;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00;
-+ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
-+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffd700;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
-+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ff7fff7f;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ff7f027f;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ff7f0100;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xff00fe00fe7f027f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x40efffe09fa88260;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6b07ca8e013fbf01;
-+ *((unsigned long*)& __m256i_result[1]) = 0x40efffe09fa7e358;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80ce32be3e827f00;
-+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00153f1594ea02ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffff0100;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff15c1ea95ea02ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000153f15;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff15c1ea;
-+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9ff87f7f7f807f7f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x9ff87f7f7f807f7f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
-+ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
-+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4ffc3f783fc040c0;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3fc03f803fc040c0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x4ffc3f783fc040c0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3fc03f803fc040c0;
-+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe09fa88260;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6b07ca8e013fbf01;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe09fa7e358;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80ce32be3e827f00;
-+ *((unsigned long*)& __m256d_result[3]) = 0x43d03bfff827ea21;
-+ *((unsigned long*)& __m256d_result[2]) = 0x43dac1f2a3804ff0;
-+ *((unsigned long*)& __m256d_result[1]) = 0x43d03bfff827e9f9;
-+ *((unsigned long*)& __m256d_result[0]) = 0x43e019c657c7d050;
-+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000018;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff30000000b;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff3fffffff3;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff30000000b;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff3fffffff3;
-+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000002f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000029;
-+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff30000000b;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff3fffffff3;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff30000000b;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff3fffffff3;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007f7f817f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007f7f817f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
-+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007f7f817f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007f7f817f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f783fc040c0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3fc03f803fc040c0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f783fc040c0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x3fc03f803fc040c0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0003fbfc0bfbfc03;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0003fbfc0bfbfc03;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x2d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050;
-+ *((unsigned long*)& __m256i_result[3]) = 0xbc30c40107d915df;
-+ *((unsigned long*)& __m256i_result[2]) = 0xbc263e0e5c80b010;
-+ *((unsigned long*)& __m256i_result[1]) = 0xbc30c40107d91607;
-+ *((unsigned long*)& __m256i_result[0]) = 0xbc20e63aa8392fb0;
-+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0020002000200020;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0020002000200020;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000003a24;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003dc288077c7cc1;
-+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000008;
-+ *((int*)& __m128_op0[1]) = 0x00200020;
-+ *((int*)& __m128_op0[0]) = 0x00200020;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f79d20bf257;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffec6f90604bf;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f79d20bf257;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffec6f90604bf;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4ffc3f79d20bf257;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffec6f90604bf;
-+ *((unsigned long*)& __m256i_result[1]) = 0x4ffc3f79d20bf257;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffec6f90604bf;
-+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xe8001411edf9c0f8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xe80014fdf0e3e428;
-+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_result[2]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x007f007f0081007f;
-+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff7fff7f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff7f027f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff7f0100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00fe7f027f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f;
-+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000020000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000020000000;
-+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x23);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x43d03bfff827ea21;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x43dac1f2a3804ff0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x43d03bfff827e9f9;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x43e019c657c7d050;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050;
-+ *((unsigned long*)& __m256i_result[3]) = 0x86ff76ffff4eff42;
-+ *((unsigned long*)& __m256i_result[2]) = 0x86ffffffffff9eff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x86ff76ffff4effff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x86ff32ffaeffffa0;
-+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0020002000200020;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffef8;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffdfffdfffdffee0;
-+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
-+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe09fa88260;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6b07ca8e013fbf01;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe09fa7e358;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80ce32be3e827f00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x86ff76ffff4eff42;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x86ffffffffff9eff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x86ff76ffff4effff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x86ff32ffaeffffa0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x223d76f09f3881ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3870ca8d013e76a0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x223d76f09f37e357;
-+ *((unsigned long*)& __m256i_result[0]) = 0x43ec0a1b2aba7ed0;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4ffc3f783fc040c0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3fc03f803fc040c0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4ffc3f783fc040c0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3fc03f803fc040c0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
-+ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x1b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffef8;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffdfffdfffdffee0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffdfffdf;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffefffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffefefffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000018;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000019;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000200000001e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000019;
-+ __m256i_out = __lasx_xvclz_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffefefffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0400000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffefefffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
-+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xbc30c40108a45423;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xbc263e0e5d00e69f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xbc30c40108a4544b;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xbc20e63aa8b9663f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffdf;
-+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffdf;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000021;
-+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffefefffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffefefffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000021;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
-+ *((unsigned long*)& __m256i_result[3]) = 0xdec38a1061c87f01;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc8903673ffc28a60;
-+ *((unsigned long*)& __m256i_result[1]) = 0xdec38a1061c91da9;
-+ *((unsigned long*)& __m256i_result[0]) = 0xbd14f6e5d6468230;
-+ __m256i_out = __lasx_xvneg_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000018;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000019;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000200000001e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000019;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x223d76f09f3881ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3870ca8d013e76a0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x223d76f09f37e357;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x43ec0a1b2aba7ed0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x223d771060c77e19;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3870caad013e76b9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x223d771060c81cc7;
-+ *((unsigned long*)& __m256i_result[0]) = 0x43ec0a3b2aba7ee9;
-+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f;
-+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000002;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x0000007f;
-+ *((int*)& __m256_op1[7]) = 0xfffffff3;
-+ *((int*)& __m256_op1[6]) = 0x0000000b;
-+ *((int*)& __m256_op1[5]) = 0xfffffff3;
-+ *((int*)& __m256_op1[4]) = 0xfffffff3;
-+ *((int*)& __m256_op1[3]) = 0xfffffff3;
-+ *((int*)& __m256_op1[2]) = 0x0000000b;
-+ *((int*)& __m256_op1[1]) = 0xfffffff3;
-+ *((int*)& __m256_op1[0]) = 0xfffffff3;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000018;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000019;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000200000001e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000019;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0004000000030000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000400000003c000;
-+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x33);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0ea85f60984a8555;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00a21ef3246995f3;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1189ce8000fa14ed;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0e459089665f40f3;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000100f800000008;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0020001000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000f800000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000000000010;
-+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff8910ffff7e01;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff3573ffff8960;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff8910ffff1ca9;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffff5e5ffff8130;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
-+ __m128d_out = __lsx_vfrecip_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff30000000b;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff3fffffff3;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff30000000b;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff3fffffff3;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xbc30c40108a45423;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xbc263e0e5d00e69f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xbc30c40108a4544b;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xbc20e63aa8b9663f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x71860bf35f0f9d81;
-+ *((unsigned long*)& __m256i_result[2]) = 0x720ed94a46f449ed;
-+ *((unsigned long*)& __m256i_result[1]) = 0x71860bf35f0f9f39;
-+ *((unsigned long*)& __m256i_result[0]) = 0x72544f0e6e95cecd;
-+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x71860bf35f0f9d81;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x720ed94a46f449ed;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x71860bf35f0f9f39;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x72544f0e6e95cecd;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffff8910ffff7e01;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff3573ffff8960;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffff8910ffff1ca9;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffff5e5ffff8130;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffcb423a587053;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6d46f43e71141b81;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffcb423a584528;
-+ *((unsigned long*)& __m256i_result[0]) = 0x9bdf36c8d78158a1;
-+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x223d76f09f3881ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3870ca8d013e76a0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x223d76f09f37e357;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x43ec0a1b2aba7ed0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x111ebb784f9c4100;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1c386546809f3b50;
-+ *((unsigned long*)& __m256i_result[1]) = 0x111ebb784f9bf1ac;
-+ *((unsigned long*)& __m256i_result[0]) = 0x21f6050d955d3f68;
-+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
-+ __m128d_out = __lsx_vfrint_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x7ff0000000000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x7f800000;
-+ *((int*)& __m128_result[0]) = 0x7f800000;
-+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffffffdfffdf;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffdfffdf;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned
long*)& __m256i_op0[3]) = 0xbc74c3d108e05422; -+ *((unsigned long*)& __m256i_op0[2]) = 0xbc1e3e6a5cace67c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xbc74c3d108e0544a; -+ *((unsigned long*)& __m256i_op0[0]) = 0xbc18e696a86565f4; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbc74c3d108e05422; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbc1e3e6a5cace67c; -+ *((unsigned long*)& __m256i_op1[1]) = 0xbc74c3d108e0544a; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbc18e696a86565f4; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x48); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xa5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff8910ffff7e01; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff3573ffff8960; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff8910ffff1ca9; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffff5e5ffff8130; -+ 
*((unsigned long*)& __m256i_result[3]) = 0xffff8910ffff7e01; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff3573ffff8960; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff8910ffff1ca9; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffff5e5ffff8130; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0; -+ *((unsigned long*)& __m256i_result[3]) = 0x223d76f09f3881ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x3870ca9d013e76b0; -+ *((unsigned long*)& __m256i_result[1]) = 0x223d76f09f37e357; -+ *((unsigned long*)& __m256i_result[0]) = 0x43ec0a1b2aba7ed0; -+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffcb423a587053; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6d46f43e71141b81; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffcb423a584528; -+ *((unsigned long*)& __m256i_op0[0]) = 0x9bdf36c8d78158a1; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000007fffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000036a37; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000007fffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000004def9; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x2d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffbfffffffbf; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffcb423a587053; -+ *((unsigned long*)& __m256d_op0[2]) = 0x6d46f43e71141b81; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffcb423a584528; -+ *((unsigned long*)& __m256d_op0[0]) = 0x9bdf36c8d78158a1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvftint_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x223d76f0; -+ *((int*)& __m256_op0[6]) = 0x9f3881ff; -+ *((int*)& __m256_op0[5]) = 0x3870ca8d; -+ *((int*)& __m256_op0[4]) = 0x013e76a0; -+ *((int*)& __m256_op0[3]) = 0x223d76f0; -+ *((int*)& __m256_op0[2]) = 0x9f37e357; -+ *((int*)& __m256_op0[1]) = 0x43ec0a1b; -+ *((int*)& __m256_op0[0]) = 0x2aba7ed0; -+ *((int*)& __m256_op1[7]) = 0x111ebb78; -+ *((int*)& __m256_op1[6]) = 0x4f9c4100; -+ *((int*)& __m256_op1[5]) = 0x1c386546; -+ *((int*)& __m256_op1[4]) = 0x809f3b50; -+ *((int*)& __m256_op1[3]) = 0x111ebb78; -+ *((int*)& __m256_op1[2]) = 0x4f9bf1ac; -+ *((int*)& __m256_op1[1]) = 0x21f6050d; -+ *((int*)& __m256_op1[0]) = 0x955d3f68; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x111ebb784f9c4100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1c386546809f3b50; -+ *((unsigned long*)& __m256i_op1[1]) = 0x111ebb784f9bf1ac; -+ *((unsigned long*)& __m256i_op1[0]) = 0x21f6050d955d3f68; -+ *((unsigned long*)& __m256i_result[3]) = 0x088f5dbc27ce2080; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x161c32a2c04f9da7; -+ *((unsigned long*)& __m256i_result[1]) = 0x088f5dbc27cdf8d6; -+ *((unsigned long*)& __m256i_result[0]) = 0x10fb02864aae9fb4; -+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x111ebb78; -+ *((int*)& __m256_op1[6]) = 0x4f9c4100; -+ *((int*)& __m256_op1[5]) = 0x1c386546; -+ *((int*)& __m256_op1[4]) = 0x809f3b50; -+ *((int*)& __m256_op1[3]) = 0x111ebb78; -+ *((int*)& __m256_op1[2]) = 0x4f9bf1ac; -+ *((int*)& __m256_op1[1]) = 0x21f6050d; -+ *((int*)& __m256_op1[0]) = 0x955d3f68; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff7ffffef77fffdd; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf77edf9cffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvabsd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x111ebb784f9c4100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1c386546809f3b50; -+ *((unsigned long*)& __m256i_op0[1]) = 0x111ebb784f9bf1ac; -+ *((unsigned long*)& __m256i_op0[0]) = 0x21f6050d955d3f68; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xbab0c4b000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xaa0ac09800000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000007fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000036a37; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000007fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000004def9; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff7ffffef77fffdd; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf77edf9cffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000008800022; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000001; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x29); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xb8ec43be; -+ *((int*)& __m128_op1[2]) = 0xfe38e64b; -+ *((int*)& __m128_op1[1]) = 0x6477d042; -+ *((int*)& __m128_op1[0]) = 0x343cce24; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000008800022; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128d_op2[1]) = 0xb8ec43befe38e64b; -+ *((unsigned long*)& __m128d_op2[0]) = 0x6477d042343cce24; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffbfffffffbf; -+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0097011900f4009f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x003200d4010f0144; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0097011900f301cd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x010b008800f80153; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4ffc3f7800000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3fc03f6400000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4ffc3f7800000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3fc03f6400000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x4eb13ec100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3ec13ec100000000; 
-+ *((unsigned long*)& __m256i_result[1]) = 0x4eb13ec100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3ec13ec100000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffff7f; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x5f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0004040404000000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; -+ long_op1 = 0x0000000000003a24; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003a24; -+ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffbfffffffbe; -+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f7800000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x3fc03f6400000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f7800000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3fc03f6400000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000050fd00000101; -+ *((unsigned long*)& __m256i_result[2]) = 0x000040c100000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x000050fd00000101; -+ *((unsigned long*)& __m256i_result[0]) = 0x000040c100000101; -+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0004040404000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000050fd00000101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000040c100000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000050fd00000101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000040c100000101; -+ *((unsigned long*)& __m256i_result[3]) = 0x000050fd00000101; -+ *((unsigned long*)& __m256i_result[2]) = 0x000040c100000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x000050fd00000101; -+ *((unsigned long*)& __m256i_result[0]) = 0x000040c100000101; -+ 
__m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; -+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7e00000081; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000008000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff5fffffff5; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff5fffffff5; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff5fffffff5; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff5fffffff5; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7e00000081; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffbfffffffbf; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0404000004040000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& 
__m256d_op0[1]) = 0x0404000004040000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op1[3]) = 0x8011ffee804c004c; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00faff0500c3ff3c; -+ *((unsigned long*)& __m256d_op1[1]) = 0x80f900f980780078; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0057ffa800ceff31; -+ *((unsigned long*)& __m256d_op2[3]) = 0xffffc040ffffc09d; -+ *((unsigned long*)& __m256d_op2[2]) = 0x00003fc00000428a; -+ *((unsigned long*)& __m256d_op2[1]) = 0xffffc040ffffc09d; -+ *((unsigned long*)& __m256d_op2[0]) = 0x00003fc00000428a; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffc040ffffc09d; -+ *((unsigned long*)& __m256d_result[2]) = 0x80003fc00000428a; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffc040ffffc09d; -+ *((unsigned long*)& __m256d_result[0]) = 0x80003fc00000428a; -+ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x01010101; -+ *((int*)& __m256_op0[6]) = 0x01010101; -+ *((int*)& __m256_op0[5]) = 0x01010101; -+ *((int*)& __m256_op0[4]) = 0x01010101; -+ *((int*)& __m256_op0[3]) = 0x01010101; -+ *((int*)& __m256_op0[2]) = 0x01010101; -+ *((int*)& __m256_op0[1]) = 0x01010101; -+ *((int*)& __m256_op0[0]) = 0x01010101; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff0000007f800000; -+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffee0000004c0000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff050000ff3c0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00f9000000780000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffa80000ff310000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffee0000004c0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff050000ff3c0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00f9000000780000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffa80000ff310000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffbfc0ffffbfc0; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffbfc0ffffbfc0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000032; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8011ffee804c004c; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00faff0500c3ff3c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x80f900f980780078; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0057ffa800ceff31; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff000000ff000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff000000ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff000000ff000000; -+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff000000ff000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff000000ff00; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff000000ff000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffee0000ff4c; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000ff050000ff3c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fff90000ff78; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffa80000ff31; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffee0000ff4c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff050000ff3c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fff90000ff78; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffa80000ff31; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffee0000ff4c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff050000ff3c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fff90000ff78; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffa80000ff31; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; -+ *((unsigned long*)& __m256i_result[3]) = 0x8011ffae800c000c; -+ *((unsigned long*)& __m256i_result[2]) = 0x00baff050083ff3c; -+ *((unsigned long*)& __m256i_result[1]) = 0x80b900b980380038; -+ *((unsigned long*)& __m256i_result[0]) = 0x0017ffa8008eff31; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op2[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_op2[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0fff0fff0fff0fff; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0fff0fff0fff0fff;
-+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff000000010000;
-+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000032;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000032;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff000000010000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8011ffae800c000c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00baff050083ff3c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x80b900b980380038;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0017ffa8008eff31;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff800c000c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000084ff3c;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff80380038;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000008fff31;
-+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffee0000ff4c;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000ff050000ff3c;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000fff90000ff78;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffa80000ff31;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xffff0000;
-+ *((int*)& __m256_op0[4]) = 0xffff0000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xffff0000;
-+ *((int*)& __m256_op0[0]) = 0xffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100;
-+ __m128i_out = __lsx_vneg_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0b085bfc00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0b004bc000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0b085bfc00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0b004bc000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0404010008080808;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0408010008080808;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0404010008080808;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0408010008080808;
-+ __m256i_out = __lasx_xvclz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffae800c000c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00baff050083ff3c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x80b900b980380038;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0017ffa8008eff31;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff0000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010001;
-+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012;
-+ __m256i_out = __lasx_xvclz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0fff0fff0fff0fff;
-+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x0000ff00;
-+ *((int*)& __m128_op1[3]) = 0x40404040;
-+ *((int*)& __m128_op1[2]) = 0x40404040;
-+ *((int*)& __m128_op1[1]) = 0x40404040;
-+ *((int*)& __m128_op1[0]) = 0x40404040;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffee0000ff4c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff050000ff3c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000fff90000ff78;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffa80000ff31;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0b085bfc00000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0b004bc000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0b085bfc00000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0b004bc000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0fff0fff7f800fff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xf001f0010101f002;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x35);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0404010008080808;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0408010008080808;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0404010008080808;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0408010008080808;
-+ *((int*)& __m256_result[7]) = 0x38808000;
-+ *((int*)& __m256_result[6]) = 0x37800000;
-+ *((int*)& __m256_result[5]) = 0x39010000;
-+ *((int*)& __m256_result[4]) = 0x39010000;
-+ *((int*)& __m256_result[3]) = 0x38808000;
-+ *((int*)& __m256_result[2]) = 0x37800000;
-+ *((int*)& __m256_result[1]) = 0x39010000;
-+ *((int*)& __m256_result[0]) = 0x39010000;
-+ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3880800037800000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3901000039010000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3880800037800000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3901000039010000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00003fc00000428a;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0006ffff0004ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0002ffff0000ffff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff7f800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002fffefffd0001;
-+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe;
-+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe;
-+ __m128i_out = __lsx_vmini_h(__m128i_op0,2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0xffffffffffffffff;
-+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x1);
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe6e6e6e6e6e6e6e6;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe6e6e6e6e6e6e6e6;
-+ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xf001f0010101f002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0404010008080808;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0408010008080808;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0404010008080808;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0408010008080808;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0505070804040404;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0504070804040404;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0505070804040404;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0504070804040404;
-+ __m256i_out = __lasx_xvclz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x1202120212021202;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1202120212021202;
-+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0002fffefffd0001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x1202120212021202;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1202120212021202;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0202fe02fd020102;
-+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ff000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x1202120212021202;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1202120212021202;
-+ *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000;
-+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0505070804040404;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0504070804040404;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0505070804040404;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0504070804040404;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0283038402020202;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0282038402020202;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0283038402020202;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0282038402020202;
-+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0202fe02fd020102;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefcfefcfefcfefc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfcfc00fc01fcfdfc;
-+ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xf001f0010101f002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0202fe02fd020102;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000202fe02;
-+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x78);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0505070804040404;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0504070804040404;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0505070804040404;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0504070804040404;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0504080804030405;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0504060904040305;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0504080804030405;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0504060904040305;
-+ __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffefffefffefffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffefffefffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xfffefffe;
-+ *((int*)& __m128_op0[2]) = 0xfffefffe;
-+ *((int*)& __m128_op0[1]) = 0xfffefffe;
-+ *((int*)& __m128_op0[0]) = 0xfffefffe;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0xf001f001;
-+ *((int*)& __m128_op1[0]) = 0x0101f002;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0202fe02fd020102;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400;
-+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000202fe02;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff00ff;
-+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0504080804030405;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0504060904040305;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0504080804030405;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0504060904040305;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000141020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000141020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x66);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x1000100012030e02;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe;
-+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000202fe02;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffff00fc0000ff02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x0000ff00;
-+ *((int*)& __m256_op0[6]) = 0x0000ffff;
-+ *((int*)& __m256_op0[5]) = 0x000000ff;
-+ *((int*)& __m256_op0[4]) = 0x000000ff;
-+ *((int*)& __m256_op0[3]) = 0x0000ff00;
-+ *((int*)& __m256_op0[2]) = 0x0000ffff;
-+ *((int*)& __m256_op0[1]) = 0x000000ff;
-+ *((int*)& __m256_op0[0]) = 0x000000ff;
-+ *((int*)& __m256_op1[7]) = 0x0000ffee;
-+ *((int*)& __m256_op1[6]) = 0x0000ff4c;
-+ *((int*)& __m256_op1[5]) = 0x0000ff05;
-+ *((int*)& __m256_op1[4]) = 0x0000ff3c;
-+ *((int*)& __m256_op1[3]) = 0x0000fff9;
-+ *((int*)& __m256_op1[2]) = 0x0000ff78;
-+ *((int*)& __m256_op1[1]) = 0x0000ffa8;
-+ *((int*)& __m256_op1[0]) = 0x0000ff31;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010100;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010100;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff00fc0000ff02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff01ff040000fffe;
-+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000202fe02;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff3c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff31;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x5e5e5e5e5e5e5e1c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x5e5e5e5e5e5e5e10;
-+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x5e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffffeff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vneg_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff;
-+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fffffeff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000009ffffff08;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009;
-+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8;
-+ *((unsigned long*)& __m128i_result[1]) = 0x55aa55c355aa55c4;
-+ *((unsigned long*)& __m128i_result[0]) = 0xaa55556f55aaaac1;
-+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000141020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000141020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1020102010201020;
-+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffc00fd;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc;
-+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslei_h(__m128i_op0,-16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_result[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xefdfefdfefdfefdf;
-+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsknz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ef4002d21fc7001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x28bf02d1ec6a35b2;
-+ *((unsigned long*)& __m128i_result[1]) = 0x2a7b7c9260f90ee2;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1b1c6cdfd57f5736;
-+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d;
-+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x6c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1031146010201020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1020102010201020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1031146010201020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1020102010201020;
-+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x2c2c2c2c2c2c2c2c;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2c2c2c2c2c2c2c2c;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x2c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x1000100010001000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x1000100010001000;
-+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x55aa55c3d5aa55c4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xaa55556fd5aaaac1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000c;
-+ *((unsigned long*)& __m128i_result[0]) = 0xaa55556fd5aaaac1;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x1a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x2c2c2c2c2c2c2c2c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x2c2c2c2c2c2c2c2c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0d0d0d0d00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0d0d0d0d00000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x02407a3c00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0d0cf2f30d0cf2f3;
-+ *((unsigned long*)& __m256i_result[1]) = 0x02407a3c00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0d0cf2f30d0cf2f3;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vneg_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x25);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x86);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x2a7b7c9260f90ee2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1b1c6cdfd57f5736;
-+ *((unsigned long*)& __m128i_result[1]) = 0x153e3e49307d0771;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0d8e36706ac02b9b;
-+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x153e3e49;
-+ *((int*)& __m128_op0[2]) = 0x307d0771;
-+ *((int*)& __m128_op0[1]) = 0x0d8e3670;
-+ *((int*)& __m128_op0[0]) = 0x6ac02b9b;
-+ *((int*)& __m128_op1[3]) = 0x55aa55c3;
-+ *((int*)& __m128_op1[2]) = 0xd5aa55c4;
-+ *((int*)& __m128_op1[1]) = 0xaa55556f;
-+ *((int*)& __m128_op1[0]) = 0xd5aaaac1;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000100000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x1000100000001000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000100000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x1000100000001000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x02407a3c00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0d0cf2f30d0cf2f3;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x02407a3c00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0d0cf2f30d0cf2f3;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0010100000100000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100000101000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000000010;
-+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0xefdfefdf;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0xefdfefdf;
-+ *((int*)& __m256_op1[4]) = 0xefdfefdf;
-+ *((int*)& __m256_op1[3]) = 0xefdfefdf;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0xefdfefdf;
-+ *((int*)& __m256_op1[0]) = 0xefdfefdf;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000000010;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000080000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef;
-+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffee0000ff4c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff050000ff3c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000fff90000ff78;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffa80000ff31;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfff0fff0ff01ff01;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff0fff0;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfff0fff0ff01ff01;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff0fff0;
-+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x80000000307d0771;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0d8e36706ac02b9b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x80000000307d0771;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0d8e36706ac02b9b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0010100000100000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100000101000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010100000100000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1000100000101000;
-+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_op2[7]) = 0x00000000;
-+ *((int*)& __m256_op2[6]) = 0x00000000;
-+ *((int*)& __m256_op2[5]) = 0x00000000;
-+ *((int*)& __m256_op2[4]) = 0x00000000;
-+ *((int*)& __m256_op2[3]) = 0x00000000;
-+ *((int*)& __m256_op2[2]) = 0x00000000;
-+ *((int*)& __m256_op2[1]) = 0x00000000;
-+ *((int*)& __m256_op2[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x80000000;
-+ *((int*)& __m256_result[6]) = 0x80000000;
-+ *((int*)& __m256_result[5]) = 0x80000000;
-+ *((int*)& __m256_result[4]) = 0x80000000;
-+ *((int*)& __m256_result[3]) = 0x80000000;
-+ *((int*)& __m256_result[2]) = 0x80000000;
-+ *((int*)& __m256_result[1]) = 0x80000000;
-+ *((int*)& __m256_result[0]) = 0x80000000;
-+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80df00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f7f;
-+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000dfa6e0c6;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000d46cdc13;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000d46cdc13;
-+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_result[3]) = 0xdbcbdbcbecececec;
-+ *((unsigned long*)& __m256i_result[2]) = 0xdbcbdbcbdbcbdbcb;
-+ *((unsigned long*)& __m256i_result[1]) = 0xdbcbdbcbecececec;
-+ *((unsigned long*)& __m256i_result[0]) = 0xdbcbdbcbdbcbdbcb;
-+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xa5c4c774;
-+ *((int*)& __m128_op0[2]) = 0x856ba83b;
-+ *((int*)& __m128_op0[1]) = 0x8003caef;
-+ *((int*)& __m128_op0[0]) = 0x54691124;
-+ *((unsigned long*)& __m128i_result[1]) = 0xbf800000bf800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xbf80000054691124;
-+ __m128i_out = __lsx_vfrintrm_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xfff0fff0;
-+ *((int*)& __m256_op0[6]) = 0xff01ff01;
-+ *((int*)& __m256_op0[5]) = 0xfff0fff0;
-+ *((int*)& __m256_op0[4]) = 0xfff0fff0;
-+ *((int*)& __m256_op0[3]) = 0xfff0fff0;
-+ *((int*)& __m256_op0[2]) = 0xff01ff01;
-+ *((int*)& __m256_op0[1]) = 0xfff0fff0;
-+ *((int*)& __m256_op0[0]) = 0xfff0fff0;
-+ *((int*)& __m256_op1[7]) = 0xffefffef;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0xffefffef;
-+ *((int*)& __m256_op1[4]) = 0xffefffef;
-+ *((int*)& __m256_op1[3]) = 0xffefffef;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0xffefffef;
-+ *((int*)& __m256_op1[0]) = 0xffefffef;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef;
-+ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000060000000;
-+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xa5c4c774856ba837;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x2a569f8081c3bbe9;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417;
-+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0xd46cdc13;
-+ *((int*)& __m128_result[3]) = 0xff800000;
-+ *((int*)& __m128_result[2]) = 0xff800000;
-+ *((int*)& __m128_result[1]) = 0xff800000;
-+ *((int*)& __m128_result[0]) = 0x7fc00000;
-+ __m128_out = __lsx_vflogb_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ef4002d21fc7001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x28bf02d1ec6a35b2;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xff8000007fc00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7ef400ad21fc7081;
-+ *((unsigned long*)& __m128i_result[0]) = 0x28bf0351ec69b5f2;
-+ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffb96bffff57c9;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff6080ffff4417;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417;
-+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfff0fff0ff01ff14;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff10003;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfff0fff0ff01ff14;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff10003;
-+ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ef400ad21fc7081;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x28bf0351ec69b5f2;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7ef3ddac21fc5a2c;
-+ *((unsigned long*)& __m128i_result[0]) = 0x28bee9edec690869;
-+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff14;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff10003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff14;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff10003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefee0e3fefefe00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefee0e3fefefe00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7ef400ad; -+ *((int*)& __m128_op0[2]) = 0x21fc7081; -+ *((int*)& __m128_op0[1]) = 0x28bf0351; -+ *((int*)& __m128_op0[0]) = 0xec69b5f2; -+ *((int*)& __m128_op1[3]) = 0xff800000; -+ *((int*)& __m128_op1[2]) = 0xff800000; -+ *((int*)& __m128_op1[1]) = 0xff800000; -+ *((int*)& __m128_op1[0]) = 0x7fc00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xdfa6e0c6; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xd46cdc13; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000002c002400; -+ *((unsigned long*)& __m128d_op1[1]) = 0x7ef400ad21fc7081; -+ *((unsigned long*)& __m128d_op1[0]) = 0x28bf0351ec69b5f2; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff0; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff0; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x7ef400ad21fc7081; -+ *((unsigned long*)& __m128i_op1[0]) = 0x28bf0351ec69b5f2; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ad00007081; -+ *((unsigned long*)& __m128i_result[0]) = 0x000003510000b5f2; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7ef400ad21fc7081; -+ *((unsigned long*)& __m128i_op1[0]) = 0x28bf0351ec69b5f2; -+ *((unsigned long*)& __m128i_result[1]) = 0xdfa6e0c6d46cdc13; -+ *((unsigned long*)& __m128i_result[0]) = 0x21fc7081ec69b5f2; -+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xdfa6e0c6d46cdc13; -+ *((unsigned long*)& __m128i_op0[0]) = 0x21fc7081ec69b5f2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002c002400; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffb96bffff57c9; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffff6080ffff4417; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; -+ __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffb96b; -+ *((int*)& __m128_op0[2]) = 0xffff57c9; -+ *((int*)& __m128_op0[1]) = 0xffff6080; -+ *((int*)& __m128_op0[0]) = 0xffff4417; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x000000ad; -+ *((int*)& __m128_op0[2]) = 0x00007081; -+ *((int*)& __m128_op0[1]) = 0x00000351; -+ *((int*)& __m128_op0[0]) = 0x0000b5f2; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfrint_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x7f800000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0xffefffefffefffef; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ff00; -+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffb96bffff57c9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff6080ffff4417; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_w(__m128i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x64); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef; -+ *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef; -+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfefee0e3; -+ *((int*)& __m256_op0[6]) = 0xfefefe00; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xfefee0e3; -+ *((int*)& __m256_op0[2]) = 0xfefefe00; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ unsigned_int_result = 0x00000000000000ff; -+ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x9); -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f80000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x800080007f008000; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0a0a0a0a0a0a0a0a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a0aa9890a0ac5f3; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x24342434ffff2435; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x24342434ffff2435; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x24342434ffff2435; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x24342434ffff2435; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x800080007f008000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a0aa9890a0ac5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffff000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffff000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000060000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xfffffffffffff000; -+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d1c1b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffff000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op0[2]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op0[0]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2535253514141414; -+ *((unsigned long*)& __m256i_result[2]) = 0x2535253500002535; -+ *((unsigned long*)& __m256i_result[1]) = 0x2535253514141414; -+ *((unsigned long*)& __m256i_result[0]) = 0x2535253500002535; -+ 
__m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000fe; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000080000001000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000080000001000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000080000001000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000080000001000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op0[2]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_op0[1]) = 0xdbcbdbcbecececec; -+ *((unsigned long*)& __m256i_op0[0]) = 0xdbcbdbcb0000dbcb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000f0000000f000; -+ __m256i_out = 
__lasx_xvrotri_w(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x009c3e201e39e7e3; -+ *((unsigned long*)& __m256i_op0[2]) = 0x87c1135043408bba; -+ *((unsigned long*)& __m256i_op0[1]) = 0x009c3e201e39e7e3; -+ *((unsigned long*)& __m256i_op0[0]) = 0x87c1135043408bba; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000f0000000f000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x35); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0010000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0010000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0008000000000000; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x3b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d1c1b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128i_result[0]) = 0x3918371635143312; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f0000000f000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; -+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[0]) = 0x6363636363636363; -+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128d_op0[0]) = 0x3918371635143312; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000af555555555; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000af555555555; -+ *((unsigned long*)& __m128d_result[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128d_result[0]) = 0x3918371635143312; -+ __m128d_out = 
__lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xcd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001fffff001fffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d1c1b1a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x10ff10ff10ff10ff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010000f0000000f; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010000f0000000f; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; -+ *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d1c1b1a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x480f7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; -+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x3e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x21201f1e19181716; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000af555555555; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000af555555555; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000af5; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000af5; -+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; 
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x21201f1e1d001b25; -+ *((unsigned long*)& __m128i_result[0]) = 0x191817161514131d; -+ __m128i_out = __lsx_vaddi_du(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0003000900050007; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; -+ *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d001b25; -+ *((unsigned long*)& __m128i_op1[0]) = 0x191817161514131d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001e8e1d8; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000e400000001; -+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001e8e1d8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000e400000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001e8e1d8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000e400000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000e4e4; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000101; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000109000000c9; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000001d5d4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000150d707009; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x03f1e3d28b1a8a1a; -+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1e0000001e002000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1e0000001e002000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x03f1e3d28b1a8a1a; -+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000001d5d4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000150d707009; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffe2a2c; -+ *((unsigned long*)& __m128i_result[0]) = 0x03f1e3bd80000000; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001918000017160; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001514000013120; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01203f1e3d1c3b1a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3918371635143312; -+ *((unsigned long*)& __m128i_result[1]) = 0x21011f3f193d173b; -+ *((unsigned long*)& __m128i_result[0]) = 0xff39ff37ff35ff33; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00005dcbe7e830c0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x03f21e0114bf19da; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x03f1e3d28b1a8a1a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x18e2184858682868; -+ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000022; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000022; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x21011f3f193d173b; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff39ff37ff35ff33; -+ *((unsigned long*)& __m128i_result[1]) = 0x00fe008e009e0071; -+ *((unsigned long*)& __m128i_result[0]) = 0x001c006f00c4008d; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe; -+ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fffc0000fffc; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffc0000fffc; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x21011f3f193d173b; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff39ff37ff35ff33; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; -+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000022; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000022; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000003f200001e01; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000014bf000019da; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c99aed5b88fcf; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7c3650c5f79a61a3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00005dcbe7e830c0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x000000000000e41b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000005dcb; -+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00005dcbe7e830c0; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000001fffff59; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x63); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00005dcbe7e830c0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03f21e0114bf19da; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000003f200001e01; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000014bf000019da; -+ *((unsigned long*)& __m128i_result[1]) = 0x0005fe0300010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100010001; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffffff1; -+ *((int*)& __m256_op0[6]) = 0xfffffff1; -+ *((int*)& __m256_op0[5]) = 0xfffffff1; -+ *((int*)& __m256_op0[4]) = 0xfffffff1; -+ *((int*)& __m256_op0[3]) = 0xfffffff1; -+ *((int*)& __m256_op0[2]) = 0xfffffff1; -+ *((int*)& __m256_op0[1]) = 0xfffffff1; -+ *((int*)& __m256_op0[0]) = 0xfffffff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000022; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000022; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000045ff740023; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000001fffc0001; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000045ff740023; -+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xacc8c794af2caf01; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa91e2048938c40f0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00fd0101; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00fd0101; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00fd0101; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00fd0101; -+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000001fffff59; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000aaabffff; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000abff0000abff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000abff0000abff; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_result[0]) = 0x1f5533a694f902c0; -+ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000023; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000023; -+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x15d926c7; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x0000e41b; -+ *((int*)& __m128_op1[3]) = 0xfffffacd; -+ *((int*)& __m128_op1[2]) = 0xb6dbecac; -+ *((int*)& __m128_op1[1]) = 0x1f5533a6; -+ *((int*)& __m128_op1[0]) = 0x94f902c0; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000015d926c7; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000e41b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff56ff55ff01ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff56ff55ff01ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f7f7f7f; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f7f7f7f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f7f7f7f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000001fffe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000001fffe; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x1223dabf; -+ *((int*)& __m128_op0[2]) = 0x4c3b3549; -+ *((int*)& __m128_op0[1]) = 0x8e8f8626; -+ *((int*)& __m128_op0[0]) = 0xf15be124; -+ *((int*)& __m128_op1[3]) = 0xfffffacd; -+ *((int*)& __m128_op1[2]) = 0xb6dbecac; -+ *((int*)& __m128_op1[1]) = 0x1f5533a6; -+ *((int*)& __m128_op1[0]) = 0x94f902c0; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffadffedbfefe; -+ *((unsigned long*)& __m128i_result[0]) = 0x5f5f7bfedefb5ada; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x5a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000022ffdd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000022ffdd; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000f4b6ff23; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000f4b6ff23; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0005fe0300010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe03000101010000; -+ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007f807f007e8080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f807f007e806f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007f807f007e8080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f807f007e806f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000007e8080; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007e8092; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000007e8080; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007e8092; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfffffadf; -+ *((int*)& __m128_op0[2]) = 0xfedbfefe; -+ *((int*)& __m128_op0[1]) = 0x5f5f7bfe; -+ *((int*)& __m128_op0[0]) = 0xdefb5ada; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000; -+ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xff56ff55; -+ *((int*)& __m256_op0[4]) = 0xff01ff01; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xff56ff55; -+ *((int*)& __m256_op0[0]) = 0xff01ff01; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x0000abff; -+ *((int*)& 
__m256_op1[4]) = 0x0000abff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x0000abff; -+ *((int*)& __m256_op1[0]) = 0x0000abff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xff56ff55ff01ff01; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xff56ff55ff01ff01; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000023; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00aa00ab00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00aa00ab00ff00ff; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00aa00ab00ff00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00aa00ab00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) 
= 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007e8080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007e8092; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007e8080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007e8092; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffda6f; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffe3d7; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffda6e; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffe3d6; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffda6e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffe3d6; -+ *((unsigned long*)& __m128i_op1[1]) = 0xeeb1e4f4bc3763f3; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6f5edf5ada6fe3d7; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffe3d6; -+ *((unsigned long*)& __m128i_result[0]) = 0xeeb1e4f4bc3763f3; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000043cf26c7; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000e31d4cae8636; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000021e79364; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000718ea657431b; -+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2b2a292827262524; -+ *((unsigned long*)& __m256i_op1[2]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2b2a292827262524; -+ *((unsigned long*)& __m256i_op1[0]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xeeb1e4f43c3763f3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff5a6fe3d7; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000021e79364; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000718ea657431b; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000006ca193ec; -+ *((unsigned long*)& __m128i_result[0]) = 0x00008e72b5b94cad; -+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffff60ca7104649; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff790a15db63d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffff60ca710464a; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffff790a15db63e; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ int_op0 = 0x0000000020202020; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; -+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2b2a292827262524; -+ *((unsigned long*)& __m256i_op0[2]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2b2a292827262524; -+ *((unsigned long*)& __m256i_op0[0]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000027262524; -+ *((unsigned long*)& __m256i_result[2]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000027262524; -+ *((unsigned long*)& __m256i_result[0]) = 0x232221201f1e1d1c; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0xbd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 
0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007e8080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fdda7dc4; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007e8080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fdda7dc4; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff827f80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0226823c; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff827f80; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0226823c; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_w_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffff60ca7104649; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff790a15db63d; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffc00ffde4000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe857400fed8f400; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5a6f5c53ebed3faa; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa36aca4435b8b8e1; -+ *((unsigned long*)& __m128i_result[1]) = 0x5a6f61865d36d3aa; -+ *((unsigned long*)& __m128i_result[0]) = 0x7bea6962a0bfb621; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffda6f; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffe3d7; -+ *((unsigned long*)& __m128i_result[1]) = 0xfefffffffeffda6f; -+ *((unsigned long*)& __m128i_result[0]) = 0xfefffffffeffe3d7; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff827f80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0226823c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff827f80; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0226823c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000027262524;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x232221201f1e1d1c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000027262524;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x232221201f1e1d1c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000027262524;
-+ *((unsigned long*)& __m256i_result[2]) = 0x23222120171e151c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000027262524;
-+ *((unsigned long*)& __m256i_result[0]) = 0x23222120171e151c;
-+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x5a6f5c53ebed3faa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xa36aca4435b8b8e1;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x5a6f5c53ebed3faa;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xa36aca4435b8b8e1;
-+ *((unsigned long*)& __m128i_result[1]) = 0x5c535c533faa3faa;
-+ *((unsigned long*)& __m128i_result[0]) = 0xca44ca44b8e1b8e1;
-+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x201fdfe0201fdfe0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x201fdfe0201fdfe0;
-+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000021e79364;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000718ea657431b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfefffffffeffda6f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfefffffffeffe3d7;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff0000ff86;
-+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x201fdfe0201fdfe0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x201fdfe0201fdfe0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020202020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101013;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101013;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010;
-+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff0000ff86;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffa6ff91fdd8ef77;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x061202bffb141c38;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000005a00000228;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffff9ee000004ec;
-+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000027262524;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x23222120171e151c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000027262524;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x23222120171e151c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x201fdfe0201fdfe0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x201fdfe0201fdfe0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010127272525;
-+ *((unsigned long*)& __m256i_result[2]) = 0x23a2a121179e951d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010127272525;
-+ *((unsigned long*)& __m256i_result[0]) = 0x23a2a121179e951d;
-+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xf0bc9a5278285a4a;
-+ *((int*)& __m128_result[3]) = 0xc6178000;
-+ *((int*)& __m128_result[2]) = 0xbb4a4000;
-+ *((int*)& __m128_result[1]) = 0x47050000;
-+ *((int*)& __m128_result[0]) = 0x43494000;
-+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010127272525;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x23a2a121179e951d;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010127272525;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x23a2a121179e951d;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0101010127272525;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x23a2a121179e951d;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0101010127272525;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x23a2a121179e951d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffff0001;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xfffffffffdd97dc4;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffff0001;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffdd97dc4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1010100f10100fd4;
-+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffa6ff91fdd8ef77;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x061202bffb141c38;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_b(__m128i_op0,13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff0000ff86;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x010101fe0101fe87;
-+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000005a00000228;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffff9ee000004ec;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1f54e0ab00000000;
-+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffa6ff91fdd8ef77;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x061202bffb141c38;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xfefffffffed08f77;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x8160cdd2f365ed0d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
-+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffa6ff91fdd8ef77;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x061202bffb141c38;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x010101fe0101fe87;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000004000000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x3a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x01010101;
-+ *((int*)& __m128_op0[2]) = 0x01010101;
-+ *((int*)& __m128_op0[1]) = 0x010101fe;
-+ *((int*)& __m128_op0[0]) = 0x0101fe87;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000004000000002;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x5555410154551515;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0004455501500540;
-+ *((unsigned long*)& __m128d_result[1]) = 0xd555410154551515;
-+ *((unsigned long*)& __m128d_result[0]) = 0x8004455501500540;
-+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000023a20000a121;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000179e0000951d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000023a20000a121;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000179e0000951d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100;
-+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x62cbf96e;
-+ *((int*)& __m128_op0[2]) = 0x4acfaf40;
-+ *((int*)& __m128_op0[1]) = 0xf0bc9a52;
-+ *((int*)& __m128_op0[0]) = 0x78285a4a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a;
-+ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xfffffacdb6dbecac;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x1f5533a694f902c0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x62cbf84c02cbac00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1014120210280240;
-+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff0001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffdd97dc4;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff0001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffdd97dc4;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffdd97dc4;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffdd97dc4;
-+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
-+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000023a20000a121;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000179e0000951d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000023a20000a121;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000179e0000951d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000100;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000125100005111;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000c4f00004b0f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000125100005111;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000c4f00004b0f;
-+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x010101fe0101fe87;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101fe870101fe87;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101fe8700000000;
-+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0101fe870101fe87;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0101fe8700000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0101fe870101fe87;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0101fe8700000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xf0bc9a5278285a4a;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0101fe870101fe87;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0101fe8700000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000165e0000480d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000165e0000480d;
-+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x01010101;
-+ *((int*)& __m256_op0[6]) = 0x27272525;
-+ *((int*)& __m256_op0[5]) = 0x23a2a121;
-+ *((int*)& __m256_op0[4]) = 0x179e951d;
-+ *((int*)& __m256_op0[3]) = 0x01010101;
-+ *((int*)& __m256_op0[2]) = 0x27272525;
-+ *((int*)& __m256_op0[1]) = 0x23a2a121;
-+ *((int*)& __m256_op0[0]) = 0x179e951d;
-+ *((int*)& __m256_op1[7]) = 0x00001251;
-+ *((int*)& __m256_op1[6]) = 0x00005111;
-+ *((int*)& __m256_op1[5]) = 0x00000c4f;
-+ *((int*)& __m256_op1[4]) = 0x00004b0f;
-+ *((int*)& __m256_op1[3]) = 0x00001251;
-+ *((int*)& __m256_op1[2]) = 0x00005111;
-+ *((int*)& __m256_op1[1]) = 0x00000c4f;
-+ *((int*)& __m256_op1[0]) = 0x00004b0f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00002262;
-+ *((int*)& __m256_op0[6]) = 0x00005111;
-+ *((int*)& __m256_op0[5]) = 0x0000165e;
-+ *((int*)& __m256_op0[4]) = 0x0000480d;
-+ *((int*)& __m256_op0[3]) = 0x00002262;
-+ *((int*)& __m256_op0[2]) = 0x00005111;
-+ *((int*)& __m256_op0[1]) = 0x0000165e;
-+ *((int*)& __m256_op0[0]) = 0x0000480d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x62cbf96e4acfaf40;
-+ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x40);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffb6d01f5f94f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001f50000;
-+ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000051;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000101000000fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000051;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000101000000fff;
-+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000051;
-+ *((int*)& __m256_op1[5]) = 0x00001010;
-+ *((int*)& __m256_op1[4]) = 0x00000fff;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000051;
-+ *((int*)& __m256_op1[1]) = 0x00001010;
-+ *((int*)& __m256_op1[0]) = 0x00000fff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000051;
-+ *((int*)& __m256_op0[5]) = 0x00001010;
-+ *((int*)& __m256_op0[4]) = 0x00000fff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000051;
-+ *((int*)& __m256_op0[1]) = 0x00001010;
-+ *((int*)& __m256_op0[0]) = 0x00000fff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00220021004a007e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00220021004a007e;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffdfffffffdffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffddffdeffb5ff8d;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffdfffffffdffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffddffdeffb5ff8d;
-+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00ff00ff;
-+ *((int*)& __m128_op0[2]) = 0x00ff00ff;
-+ *((int*)& __m128_op0[1]) = 0x62cbf96e;
-+ *((int*)& __m128_op0[0]) = 0x4acfaf40;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x62cbf96e4acfaf40;
-+ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001f50000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffe0b0000;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000002362;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000175d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000002362;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000175d;
-+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00ff00ff;
-+ *((int*)& __m128_op0[2]) = 0x00ff00ff;
-+ *((int*)& __m128_op0[1]) = 0x62cbf96e;
-+ *((int*)& __m128_op0[0]) = 0x4acfaf40;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffeeffaf;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffeeffaf;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1010100f10100fd4;
-+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40;
-+ *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a;
-+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffdfffffffdffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffddffdeffb5ff8d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffdfffffffdffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffddffdeffb5ff8d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffeeffaf;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffeeffaf;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xfffdfffffffdffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffddffdeffb5ff8d;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xfffdfffffffdffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffddffdeffb5ff8d;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffcffff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0febedc9bb95dd8f;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffcffff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0febedc9bb95dd8f;
-+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vneg_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x01f50000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrne_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000165e0000480d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000165e0000480d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000016000000480d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000016000000480d;
-+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00001f5400000000;
-+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000b8f81b8c840e4;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000b8f81b8c840e4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000007ff000007ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000007fffffff800;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000007ff000007ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000007fffffff800;
-+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00001f5400000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x1010100f10100fd4;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000b8f81b8c840e4;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000b8f81b8c840e4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000504f00002361;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff8f81000040e4;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000504f00002361;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff8f81000040e4;
-+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x000007ff;
-+ *((int*)& __m256_op0[6]) = 0x000007ff;
-+ *((int*)& __m256_op0[5]) = 0x000007ff;
-+ *((int*)& __m256_op0[4]) = 0xfffff800;
-+ *((int*)& __m256_op0[3]) = 0x000007ff;
-+ *((int*)& __m256_op0[2]) = 0x000007ff;
-+ *((int*)& __m256_op0[1]) = 0x000007ff;
-+ *((int*)& __m256_op0[0]) = 0xfffff800;
-+ *((int*)& __m256_op1[7]) = 0xffffffff;
-+ *((int*)& __m256_op1[6]) = 0xffffffff;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0xffffffff;
-+ *((int*)& __m256_op1[2]) = 0xffffffff;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x000007ff;
-+ *((int*)& __m256_result[6]) = 0x000007ff;
-+ *((int*)& __m256_result[5]) = 0x000007ff;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x000007ff;
-+ *((int*)& __m256_result[2]) = 0x000007ff;
-+ *((int*)& __m256_result[1]) = 0x000007ff;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000504f00002361;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff8f81000040e4;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000504f00002361;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff8f81000040e4;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000007ff000007ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000007ff00000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000007ff000007ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000007ff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000584e00002b60;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000787dffffbf1c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000584e00002b60;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000787dffffbf1c;
-+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0xffeeffaf;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000011;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0xffeeffaf;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000011;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000695d00009b8f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000074f20000d272;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00001f5400000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000;
-+ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000b8f81b8c840e4;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000b8f81b8c840e4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffb3b4;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff5ffff4738;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffb3b4;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff5ffff4738;
-+ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf800d0d8ffffeecf;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf800d0d8ffffeecf;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf000f000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf000f000ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xe800c0d8fffeeece;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff383efffedf0c;
-+ *((unsigned long*)& __m256i_result[1]) = 0xe800c0d8fffeeece;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c;
-+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014;
-+ __m128i_out = __lsx_vaddi_du(__m128i_op0,0x14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xf800d0d8ffffeecf;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xf800d0d8ffffeecf;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf800d0d8ffffeecf;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf800d0d8ffffeecf;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xd0d8eecf383fdf0d;
-+ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x3f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000b8f81b8c850f4;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000b8f81b8c850f4;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x000b8f81b8c850f4;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x000b8f81b8c850f4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000b2673a90896a4;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000050504c4c2362;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000b2673a90896a4;
-+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
-+ __m128d_out = __lsx_vflogb_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00001f5400000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001f00000000;
-+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffb3b4;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff5ffff4738;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffb3b4;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff5ffff4738;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xee);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x3bcc5098;
-+ *((int*)& __m128_op1[2]) = 0x703fa5f0;
-+ *((int*)& __m128_op1[1]) = 0xab7b3134;
-+ *((int*)& __m128_op1[0]) = 0x9703f605;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0xffffb3b4;
-+ *((int*)& __m256_op0[5]) = 0xfffffff5;
-+ *((int*)& __m256_op0[4]) = 0xffff4738;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0xffffb3b4;
-+ *((int*)& __m256_op0[1]) = 0xfffffff5;
-+ *((int*)& __m256_op0[0]) = 0xffff4738;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0xffffb3b4;
-+ *((int*)& __m256_result[5]) = 0xfffffff5;
-+ *((int*)& __m256_result[4]) = 0xffff4738;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0xffffb3b4;
-+ *((int*)& __m256_result[1]) = 0xfffffff5;
-+ *((int*)& __m256_result[0]) = 0xffff4738;
-+ __m256_out = __lasx_xvfrint_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xaf0489001bd4c0c3;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xaf0489001bd4c0c3;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000d0d8ffffeecf;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000d0d8ffffeecf;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000383fffffdf0d;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffd8ffc7ffffdf0d;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffd8ffc7ffffdf0d;
-+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000016000000480d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000226200005111;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000016000000480d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1131288800000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1131288800000002;
-+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000014;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000014;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffd8ffc7;
-+ *((int*)& __m256_op0[4]) = 0xffdaff8a;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffd8ffc7;
-+ *((int*)& __m256_op0[0]) = 0xffdaff8a;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0xffffb3b4;
-+ *((int*)& __m256_op1[5]) = 0xfffffff5;
-+ *((int*)& __m256_op1[4]) = 0xffff4738;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0xffffb3b4;
-+ *((int*)& __m256_op1[1]) = 0xfffffff5;
-+ *((int*)& __m256_op1[0]) = 0xffff4738;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe800c0d8fffeeece;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff383efffedf0c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xe800c0d8fffeeece;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff383efffedf0c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xe800c000fffeeece;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff383efffedf0c;
-+ *((unsigned long*)& __m256i_result[1]) = 0xe800c000fffeeece;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c;
-+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
-+ __m128d_out = __lsx_vflogb_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe800c000fffeeece;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff383efffedf0c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xe800c000fffeeece;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff383efffedf0c;
-+ int_op1 = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xe800c000fffeeece;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff383e000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xe800c000fffeeece;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c;
-+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x26);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00220021004a007e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00220021004a007e;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ff00;
-+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f0f0f0f0;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf0f0f0f0f0f0f0f0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f0f0f0f0;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf0f0f0f0f0f0f0f0;
-+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000fffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010000000000001;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000;
-+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffb3b4;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff5ffff4738;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffb3b4;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff5ffff4738;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000f0f0f0f0;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xf0f0f0f0f0f0f0f0;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000f0f0f0f0;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xf0f0f0f0f0f0f0f0;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op2[3]) = 0x0001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256d_op2[2]) = 0x7f7f7f5c8f374980;
-+ *((unsigned long*)& __m256d_op2[1]) = 0x0001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256d_op2[0]) = 0x7f7f7f5c8f374980;
-+ *((unsigned long*)& __m256d_result[3]) = 0x8001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[1]) = 0x8001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
-+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007f41;
-+ __m128i_out = __lsx_vmsknz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f5c8f374980;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f5c8f374980;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100007f7f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100007f7f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
-+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x30);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007f41;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000fffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010000000000001;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xfff00000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xfff00000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000fffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0010000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007f41;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffc7f7f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffc000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffc7f7f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffc000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8001b0b1b4b5dd9f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000b0b100015d1e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001fffe0001bfff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000b0b100015d1e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001fffe0001bfff;
-+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x58);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)&
__m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000b2673a90896a4; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000b2673a90896a4; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffafafb3b3dc9d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffafafb3b3dc9d; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ff8000000000000; -+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000fffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000b2673a90896a4; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000b2673a90896a4; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001504f4c4b2361; -+ *((unsigned long*)& __m256i_result[2]) = 0x303338a48f374969; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001504f4c4b2361; -+ *((unsigned long*)& __m256i_result[0]) = 0x303338a48f374969; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffafaf; -+ *((int*)& __m256_op0[4]) = 0xb3b3dc9d; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffafaf; -+ *((int*)& __m256_op0[0]) = 0xb3b3dc9d; -+ *((int*)& __m256_op1[7]) = 0x00020000; -+ *((int*)& __m256_op1[6]) = 0x00020000; -+ *((int*)& __m256_op1[5]) = 0x00220021; -+ *((int*)& __m256_op1[4]) = 0x004a007e; -+ *((int*)& __m256_op1[3]) = 0x00020000; -+ *((int*)& __m256_op1[2]) = 0x00020000; -+ *((int*)& __m256_op1[1]) = 0x00220021; -+ *((int*)& __m256_op1[0]) = 0x004a007e; -+ *((int*)& __m256_op2[7]) = 0x00000001; -+ *((int*)& __m256_op2[6]) = 0x00007f7f; -+ *((int*)& __m256_op2[5]) = 0x00000001; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000001; -+ *((int*)& __m256_op2[2]) = 0x00007f7f; -+ *((int*)& __m256_op2[1]) = 0x00000001; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000001; -+ *((int*)& __m256_result[6]) = 0x80007f7f; -+ *((int*)& __m256_result[5]) = 0xffffafaf; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0x80000001; -+ *((int*)& __m256_result[2]) = 0x80007f7f; -+ *((int*)& __m256_result[1]) = 0xffffafaf; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; -+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0ef; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0ef; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0ef; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0ef; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000070f07170; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000070f0f0ef; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000070f07170; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000070f0f0ef; -+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000090909090; -+ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000090909090; -+ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x95); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001504f4c4b2361; -+ *((unsigned long*)& __m256i_op0[2]) = 0x303338a48f374969; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001504f4c4b2361; -+ *((unsigned long*)& __m256i_op0[0]) = 0x303338a48f374969; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff47b4ffff5879; -+ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x81); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000b2673a90896a4; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000b2673a90896a4; -+ *((unsigned long*)& __m256i_result[3]) = 0xa90896a400000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xa90896a400000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x22); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00020421d7d41124; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00020421d7d41124; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x55550000; -+ *((int*)& __m256_op0[6]) = 0x55550000; -+ *((int*)& __m256_op0[5]) = 0x55550000; -+ *((int*)& __m256_op0[4]) = 0x55550000; -+ *((int*)& __m256_op0[3]) = 0x55550000; -+ *((int*)& __m256_op0[2]) = 0x55550000; -+ *((int*)& __m256_op0[1]) = 0x55550000; -+ *((int*)& __m256_op0[0]) = 0x55550000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000d5000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000d5000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000d5000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000d5000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = 
__lasx_xvslei_w(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00020421d7d41124; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00020421d7d41124; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00220021004a007e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00220021004a007e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00220021004a007e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00220021004a007e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ff0fff0fff0f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff0fff0fff0f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000ffb10001ff8f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001004c0001ff87; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffb10001ff8f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001004c0001ff87; -+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; -+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe01ae00ff00ff; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff47b4ffff5879; -+ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0000504f; -+ *((int*)& __m256_op0[6]) = 0xffff3271; -+ *((int*)& __m256_op0[5]) = 0xffff47b4; -+ *((int*)& __m256_op0[4]) = 0xffff5879; -+ *((int*)& __m256_op0[3]) = 0x0000504f; -+ *((int*)& __m256_op0[2]) = 0xffff3271; -+ *((int*)& __m256_op0[1]) = 0xffff47b4; -+ *((int*)& __m256_op0[0]) = 0xffff5879; -+ *((int*)& __m256_op1[7]) = 0x00000000; 
-+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xa90896a400000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa90896a400000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f7f000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f7f000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f7f7f7f7f7f7f; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256d_op1[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256d_op1[0]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f7f000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff3225; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff3225; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1a19181716151413; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1a19181716151413; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000004442403; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000004442403; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x63); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f7f000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100010001; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff47b4ffff5878; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000b84b0000a787; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0xffff47b4ffff5878; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000b84b0000a787; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff47b4ffff5878; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000b84b0000a787; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff47b4ffff5878; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000b84b0000a787; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff07b4ffff0707; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000b8070000a787; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff07b4ffff0707; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000b8070000a787; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x0000ffb1; -+ *((int*)& __m256_op1[6]) = 0x0001ff8f; -+ *((int*)& __m256_op1[5]) = 0x0001004c; -+ *((int*)& __m256_op1[4]) = 0x0001ff87; -+ *((int*)& __m256_op1[3]) = 0x0000ffb1; -+ *((int*)& __m256_op1[2]) = 0x0001ff8f; -+ *((int*)& __m256_op1[1]) = 0x0001004c; -+ *((int*)& __m256_op1[0]) = 0x0001ff87; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe01ae00ff00ff; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0x7f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffe1ffffffe1; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffe1ffffffe1; -+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; -+ *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0xec); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffe1ffffffe1; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffe1ffffffe1; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffafffffffa; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffafffffffa; -+ __m128i_out = __lsx_vmini_w(__m128i_op0,-6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffafffffffa; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffafffffffa; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfffffffa; -+ *((int*)& __m128_op0[2]) = 0xfffffffa; -+ *((int*)& __m128_op0[1]) = 0xfffffffa; -+ *((int*)& __m128_op0[0]) = 0xfffffffa; -+ *((int*)& __m128_result[3]) = 0xfffffffa; -+ *((int*)& __m128_result[2]) = 0xfffffffa; -+ *((int*)& __m128_result[1]) = 0xfffffffa; -+ *((int*)& __m128_result[0]) = 0xfffffffa; -+ __m128_out = __lsx_vfrecip_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0aa077b7054c9554; -+ *((unsigned long*)& __m128i_op0[0]) = 0x40c7ee1f38e4c4e8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff07b4ffff0707; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000b8070000a787; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff07b4ffff0707; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000b8070000a787; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffb7650000d496; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001800000018000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffb7650000d496; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001800000018000; -+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000a00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000000a; -+ __m256i_out = __lasx_xvmini_w(__m256i_op0,10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256d_op1[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256d_op1[0]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256d_result[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256d_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000100010001; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000000a; -+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000080008001; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000001000b000b; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001000b000b; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00feffff00fe81; -+ *((unsigned long*)& __m256i_result[2]) = 0xfe01fe51ff00ff40; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00feffff00fe81; -+ *((unsigned long*)& __m256i_result[0]) = 0xfe01fe51ff00ff40; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000001; -+ *((int*)& __m256_op0[4]) = 0x0000000a; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000001; -+ *((int*)& __m256_op0[0]) = 0x0000000a; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000040; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& 
__m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff7f80ffff7f80; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff7f80ffff7f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff7f80ffff7f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff7f80ffff7f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffeff00; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffeff00; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff010000ff017e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01ae00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff017e6b803fc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff017e6b803fc0; -+ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0aa077b7054c9554; -+ *((unsigned long*)& __m128i_op0[0]) = 0x40c7ee1f38e4c4e8; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000010000000a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffffe01fe52; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff01ff02; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffffe01fe52; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff01ff02; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000080008001; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,-5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007f7f7f7f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; -+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 
0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000080000000; -+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x33); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000080008001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000080008001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000fffff614; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000a00000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000fffff614; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000020202020; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x7ef8000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ef8000000000000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ef8000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ef8000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7ef8000000000000; -+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff600000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff000009ec; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff600000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff000009ec; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000180000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000180000001; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f6f7f7f7f6; -+ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f6f7f7f7f6; -+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f6f7f7f7f6; -+ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f6f7f7f7f6; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
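(The generated blocks above and below all follow one mechanical pattern: the 64-bit lanes of each operand vector are written through casted pointers, a single LSX/LASX builtin is invoked, and the output is compared lane-by-lane against pre-recorded golden values. A minimal self-contained version of that pattern is sketched here for orientation only; it assumes a LoongArch target with LSX enabled, and the ASSERTEQ_64 below is a stand-in, since the harness's real macro definition does not appear in this hunk.)

    #include <lsxintrin.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the harness macro: compare the two 64-bit lanes of a
       128-bit vector against the reference, abort on the first mismatch. */
    #define ASSERTEQ_64(line, ref, out)                                   \
      do {                                                                \
        unsigned long *r_ = (unsigned long *)&(ref);                      \
        unsigned long *o_ = (unsigned long *)&(out);                      \
        for (int i_ = 0; i_ < 2; i_++)                                    \
          if (r_[i_] != o_[i_]) {                                         \
            printf ("line %d lane %d: %016lx != %016lx\n",                \
                    line, i_, r_[i_], o_[i_]);                            \
            abort ();                                                     \
          }                                                               \
      } while (0)

    int
    main (void)
    {
      __m128i op0, op1, result, out;

      /* Poke each 64-bit lane directly, exactly as the generated cases do
         (__m128i is a GCC vector type, so op0[1] is a long long lvalue).  */
      *((unsigned long *)&op0[1]) = 0x0000000000000001;
      *((unsigned long *)&op0[0]) = 0x0000000000000002;
      *((unsigned long *)&op1[1]) = 0x0000000000000003;
      *((unsigned long *)&op1[0]) = 0x0000000000000004;
      /* Expected lanes for a lane-wise 64-bit add (vadd.d).  */
      *((unsigned long *)&result[1]) = 0x0000000000000004;
      *((unsigned long *)&result[0]) = 0x0000000000000006;

      out = __lsx_vadd_d (op0, op1);
      ASSERTEQ_64 (__LINE__, result, out);
      return 0;
    }

(Such a sketch would be built on a LoongArch toolchain with the LSX option enabled, e.g. gcc -mlsx; that option name is the upstream one and may differ in this port.)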
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff80017fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff80017fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000280000; -+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x30); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7ef8000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ int_result = 0x000000007ff00000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x92); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000000f; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x000000000000000f; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000280000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000140001; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000000; -+ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100007fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100007fff; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ef8000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8108000000000000; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0a0a0a0a7f0a0a0a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a7f0a0a0a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0a0a0a0a7f0a0a0a; -+ *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a7f0a0a0a; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000001fffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000001fffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000001fffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000001fffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004; 
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000001e; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100007fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100007fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100007fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100007fff; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100007fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100008000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100007fff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000140001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000140001; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010200000000;
-+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
-+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x35);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
-+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffe5ffffffe5;
-+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010200000000;
-+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ unsigned_long_int_result = 0x00000000ffffffff;
-+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x0);
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_op2[7]) = 0xffffffe5;
-+ *((int*)& __m256_op2[6]) = 0xffffffe5;
-+ *((int*)& __m256_op2[5]) = 0xffffffe5;
-+ *((int*)& __m256_op2[4]) = 0xffffffe5;
-+ *((int*)& __m256_op2[3]) = 0xffffffe5;
-+ *((int*)& __m256_op2[2]) = 0xffffffe5;
-+ *((int*)& __m256_op2[1]) = 0xffffffe5;
-+ *((int*)& __m256_op2[0]) = 0xffffffe5;
-+ *((int*)& __m256_result[7]) = 0xffffffe5;
-+ *((int*)& __m256_result[6]) = 0xffffffe5;
-+ *((int*)& __m256_result[5]) = 0xffffffe5;
-+ *((int*)& __m256_result[4]) = 0xffffffe5;
-+ *((int*)& __m256_result[3]) = 0xffffffe5;
-+ *((int*)& __m256_result[2]) = 0xffffffe5;
-+ *((int*)& __m256_result[1]) = 0xffffffe5;
-+ *((int*)& __m256_result[0]) = 0xffffffe5;
-+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000f;
-+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x61608654a2d4f6da;
-+ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001e;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffe5ffffffe5;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0c0c0c0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0014000100000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x35);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x61608654a2d4f6da;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x61608654a2d4f6da;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfee0000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc2c00ca844a8ecb4;
-+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((int*)& __m256_result[7]) = 0x5d20a0a1;
-+ *((int*)& __m256_result[6]) = 0x5d20a0a1;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x5d20a0a1;
-+ *((int*)& __m256_result[2]) = 0x5d20a0a1;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x5d20a0a15d20a0a1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x5d20a0a15d20a0a1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0014000100000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f807f807f807f80;
-+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfrint_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000007f7f7f7f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000003fbf3fbf;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7ff8;
-+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000007f7f7f7f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000007f7f7f7f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000010;
-+ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
-+ __m256i_out = __lasx_xvclz_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x5d20a0a1;
-+ *((int*)& __m256_op1[6]) = 0x5d20a0a1;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x5d20a0a1;
-+ *((int*)& __m256_op1[2]) = 0x5d20a0a1;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x7f7f7f7f;
-+ *((int*)& __m128_op0[1]) = 0x00000001;
-+ *((int*)& __m128_op0[0]) = 0x00000010;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x61608654a2d4f6da;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfff0800080008000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe160065422d476da;
-+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0800080008000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe160065422d476da;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000d00000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000b00000010;
-+ __m128i_out = __lsx_vpcnt_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x61608654a2d4f6da;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff08ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x5d20a0a1;
-+ *((int*)& __m256_op0[6]) = 0x5d20a0a1;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x5d20a0a1;
-+ *((int*)& __m256_op0[2]) = 0x5d20a0a1;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5d20a0a15d20a0a1;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5d20a0a15d20a0a1;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffeaffffffea;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffeaffffffea;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffeaffffffea;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffeaffffffea;
-+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ff08ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff08ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0;
-+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000;
-+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff08ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vmskltz_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_w(__m128i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000002c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000002c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000002c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000002c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000002c0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000002c0000;
-+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x5d20a0a15d20a0a1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x5d20a0a15d20a0a1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5d20a0895d20a089;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffe8ffffffe8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5d20a0895d20a089;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffe8ffffffe8;
-+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3fbf3fbf00007fff;
-+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff8007;
-+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xfffffff0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_wu_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vffint_d_lu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000003fbf3fbf;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7ff8;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000100;
-+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff8fffffff8;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff8fffffff8;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff8fffffff8;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff8fffffff8;
-+ __m256i_out = __lasx_xvmini_w(__m256i_op0,-8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x0000000f;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00077f88;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00077f97;
-+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000077f97;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffeff7f0000;
-+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3fbf3fbf00007fff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e;
-+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x3fbf3fbf00007fff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x000000003fbf3fbf;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x7fff7fff7fff7ff8;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff3fbfffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
-+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff3fbfffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fbf3fbf00007fff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x007f7f7f01027f02;
-+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff3fbfffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000100fe000100fe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
-+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00;
-+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
-+ __m128i_out = __lsx_vclz_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
-+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
-+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000;
-+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x39);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000100fe000100fe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
-+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000002000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000;
-+ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclo_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000;
-+ __m256d_out = __lasx_xvflogb_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000002000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x31);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fbf3fbf00007fff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vclo_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00100010;
-+ *((int*)& __m128_op0[2]) = 0x00100010;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000039;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000039;
-+ __m128i_out = __lsx_vclz_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00002000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x1fe02000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000003f800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000003f800000; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x4050000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00003f80000000ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x5); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4050000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x2028000000000000; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000001fe02000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000001fe02000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4050000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00000000; -+ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4050000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2028000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x000000ff; -+ *((int*)& __m128_op0[2]) = 0x000000ff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x371fe00000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x371fe00000000000; -+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x371fe00000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x371fe00000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[0]) = 0x370bdfecffecffec; -+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x000000ff; -+ *((int*)& __m128_op0[0]) = 0x000000ff; -+ *((int*)& __m128_op1[3]) = 0x370bdfec; -+ *((int*)& __m128_op1[2]) = 0xffecffec; -+ *((int*)& __m128_op1[1]) = 0x370bdfec; -+ *((int*)& __m128_op1[0]) = 0xffecffec; -+ *((int*)& __m128_result[3]) = 0x370bdfec; -+ *((int*)& __m128_result[2]) = 0xffecffec; -+ *((int*)& __m128_result[1]) = 0x370bdfec; -+ *((int*)& __m128_result[0]) = 0xffecffec; -+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0037ffdfffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0037ffdfffeb007f; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x371fe00000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x371fe00000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003f3f; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_h(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& 
__m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00010001; -+ *((int*)& __m256_op1[6]) = 0x00010001; -+ *((int*)& __m256_op1[5]) = 0x00010001; -+ *((int*)& __m256_op1[4]) = 0x00010001; -+ *((int*)& __m256_op1[3]) = 0x00010001; -+ *((int*)& __m256_op1[2]) = 0x00010001; -+ *((int*)& __m256_op1[1]) = 0x00010001; -+ *((int*)& __m256_op1[0]) = 0x00010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000006e17bfd8; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000006e17bfd8; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff0100000001; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; -+ __m128i_out = __lsx_vclo_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7f800000; -+ *((int*)& __m256_op0[6]) = 0x7f800000; -+ *((int*)& __m256_op0[5]) = 0x7f800000; -+ *((int*)& __m256_op0[4]) = 0x7f800000; -+ *((int*)& __m256_op0[3]) = 0x7f800000; -+ *((int*)& __m256_op0[2]) = 0x7f800000; -+ *((int*)& __m256_op0[1]) = 0x7f800000; -+ *((int*)& __m256_op0[0]) = 0x7f800000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000006e17bfd8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000006e17bfd8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffff0100000001; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffff0100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000006e17bfd8; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000006e17bfd8; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80; -+ *((unsigned long*)& __m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f27332b9f; -+ __m128i_out 
= __lsx_vaddi_bu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x36fbdfdcffdcffdc; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_d(__m256i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[1]) = 0x370bdfec00130014; -+ *((unsigned long*)& __m128i_result[0]) = 0x370bdfec00130014; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fefffffffffffff; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,-5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000008140c80; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x36fbdfdcffdcffdc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80; -+ *((unsigned long*)& __m128i_op2[1]) = 0x1f1f1f1f1f1f1f00; -+ *((unsigned long*)& __m128i_op2[0]) = 0x1f1f1f27332b9f00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x36fbdfdcffdc0008; -+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x3ff0010000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x3ff0010000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000002050320; -+ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008130c7f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1f1f1f1f1f1f1f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1f1f1f27332b9f00; -+ *((unsigned long*)& 
__m128i_op2[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op2[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[1]) = 0x06b1213ef1efa299; -+ *((unsigned long*)& __m128i_result[0]) = 0x8312f5424ca4a07f; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fef; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x06b1213ef1efa299; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8312f5424ca4a07f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1f1f1f1f1f1f1f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1f1f1f27332b9f00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xa23214697fd03f7f; -+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f70000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x7f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fef; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fee; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fee; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000fedd; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fedd; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000fedd; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fedd; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f70000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfec00130014; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfec00130014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000370bffffdfec; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000014; -+ __m128i_out = __lsx_vexth_w_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x800080008000ffee; -+ *((unsigned long*)& __m256i_result[2]) = 0x800080008000ffee; -+ *((unsigned long*)& __m256i_result[1]) = 0x800080008000ffee; -+ *((unsigned long*)& __m256i_result[0]) = 0x800080008000ffee; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001c88bf0; -+ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& 
__m256d_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000320; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007730; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7f70000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000dc300003ffb; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000dc300003ffb; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000dc300003ffb; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000dc300003ffb; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffff3fbfffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7fffffff7fffffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x7ffffffb; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000320; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007730; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa23214697fd03f7f; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007ffffffb; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x010101017f010101; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff810011; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfeca2eb9931; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00d3007c014e00bd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200020002; -+ *((unsigned long*)& __m128i_result[0]) = 0x06e1000e00030005; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x010101017f010101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000040600000406; -+ *((unsigned long*)& __m128i_result[0]) = 0x020202020202fe02; -+ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xfff70156; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xfff70156; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xfff70156; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xfff70156; -+ *((int*)& __m256_op1[7]) = 0x7fefffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0x7fefffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x7fefffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0x7fefffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xa2321469; -+ *((int*)& __m128_op0[0]) = 
0x7fd03f7f; -+ *((int*)& __m128_op1[3]) = 0x00000406; -+ *((int*)& __m128_op1[2]) = 0x00000406; -+ *((int*)& __m128_op1[1]) = 0x02020202; -+ *((int*)& __m128_op1[0]) = 0x0202fe02; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040600000406; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020202020202fe02; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff503fbfff503fb; -+ *((unsigned long*)& __m128i_result[0]) = 0x01f701f701f7fdf7; -+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x3fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x3fff7fffffc08008; -+ *((unsigned long*)& __m256i_result[1]) = 0x3fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x3fff7fffffc08008; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000040600000406; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020202020202fe02; -+ *((unsigned long*)& __m128i_result[1]) = 0x0020200000202000; -+ *((unsigned long*)& __m128i_result[0]) = 0x002020000fe02000; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x7fefffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ 
*((int*)& __m256_op1[5]) = 0x7fefffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x7fefffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0x7fefffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7fefffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0x7fefffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0x7fefffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0x7fefffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op1[3]) = 0x3fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x3fff8000ffa08004; -+ *((unsigned long*)& __m256i_op1[1]) = 0x3fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3fff8000ffa08004; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; -+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128i_op0[0]) = 0x545cab1d7e57c415; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128i_result[0]) = 0x545cab1d81a83bea; -+ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfeca2eb9931; -+ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; -+ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfeca2eb9931; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x370bdfec; -+ *((int*)& __m128_op0[2]) = 0xffecffec; -+ *((int*)& __m128_op0[1]) = 0x370bdfec; -+ *((int*)& 
__m128_op0[0]) = 0xa2eb9931; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff810011; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x817f11ed81800ff0; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000aaaa; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000545cab1d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000081a83bea; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00d3007c014e00bd; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000aaaa; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fef010000010100; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fef010000010100; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fef010000010100; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fef010000010100; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000aaaa; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffff70156; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x74); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000545cab1d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000081a83bea; -+ *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; -+ *((unsigned long*)& __m128i_result[1]) = 0x00400000547cab1d; -+ *((unsigned long*)& __m128i_result[0]) = 0x2000000081a83fea; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; -+ *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000080; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128d_op1[0]) = 0x545cab1d81a83bea; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; -+ *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; -+ *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320176a4d2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x685670d37e80682a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320176a4d2; -+ *((unsigned long*)& __m128i_result[0]) = 0x685670d37e80682a; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff8180ffff8181; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff8180ffff8181; -+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320176a4d2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x685670d37e80682a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320076a4d2; -+ *((unsigned long*)& __m128i_result[0]) = 0x685670d27e00682a; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffe05fc47b400; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffe06003fc000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffe05fc47b400; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffe06003fc000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x14ccc6320076a4d2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x685670d27e00682a; -+ *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320076a4d2; -+ *((unsigned long*)& __m128i_result[0]) = 0x685670d27e00682a; -+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x9d9d9d9d9d9d9d9d; -+ *((unsigned long*)& __m128i_result[0]) = 0x9d9d9d9d9d9d9d9d; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0x62); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff810011; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff810011; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff8180ffff8181; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff8180ffff8181; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000008000ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff81ff81; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000008000ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff81ff81; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0fff01800fff0181; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0fff01800fff0181; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0007ff800007ff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0007ff800007ff80; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x14ccc632; -+ *((int*)& __m128_op0[2]) = 0x0076a4d2; -+ *((int*)& __m128_op0[1]) = 0x685670d2; -+ *((int*)& __m128_op0[0]) = 0x7e00682a; -+ *((int*)& __m128_op1[3]) = 0x14ccc632; -+ *((int*)& __m128_op1[2]) = 0x0076a4d2; -+ *((int*)& __m128_op1[1]) = 0x685670d2; -+ *((int*)& __m128_op1[0]) = 0x7e00682a; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x00000001; -+ *((int*)& __m256_op0[5]) = 0x0fff0180; -+ *((int*)& __m256_op0[4]) = 0x0fff0181; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0x0fff0180; -+ *((int*)& __m256_op0[0]) = 0x0fff0181; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000545cffffab1d; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff81a800003bea; -+ *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000545cffff0001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff81a800003bea; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff800000003; -+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00197d3200197d56; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00197d3200197d56; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320076a4d2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x685670d27e00682a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x14ccc631eb3339ce; -+ *((unsigned long*)& __m128i_result[0]) = 0x685670d197a98f2e; -+ __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x817f11ed81800ff0; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x817f11ed81800ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000004fc480040; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000004fc480040; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000004fc480040; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000004fc480040; -+ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128i_op0[0]) = 0x545cab1d81a83bea; -+ *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; -+ *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0015172b; -+ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320076a4d2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x685670d27e00682a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x14ccc6320076a4d2; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x685670d27e00682a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; -+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00197d3200197d56; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00197d3200197d56; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff800000003; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001900000019; -+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000300000003; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000300000003; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffd; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffd; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffd; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffd; -+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ long_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0x0015172b; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xfffffffe; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xfffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff0015172b; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0015172b; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0015172b; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; -+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffb00151727; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; 
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op2[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x00010000fffffffc; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x20fc000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x20fc000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffb00151727; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00010000fffffffc; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffb00151727; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00010000fffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00010000fffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffebeeaaefafb; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffebeeaaeeeeb; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffebeeaaefafb; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffebeeaaeeeeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7fefffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x7fefffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7fffffff; -+ *((int*)& __m256_op0[6]) = 0x7fffffff; -+ *((int*)& __m256_op0[5]) = 0x7fffffff; -+ *((int*)& __m256_op0[4]) = 0x7fffffff; -+ *((int*)& __m256_op0[3]) = 0x7fffffff; -+ *((int*)& __m256_op0[2]) = 0x7fffffff; -+ *((int*)& __m256_op0[1]) = 0x7fffffff; -+ *((int*)& __m256_op0[0]) = 0x7fffffff; -+ *((int*)& __m256_op1[7]) = 0x20fc0000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 
0x00000000; -+ *((int*)& __m256_op1[3]) = 0x20fc0000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffebeeaaefafb; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffebeeaaeeeeb; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffebeeaaefafb; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffebeeaaeeeeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7fefffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x7fefffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x01ffbfff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x03ffffff03ffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x01ffbfff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x03ffffff03ffffff; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x26); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xfbffffff; -+ *((int*)& __m128_op0[0]) = 0x27001517; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x0000ffff; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc631eb3339ce; -+ *((unsigned long*)& __m128i_op0[0]) = 0x685670d197a98f2e; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe0045; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe0045; -+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x43f0000000000000; -+ __m256d_out = 
__lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe00fe0045; -+ *((unsigned long*)& __m128i_result[1]) = 0x007f007f007f007e; -+ *((unsigned long*)& __m128i_result[0]) = 0x007f007f007effc6; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x007f007f007f007e; -+ *((unsigned long*)& __m128d_op1[0]) = 0x007f007f007effc6; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff1fffffff1; -+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff46; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffe00000002; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffff46000000ba; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffe00000002; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff46000000ba; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffa30000005c; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x43f0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x001f001f001f001f; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xa3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x001f001f001f001f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000001001f001e; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001001f001e; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000fffe0000ff45; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff000000b9; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffd5002affffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x343d8dc6b0ed5a08; -+ *((unsigned long*)& __m128i_result[1]) = 0x012b012c01010246; -+ *((unsigned long*)& __m128i_result[0]) = 0x353e743b50135a4f; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffd5002affffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x343d8dc6b0ed5a08; -+ *((unsigned long*)& __m128i_result[1]) = 0x002affd600000001; -+ *((unsigned long*)& __m128i_result[0]) = 0xcbc2723a4f12a5f8; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe05fc47b400; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe06003fc000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe05fc47b400; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe06003fc000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; -+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000001; -+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x01ff020000ff03ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x01346b8d00b04c5a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x01ff020000ff03ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x01346b8d00b04c5a; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 
0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7eeefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x7eeefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x002affd600000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcbc2723a4f12a5f8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffd60001723aa5f8; -+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x343d8dc5b0ed5a08; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_d(__m128i_op0,12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x343d8dc5b0ed5a08; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x353c8cc4b1ec5b09; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000010101010; -+ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000010101010; -+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe05fc47b400; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe06003fc000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe05fc47b400; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe06003fc000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x353c8cc4b1ec5b09; -+ *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808000000035; -+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7eeefefefefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7eeefefefefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7e00ee00fe00fe00; -+ *((unsigned long*)& __m256i_result[2]) = 0xfe00fe00fe00fe00; -+ *((unsigned long*)& __m256i_result[1]) = 0x7e00ee00fe00fe00; -+ *((unsigned long*)& __m256i_result[0]) = 0xfe00fe00fe00fe00; -+ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00f525682ffd27f2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00365c60317ff930; -+ *((unsigned long*)& __m128i_result[1]) = 0xe500c085c000c005; -+ *((unsigned long*)& __m128i_result[0]) = 0xe5c1a185c48004c5; -+ __m128i_out = __lsx_vnori_b(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x61); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd60001723aa5f8; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007f007f7f; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff00ff; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; -+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xe500c085c000c005; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe5c1a185c48004c5; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe500ffffc085; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffc000ffffc005; -+ __m128i_out = __lsx_vexth_w_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fefe0000fefe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fefe0000fefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00fe00fe00fe00fe; -+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_result[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_result[0]) = 0x00ff00fe00ff00fe; -+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808000000035; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200000000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00fe00fe; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); 
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; -+ *((unsigned long*)& __m128i_result[1]) = 0x2f3626e7b637e6be; -+ *((unsigned long*)& __m128i_result[0]) = 0xee3ee6f77f6e76f7; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x26); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fef0000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fef0000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fef7fef7fef7fef; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe500ffffc085; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffc000ffffc005; -+ __m128i_out = __lsx_vextl_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe500ffffc085; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffc000ffffc005; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xe500c085; -+ *((int*)& __m128_op0[2]) = 0xc000c005; -+ *((int*)& __m128_op0[1]) = 0xe5c1a185; -+ *((int*)& __m128_op0[0]) = 0xc48004c5; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffc000; -+ *((int*)& __m128_op1[0]) = 0xffffc005; -+ *((int*)& __m128_op2[3]) = 0xff550025; -+ *((int*)& __m128_op2[2]) = 0x002a004b; -+ *((int*)& __m128_op2[1]) = 0x00590013; -+ *((int*)& __m128_op2[0]) = 0x005cffca; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0xffffc000; -+ *((int*)& __m128_result[0]) = 0xffffc005; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fef0000ffff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fef0000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xde00fe0000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fe010000fe01; -+ *((unsigned long*)& __m256i_result[1]) = 0xde00fe0000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fe010000fe01; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000080800000808; -+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000007070707; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff07070707; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000007070707; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff07070707; -+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fef7fef7fef7fef; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xde00fe00; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x0000fe01; -+ *((int*)& __m256_op0[4]) = 0x0000fe01; -+ *((int*)& __m256_op0[3]) = 0xde00fe00; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x0000fe01; -+ *((int*)& __m256_op0[0]) = 0x0000fe01; -+ *((int*)& __m256_op1[7]) = 0x0000ffff; -+ *((int*)& __m256_op1[6]) = 0x0000ffff; -+ *((int*)& __m256_op1[5]) = 0x00ff00fe; -+ *((int*)& __m256_op1[4]) = 0x00ff00fe; -+ *((int*)& __m256_op1[3]) = 0x0000ffff; -+ *((int*)& __m256_op1[2]) = 0x0000ffff; -+ *((int*)& __m256_op1[1]) = 0x00ff00fe; -+ *((int*)& __m256_op1[0]) = 0x00ff00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xde00fe0000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe010000fe01; -+ *((unsigned long*)& __m256i_op0[1]) = 0xde00fe0000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe010000fe01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; -+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe500ffffc085; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000012; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001200000012; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbe8282a0793636d3; -+ *((unsigned long*)& __m128i_op0[0]) = 0x793636d3793636d3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m128i_op0[0]) = 0x363d753d50155c0a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe500c085c000c005; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe5c1a185c48004c5; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002020002020200; -+ *((unsigned long*)& __m128i_result[0]) = 0x021f3b0205150600; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002020002020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x021f3b0205150600; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000300400002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000100010040fffb; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000300400002; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100010040fffb; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000545400; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000545400; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100fe04ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100fe04ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a3a3a3b3a3a3a3a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3a3a00003a3a0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,-5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x353c8cc4b1ec5b09; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080008000808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x1a9e466258f62d84; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0202020202020202; -+ *((unsigned long*)& __m128i_op1[0]) = 0x363d753d50155c0a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff400000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff400000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00fe00fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f8080007f007f; -+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x808080e280808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080636380806363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080638063; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffff040000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffff040000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((int*)& __m256_op0[7]) = 0xffff0000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffff0000; -+ *((int*)& __m256_op0[4]) = 0xffff0000; -+ *((int*)& __m256_op0[3]) = 0xffff0000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffff0000; -+ *((int*)& __m256_op0[0]) = 0xffff0000; -+ *((int*)& __m256_op1[7]) = 0x007f8080; -+ *((int*)& __m256_op1[6]) = 0x007f007f; -+ *((int*)& __m256_op1[5]) = 0x007f8080; -+ *((int*)& __m256_op1[4]) = 0x007f007f; -+ *((int*)& __m256_op1[3]) = 0x007f8080; -+ *((int*)& __m256_op1[2]) = 0x007f007f; -+ *((int*)& __m256_op1[1]) = 0x007f8080; -+ *((int*)& __m256_op1[0]) = 0x007f007f; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01ef013f01e701f8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x35bb8d32b2625c00; -+ *((unsigned long*)& __m128i_result[1]) = 0x00008d3200000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xea); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x808080e280808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080636380806363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x808080e280808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080636380806363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080638063; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080638063; -+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0x00000001; -+ *((int*)& __m128_op0[1]) = 0xffffffee; -+ *((int*)& __m128_op0[0]) = 0x00000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x007f8080; -+ *((int*)& __m256_op0[6]) = 0x007f007f; -+ *((int*)& __m256_op0[5]) = 0x007f8080; -+ *((int*)& __m256_op0[4]) = 0x007f007f; -+ *((int*)& __m256_op0[3]) = 0x007f8080; -+ 
*((int*)& __m256_op0[2]) = 0x007f007f; -+ *((int*)& __m256_op0[1]) = 0x007f8080; -+ *((int*)& __m256_op0[0]) = 0x007f007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007f3f7f007f1f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007f3f7f007f1f; -+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffee00000004; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x80808080; -+ *((int*)& __m128_op0[0]) = 0x80638063; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrph_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x03ff000003ff03ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x03ff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x03ff000003ff03ff; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x03ff000000000000; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007f8080007f007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffee00000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3a3a3a3b3a3a3a3a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3a3a00003a3a0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000003a0000003a; -+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0x00000001; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000002; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0x00000001; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000002; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0x00000001; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000002; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0x00000001; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000002; -+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfc00ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000100fe000100fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfc00ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000100fe000100fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00fe00fe00fe00fe; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x1b1b1b1b1b1b1b1b; -+ *((unsigned long*)& __m256i_result[2]) = 0x1b1b1b1b1b1b1b1b; -+ *((unsigned long*)& __m256i_result[1]) = 0x1b1b1b1b1b1b1b1b; -+ *((unsigned long*)& __m256i_result[0]) = 0x1b1b1b1b1b1b1b1b; -+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; -+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00008d3200000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x09e8e9012fded7fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x479f64b03373df61; -+ *((unsigned long*)& __m128i_result[1]) = 0x00008d3200000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00040004; -+ *((int*)& __m128_op0[2]) = 0x00040004; -+ *((int*)& __m128_op0[1]) = 0x00040004; -+ *((int*)& __m128_op0[0]) = 0x00040004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x09e8e9012fded7fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x479f64b03373df61; -+ *((unsigned long*)& __m128i_result[1]) = 0x09e8e9012fded7fd; -+ *((unsigned long*)& __m128i_result[0]) = 
0x479f64b03373df61; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00040004; -+ *((int*)& __m128_op0[2]) = 0x00040004; -+ *((int*)& __m128_op0[1]) = 0x00040004; -+ *((int*)& __m128_op0[0]) = 0x00040004; -+ *((unsigned long*)& __m128d_result[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x37c0001000000000; -+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x37c0001000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x37c0001000000001; -+ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x77c0401040004000; -+ *((unsigned long*)& __m128i_result[0]) = 0x77c0401040004000; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0100000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100000000000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x36); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; -+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffff0400; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 
0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xffff0400; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000003a0000003a; -+ *((unsigned long*)& __m128i_result[1]) = 0x77c0404a4000403a; -+ *((unsigned long*)& __m128i_result[0]) = 0x77c03fd640003fc6; -+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; -+ *((unsigned long*)& __m128i_result[1]) = 0x75c0404a4200403a; -+ *((unsigned long*)& __m128i_result[0]) = 0x75c03fd642003fc6; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& 
__m128i_op0[1]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xb9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x77c0404a4000403a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x77c03fd640003fc6; -+ *((unsigned long*)& __m128i_result[1]) = 0x04c0044a0400043a; -+ *((unsigned long*)& __m128i_result[0]) = 0x04c004d6040004c6; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0003c853c843c844; -+ *((unsigned long*)& __m128i_result[0]) = 0x0003c853c843c844; -+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x09e8e9012fded7fd; -+ *((unsigned long*)& __m128i_op0[0]) = 0x479f64b03373df61; -+ *((unsigned long*)& __m128i_op1[1]) = 0x04c0044a0400043a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x04c004d6040004c6; -+ *((unsigned long*)& __m128i_result[1]) = 0x1d20db00ec967bec; -+ *((unsigned long*)& __m128i_result[0]) = 0x00890087009b0099; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xff00ffff00000001; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256d_op0[1]) = 0xff00ffff00000001; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x04c0044a0400043a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x04c004d6040004c6; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0004000400040004; -+ *((unsigned long*)& __m128i_result[1]) = 0x044a043a04d604c6; -+ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00fe00fe; -+ *((int*)& __m256_op0[6]) = 0x00fe00fe; -+ *((int*)& __m256_op0[5]) = 0x00fe00fe; -+ *((int*)& __m256_op0[4]) = 0x00fe00fe; -+ *((int*)& __m256_op0[3]) = 0x00fe00fe; -+ *((int*)& __m256_op0[2]) = 0x00fe00fe; -+ *((int*)& __m256_op0[1]) = 0x00fe00fe; -+ *((int*)& __m256_op0[0]) = 0x00fe00fe; -+ *((int*)& __m256_op1[7]) = 0x00fe00fe; -+ *((int*)& __m256_op1[6]) = 
0x00fe00fe; -+ *((int*)& __m256_op1[5]) = 0x00fe00fe; -+ *((int*)& __m256_op1[4]) = 0x00fe00fe; -+ *((int*)& __m256_op1[3]) = 0x00fe00fe; -+ *((int*)& __m256_op1[2]) = 0x00fe00fe; -+ *((int*)& __m256_op1[1]) = 0x00fe00fe; -+ *((int*)& __m256_op1[0]) = 0x00fe00fe; -+ *((int*)& __m256_result[7]) = 0x3f800000; -+ *((int*)& __m256_result[6]) = 0x3f800000; -+ *((int*)& __m256_result[5]) = 0x3f800000; -+ *((int*)& __m256_result[4]) = 0x3f800000; -+ *((int*)& __m256_result[3]) = 0x3f800000; -+ *((int*)& __m256_result[2]) = 0x3f800000; -+ *((int*)& __m256_result[1]) = 0x3f800000; -+ *((int*)& __m256_result[0]) = 0x3f800000; -+ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; -+ *((unsigned long*)& __m128i_result[1]) = 0x00f0008100800080; -+ *((unsigned long*)& __m128i_result[0]) = 0x00f0008000800080; -+ __m128i_out = __lsx_vsrari_h(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c844; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c844; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000003a0000003a; -+ *((unsigned long*)& __m128d_op1[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000003a0000003a; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x37c0001000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x37c0001000000008; -+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_h(__m128i_op0,3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000003a0000003a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000003a0000003a; -+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x007f0000007f0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x007f0000007f0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00f0008100800080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00f000807000009e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000ec382e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ec382d; -+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00f0008100800080; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00f000807000009e; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007f0000007f0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007f0000007f0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ int_op1 = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000691a6c843c8fc; -+ *((unsigned long*)& __m128i_result[0]) = 0x000691a6918691fc; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 
0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_result[1]) = 0xd6d7ded7ded7defe; -+ *((unsigned long*)& __m128i_result[0]) = 0xd6d7ded7ded7defe; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0xd6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000020000000200; -+ __m128i_out = __lsx_vfclass_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff000000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001c8520000c97d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001c8520001c87d; -+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000020000000200; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000003f8000004; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000003f8000004; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0003c853c843c87e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0003c853c843c87e; -+ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffff7; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffff7; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vsat_hu(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000007f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000f80007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000f8; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrecip_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x4a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ long_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0006000000040000; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000f80007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000f80007; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000006c80031; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000006c80031; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x3c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfd000000fb00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fe00f8000700; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0xfdfef9ff0efff900; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfdfef9ff0efff900; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfd000000fb00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fe00f8000700; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fb01; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000007000000; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fb01; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000007000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fb01; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000e0000; -+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128d_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfdfef9ff0efff900; -+ *((unsigned long*)& __m128d_result[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128d_result[0]) = 0x6363636363636363; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x6363636363636363; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000c000c000c000c; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x000000ff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x000000ff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00018d8e00018d8e; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff7; -+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x807fffff80800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_result[1]) = 0x8003000000020000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4040ffffc0400004; -+ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ 
ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8003000000020000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4040ffffc0400004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8003000000020000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x64); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff00007fff; -+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_result[2]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_result[1]) = 0x00007fff00007fff; -+ *((unsigned long*)& __m256d_result[0]) = 0x00007fff00007fff; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0086000000040000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0082000000000007; -+ *((unsigned long*)& __m128d_result[1]) = 0x4160c00000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x4110000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0001; -+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0086000000040000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0082000000000007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0086000000040000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0082000000000007; -+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffff0000; -+ *((int*)& __m256_op1[4]) = 0xffff0001; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffff0000; -+ *((int*)& __m256_op1[0]) = 0xffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000d0000000d; -+ __m128i_out = __lsx_vmini_w(__m128i_op0,13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000d0000000d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8006000000040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8002000000000007; -+ *((unsigned long*)& __m128i_result[1]) = 0x8006000000040000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8002000d00000014; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000006362ffff; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000600000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000636500006363; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x31b1777777777776; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x6eee282828282829; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000006362ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d0000000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x6363635663636356; -+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000006362ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d0000000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000dffff000d; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; -+ *((unsigned long*)& __m128i_op1[1]) = 0x31b1777777777776; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6eee282828282829; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000dffff000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x6b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000d0000000d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000dffff000d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000070007; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000007ffff; -+ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800c00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000fffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0002; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0002; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0002; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0002; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& 
__m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000068; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; -+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; -+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x0000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000070007; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000007ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000068; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000038003; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000040033; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0002000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0006000000040000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0002000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000068; -+ *((unsigned long*)& __m128d_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128d_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000068; -+ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) 
= 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_du_wu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0200000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0200000000000000; -+ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fefe7f00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fefe7f00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000038003; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000040033; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0020000000200000; -+ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000014; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; -+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000038003; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040033; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100080000; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000068; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000038003; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040033; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100080000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefff80000; -+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe0000fffe0002; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe0000fffe0002; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0000fffe0002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe0000fffe0002; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000fffeffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffeffff; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100080000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 
0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out 
= __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandi_b(__m128i_op0,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001e0000001e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001e0000001e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001e0000001e; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001e0000001e; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ 
-+ int_op0 = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
-+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0020000000200000;
-+ long_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0000001e;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0000001e;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0000001e;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0000001e;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffe0000fffe0012;
-+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffeffee;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe0000fffe0012;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000001ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000001ffff;
-+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
-+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0020000000200000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1010101010001000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1010101000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ unsigned_int_result = 0x0000000000000000;
-+ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xb);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
-+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0004000404040404;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0004000400000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004;
-+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xe);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000;
-+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1010101010001000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1010101000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1010101010001000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x101010100000000e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000f02e1f80f04;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000f02e1f80f04;
-+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000;
-+ __m128i_out = __lsx_vclo_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1010101010001000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x101010100000000e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0889088908810881;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0081010000810100;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0889088900810088;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0081010000810100;
-+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000100010001ffff;
-+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0889088908810881;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0081010000810100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0889088900810088;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0081010000810100;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0004448444844084;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000408080004080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0004448444804080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000408080004080;
-+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00007ff000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007ff000000000;
-+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000100010001ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00007ff000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00007ff000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x79);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x000000007ff00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_d(__m128i_op0,-10);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclo_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f8f7f8f800f800;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f780000ff80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f8f7f80000fff9;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f780000ff80;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf7f8f7f8f800f800;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00003f784000ff80;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf7f8f7f84000fff9;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00003f784000ff80;
-+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xf7f8f7f8f800f800;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x00003f784000ff80;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xf7f8f7f84000fff9;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x00003f784000ff80;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0xff800000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0xff800000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f8f7f8f800f800;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f784000ff80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f8f7f84000fff9;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f784000ff80;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f7f8f7f8;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000003f78;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f7f8f7f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003f78;
-+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
-+ __m128i_out = __lsx_vclz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xff80000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xff80000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x8060000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x8060000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003fffff00000000;
-+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf7f8f7f8f800f800;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f780000ff80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf7f8f7f80000fff9;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f780000ff80;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x23);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0xf7f8f7f8;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00003f78;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0xf7f8f7f8;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00003f78;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0xf7f8f7f8;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00003f78;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0xf7f8f7f8;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00003f78;
-+ *((int*)& __m256_op2[7]) = 0x00000000;
-+ *((int*)& __m256_op2[6]) = 0x00000000;
-+ *((int*)& __m256_op2[5]) = 0x00000000;
-+ *((int*)& __m256_op2[4]) = 0x00000000;
-+ *((int*)& __m256_op2[3]) = 0x00000000;
-+ *((int*)& __m256_op2[2]) = 0x00000000;
-+ *((int*)& __m256_op2[1]) = 0x00000000;
-+ *((int*)& __m256_op2[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x80000000;
-+ *((int*)& __m256_result[6]) = 0xff800000;
-+ *((int*)& __m256_result[5]) = 0x80000000;
-+ *((int*)& __m256_result[4]) = 0x80000000;
-+ *((int*)& __m256_result[3]) = 0x80000000;
-+ *((int*)& __m256_result[2]) = 0xff800000;
-+ *((int*)& __m256_result[1]) = 0x80000000;
-+ *((int*)& __m256_result[0]) = 0x80000000;
-+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00007ff000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00007ff000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffffffff;
-+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xaad5555500000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xaad5555500000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00001fff200007ef;
-+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffff000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff01;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff2;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff01;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1010101010001000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x101010100000000e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000fe;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff01feffff01ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000fe;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff01feffff01ff;
-+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8060000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8060000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x805f0000ffffffff;
-+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f7f8f7f8;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003f78;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f7f8f7f8;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003f78;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f7f8f7f8;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000003f78;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f7f8f7f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003f78;
-+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8060000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8060000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000010000;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x805f0000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80be0000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80be0000ffffffff;
-+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80be0000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80be0000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000100000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000100000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff00000000;
-+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x80000000;
-+ *((int*)& __m256_op1[6]) = 0xff800000;
-+ *((int*)& __m256_op1[5]) = 0x80000000;
-+ *((int*)& __m256_op1[4]) = 0x80000000;
-+ *((int*)& __m256_op1[3]) = 0x80000000;
-+ *((int*)& __m256_op1[2]) = 0xff800000;
-+ *((int*)& __m256_op1[1]) = 0x80000000;
-+ *((int*)& __m256_op1[0]) = 0x80000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000007fc00000400;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000040000000400;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000007fc00000400;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000040000000400;
-+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x35);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000f0f0003;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1003;
-+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80be0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000f0f0002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80be0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000f1002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80000000ff800000;
-+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xdb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000f0f0003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000f1003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000f0001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000011;
-+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000010000fffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000fffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000010000fffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000fffe;
-+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000003d0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000003d0000;
-+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000003f0000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffc3ffff003e;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000003f0000ffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffc3ffff003e;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000f07f0000ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffff177fffff0fc;
-+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000003dffc2;
-+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000020202020;
-+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000003f0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffc3ffff003e;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00001f80007fff80;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffe1ffff801f7f;
-+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x19);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_d(__m256i_op0,-15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000001084314a6;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000001084314a6;
-+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x800000007fff0001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80000000ff7f0001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x800000007fff0001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80000000ff7f0001;
-+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0008000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000003f0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffc3ffff003e;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2;
-+ *((unsigned long*)& __m128i_result[1]) = 0xc000000fc0003fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xbffffff0ffffc00f;
-+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001f0000001f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000003030000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001f0000001f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000030400;
-+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003d0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000003d0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000030000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000;
-+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00010000fffe0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00010000fffe0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00010000fffe0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00010000fffe0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x80000000;
-+ *((int*)& __m256_op0[6]) = 0x80000000;
-+ *((int*)& __m256_op0[5]) = 0x80000000;
-+ *((int*)& __m256_op0[4]) = 0xff800000;
-+ *((int*)& __m256_op0[3]) = 0x80000000;
-+ *((int*)& __m256_op0[2]) = 0x80000000;
-+ *((int*)& __m256_op0[1]) = 0x80000000;
-+ *((int*)& __m256_op0[0]) = 0xff800000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000;
-+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xc000000fc0003fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xbffffff0ffffc00f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000003f0000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffc3ffff003e;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00c0000000bfffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff;
-+ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x28);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x800000007fff0001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ff7f0001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x800000007fff0001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80000000ff7f0001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xbfffffffffff8000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xbfff800080000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xbfffffffffff8000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xbfff800080000000;
-+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000001;
-+ *((int*)& __m128_op0[2]) = 0x084314a6;
-+ *((int*)& __m128_op0[1]) = 0x00000001;
-+ *((int*)& __m128_op0[0]) = 0x084314a6;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
-+ __m128i_out = __lsx_vftintrp_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
-+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x800000007fff0001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ff7f0001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x800000007fff0001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80000000ff7f0001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x800000007fff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80000000ff7f0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x800000007fff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80000000ff7f0000;
-+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001d;
-+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6b6c4beb636443e3;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0507070805070708;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xbfffffff;
-+ *((int*)& __m256_op0[6]) = 0xffff8000;
-+ *((int*)& __m256_op0[5]) = 0xbfff8000;
-+ *((int*)& __m256_op0[4]) = 0x80000000;
-+ *((int*)& __m256_op0[3]) = 0xbfffffff;
-+ *((int*)& __m256_op0[2]) = 0xffff8000;
-+ *((int*)& __m256_op0[1]) = 0xbfff8000;
-+ *((int*)& __m256_op0[0]) = 0x80000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_op2[7]) = 0x00000000;
-+ *((int*)& __m256_op2[6]) = 0x00000000;
-+ *((int*)& __m256_op2[5]) = 0x00000000;
-+ *((int*)& __m256_op2[4]) = 0x00000000;
-+ *((int*)& __m256_op2[3]) = 0x00000000;
-+ *((int*)& __m256_op2[2]) = 0x00000000;
-+ *((int*)& __m256_op2[1]) = 0x00000000;
-+ *((int*)& __m256_op2[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x80000000;
-+ *((int*)& __m256_result[6]) = 0xffff8000;
-+ *((int*)& __m256_result[5]) = 0x80000000;
-+ *((int*)& __m256_result[4]) = 0x80000000;
-+ *((int*)& __m256_result[3]) = 0x80000000;
-+ *((int*)& __m256_result[2]) = 0xffff8000;
-+ *((int*)& __m256_result[1]) = 0x80000000;
-+ *((int*)& __m256_result[0]) = 0x80000000;
-+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0909090909090909;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0909090909090909;
-+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001d;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x63);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vmini_b(__m128i_op0,1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff800000;
-+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; -+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808ffff0808ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808ffff0808ffff; -+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc0000000c0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc000000080400000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc0000000c0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc000000080400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0002000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0002000000010000; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00020000; -+ *((int*)& __m256_op1[6]) = 0x00020000; -+ *((int*)& __m256_op1[5]) = 0x00020000; -+ *((int*)& __m256_op1[4]) = 0x00010000; -+ *((int*)& __m256_op1[3]) = 0x00020000; -+ *((int*)& __m256_op1[2]) = 0x00020000; -+ *((int*)& __m256_op1[1]) = 0x00020000; -+ *((int*)& __m256_op1[0]) = 0x00010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0002000000010000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0002000000010000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; -+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a6; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a6; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff59; -+ __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff800000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a6; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080800000808; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ff800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x80000000ff800000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000200000000; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffint_d_l(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0909090900000909; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0909090909090909; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff000200000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff000200000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff020000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff020000; -+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0002000000020000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0002000000020000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080000180800001; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000001; -+ *((int*)& __m256_op0[5]) = 0x001f00e0; -+ *((int*)& __m256_op0[4]) = 0x1f1f1fff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0x001f00e0; -+ *((int*)& __m256_op0[0]) = 0x1f1f1fff; -+ *((int*)& __m256_op1[7]) = 0x80000000; -+ *((int*)& __m256_op1[6]) = 0x80000000; -+ *((int*)& __m256_op1[5]) = 0x80000000; -+ *((int*)& __m256_op1[4]) = 0xff800000; -+ *((int*)& __m256_op1[3]) = 0x80000000; -+ *((int*)& __m256_op1[2]) = 0x80000000; -+ *((int*)& __m256_op1[1]) = 0x80000000; -+ *((int*)& __m256_op1[0]) = 0xff800000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000001; -+ *((int*)& __m256_result[5]) = 0x001f00e0; -+ *((int*)& __m256_result[4]) = 0xff800000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000001; -+ *((int*)& __m256_result[1]) = 0x001f00e0; -+ *((int*)& __m256_result[0]) = 0xff800000; -+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff59; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff59; -+ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x08080808; -+ *((int*)& __m128_op1[2]) = 0x08080808; -+ *((int*)& __m128_op1[1]) = 0x08080808; -+ *((int*)& __m128_op1[0]) = 0x08080808; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff000200000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff000200000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x001f00e0ff800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x001f00e0ff800000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff80000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000200000000; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808280808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808280808; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffefffffffeff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000100fffffeff; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xb8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808081; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x80808080ffffffff; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000080800000808; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff80800001; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff80800001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80800001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80800001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff7fff7ef; -+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080ffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffbff8888080a; -+ *((unsigned long*)& __m128i_result[0]) = 0x080803ff807ff7f9; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffbff8888080a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x080803ff807ff7f9; -+ *((unsigned long*)& __m128i_result[1]) = 0x010105017878f8f6; -+ *((unsigned long*)& __m128i_result[0]) = 0xf8f8fd0180810907; -+ __m128i_out = 
__lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1212121212121212; -+ *((unsigned long*)& __m256i_result[2]) = 0x1212121212121212; -+ *((unsigned long*)& __m256i_result[1]) = 0x1212121212121212; -+ *((unsigned long*)& __m256i_result[0]) = 0x1212121212121212; -+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff7fff7ef; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80808080ffffffff; -+ *((int*)& __m128_result[3]) = 0xffffe000; -+ *((int*)& __m128_result[2]) = 0xffffe000; -+ *((int*)& __m128_result[1]) = 0xc6ffe000; -+ *((int*)& __m128_result[0]) = 0xc6fde000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc6ffe000c6fde000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808081; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_result[0]) = 0x467f6080467d607f; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x010105017878f8f6; -+ *((unsigned long*)& __m128i_op2[0]) = 0xf8f8fd0180810907; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080800000808; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x467f6080467d607f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x467f6080467d607f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808081; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xe000e0006080b040; -+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x467f6080467d607f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffe000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6fde000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xe000e0006080b040; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffe000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c6fde000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffffffff; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xef); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe364525335ede000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x00000fff00000e36; -+ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x34); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffe000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6fde000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x39); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ 
*((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000040000000000; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000fff00000e36; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000fff0e36; -+ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000010000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00fe00ff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00000fff00000e36; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000fef01000e27ca; -+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00ffffffff; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00fe00fe00ff; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000e27ca; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001fde020000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001c4f940000; -+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000100000001000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0ed5ced7e51023e5; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001000e51023e5; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; -+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000e36400015253; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000035ed0001e000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000e36400015253; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000035ed0001e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1c6c80007fffffff; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1c6c80007fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0038d800ff000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00fffe00fffffe00; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000f27ca; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000ffef0010000; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; 
-+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000f27ca; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000ffef0010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000ff0000; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000ffef0010000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff0000ff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000000000; -+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000ffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000ffff; -+ *((int*)& __m256_op1[7]) = 0x00ff00ff; -+ *((int*)& __m256_op1[6]) = 0x00ff00ff; -+ *((int*)& __m256_op1[5]) = 0x00ff00ff; -+ *((int*)& __m256_op1[4]) = 0x00ff00ff; -+ *((int*)& __m256_op1[3]) = 0x00ff00ff; -+ *((int*)& __m256_op1[2]) = 0x00ff00ff; -+ *((int*)& __m256_op1[1]) = 0x00ff00ff; -+ *((int*)& __m256_op1[0]) = 0x00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001ffffffff; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x21); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x0000ff00; -+ *((int*)& __m128_op1[0]) = 0x00ff0000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0xffffffff; -+ *((int*)& __m128_result[0]) = 0xffffffff; -+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000e36400005253; -+ *((unsigned long*)& __m128i_op2[0]) = 0x000035ed0000e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000008000008080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080800000800080; -+ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000001ffffffff; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000001ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00ff00ff; -+ *((int*)& __m256_op0[6]) = 0x00ff00ff; -+ *((int*)& __m256_op0[5]) = 0x00ff00ff; -+ *((int*)& __m256_op0[4]) = 0x00ff00ff; -+ *((int*)& __m256_op0[3]) = 0x00ff00ff; -+ *((int*)& __m256_op0[2]) = 0x00ff00ff; -+ *((int*)& __m256_op0[1]) = 0x00ff00ff; -+ *((int*)& __m256_op0[0]) = 0x00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 
0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00ff00ff; -+ *((int*)& __m256_op1[6]) = 0x00ff00ff; -+ *((int*)& __m256_op1[5]) = 0x00ff00ff; -+ *((int*)& __m256_op1[4]) = 0x00ff00ff; -+ *((int*)& __m256_op1[3]) = 0x00ff00ff; -+ *((int*)& __m256_op1[2]) = 0x00ff00ff; -+ *((int*)& __m256_op1[1]) = 0x00ff00ff; -+ *((int*)& __m256_op1[0]) = 0x00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[1]) = 0x001bffe4ebff9400; -+ *((unsigned long*)& __m128i_result[0]) = 0xff80000000000000; -+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_d(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000e2e3ffffd1d3; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000008000e2e3; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000008000e2e3; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000008000e2e3; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080806362; -+ *((unsigned long*)& __m128i_result[0]) = 0x807f808000000000; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0038d800ff000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00fffe00fffffe00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0038f000ff000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00fffe00fffffe00; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0038d800ff000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00fffe00fffffe00; -+ *((unsigned long*)& __m128d_op2[1]) = 0x8000008000008080; -+ *((unsigned long*)& __m128d_op2[0]) = 0x8080800000800080; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000008000008080; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; -+ __m256i_out = __lasx_xvfclass_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0037ffc8d7ff2800; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ffffff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0038d800ff000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fffe00fffffe00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0137ffc9d7fe2801; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f00ff017fffff01; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00e4880080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0080810080808100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x41f0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x41f0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x41f0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x41f0000000000000; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) 
= 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000080806362; -+ *((unsigned long*)& __m128i_op1[0]) = 0x807f808000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80806362; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80806362; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00008080; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00008080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000ff801c9e; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000810000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x40eff02383e383e4; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; -+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x800000810000807f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x808080010080007f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x800000810000807f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x808080010080007f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000020000020; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x62); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff801c9e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000810000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x001d001d20000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x001d001d20000020; -+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff801c9e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000810000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x40eff02383e383e4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000007fff; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ff801c9e; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000810000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& 
__m128_op0[2]) = 0x0000ffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000020000020; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00ff00ff; -+ *((int*)& __m128_op0[0]) = 0x00ff00ff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffe0001; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffe0001; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffe0001; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffe0001; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0000ffff; -+ 
*((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x0000ffff; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x0000ffff; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x0000ffff; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_result[2]) = 0x0007ffff0007ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_result[0]) = 0x0007ffff0007ffff; -+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x2d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0007ffff0007ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0007ffff0007ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_result[2]) = 0x00071f1f00071f1f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_result[0]) = 0x00071f1f00071f1f; -+ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000007fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000020000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x2000002000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x2000002020000020; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000020006; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ 
*((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe000ffdf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe000ffdf; -+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x0); -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe000ffdf; -+ *((unsigned long*)& __m128i_result[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe000ffdf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffe000ffdf; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe000ffdf; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000200000002001; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000001fff0021; -+ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001200100012001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100200001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100200001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001200100012001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000000000007ffff; -+ 
*((unsigned long*)& __m256d_op0[0]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0e0d0c0b0e0d0c0b; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0e0d0c0b0e0d0c0b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0e0d0c0b0e0d0c0b; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0e0d0c0b0e0d0c0b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0a0908070a090807; -+ *((unsigned long*)& __m256i_result[2]) = 0x0a0908070a090807; -+ *((unsigned long*)& __m256i_result[1]) = 0x0a0908070a090807; -+ *((unsigned long*)& __m256i_result[0]) = 0x0a0908070a090807; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0400400204004002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000800; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000800; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0400400204004002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000800; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_result[2]) = 0x8e8e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_result[1]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_result[0]) = 0x8e8e8e8e8e8e8e8e; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x71); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0400400204004002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x6d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; -+ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x008e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x008e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0007ffff0007ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000700000007; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0007ffff0007ffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x008e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x008e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007000008e700000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007000008e700000; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001200100012001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001fff00001fff; -+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8e8e8e8e8f0e8e8e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8e8e8e8e8f0e8e8e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7171717171010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x8e8e8e8e8f00ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7171717171010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x8e8e8e8e8f00ffff; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8e8e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8e8e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00001fff00001fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000007ffc000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00001fff; -+ *((int*)& __m128_op0[2]) = 0x00001fff; -+ *((int*)& __m128_op0[1]) = 0x00000003; -+ *((int*)& __m128_op0[0]) = 0xfffffffc; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ 
*((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0xfffffffc; -+ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000e2e20000e2e2; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00011d1c00011d9c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000e2e20000e2e2; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00011d1c00011d9c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000e2e20000e2e2; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00011d1c00011d9c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000e2e20000e2e2; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00011d1c00011d9c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8e8e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7171717171717171; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8e8e8e8e8e8e8e8e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x01c601c6fe3afe3a; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x01c601c6fe3afe3a; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; 
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffe080f6efc100f7; -+ *((unsigned long*)& __m128i_op1[0]) = 0xefd32176ffe100f7; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000040000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000040000000000; -+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x71717171; -+ *((int*)& __m256_op1[6]) = 0x71010101; -+ *((int*)& __m256_op1[5]) = 0x8e8e8e8e; -+ *((int*)& __m256_op1[4]) = 0x8f00ffff; -+ *((int*)& __m256_op1[3]) = 0x71717171; -+ *((int*)& __m256_op1[2]) = 0x71010101; -+ *((int*)& __m256_op1[1]) = 0x8e8e8e8e; -+ *((int*)& __m256_op1[0]) = 0x8f00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7c007c0080008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7c007c0080008000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x71717171; -+ *((int*)& __m256_op0[6]) = 0x71010101; -+ *((int*)& __m256_op0[5]) = 0x8e8e8e8e; -+ *((int*)& __m256_op0[4]) = 0x8f00ffff; -+ *((int*)& __m256_op0[3]) = 0x71717171; -+ *((int*)& __m256_op0[2]) = 0x71010101; -+ *((int*)& __m256_op0[1]) = 0x8e8e8e8e; -+ *((int*)& __m256_op0[0]) = 0x8f00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01c601c6fe3afe3a; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01c601c6fe3afe3a; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007000008e700000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007000008e700000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7171717171010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8e8e8e8e8f00ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7171717171010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8e8e8e8e8f00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xe2e2e202ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xe2e2e202ffffffff; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000007ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001e0007ffff; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffe080f6efc100f7; -+ *((unsigned long*)& __m128i_op0[0]) = 0xefd32176ffe100f7; -+ int_result = 0x0000000000002176; -+ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x2); -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffc6ffc6; -+ *((int*)& __m256_op0[6]) = 0x003a003a; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffc6ffc6; -+ *((int*)& __m256_op0[2]) = 0x003a003a; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x71717171; -+ *((int*)& __m256_op1[6]) = 0x71010101; -+ *((int*)& __m256_op1[5]) = 0x8e8e8e8e; -+ *((int*)& __m256_op1[4]) = 0x8f00ffff; -+ *((int*)& __m256_op1[3]) = 0x71717171; -+ *((int*)& __m256_op1[2]) = 0x71010101; -+ *((int*)& __m256_op1[1]) = 0x8e8e8e8e; -+ *((int*)& __m256_op1[0]) = 0x8f00ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x2a29282726252423; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000005452505; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000004442403e4; -+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe2e2e202ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe2e2e202ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; -+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x26); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000008d00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000008d00000000; -+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00a8009800880078; -+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x07ffc000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffe080f6efc100f7; -+ *((unsigned long*)& __m128i_op0[0]) = 0xefd32176ffe100f7; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffe080f6efc100f7; -+ *((unsigned long*)& __m128i_op1[0]) = 0xefd32176ffe100f7; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x2c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff8000; -+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; 
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fff001fffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fff001fffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fffffff; -+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000465; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[0]) = 0x7575757575757575; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x75); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c0080008000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c0080008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7c00000880008000; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2a29282726252423; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2221201f1e1d1c1b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0007ffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffe37fe3001d001d; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffe37fe3001d001d; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffe200000020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffe200000020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[3]) = 0x7575ffff75757595; -+ *((unsigned long*)& __m256i_result[2]) = 0x7575ffff7575f575; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x7575ffff75757595; -+ *((unsigned long*)& __m256i_result[0]) = 0x7575ffff7575f575; -+ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7575ffff75757595; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7575ffff7575f575; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7575ffff75757595; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7575ffff7575f575; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3aadec4f6c7975b1; -+ *((unsigned long*)& __m256i_result[2]) = 0x3abac5447fffca89; -+ *((unsigned long*)& __m256i_result[1]) = 0x3aadec4f6c7975b1; -+ *((unsigned long*)& __m256i_result[0]) = 0x3abac5447fffca89; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00a600e000a600e0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01500178010000f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7575ffff75757595; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7575ffff7575f575; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7575ffff75757595; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7575ffff7575f575; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op2[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x001d001d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x05452505; -+ *((int*)& __m128_op0[1]) = 0x00000004; -+ *((int*)& __m128_op0[0]) = 0x442403e4; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[0]) = 0x7575757575757575; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; -+ __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0x22); 
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fff0; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fff0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fff0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000001d001d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000080008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3e00000440004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000080008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3e000004400f400f; -+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) 
= 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x05452505; -+ *((int*)& __m128_op1[1]) = 0x00000004; -+ *((int*)& __m128_op1[0]) = 0x442403e4; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000001000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x001d001d; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x001d001d; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0102; -+ *((unsigned long*)& __m256i_result[2]) = 0x007c000000810081; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0102; -+ *((unsigned long*)& __m256i_result[0]) = 0x007c000000810081; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fff0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000001d001d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000001d001d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000030003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000030003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7fe37fe3; -+ *((int*)& __m256_op0[6]) = 0x001d001d; -+ *((int*)& __m256_op0[5]) = 0x7fff7fff; -+ *((int*)& __m256_op0[4]) = 0x7fff0000; -+ *((int*)& __m256_op0[3]) = 0x7fe37fe3; -+ *((int*)& __m256_op0[2]) = 0x001d001d; -+ *((int*)& __m256_op0[1]) = 0x7fff7fff; -+ *((int*)& __m256_op0[0]) = 0x7fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0102; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x007c000000810081; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007c000000810081; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fe37fe3001d001d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x007c7fff00007fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00817fff00810000; -+ *((unsigned long*)& __m256i_result[1]) = 0x007c7fff00007fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00817fff00810000; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffe8ffffffe8; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffe8ffffffe8; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffe8ffffffe8; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffe8ffffffe8; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010109; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffe0; -+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3aadec4f6c7975b1; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3abac5447fffca89; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3aadec4f6c7975b1; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3abac5447fffca89; -+ *((unsigned long*)& __m256i_op1[3]) = 0x3aadec4f6c7975b1; -+ *((unsigned long*)& __m256i_op1[2]) = 0x3abac5447fffca89; -+ *((unsigned long*)& __m256i_op1[1]) = 0x3aadec4f6c7975b1; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3abac5447fffca89; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000755a0000d8f2; -+ *((unsigned long*)& __m256i_result[2]) = 0x000075740000fffe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000755a0000d8f2; -+ *((unsigned long*)& __m256i_result[0]) = 0x000075740000fffe; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007c000000810081; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007c000000810081; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005452505; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000004442403e4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000005452505; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000044525043c; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffe0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000005452505; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000044525043c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7c00000880008000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0100000001000100; -+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0100000001000100; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x12835580; -+ *((int*)& __m128_op0[0]) = 0xb880eb98; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0x55fcbad1; -+ *((int*)& __m128_result[0]) = 0x7fc00000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3abb3abbbabababa; -+ *((unsigned long*)& __m256i_result[2]) = 0x0080000000800080; -+ *((unsigned long*)& __m256i_result[1]) = 0x3abb3abbbabababa; -+ *((unsigned long*)& __m256i_result[0]) = 0x0080000000800080; -+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0100000001000100; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000040; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000040; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000180007fe8; -+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffe8ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffe8ffffffe8; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffe8ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffe8ffffffe8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005452505; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000004442403e4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x03fc03fc03fc03fc; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000b4a00008808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080800000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x42800000; -+ *((int*)& __m128_result[0]) = 0x42800000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 
0x000000017bfffff0; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x7c00000880008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x7bfffff0; -+ *((int*)& __m256_op0[5]) = 0x00000001; -+ *((int*)& __m256_op0[4]) = 0x80007fe8; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x7bfffff0; -+ *((int*)& __m256_op0[1]) = 0x00000001; -+ *((int*)& __m256_op0[0]) = 0x80007fe8; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0100000001000100; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0100000001000100; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000017bfffff0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000180007fe8; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff7bfffff1; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff80007fe9; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff7bfffff1; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff80007fe9; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000b4a00008808; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080800000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000b4a00008808; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4280000042800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xbd7fffffbd800000; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000c0007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000c0007; -+ *((unsigned long*)& __m256i_op1[3]) = 0x3abb3abbbabababa; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x3abb3abbbabababa; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800080; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000babababa; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000008c0087; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000babababa; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000008c0087; -+ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ long_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x00000b4a00008808;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080800000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000001d001d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001d;
-+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x007c7fff00007fff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00817fff00810000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x007c7fff00007fff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00817fff00810000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x7c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00001b4a00007808;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00001b4a00007808;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00001b4a00007808;
-+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffe4b5ffff87f8;
-+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001d0000001d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00001d0000001d00;
-+ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000fff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000fff;
-+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000;
-+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000;
-+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_result[2]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_result[0]) = 0x5252525252525252;
-+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x52);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff01;
-+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000;
-+ int_op1 = 0x000000007ff00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000;
-+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00001b4a00007808;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
-+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffff01;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ unsigned_long_int_result = 0x0000000000000000;
-+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x807c7fffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80817fff00810000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x807c7fffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80817fff00810000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x80767f0101050101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x80817f01007f0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x80767f0101050101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x80817f01007f0000;
-+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040;
-+ __m256i_out = __lasx_xvreplve0_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x5252525252525252;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff7bfffff1;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff80007fe9;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff7bfffff1;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff80007fe9;
-+ *((unsigned long*)& __m256i_result[3]) = 0x40ff40ff40ff40ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x407b40ff40ff40f1;
-+ *((unsigned long*)& __m256i_result[1]) = 0x40ff40ff40ff40ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x407b40ff40ff40f1;
-+ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40ff40ff40ff40ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x407b40ff40ff40f1;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40ff40ff40ff40ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x407b40ff40ff40f1;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x40ff40ff40ff40ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x407b40ff40ff40f1;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x40ff40ff40ff40ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x407b40ff40ff40f1;
-+ *((unsigned long*)& __m256i_result[3]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_result[1]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_result[0]) = 0xbf84bf00bf00bf0e;
-+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00060000;
-+ *((int*)& __m256_op0[6]) = 0x00040000;
-+ *((int*)& __m256_op0[5]) = 0x00020000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00060000;
-+ *((int*)& __m256_op0[2]) = 0x00040000;
-+ *((int*)& __m256_op0[1]) = 0x00020000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfsqrt_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
-+ __m128i_out = __lsx_vclz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00bf00bf00bf00bf;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00bf00bf00bf00bf;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00bf00bf00bf00bf;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00bf00bf00bf00bf;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xdf80df80df80df80;
-+ *((unsigned long*)& __m256i_result[2]) = 0xdfc2df80df80df87;
-+ *((unsigned long*)& __m256i_result[1]) = 0xdf80df80df80df80;
-+ *((unsigned long*)& __m256i_result[0]) = 0xdfc2df80df80df87;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
-+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000040404040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040;
-+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m128i_result[0]) = 0x9090909090909090;
-+ __m128i_out = __lsx_vxori_b(__m128i_op0,0x90);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd;
-+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000e0e0e0e0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff4;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,-12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000e0e0e0e0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000e0e0e0e0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000070007000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xe070e000e070e000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xe070e000e070e000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xe070e000e070e000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xe070e000e070e000;
-+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x74);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_d(__m128i_op0,-16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f0040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f0040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00003f003f003f00;
-+ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00003f003f003f00;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000;
-+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000070007000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x4040403fd03fd040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x4040403fd03fd040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffd03fd040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x4040403fd03fd040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001010000010100;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000010100;
-+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008;
-+ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000070007000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff8fff9000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff8fff9000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff8fff9000;
-+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000070007000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7000700070007000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0e0e0e0e0e0e0e0e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000e0e0e0e0e0e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f0040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f0040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x003f003f003f0040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x003f003f003f0040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f00004040;
-+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfefbff06fffa0004;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfefeff04fffd0004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x4008804080040110;
-+ *((unsigned long*)& __m128i_result[0]) = 0x4040801080200110;
-+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x41000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x41000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x41000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x41000000;
-+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfff0000ffff0000f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffff0000;
-+ *((int*)& __m128_op0[2]) = 0xffff0000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x40088040;
-+ *((int*)& __m128_op1[2]) = 0x80040110;
-+ *((int*)& __m128_op1[1]) = 0x40408010;
-+ *((int*)& __m128_op1[0]) = 0x80200110;
-+ *((int*)& __m128_result[3]) = 0xffff0000;
-+ *((int*)& __m128_result[2]) = 0xffff0000;
-+ *((int*)& __m128_result[1]) = 0x40408010;
-+ *((int*)& __m128_result[0]) = 0x80200110;
-+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0e0e0e0e0e0e0e0e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000e0e0e0e0e0e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff8fff9000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff8fff9000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff8fff9000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00010e0d00009e0e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00009000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000e0e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00009000;
-+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[3]) = 0xdf80ff20df80ff20;
-+ *((unsigned long*)& __m256i_result[2]) = 0xdfc2ff20df80ffa7;
-+ *((unsigned long*)& __m256i_result[1]) = 0xdf80ff20df80ff20;
-+ *((unsigned long*)& __m256i_result[0]) = 0xdfc2ff20df80ffa7;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f00004040;
-+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_b(__m128i_op0,11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xdf80df80df80df80;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xdfc2df80df80df87;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xdf80df80df80df80;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xdfc2df80df80df87;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80df80;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xdfc2df80df80df87;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xdf80df80df80df80;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xdfc2df80df80df87;
-+ *((unsigned long*)& __m256i_result[3]) = 0x2080208020802080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x203e208020802079;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2080208020802080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x203e208020802079;
-+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xdf80ff20df80ff20;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xdfc2ff20df80ffa7;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xdf80ff20df80ff20;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xdfc2ff20df80ffa7;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x80208020c22080a7;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x80208020c22080a7;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x80208020c22080a7;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x80208020c22080a7;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xdf80ff20df80ff20;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xdfc2ff20df80ffa7;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xdf80ff20df80ff20;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xdfc2ff20df80ffa7;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000840100000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xbffebffec0febfff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000840100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xbffebffec0febfff;
-+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffff0000;
-+ *((int*)& __m128_op0[2]) = 0xffff0000;
-+ *((int*)& __m128_op0[1]) = 0x40408010;
-+ *((int*)& __m128_op0[0]) = 0x80200110;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff4;
-+ int_op1 = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000840100000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xbffebffec0fe0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000840100000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xbffebffec0fe0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000420080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000420080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x5fff5fff607f0000;
-+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
-+ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000033;
-+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00004200;
-+ *((int*)& __m256_op0[6]) = 0x80000000;
-+ *((int*)& __m256_op0[5]) = 0x5fff5fff;
-+ *((int*)& __m256_op0[4]) = 0x607f0000;
-+ *((int*)& __m256_op0[3]) = 0x00004200;
-+ *((int*)& __m256_op0[2]) = 0x80000000;
-+ *((int*)& __m256_op0[1]) = 0x5fff5fff;
-+ *((int*)& __m256_op0[0]) = 0x607f0000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00004200;
-+ *((int*)& __m256_result[6]) = 0x80000000;
-+ *((int*)& __m256_result[5]) = 0x5fff5fff;
-+ *((int*)& __m256_result[4]) = 0x607f0000;
-+ *((int*)& __m256_result[3]) = 0x00004200;
-+ *((int*)& __m256_result[2]) = 0x80000000;
-+ *((int*)& __m256_result[1]) = 0x5fff5fff;
-+ *((int*)& __m256_result[0]) = 0x607f0000;
-+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffc0c0ffffbfc0;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffc0c0ffffbfc0;
-+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00003f3f0000400d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f0000400d;
-+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00010e0d00009e0e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00009000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000e0e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00009000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000033;
-+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x71);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f801fe000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3fc03fc000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f801fe000000000;
-+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe05f8102;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe05f8102;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe05f8102;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe05f8102;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000001607f0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000001607f0000;
-+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffbdff7fffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xa000a0009f80ffcc;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffbdff7fffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xa000a0009f80ffcc;
-+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000033;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000033;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000003;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f1fd800000004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000004;
-+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000033;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000033;
-+ *((int*)& __m256_op1[7]) = 0x00004200;
-+ *((int*)& __m256_op1[6]) = 0x80000000;
-+ *((int*)& __m256_op1[5]) = 0x5fff5fff;
-+ *((int*)& __m256_op1[4]) = 0x607f0000;
-+ *((int*)& __m256_op1[3]) = 0x00004200;
-+ *((int*)& __m256_op1[2]) = 0x80000000;
-+ *((int*)& __m256_op1[1]) = 0x5fff5fff;
-+ *((int*)& __m256_op1[0]) = 0x607f0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffe05f8102;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffe05f8102;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffc0c0ffffbfc0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffc0c0ffffbfc0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f0000400d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f0000400d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x44);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000900000009;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000900000009;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000900000009;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000900000009;
-+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f801fe000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f801fe000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000004;
-+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc03fc040;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffe00000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffe00000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f801fe000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3fc03fc000000003;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f1fd800000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f1f00003f3f0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3f3f00007f1f0000;
-+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f0000400d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f0000400d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3fc03fc000000003;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f1fd800000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc0411fe800000000;
-+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7f801fe000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004;
-+ 
*((unsigned long*)& __m128i_result[1]) = 0x7f801fdfffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000003; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x3f413f4100000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7f801fe000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000017fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000420080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000420080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000420080000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000420080000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x1000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000420080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000420080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x5fff5fff607f0000; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe05f8102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe05f8102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f801fe000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000003fc00ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001fe01fe00; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; -+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000003fc00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fe01fe00; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000a; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000a; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x10000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x10000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) 
= 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0xffff0000; -+ *((int*)& __m128_op1[2]) = 0xffff0000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000000a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3f413f4100000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f801fe000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc0411fe800000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x601fbfbeffffffff; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbf3efff536d5169b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ebdfffffddf3f40; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3f5ec0a0feefa0b0; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80df80; -+ *((unsigned long*)& __m256i_op1[2]) = 0xdfc2df80df80df87; -+ *((unsigned long*)& __m256i_op1[1]) = 0xdf80df80df80df80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xdfc2df80df80df87; -+ *((unsigned long*)& __m256i_result[3]) = 0xff21ff21ff21ff21; -+ *((unsigned long*)& __m256i_result[2]) = 0xff21ff21ff21ff21; -+ *((unsigned long*)& __m256i_result[1]) = 0xff21ff21ff21ff21; -+ *((unsigned 
long*)& __m256i_result[0]) = 0xff21ff21ff21ff21; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffe00000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffe00000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffff00000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffff00000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x64e464e464e464e4; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffeffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000064e264e6; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfffe00029f9f6061; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x601fbfbeffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; -+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xff21ff21ff21ff21; -+ *((unsigned long*)& __m256d_op0[2]) = 0xff21ff21ff21ff21; -+ *((unsigned long*)& __m256d_op0[1]) = 0xff21ff21ff21ff21; -+ *((unsigned long*)& __m256d_op0[0]) = 0xff21ff21ff21ff21; -+ *((unsigned long*)& __m256d_op1[3]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256d_op1[2]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256d_op1[1]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256d_op1[0]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; -+ *((unsigned long*)& 
__m256d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; -+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100010001000100; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x01000100; -+ *((int*)& __m128_op0[0]) = 0x01000100; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x64e464e4; -+ *((int*)& __m128_op1[0]) = 0x64e464e4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; -+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_result[2]) = 0xff21c241ff21c238; -+ *((unsigned long*)& __m256i_result[1]) = 0xff21c241ff21c241; -+ *((unsigned long*)& __m256i_result[0]) = 0xff21c241ff21c238; -+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100010001000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3f5ec0a0feefa0b0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff02d060; -+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff02d060; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff02d060; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff02d060; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff02d060; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff02d06000000000; -+ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_du_wu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3f5ec0a0feefa0b0; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffe00029fb060b1; -+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x3fff3fff3fff3fff; -+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x3fff3fff3fff3fff; -+ *((unsigned long*)& __m128d_op2[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128d_op2[0]) = 0xfff8000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128d_result[0]) = 0xfff8000000000000; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff8000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0100010001000100; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128d_op0[1]) = 0x00fe000100cf005f; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128d_result[1]) = 0x5f675e96e29a5a60; -+ *((unsigned long*)& __m128d_result[0]) = 0x7fff7fff7fff7fff; -+ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ int_op0 = 0x000000007ff00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00fe000100cf005f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5f675e96e29a5a60; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x5fff5e97e2ff5abf; -+ *((unsigned long*)& __m128i_result[0]) = 0xfefffefffefffeff; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x26); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x8); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5f675e96e29a5a60; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x965f5e9660e25a60; -+ *((unsigned long*)& __m128i_result[0]) = 0xff7f7fffff7f7fff; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x34); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00ff000100ff00fe; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00ff003000ff00a0; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfrint_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000011; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000011; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5f675e96e29a5a60; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x5e695e95e1cb5a01; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000088; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000088; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000088; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000088; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000009; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000009; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000009; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5e695e95e1cb5a01; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x5f675e96; -+ *((int*)& __m128_op0[2]) = 0xe29a5a60; -+ *((int*)& __m128_op0[1]) = 0x7fff7fff; -+ *((int*)& __m128_op0[0]) = 0x7fff7fff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x5e695e95; -+ *((int*)& __m128_op1[0]) = 0xe1cb5a01; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000005e695e95; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5e695e96c396b402; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000005e94; -+ *((unsigned long*)& __m128i_result[0]) = 0x00005e96ffffb402; -+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000005e94; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00005e96ffffb402; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000bd; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001fc0000fffeff; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ long_op0 = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; -+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000100ff00fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff003000ff00a0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff000100ff00fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff003000ff00a0; -+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000020006; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000020006; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000020006; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; -+ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000c; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff00; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00ff000100ff00fe; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00ff003000ff00a0; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000005e695e95; -+ *((unsigned long*)& __m128d_op1[0]) = 0x5e695e96c396b402; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2ea268972ea2966a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4026f4ffbc175bff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f017ffd; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f017ffd; -+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff81ff7d; -+ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff0001; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000005e695e95; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5e695e96c396b402; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_d(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff00000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000001; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& 
__m128_op0[3]) = 0xffff0001; -+ *((int*)& __m128_op0[2]) = 0xffff0001; -+ *((int*)& __m128_op0[1]) = 0xffff0001; -+ *((int*)& __m128_op0[0]) = 0xffff0001; -+ *((int*)& __m128_result[3]) = 0xffff0001; -+ *((int*)& __m128_result[2]) = 0xffff0001; -+ *((int*)& __m128_result[1]) = 0xffff0001; -+ *((int*)& __m128_result[0]) = 0xffff0001; -+ __m128_out = __lsx_vfrecip_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7f00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100010100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x03f0000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x03f0000000000000; -+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x34); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x5f675e96a8d359f5; -+ *((unsigned long*)& __m128d_op0[0]) = 0x46387f95d9a68001; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x5d7f5d007f6a007f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_result[3]) = 0xff81ff7dffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_result[1]) = 0xff81ff7dffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff81ff7d; -+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x28); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff81ff7dffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff81ff7dffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f017ffd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f017ffd; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000007; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xa4a4a4a4a4a4a4a4; -+ *((unsigned long*)& __m256i_result[2]) = 0xa4a4a4a4a4a4a4a4; -+ *((unsigned long*)& __m256i_result[1]) = 0xa4a4a4a4a4a4a4a4; -+ *((unsigned long*)& __m256i_result[0]) = 0xa4a4a4a4a4a4a4a4; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100010100; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ 
*((int*)& __m128_op1[3]) = 0x00fe0001; -+ *((int*)& __m128_op1[2]) = 0x00cf005f; -+ *((int*)& __m128_op1[1]) = 0x7fff7fff; -+ *((int*)& __m128_op1[0]) = 0x7fff7f00; -+ *((int*)& __m128_op2[3]) = 0x5d7f5d00; -+ *((int*)& __m128_op2[2]) = 0x7f6a007f; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x5d7f5d00; -+ *((int*)& __m128_result[2]) = 0x7f6a007f; -+ *((int*)& __m128_result[1]) = 0x7fff7fff; -+ *((int*)& __m128_result[0]) = 0x7fff7f00; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000012; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x5d7f5d007f6a007f; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7fff7fff7fff7f00; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000100ff00fe; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x00ff003000ff00a0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0008000f00080008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0008000a00080008; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0218ff78fc38fc38; -+ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000048; -+ *((unsigned long*)& __m256i_result[1]) = 0x0218ff78fc38fc38; -+ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000048; -+ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; -+ 
__m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x36); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01fe01fd01fd01fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x5d7f5d007f6a007f; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7f00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002ebf; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x31); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00002ebf; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0xffffffff; -+ *((int*)& __m128_result[0]) = 0xffffffff; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m128i_result[0]) = 0xc404040404040404; -+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0006ffff0004ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0002ffff0000ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0006ffff0004ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0002ffff0000ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000d; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000d; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000e; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007f008000ea007f; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000043efffff8000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000043efffff8000; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000404; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000040400000404; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x10fbe1e2e0000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x10fbe1e2e0000002; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256i_result[2]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256i_result[0]) = 0x21f8c3c4c0000005; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007f008000ea007f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf000000000000000; -+ __m128i_out = __lsx_vsat_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256i_op0[2]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256i_op0[0]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000043efffff8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000043efffff8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xbfffa004fffd8000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xbfffa004fffd8000; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256d_op1[2]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256d_op1[0]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op1[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000009; -+ *((unsigned long*)& __m256i_op1[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5d7f5d807fea807f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xbafebb00ffd500fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0218ff78fc38fc38; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfc00000000000048; -+ *((unsigned long*)& 
__m256d_op0[1]) = 0x0218ff78fc38fc38; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfc00000000000048; -+ *((unsigned long*)& __m256i_result[3]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000048; -+ *((unsigned long*)& __m256i_result[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000048; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x43ef8787; -+ *((int*)& __m256_op0[4]) = 0x8000ffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x43ef8787; -+ *((int*)& __m256_op0[0]) = 0x8000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000001df00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000001df00000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbafebb00ffd500fe; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xbafebb00ffd500fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256i_op1[2]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000c0000005; -+ *((unsigned long*)& __m256i_op1[0]) = 0x21f8c3c4c0000005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000048; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000048; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbfffa004fffd8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbfffa004fffd8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00003f0000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00002fffe8013fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00003f0000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00002fffe8013fff; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ 
*((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x0218ff78; -+ *((int*)& __m256_op1[6]) = 0xfc38fc38; -+ *((int*)& __m256_op1[5]) = 0xfc000000; -+ *((int*)& __m256_op1[4]) = 0x00000048; -+ *((int*)& __m256_op1[3]) = 0x0218ff78; -+ *((int*)& __m256_op1[2]) = 0xfc38fc38; -+ *((int*)& __m256_op1[1]) = 0xfc000000; -+ *((int*)& __m256_op1[0]) = 0x00000048; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0xfc38fc38; -+ *((int*)& __m256_result[5]) = 0xfc000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0xfc38fc38; -+ *((int*)& __m256_result[1]) = 0xfc000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef87878000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef87878000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffbfffa0ffffff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffbfffa0ffffff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbfffa004fffd8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbfffa004fffd8000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ffff0000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffff0000ff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00ff00ffff0000ff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00ff00ffff0000ff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_d(__m128i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffbfffa0ffffff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff0000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffbfffa0ffffff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff02000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff02000000; -+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000feccfecc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000feccfecc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xbafebb00; -+ *((int*)& __m128_op1[2]) = 0xffd500fe; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fefefe000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fefefe000000; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff02000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff02000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c; -+ *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c; -+ *((unsigned long*)& __m256i_result[0]) = 
0x7e00000000000000;
-+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7e00000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7e00000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000;
-+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff02000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff02000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x010c7fbc;
-+ *((int*)& __m256_op0[6]) = 0x7e1c7e1c;
-+ *((int*)& __m256_op0[5]) = 0xfe000000;
-+ *((int*)& __m256_op0[4]) = 0x00000024;
-+ *((int*)& __m256_op0[3]) = 0x010c7fbc;
-+ *((int*)& __m256_op0[2]) = 0x7e1c7e1c;
-+ *((int*)& __m256_op0[1]) = 0xfe000000;
-+ *((int*)& __m256_op0[0]) = 0x00000024;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
-+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff02000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff02000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff;
-+ __m256i_out = __lasx_xvslti_h(__m256i_op0,2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7e00000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7e00000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000;
-+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000001c9880;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000001c9880;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x000000007ff00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000007ff00000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000007ff00000;
-+ __m128i_out = __lsx_vreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0002001800ff0078;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x01f8007001f80070;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0002001800ff0078;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x01f8007001f80070;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0218ff78fc38fc38;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xfc00000000000048;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0218ff78fc38fc38;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xfc00000000000048;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00300b40fc001678;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000001f80;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00300b40fc001678;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000001f80;
-+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x4429146a7b4c88b2;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xe22b3595efa4aa0c;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000048;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000048;
-+ long_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000048;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
-+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000001c9880;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000001c9880;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000100000001;
-+ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010100000101;
-+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0100000100000001;
-+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffb;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffb;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,-5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffb;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffb;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000100000000fc;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000100000000fc;
-+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000100000000fc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000100000000fc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000;
-+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000100000000fc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000100000000fc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000;
-+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffe36780;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000010000000000;
-+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x000000ffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffff0000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002;
-+ __m128i_out = __lsx_vpcnt_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ int_result = 0x0000000000000002;
-+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002;
-+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090;
-+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x6f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x66);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0404050404040404;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0404050404040404;
-+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0404050404040404;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0404050404040404;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000004040504;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000004040504;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200010002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200010002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010004;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004;
-+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000101;
-+ *((int*)& __m128_op0[2]) = 0x00000101;
-+ *((int*)& __m128_op0[1]) = 0x00000101;
-+ *((int*)& __m128_op0[0]) = 0x00000101;
-+ *((int*)& __m128_op1[3]) = 0x00000002;
-+ *((int*)& __m128_op1[2]) = 0x00000002;
-+ *((int*)& __m128_op1[1]) = 0x00000002;
-+ *((int*)& __m128_op1[0]) = 0x00000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090;
-+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0100000100000001;
-+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002;
-+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffdfe01;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffdfe0200000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x4000000000000000;
-+ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090;
-+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000004040504;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004040504;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000008050501;
-+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x04040504;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x04040504;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f91;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f91;
-+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010100000101;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x08050501;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x08050501;
-+ *((int*)& __m256_op1[7]) = 0x90909090;
-+ *((int*)& __m256_op1[6]) = 0x90909090;
-+ *((int*)& __m256_op1[5]) = 0x90909090;
-+ *((int*)& __m256_op1[4]) = 0x90909090;
-+ *((int*)& __m256_op1[3]) = 0x90909090;
-+ *((int*)& __m256_op1[2]) = 0x90909090;
-+ *((int*)& __m256_op1[1]) = 0x90909090;
-+ *((int*)& __m256_op1[0]) = 0x90909090;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000008050501;
-+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x3d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002;
-+ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfrint_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x01000000;
-+ *((int*)& __m128_op0[0]) = 0x01000000;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_result[3]) = 0x6f6f6f6f6f6f6f6f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6f6f6f6f6f6f6f6f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x6f6f6f6f6f6f6f6f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x6f6f6f6f6f6f6f6f;
-+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010;
-+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff00ff00ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ffff00ff00ff00;
-+ __m128i_out = __lsx_vldi(-1686);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff00ff00ff00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ffff00ff00ff00;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00ffff00;
-+ *((int*)& __m128_op0[2]) = 0xff00ff00;
-+ *((int*)& __m128_op0[1]) = 0x00ffff00;
-+ *((int*)& __m128_op0[0]) = 0xff00ff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000;
-+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000008050501;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
-+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000;
-+ __m128i_out = __lsx_vneg_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xc848c848c848c848;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8848c848c848c848;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc848c848c848c848;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8848c848c848c848;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000f91;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000f91;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000;
-+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xff00ff00;
-+ *((int*)& __m128_op0[2]) = 0xff00ff00;
-+ *((int*)& __m128_op0[1]) = 0xff00ff00;
-+ *((int*)& __m128_op0[0]) = 0xff00ff00;
-+ *((int*)& __m128_result[3]) = 0x7fc00000;
-+ *((int*)& __m128_result[2]) = 0x7fc00000;
-+ *((int*)& __m128_result[1]) = 0x7fc00000;
-+ *((int*)& __m128_result[0]) = 0x7fc00000;
-+ __m128_out = __lsx_vfsqrt_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000022;
-+ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfe813f00fe813f00;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe813f00fe813f00;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe813f00fe813f00;
-+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe813f00fe813f00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000033;
-+ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfe813f00fe813f00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe813f00fe813f00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002;
-+ __m128i_out = __lsx_vclz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff800000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff800000000000;
-+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000f91;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000f91;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f90;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f90;
-+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ff000000ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00;
-+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) =
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; -+ *((int*)& __m128_result[3]) = 0xffe00000; -+ *((int*)& __m128_result[2]) = 0xffe00000; -+ *((int*)& __m128_result[1]) = 0xffe00000; -+ *((int*)& __m128_result[0]) = 0xffe00000; -+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfe813f00fe813f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe813f00fe813f00; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff017fffff017f; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff017fffff017f; -+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff9f017f1fa0b199; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1197817fd839ea3e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000033; -+ *((unsigned long*)& __m128i_result[1]) = 0xff011fb11181d8ea; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_result[3]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_result[2]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_result[1]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_result[0]) = 0x8800c800c800c801; -+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff70; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff70; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe000200fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe000200fe; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003e; -+ *((unsigned long*)& __m128i_result[0]) = 0xfefe02fefefe02fe; -+ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00fe00fe; -+ *((int*)& __m128_op0[2]) = 0x000200fe; -+ *((int*)& __m128_op0[1]) = 0x00fe00fe; -+ *((int*)& __m128_op0[0]) = 0x000200fe; -+ *((int*)& __m128_result[3]) = 0xc2fc0000; -+ *((int*)& __m128_result[2]) = 0xc3040000; -+ *((int*)& __m128_result[1]) = 0xc2fc0000; -+ *((int*)& __m128_result[0]) = 0xc3040000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000ff70; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000ff70; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& 
__m256_op1[4]) = 0x00000100; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000100; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff011fb11181d8ea; -+ *((unsigned long*)& __m128i_op0[0]) = 0x80ff800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe000200fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe000200fe; -+ *((unsigned long*)& __m128i_result[1]) = 0x00fd02fe00002302; -+ *((unsigned long*)& __m128i_result[0]) = 0x007ffd0200000000; -+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffcc9a989a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00003fff00003fff; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0xc2fc0000c3040000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc2fc0000c3040000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff37b737b8; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff77b737b8; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff37b737b8; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff77b737b8; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f90; -+ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xc2fc0000; -+ *((int*)& __m128_op1[2]) = 0xc3040000; -+ *((int*)& __m128_op1[1]) = 0xc2fc0000; -+ *((int*)& __m128_op1[0]) = 0xc3040000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc800c800c800c800; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8800c800c800c801; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00fe00fe; -+ *((int*)& __m128_op0[2]) = 0x000200fe; -+ *((int*)& __m128_op0[1]) = 0x00fe00fe; -+ *((int*)& __m128_op0[0]) = 0x000200fe; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffefffe00000000; -+ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000208000002080; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_result[1]) = 0x2080208020802080; -+ *((unsigned long*)& __m128i_result[0]) = 0x2080208020802080; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_result[0]) = 0xa352bfac9269e0aa; -+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128d_op0[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000208000002080; -+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x000007c8; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x000007c8; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffd70b00006ea9; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffa352ffff9269; -+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffd70b00006ea9; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffa352ffff9269; -+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffd70b00006ea9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffa352ffff9269; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffd70b00006ea9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffa352ffff9269; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff0001; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000007c8; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fe01fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fe01fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe0000ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe0000ff01; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000003fbfc04; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001fdfe02; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000003fbfc04; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fdfe02; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fd; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000001fe01fe; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000ff0100; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000001fe01fe; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000ff0100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xbba0c07b51230d5c; -+ *((unsigned long*)& __m128d_op0[0]) = 0xa15f3f9e8763c2b9; -+ *((unsigned long*)& __m128d_op1[1]) = 0xbba0c07b51230d5c; -+ *((unsigned long*)& __m128d_op1[0]) = 0xa15f3f9e8763c2b9; -+ *((int*)& __m128_result[3]) = 0x9d0603db; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x9d0603db; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa352bfac9269e0aa; -+ *((int*)& __m128_result[3]) = 0xce23d33d; -+ *((int*)& __m128_result[2]) = 0x4edd53ea; -+ *((int*)& __m128_result[1]) = 0xceb95a81; -+ *((int*)& __m128_result[0]) = 0xcedb2c3f; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128i_result[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128i_result[0]) = 0xa352bfac9269e0aa; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000007c8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001fe01fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fe01fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c8; -+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fd; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000004000000fd; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000004000000fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000003fbfc04; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001fdfe02; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000003fbfc04; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001fdfe02; 
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xce23d33e43d9736c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x63b2ac27aa076aeb; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x31dc2cc1bc268c93; -+ *((unsigned long*)& __m128i_result[0]) = 0x9c4d53d855f89514; -+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_d(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xce23d33e43d9736c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x63b2ac27aa076aeb; -+ *((unsigned long*)& __m128i_result[1]) = 0x63b2ac27aa076aeb; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xc8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x31dc2cc1bc268c93; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c4d53d855f89514; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000020006; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffff0000; -+ *((int*)& __m128_op0[0]) = 0x0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; -+ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x3e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x63b2ac27aa076aeb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000063b2ac27; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffaa076aeb; -+ __m128i_out = __lsx_vexth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; -+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000063b2ac27; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffaa076aeb; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; -+ 
*((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000063b2ac27; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffaa076aeb; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff63b3584e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000fffdaa07d5d6; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000600; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ac26; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000003000000d613; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c0000000; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000fd; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000062d4; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000062d4; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000006338; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xd70b30c96ea9f4e8; -+ *((unsigned long*)& __m128d_op0[0]) = 0xa352bfac9269e0aa; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000003000000d613; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c0000000; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000003000000d613; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000003000000d612; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000bfffffff; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c9; -+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0fffffff00001; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff0fffffff09515; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x000100010000ffda; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000016; -+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000003000000d612; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000bfffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000500000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0600000100000001; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; -+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff0100ff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffeffff; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000500000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000060000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000fffe00006aea; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffbfffefffc9510; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffbfffefffc9510; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; -+ *((unsigned long*)& __m128i_op2[1]) = 0xfffbfffefffc9510; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfffbfffefffc9510; -+ *((unsigned long*)& __m128i_result[1]) = 0x29c251319c3a5c90; -+ *((unsigned long*)& __m128i_result[0]) = 0x62fb9272df7da6b0; -+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 
0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff0100ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0607060700000807; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0707f8f803e8157e; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x31); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x000000f0; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x000000f0; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ 
*((int*)& __m256_op1[2]) = 0x000000f0; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x000000f0; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x01010101010101c9; -+ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff88; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000f0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe98; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; -+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffe98; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe98; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x06070607; -+ *((int*)& __m128_op0[2]) = 0x00000807; -+ *((int*)& __m128_op0[1]) = 0x0707f8f8; -+ *((int*)& __m128_op0[0]) = 0x03e8157e; -+ *((int*)& __m128_result[3]) = 0x5c303f97; -+ *((int*)& __m128_result[2]) = 0x61ff9049; -+ *((int*)& __m128_result[1]) = 0x5bafa1dd; -+ *((int*)& __m128_result[0]) = 0x5d3e1e1d; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffff53d9; -+ *((int*)& __m128_op0[1]) = 0xffff0001; -+ *((int*)& __m128_op0[0]) = 0xffff9515; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9514; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xbfbfbfbfbfbfbfbf; -+ *((unsigned long*)& __m128i_result[0]) = 0xbfbfbfbfbfbfbfbf; -+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_result[0]) = 0xff000001ffff9515; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x67); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffe98; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; -+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffa9ed; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8000017fffca8b; -+ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9514; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ac26; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000001; -+ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; -+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; -+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000000; -+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x2c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01010101010101c9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000781; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[3]) = 0x0008080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003c; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x45); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000027; -+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0x0000ac26; -+ *((int*)& __m128_op0[1]) = 0x00ff0000; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0c0b0a090b0a0908; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0a09080709080706; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; -+ *((unsigned long*)& __m128i_result[1]) = 0x0c0b0a090b0a0908; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a09080709080706; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff0000ac26; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0c0b0a09; -+ *((int*)& __m128_op0[2]) = 0x0b0a0908; -+ *((int*)& __m128_op0[1]) = 0x0a090807; -+ *((int*)& __m128_op0[0]) = 0x09080706; -+ *((int*)& __m128_op1[3]) = 0x0c0b0a09; -+ *((int*)& __m128_op1[2]) = 0x0b0a0908; -+ *((int*)& __m128_op1[1]) = 0x0a090807; -+ *((int*)& __m128_op1[0]) = 0x09080706; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000781; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0c0b0a09; -+ *((int*)& __m128_op0[2]) = 0x0b0a0908; -+ *((int*)& __m128_op0[1]) = 0x0a090807; -+ *((int*)& __m128_op0[0]) = 0x09080706; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; -+ __m128i_out = __lsx_vfclass_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe00006aea; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffce; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000010002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff960015; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010002; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff960015; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_w_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x000000ff; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x000000ff; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000007fffa9ed; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7f8000017fffca8b; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00010002; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xff960015; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffd60015; -+ __m128i_out = __lsx_vfrintrm_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffd60015; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x80808080806b000b; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff0000ac26; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80005613; -+ *((unsigned long*)& __m128i_result[0]) = 0x007f800000000000; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000;
-+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000ffce;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80005613;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x007f800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000807f80808000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x80006b0000000b00;
-+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000807f80808000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x80006b0000000b00;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000807f00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x80006b0080808080;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000807f00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80006b0080808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff7fff;
-+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000078100000064;
-+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000002b0995850;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80005613;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x007f800000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffff80005613;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x007f800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff00011cf0c569;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc0000002b0995850;
-+ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000781;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064;
-+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000064;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000781;
-+ *((int*)& __m256_op0[0]) = 0x00000064;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x0000ffce;
-+ *((int*)& __m128_op1[3]) = 0xffff0001;
-+ *((int*)& __m128_op1[2]) = 0x1cf0c569;
-+ *((int*)& __m128_op1[1]) = 0xc0000002;
-+ *((int*)& __m128_op1[0]) = 0xb0995850;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ long_op1 = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000781;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000064;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x007f800000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80005613;
-+ *((unsigned long*)& __m128i_result[0]) = 0x81000080806b000b;
-+ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000807f00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x80006b0080808080;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffe30f3a97;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffcfe72830;
-+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x81000080806b000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff9cf0d77b;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc1000082b0fb585b;
-+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000;
-+ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010;
-+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080808000;
-+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080808000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080808000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x8b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffff00011cf0c569;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xc0000002b0995850;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x22);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff9cf0d77b;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc1000082b0fb585b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmini_b(__m256i_op0,14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x1f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080808000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000080808000;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000032;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000003c000000032;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000004e;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000032;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000003c000000032;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[2]) = 0x001000100010000a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m256i_result[0]) = 0x001000060010000a;
-+ __m256i_out = __lasx_xvclz_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
-+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080808000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0080008000800080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0080006b0000000b;
-+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000004e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffefffe;
-+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000004e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffbfffb;
-+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xf4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
-+ __m128i_out = __lsx_vpcnt_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001;
-+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vexth_q_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00800080;
-+ *((int*)& __m128_op0[2]) = 0x00800080;
-+ *((int*)& __m128_op0[1]) = 0x0080006b;
-+ *((int*)& __m128_op0[0]) = 0x0000000b;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x80808080;
-+ *((int*)& __m128_op1[0]) = 0x806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x2f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x80808080806b000b;
-+ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000001;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000001;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000001;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000001;
-+ *((int*)& __m256_op1[7]) = 0x7ff00000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x7ff00000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x7ff00000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x7ff00000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslei_d(__m256i_op0,1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x7ff00000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x7ff00000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x7ff00000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x7ff00000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0080006b0000000b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000001ff1745745c;
-+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x80808080806b000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000c0c0c000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0080008000800080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0080006b0000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b;
-+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op2[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op2[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op2[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_op2[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
-+ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_result[2]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_result[1]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_result[0]) = 0xa1a1a1a15e5e5e5e;
-+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f;
-+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000001;
-+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x13121110;
-+ *((int*)& __m128_op0[2]) = 0x1211100f;
-+ *((int*)& __m128_op0[1]) = 0x11100f0e;
-+ *((int*)& __m128_op0[0]) = 0x100f0e0d;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x131211101211100f;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x11100f0e100f0e0d;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xa1a1a1a1a1a15e5e;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xa1a1a1a1a1a15e5e;
-+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0xc0c0c000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00800080;
-+ *((int*)& __m128_op1[2]) = 0x00800080;
-+ *((int*)& __m128_op1[1]) = 0x0080006b;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00800080;
-+ *((int*)& __m128_result[2]) = 0xc0c0c000;
-+ *((int*)& __m128_result[1]) = 0x0080006b;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000;
-+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00fe01e000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00fe01e000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a1a1a15e5e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a1a1a15e5e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000;
-+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0080006b00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff80000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff80000;
-+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000400000004000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00004000ffffffff;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0080008000800080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0080006b00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x001b19b1c9c6da5a;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x001b19b1c9c6da5a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0080008000800080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x008003496dea0c61;
-+ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x131211101211100f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x11100f0e100f0e0d;
-+ *((unsigned long*)& __m128i_result[1]) = 0x13101213120f1112;
-+ *((unsigned long*)& __m128i_result[0]) = 0x110e1011100d0f10;
-+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xcb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
-+ *((unsigned long*)& __m256i_result[3]) = 0xa1bfa1bfa1bfa1bf;
-+ *((unsigned long*)& __m256i_result[2]) = 0xa1bfa1bf5e7c5e7c;
-+ *((unsigned long*)& __m256i_result[1]) = 0xa1bfa1bfa1bfa1bf;
-+ *((unsigned long*)& __m256i_result[0]) = 0xa1bfa1bf5e7c5e7c;
-+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ unsigned_long_int_result = 0x0000000000000000;
-+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)&
__m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x007d003e007d003e; -+ *((unsigned long*)& __m256i_result[2]) = 0x007d003effa80010; -+ *((unsigned long*)& __m256i_result[1]) = 0x007d003e007d003e; -+ *((unsigned long*)& __m256i_result[0]) = 0x007d003effa80010; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x008003496dea0c61; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00004000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xf7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x007d003e007d003e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007d003effa80010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x007d003e007d003e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007d003effa80010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0080008000800080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = 
__lasx_xvfadd_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0080008000800080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff457db03f; -+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff457db03f; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000457d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000b03f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000457d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000b03f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x3b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x31); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0008b03e457db03e; -+ *((unsigned long*)& __m256i_result[2]) = 0x457db03e45a87310; -+ *((unsigned long*)& __m256i_result[1]) = 0x0008b03e457db03e; -+ *((unsigned long*)& __m256i_result[0]) = 0x457db03e45a87310; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; -+ __m256i_out = 
__lasx_xvslli_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffba8300004fc2; -+ 
__m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000457d607d; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff457d607f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000457d607d; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff457d607f; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457d607d; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457d607f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457d607d; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457d607f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffa2beb040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffa2beb040; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457d607d; -+ *((unsigned long*)& __m256i_op0[2]) = 
0xffffffff457d607f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457d607d; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457d607f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffa2beb040; -+ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00020001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00020001; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000f000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000f000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256d_result[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256d_result[0]) = 0x7fffffffa2beb040; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000f000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff1000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000020002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffba8300004fc2; -+ *((unsigned long*)& 
__m256i_result[1]) = 0xffffba8300004fc2; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffba8300004fc2; -+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfff1000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfff1000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ 
*((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1b1a191817161514; -+ *((unsigned long*)& __m256i_op1[1]) = 0x232221201f1e1d1c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1b1a191817161514; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256d_result[3]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256d_result[2]) = 0xc1d75053f0000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256d_result[0]) = 0xc1d75053f0000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000100010000fffb; -+ *((unsigned long*)& __m128i_result[0]) = 0x000100010000fffb; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030304; -+ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128d_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc1d75053f0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc1d75053f0000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x004100df00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00c000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x004100df00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00c000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000022beb03f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffa2beb040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000022be22be; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fffa2bea2be; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000022be22be; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fffa2bea2be; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x004100df00ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00c000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x004100df00ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00c000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc1d75053f0000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc1d75053f0000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x41dfffffffc00000; -+ *((unsigned long*)& __m256i_result[2]) = 0xc1d75053f0000000; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc1d75053f0000000;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000;
-+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00010001;
-+ *((int*)& __m128_op0[2]) = 0x00010001;
-+ *((int*)& __m128_op0[1]) = 0x00010001;
-+ *((int*)& __m128_op0[0]) = 0x00010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022be22be;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fffa2bea2be;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022be22be;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fffa2bea2be;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffe1ffe1229f229f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fe07fe0a29fa29f;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffe1ffe1229f229f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fe07fe0a29fa29f;
-+ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x1f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000000;
-+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffa30000165a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000104000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffa30000165a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000104000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xc1d75053f0000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xc1d75053f0000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xbe21000100000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000505300000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xbe21000100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000505300000000;
-+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffa30000165a;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000104000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffa30000165a;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000104000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000022beb03f;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffa2beb040;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000022beb03f;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffa2beb040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc1d75053f0000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc1d75053f0000000;
-+ *((int*)& __m256_result[7]) = 0xc03ae000;
-+ *((int*)& __m256_result[6]) = 0x420a6000;
-+ *((int*)& __m256_result[5]) = 0xc6000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0xc03ae000;
-+ *((int*)& __m256_result[2]) = 0x420a6000;
-+ *((int*)& __m256_result[1]) = 0xc6000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xbe21000100000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000505300000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xbe21000100000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000505300000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xc1d75053f0000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x41dfffffffc00000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xc1d75053f0000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00005053000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00005053000000ff;
-+ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffa30000165a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000104000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffa30000165a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000104000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000165a;
-+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x40b2bf4d;
-+ *((int*)& __m256_op0[6]) = 0x30313031;
-+ *((int*)& __m256_op0[5]) = 0x50005000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x40b2bf4d;
-+ *((int*)& __m256_op0[2]) = 0x30313031;
-+ *((int*)& __m256_op0[1]) = 0x50005000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x22be22be;
-+ *((int*)& __m256_op1[5]) = 0x7fff7fff;
-+ *((int*)& __m256_op1[4]) = 0xa2bea2be;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x22be22be;
-+ *((int*)& __m256_op1[1]) = 0x7fff7fff;
-+ *((int*)& __m256_op1[0]) = 0xa2bea2be;
-+ *((int*)& __m256_result[7]) = 0x40b2bf4d;
-+ *((int*)& __m256_result[6]) = 0x30313031;
-+ *((int*)& __m256_result[5]) = 0x7fff7fff;
-+ *((int*)& __m256_result[4]) = 0xa2bea2be;
-+ *((int*)& __m256_result[3]) = 0x40b2bf4d;
-+ *((int*)& __m256_result[2]) = 0x30313031;
-+ *((int*)& __m256_result[1]) = 0x7fff7fff;
-+ *((int*)& __m256_result[0]) = 0xa2bea2be;
-+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xfffefffe;
-+ *((int*)& __m128_op0[2]) = 0xfffeffff;
-+ *((int*)& __m128_op0[1]) = 0xfffefffe;
-+ *((int*)& __m128_op0[0]) = 0xfffeffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffefffefffeffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffefffeffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00005053000000ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00005053000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00010001;
-+ *((int*)& __m128_op1[2]) = 0x00010001;
-+ *((int*)& __m128_op1[1]) = 0x00010001;
-+ *((int*)& __m128_op1[0]) = 0x00010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x3f);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffe0000000;
-+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x40b2bf4d30313031;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fffa2bea2be;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x40b2bf4d30313031;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fffa2bea2be;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x40b240b330313031;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff5d425d42;
-+ *((unsigned long*)& __m256i_result[1]) = 0x40b240b330313031;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff5d425d42;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x7f800000;
-+ *((int*)& __m256_result[6]) = 0x7f800000;
-+ *((int*)& __m256_result[5]) = 0x7f800000;
-+ *((int*)& __m256_result[4]) = 0x7f800000;
-+ *((int*)& __m256_result[3]) = 0x7f800000;
-+ *((int*)& __m256_result[2]) = 0x7f800000;
-+ *((int*)& __m256_result[1]) = 0x7f800000;
-+ *((int*)& __m256_result[0]) = 0x7f800000;
-+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x40b240b330313031;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff5d425d42;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x40b240b330313031;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff5d425d42;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000040b200002fd4;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff0000739c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000040b200002fd4;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff0000739c;
-+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0001000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000400000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc600000000000000;
-+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff;
-+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000040b200002fd4;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00007fff0000739c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000040b200002fd4;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff0000739c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000739c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff;
-+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000013ec13e;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xc03fc03fc0ff00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000040b200002fd4;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00007fff0000739c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000040b200002fd4;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff0000739c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ffff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ffff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000;
-+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe0000000;
-+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ffff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ffff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffc03fffffffc0;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffc00000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffc03fffffffc0;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffc00000000000;
-+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x17);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000004;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xe0000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
-+ __m128d_out = __lsx_vfrecip_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x013ec13e;
-+ *((int*)& __m128_op0[1]) = 0xc03fc03f;
-+ *((int*)& __m128_op0[0]) = 0xc0ff00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffdfffffff8;
-+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000008000165a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000008000165a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff00017fff005d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffe9a6;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff00017fff005d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffe9a6;
-+ __m256i_out = __lasx_xvneg_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000011f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000011f;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000192540;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000192540;
-+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ long_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1818ffff1818ffa3;
-+ *((unsigned long*)& __m256i_result[2]) = 0x181818181818185a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1818ffff1818ffa3;
-+ *((unsigned long*)& __m256i_result[0]) = 0x181818181818185a;
-+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffdfffffff8;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7ffffffc;
-+ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffc03b1fc5e050;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x6a9e3fa2603a2000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffc03b1fc5e050;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x6a9e3fa2603a2000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
-+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636389038903;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636389038903;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000001ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000001ffff;
-+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x10);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fe70000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fe70000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fe70000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fe70000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f80007fa3;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f670000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f80007fa3;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f670000;
-+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc03fffffffc0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffc00000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc03fffffffc0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffc00000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc600000000000000;
-+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fe70000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fe70000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
-+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffff;
-+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff;
-+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000008000165a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000008000165a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0009000900090009;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000900090009165a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0009000900090009;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000900090009165a;
-+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f80007fa3;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f670000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f80007fa3;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f670000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffe7fffffff;
-+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7ffffffb;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000ff00ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe7fffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000001fd02;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffe1fffffff;
-+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffc03b1fc5e050;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6a9e3fa2603a2000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffc03b1fc5e050;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6a9e3fa2603a2000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffc03fffffffc0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffc00000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffc03fffffffc0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffc00000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_result[2]) = 0x019d00a2003a0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_result[0]) = 0x019d00a2003a0000;
-+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe1fffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffb;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000080008;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x000000ff;
-+ *((int*)& __m128_op1[0]) = 0xfe01fd02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080008;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000040002;
-+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x019d00a2003a0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x019d00a2003a0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_result[2]) = 0x019d00a20039fff9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_result[0]) = 0x019d00a2003a0000;
-+ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7ffffffb;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000040002;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080008;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xc1f0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xc1f0000000000000;
-+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x000000ff;
-+ *((int*)& __m128_op0[0]) = 0xfe01fd02;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x0001fe01;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff02ff80fede;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff02ff80fede;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000fffe00800022;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffe00800022;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000001fe01;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001fe01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0f0f0f0f00000000;
-+ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
-+ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009;
-+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x6a9e3f9a;
-+ *((int*)& __m256_op0[4]) = 0x603a2001;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x6a9e3f9a;
-+ *((int*)& __m256_op0[0]) = 0x603a2001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
-+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000900000009;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffff7fffffff7f;
-+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffe00800022;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffe00800022;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00007fff00400011;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000008001ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00007fff00400011;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
-+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000307;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00ff80ff00ff80ff;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000900000009;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
-+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x01fe007a;
-+ *((int*)& __m256_op0[6]) = 0x01c40110;
-+ *((int*)& __m256_op0[5]) = 0x019d00a2;
-+ *((int*)& __m256_op0[4]) = 0x0039fff9;
-+ *((int*)& __m256_op0[3]) = 0x01fe007a;
-+ *((int*)& __m256_op0[2]) = 0x01c40110;
-+ *((int*)& __m256_op0[1]) = 0x019d00a2;
-+ *((int*)& __m256_op0[0]) = 0x003a0000;
-+ *((int*)& __m256_op1[7]) = 0x0000fffe;
-+ *((int*)& __m256_op1[6]) = 0x00800022;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0xffffffff;
-+ *((int*)& __m256_op1[3]) = 0x0000fffe;
-+ *((int*)& __m256_op1[2]) = 0x00800022;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffff7fffffff7f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0f0f0f0f00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0f07697100000000;
-+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff80ff00ff80ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_b(__m128i_op0,-9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ffffff81fe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffff00ffff7e01;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000fe86;
-+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0f0f0f0f00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffff00;
-+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000fffe00800022;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000fffe00800022;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
-+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ffffff81fe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffff7e01;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x000000fffe01fd02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fffe86f901;
-+ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000100;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000100;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000100;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000001000000ff;
-+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
-+ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x000000ff;
-+ *((int*)& __m128_op0[0]) = 0xfe01fd02;
-+ *((int*)& __m128_op1[3]) = 0x00000001;
-+ *((int*)& __m128_op1[2]) = 0x00000100;
-+ *((int*)& __m128_op1[1]) = 0x00000001;
-+ *((int*)& __m128_op1[0]) = 0x00000100;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vneg_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000100;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001000000ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff80ff00ff80ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000;
-+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000007ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x0003ffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000007ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x019d00a20039fff9;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x019d00a2003a0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x01fe007a01c40110;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x01fe007a01c40110;
-+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000077fff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8d8d72728d8d7272;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8d8d72728d8d8d8d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8d8d72728d8d7272;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8d8d72728d8d8d8d;
-+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x8d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8d8d72728d8d7272;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8d8d72728d8d8d8d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8d8d72728d8d7272;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8d8d72728d8d8d8d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8d8d72728d8d7272;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8d8d72728d8d8d8d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8d8d72728d8d7272;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8d8d72728d8d8d8d;
-+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)&
__m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8d8d72728d8d7272; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8d8d72728d8d8d8d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8d8d72728d8d7272; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8d8d72728d8d8d8d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0003ffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0f07697100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000076971000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7f800000; -+ *((int*)& __m128_op0[2]) = 0x7f800000; -+ *((int*)& __m128_op0[1]) = 0x7f800000; -+ *((int*)& __m128_op0[0]) = 0x7f800000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x21); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000040000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000040000000; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000040000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000040000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x3fc000005fc00000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3fc000005fc00000; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7fc00000; -+ *((int*)& __m128_result[2]) = 0x7fc00000; -+ *((int*)& __m128_result[1]) = 0x7fc00000; -+ *((int*)& __m128_result[0]) = 0x7fc00000; -+ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x8d8d72728d8d7272; -+ *((unsigned long*)& __m256d_op0[2]) = 0x8d8d72728d8d8d8d; -+ *((unsigned long*)& __m256d_op0[1]) = 0x8d8d72728d8d7272; -+ *((unsigned long*)& __m256d_op0[0]) = 0x8d8d72728d8d8d8d; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x01fe007a; -+ *((int*)& __m256_op1[6]) = 0x01c40110; -+ *((int*)& __m256_op1[5]) = 0x019d00a2; -+ *((int*)& __m256_op1[4]) = 0x0039fff9; -+ *((int*)& __m256_op1[3]) = 0x01fe007a; -+ *((int*)& __m256_op1[2]) = 0x01c40110; -+ *((int*)& __m256_op1[1]) = 0x019d00a2; -+ *((int*)& __m256_op1[0]) = 0x003a0000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xff800000; -+ *((int*)& __m256_result[6]) = 0xff800000; -+ *((int*)& __m256_result[5]) = 0xff800000; -+ *((int*)& __m256_result[4]) = 0xff800000; -+ *((int*)& __m256_result[3]) = 0xff800000; -+ *((int*)& __m256_result[2]) = 0xff800000; -+ *((int*)& __m256_result[1]) = 0xff800000; -+ *((int*)& __m256_result[0]) = 0xff800000; -+ __m256_out = __lasx_xvflogb_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xc0008000c0008000; -+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800f800; -+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffff8000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffff8000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffff8000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffff8000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& 
__m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x80000000; -+ *((int*)& __m256_op1[4]) = 0x80000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x80000000; -+ *((int*)& __m256_op1[0]) = 0x80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc0008000c0008000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x8001000180010000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8001000180010000; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x80000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7fc00000; -+ *((int*)& __m256_result[6]) = 0x7fc00000; -+ *((int*)& __m256_result[5]) = 0x7fc00000; -+ *((int*)& __m256_result[4]) = 0x7fc00000; -+ *((int*)& __m256_result[3]) = 0x7fc00000; -+ *((int*)& __m256_result[2]) = 0x7fc00000; -+ *((int*)& __m256_result[1]) = 0x7fc00000; -+ *((int*)& __m256_result[0]) = 0x7fc00000; -+ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000001; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000001; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; -+ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff800080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; -+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff008000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff008000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff008000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff008000000000; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0xfffffff5; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001ff; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000;
-+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffffff5;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ unsigned_long_int_result = 0x0000000000000000;
-+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0010000100000000;
-+ __m256i_out = __lasx_xvclo_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0010000100000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800080000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800080000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff80000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff80000000;
-+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8f8f8f8f8f8f8f8f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8f8f8f8f8f8f8f8f;
-+ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8f8f8f8f8f8f8f8f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8f8f8f8f8f8f8f8f;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
-+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001808281820102;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001808201018081;
-+ __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001808281820102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001808201018081;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001008281820102;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001008201010081;
-+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080;
-+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x80808080;
-+ *((int*)& __m128_op1[2]) = 0x80808080;
-+ *((int*)& __m128_op1[1]) = 0x80808080;
-+ *((int*)& __m128_op1[0]) = 0x80808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000;
-+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040;
-+ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0007000100040102;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0003000100010101;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0007000100040102;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0003000100010101;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_w(__m128i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrne_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000fe000000fe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000fe000000fe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000fe000000fe;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000fe000000fe;
-+ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000300000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001c001c001c001c;
-+ *((unsigned long*)& __m128i_result[0]) = 0x001c001c001c001c;
-+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffe5ffe5ffe5ffe5;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffe5ffe5ffe5ffe5;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffe5ffe5ffe5ffe5;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffe5ffe5ffe5ffe5;
-+ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffeb;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffeb;
-+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff800200000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff800200000002;
-+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffeb;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffeb;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004;
-+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000;
-+ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000300000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffdffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffeffff;
-+ __m128i_out = __lsx_vneg_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffeb;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffeb;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000015;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5858585858585858;
-+ *((unsigned long*)& __m256i_result[2]) = 0x5858585858585858;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5858585858585858;
-+ *((unsigned long*)& __m256i_result[0]) = 0x5858585858585858;
-+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xa7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1e1e1e0000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1e1e1e0000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1e1e1e0000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1e1e1e0000000000;
-+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfrint_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x2000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x2000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa;
-+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffbfbfbfc0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbfbfbfc0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffbfffbfff7fff80;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffbfffbfff7fff80;
-+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x54);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa;
-+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000300000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000002fffffffb;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000010000fffb;
-+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000040804000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000040804000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000040a04000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000040a04000;
-+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe6;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffe6;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe6;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffe6;
-+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003;
-+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000040a04000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000040a04000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclo_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000040a04000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000040a04000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa;
-+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000002fffffffb;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000fffb;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000bffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x42);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0xbffffffe;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x5);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xe7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)&
__m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf7f8f7f8f7f8f7f8; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf7f8f7f8f7f8f7f8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf7f8f7f8f7f8f7f8; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf7f8f7f8f7f8f7f8; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000bffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000bffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; -+ __m128i_out = __lsx_vclz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f8; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000002e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000004e; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) 
= 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0f000f000f000f00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0f000f000f000f00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000080800000808; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000101000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000101000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000500000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xffff0000; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; -+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xf7f7f7f7; -+ *((int*)& __m256_op1[6]) = 0xf7f7f7f8; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xf7f7f7f7; -+ *((int*)& __m256_op1[2]) = 0xf7f7f7f8; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x3a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff10000fff10000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xfff10000; -+ *((int*)& __m256_op0[4]) = 0xfff10000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xfff10000; -+ *((int*)& __m256_op0[0]) = 0xfff10000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ 
*((int*)& __m256_result[5]) = 0xfff10000; -+ *((int*)& __m256_result[4]) = 0xfff10000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xfff10000; -+ *((int*)& __m256_result[0]) = 0xfff10000; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256i_result[2]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256i_result[0]) = 0x001fe020001fe020; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff800000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xff800000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xfff10000; -+ *((int*)& __m256_op0[4]) = 0xfff10000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xfff10000; -+ *((int*)& __m256_op0[0]) = 0xfff10000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xfff10000; -+ *((int*)& __m256_op1[4]) = 0xfff10000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xfff10000; -+ *((int*)& __m256_op1[0]) = 0xfff10000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff88ff88; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xfff10000; -+ *((int*)& __m256_op0[4]) = 0xfff10000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xfff10000; -+ *((int*)& __m256_op0[0]) = 0xfff10000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xfff10000; -+ *((int*)& __m256_op1[4]) = 0xfff10000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ int_op1 = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ 
*((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0080000000800000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff1000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1000000000000; -+ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; -+ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xfff10000; -+ *((int*)& __m256_op0[4]) = 0xfff10000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xfff10000; -+ *((int*)& __m256_op0[0]) = 0xfff10000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000001ffe2000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x001fe020001fe020; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe1ffe0ffe1ffe0; -+ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); 
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100f000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100f000ff; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fe200000fe1f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fe200000fe1f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000005; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ 
*((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8101010181010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x8101010181010101; -+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128d_op1[1]) = 0x8101010181010101; -+ *((unsigned long*)& __m128d_op1[0]) = 0x8101010181010101; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8101010181010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8101010181010101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xc0808000c0808000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc0808000c0808000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xc080800000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xc080800000000000; -+ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc0808000c0808000; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000003020302; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x001ffffe00200000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x001ffffe00200000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe1ffe0ffe1ffe0; -+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8101010181010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8101010181010101; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff0000ff; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fe200000fe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fe200000fe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x001ffffe00200000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x001ffffe00200000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fe200000fe1f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fe200000fe1f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffffe20; -+ *((int*)& __m256_op0[6]) = 0x001dfe1f; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xfffffe20; -+ *((int*)& __m256_op0[2]) = 0x001dfe1f; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7efefefe82010201; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x7afafaf88a050a05; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xc080800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc080800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7efefefe82010201; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x418181017dfefdff; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x418181017dfefdff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff81; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff81; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff7c; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000003020302; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff81; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000c0c00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x001ffffe00200000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x001ffffe00200000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0020001d001f; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ long_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000a00000009; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffc0ff80; -+ *((int*)& __m128_op1[2]) = 0xff800000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc00000ff800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff0000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsat_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffc00000ff800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0xffffffff; -+ *((int*)& __m128_result[0]) = 0xffffffff; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; -+ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe20; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001dfffffe1f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000c0c00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x3); -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& 
__m256_op0[6]) = 0xfffffe20; -+ *((int*)& __m256_op0[5]) = 0x0000001d; -+ *((int*)& __m256_op0[4]) = 0xfffffe1f; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff0020001d001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff0000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000005; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000190; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffc0ff80ff800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; -+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00003fe0; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00003fe0; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00003fe0; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00003fe0; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001400000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001400000000; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256d_op0[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00003fe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 
0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; -+ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff1f001f; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffe1ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff1f001f; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffe1ffe0; -+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ 
*((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffc020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffc020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007fc0083fc7c007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007fc0083fc7c007; -+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x42); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ 
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000b0000000b; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x003f60041f636003; -+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007fc0083fc7c007; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007fc0083fc7c007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffc0003fffc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffc0003fffc0; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000010100000101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010100000101; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000020000; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; -+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000003f00001f63; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00001f63; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101030101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101030101; -+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffe1; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffe1; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffe1; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffe1; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffe1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101030101; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101030101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fffa0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffa0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101000101010001; -+ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x01010001; -+ *((int*)& __m128_op0[0]) = 0x01010001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00020000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00020000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00020000; -+ *((int*)& __m128_result[1]) = 0x01010001; -+ *((int*)& __m128_result[0]) = 0x01010001; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10; -+ __m256i_out = 
__lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffc0003fffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffc0003fffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007fc0083fc7c007; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007fc0083fc7c007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f010700c70106; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f010700c70106; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000fffa0000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffa0000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; -+ __m128d_out = __lsx_vfrecip_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256d_op0[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256d_op0[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000008; -+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffc0003fffc0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffc0003fffc0; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; -+ 
__m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x803f6004; -+ *((int*)& __m256_op2[4]) = 0x1f636003; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x803f6004; -+ *((int*)& __m256_op2[0]) = 0x1f636003; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x803f6004; -+ *((int*)& __m256_result[4]) = 0x1f636003; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x803f6004; -+ *((int*)& __m256_result[0]) = 0x1f636003; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff0000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff0000000000; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00002000; -+ *((int*)& __m128_op0[2]) = 0x00002000; -+ *((int*)& __m128_op0[1]) = 0x10000000; -+ *((int*)& __m128_op0[0]) = 0x10000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000002; -+ *((int*)& __m256_op0[4]) = 0x00000008; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000002; -+ *((int*)& __m256_op0[0]) = 0x00000008; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x64800000; -+ *((int*)& __m256_result[4]) = 0x64000000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x64800000; -+ *((int*)& 
__m256_result[0]) = 0x64000000; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x71); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f010700c70106; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f010700c70106; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_result[2]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_result[1]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_result[0]) = 0x0106010601060106; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x803f6004; -+ *((int*)& __m256_op0[4]) = 0x1f636003; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x803f6004; -+ *((int*)& __m256_op0[0]) = 0x1f636003; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x007f0107; -+ *((int*)& __m256_op1[4]) = 0x00c70106; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x007f0107; -+ *((int*)& __m256_op1[0]) = 0x00c70106; -+ *((int*)& __m256_result[7]) = 0x7fc00000; -+ *((int*)& __m256_result[6]) = 0x7fc00000; -+ *((int*)& __m256_result[5]) = 0xbeff7cfd; -+ *((int*)& __m256_result[4]) = 0x5e123f94; -+ *((int*)& __m256_result[3]) = 0x7fc00000; -+ *((int*)& __m256_result[2]) = 0x7fc00000; -+ *((int*)& __m256_result[1]) = 0xbeff7cfd; -+ *((int*)& __m256_result[0]) = 0x5e123f94; -+ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0106010601060106; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001010600000106; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0001010600000106; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1000000010000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0103000201030002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x3f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001010600000106; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001010600000106; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0103000201030002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_d(__m128i_op0,7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0103000201030002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101000101010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000fe0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff00ffffff00ff; -+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; -+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000003f003f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000003f003f; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000002; -+ *((int*)& __m256_op0[4]) = 0x00000008; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 
0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000002; -+ *((int*)& __m256_op0[0]) = 0x00000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f010700c70106; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f010700c70106; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000010211921; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000010211921; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; -+ 
__m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000200000008; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffff00ffffff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000200000008; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; -+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op2[1]) = 0x0000010100fe0101; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffff0200ffff01ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc0ffc1; -+ *((unsigned long*)& __m256i_op0[2]) = 0x003f00000000003f; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffc0ffc1; -+ *((unsigned long*)& __m256i_op0[0]) = 0x003f00000000003f; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001fffe0001ffc0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0001003e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001fffe0001ffc0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0001003e; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000002ffffffff; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007f000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x007f000000000000; -+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 
0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000002; -+ *((int*)& __m256_op1[4]) = 0x00000008; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000002; -+ *((int*)& __m256_op1[0]) = 0x00000008; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000010100fe0101; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffff0200ffff01ff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x0001010100fe0100; -+ *((unsigned long*)& __m128d_result[0]) = 0xffff0200ffff01ff; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000008; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000003fe0000141e; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffc01ffffebe2; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_d(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0xffffffffffffffff; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000004; -+ *((int*)& __m128_op1[0]) = 0x55555555; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00011ffb0000bee1; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00011ffb0000bee1; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000055555555; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000002ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000017fffffff; -+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007ffff001000300; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0001000300; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf0003000f0003000; -+ __m128i_out = __lsx_vslli_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf0003000f0003000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000017fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x003fffffff800000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; -+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x555500adfffc5cab; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010100000100; -+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x010003f00000ff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x017f03000000ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x010003f00000ff00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x017f03000000ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) 
= 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000020; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x42800000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x42000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x42800000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x42000000; -+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000017; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000017; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x01010101; -+ *((int*)& __m128_op0[0]) = 0x00000100; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xc2fa0000; -+ *((int*)& __m128_result[0]) = 0xc30d0000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000020; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000020; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffc0000000000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc0000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000004; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x003fffffff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000455555555; -+ __m128i_out = 
__lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x007f00ff007f00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f00ff007f00ff; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xc9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000158; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001580000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0x0101ffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 
0xffffffff; -+ *((int*)& __m256_op1[0]) = 0x0101ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000455555555; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000001580000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffa800000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000157; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ac; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000157; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00067fff00047fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00027fff000080fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00067fff00047fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00027fff000080fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x067f047f027f0080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x067f047f027f0080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000015800000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; -+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& 
__m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000158; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xffffffa8; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000010; -+ *((int*)& __m128_op0[2]) = 0x00100010; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x79); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000100010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010058; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001001100110068; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001001100110068; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001001100110068; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001001100110067; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001001100110068; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vfclass_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ 
*((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_hu(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000fef0ff0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000fef0ff0; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandi_b(__m128i_op0,0xbd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000040004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000040004; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffint_d_lu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; -+ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f7f7f80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f7f7f80; -+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x82); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000400; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000400; -+ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000fef0ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000fef0ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x687a8373f249bc44; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7861145d9241a14a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101030100010001; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1fff1fff1fff1; -+ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[2]) = 
0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff1fff1fff1fff1; -+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000020006; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000600; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000e000e000e000e; -+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x39); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0101000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101030100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0080800000008000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080818000008000; -+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000e000e; -+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ 
*((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x01010001; -+ *((int*)& __m128_op1[2]) = 0x00010001; -+ *((int*)& __m128_op1[1]) = 0x01010301; -+ *((int*)& __m128_op1[0]) = 0x00010001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000040000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000040000; -+ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00040000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00040000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m128i_result[0]) = 0xf8f8f8f8f8f8f8f8; -+ 
__m128i_out = __lsx_vsubi_bu(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000e000e000e000e; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000e0000000e00; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000e000e; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0101000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0101030100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x2000200020002000; -+ *((unsigned long*)& __m128i_result[0]) = 0x2000200020002000; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_hu(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_result[2]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_result[0]) = 0x0007000700070007; -+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00010001; -+ *((int*)& __m128_op0[2]) = 0x00010001; -+ *((int*)& __m128_op0[1]) = 0x00010001; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrml_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0003800400038004; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_result[0]) = 0x000a800b000a800b; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x000e000e; -+ *((int*)& __m256_op1[4]) = 0x000e000e; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x000e000e; -+ *((int*)& __m256_op1[0]) = 0x000e000e; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x98); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m128d_op1[0]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; 
-+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0007000700070007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0007000700070007; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m128i_op2[0]) = 0xf8f8f8f8f8f8f8f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000018803100188; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000018803100188; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000affff800b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000affff800b; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000affff800b; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000affff800b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000800; -+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000018803100188; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000018803100188; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000a0080000b00; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x00000a0080000b00; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000a0080000b00; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000a0080000b00; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000c; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000440800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000440800; -+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000e0010000e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000e0010000e; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x4e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0010000e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0010000e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ 
*((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0707070707070707; -+ *((unsigned long*)& __m256i_result[2]) = 0x0707070707070707; -+ *((unsigned long*)& __m256i_result[1]) = 0x0707070707070707; -+ *((unsigned long*)& __m256i_result[0]) = 0x0707070707070707; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x27b9331b8e77ead9; -+ *((unsigned long*)& __m128i_op0[0]) = 0x58d6bf1867ace738; -+ *((unsigned long*)& __m128i_result[1]) = 0xe4cc6c9edfab6639; -+ *((unsigned long*)& __m128i_result[0]) = 0x5afc6163b39ce19e; -+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; -+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800f800; -+ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800f800; -+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x5); -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002080100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002080100; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000001880310877e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000001880310877e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002080100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002080100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000008000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000a080100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000008000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000a080100; -+ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; -+ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000400000004; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; -+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; -+ __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffbfffffff8; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffbfffffff8; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffff800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010800; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07ffffff07ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x07ffffff07ffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x07ffffff07ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x07ffffff07ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0ffffffe0ffffffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0ffffffe0ffffffe; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000a0010400a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000a0010400a; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000007f007f007f; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xdd6156076967d8c9; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2e3ab5266375e71b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0x6eb12b0634b46c67; -+ *((unsigned long*)& __m128i_result[0]) = 0x171d5a9531bb7390; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; -+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; -+ __m256i_out = __lasx_xvclz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000b; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000002070145; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000002070145; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xfffffffc; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& 
__m128_op1[0]) = 0xfffffffc; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xfffffffc; -+ *((int*)& __m128_result[1]) = 0xffffffff; -+ *((int*)& __m128_result[0]) = 0xfffffffc; -+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1ab6021f72496458; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7750af4954c29940; -+ *((unsigned long*)& __m128i_result[1]) = 0xe64afee18eb79ca8; -+ *((unsigned long*)& __m128i_result[0]) = 0x89b051b7ac3e67c0; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffdc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffbffffffd8; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffbfffffff8; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; -+ __m128i_out = 
__lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000008000b; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000008000b; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000b; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1ab6021f72496458; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7750af4954c29940; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1ab6021f72496458; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7750af4954c29940; -+ *((unsigned long*)& __m128i_result[1]) = 0x6ad8ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x6ad8ffffffffffff; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000008000b; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000008000b; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000008000a; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000a; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000008000a; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000a; -+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffefef800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffefef800; -+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vmskltz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffefef800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffefef800; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x0); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffefef800; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffefef800; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffefef800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffefef800; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x27); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000010000000; -+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffff80; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffff80; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000430207f944; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000200000001e; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000200000001e; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x38); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000001f; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000001f; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x0000001f; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x0000001f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0xff01ff01ff01ff01; -+ *((unsigned long*)& __m128i_result[0]) = 0xff01ff01ff01fc10; -+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000080; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000001f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000001f; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x403f000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x403f000000000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x45); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000080; -+ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x7e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007f00ff00ff00fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007f00ff00ff00fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007f00ff00ff00fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x1f9689fdb16cabbd; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x1f9689fdb16cabbd; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff0000; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xcd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000007f007f007f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0af57272788754ab; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000005e80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0af57272788754ab; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000005e80; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000f0f0f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f0000007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000f0f0f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f0000007f; -+ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 
0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; -+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0008; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000017ffeffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000017ffeffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffff0100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff0100000001; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffff00018d8b; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffff0100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff0100000001; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x7); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff0000; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000001; -+ *((int*)& __m128_op0[2]) = 0x7ffeffff; -+ *((int*)& __m128_op0[1]) = 0x00000001; -+ *((int*)& __m128_op0[0]) = 0x7ffeffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; 
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; -+ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x003f0000003f0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x003f0000003f0000; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffff0100000001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffff0100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000003f0000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000003f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x803e0000803e0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x803e0000803e0000; -+ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; -+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000bdfef907bc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000bdfef907bc; -+ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x803e0000803e0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x803e0000803e0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x803bfffd803bfffd; -+ *((unsigned long*)& __m128i_result[0]) = 0x803bfffd803bfffd; -+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0010511c54440437; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0010511c54440437; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0008; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffff0008; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000430207f944; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0100010001000100; -+ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000008080800; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000008080800; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010511c54440437; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010511c54440437; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000103fca1bd; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000103fca1bd; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000103fca1bd; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000103fca1bd; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010511c54440438; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010511c54440438; -+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1d8000001d800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1d8000001d800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1d8000001d800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1d8000001d800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0366000003660000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0366000003660000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000bdfef907bc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000bdfef907bc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0000fffe0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7777777777777777; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff7777ffff7777; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x77); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x000000bd; -+ *((int*)& __m256_op0[4]) = 0xfef907bc; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x000000bd; -+ *((int*)& __m256_op0[0]) = 0xfef907bc; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x62d2acee; -+ *((int*)& __m256_result[4]) = 0x7fc00000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x62d2acee; -+ *((int*)& __m256_result[0]) = 0x7fc00000; -+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0100004300000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0100004300000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff0000bd00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff0000bd00000000; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x01000100; -+ *((int*)& __m256_op0[6]) = 0x01000100; -+ *((int*)& __m256_op0[5]) = 0x01000100; -+ *((int*)& __m256_op0[4]) = 0x01000100; -+ *((int*)& __m256_op0[3]) = 0x01000100; -+ *((int*)& __m256_op0[2]) = 0x01000100; -+ *((int*)& __m256_op0[1]) = 0x01000100; -+ *((int*)& __m256_op0[0]) = 0x01000100; -+ *((int*)& __m256_op1[7]) = 0x7f800000; -+ *((int*)& __m256_op1[6]) = 0x7f800000; -+ *((int*)& __m256_op1[5]) = 0x62d2acee; -+ *((int*)& __m256_op1[4]) = 0x7fc00000; -+ *((int*)& __m256_op1[3]) = 0x7f800000; -+ *((int*)& __m256_op1[2]) = 0x7f800000; -+ *((int*)& __m256_op1[1]) = 0x62d2acee; -+ *((int*)& __m256_op1[0]) = 0x7fc00000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000043; -+ *((int*)& __m256_op0[4]) = 0x0207f944; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000043; -+ *((int*)& __m256_op0[0]) = 0x0207f944; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x8c7fc73a; -+ *((int*)& __m128_op0[2]) = 0x137e54af; -+ *((int*)& __m128_op0[1]) = 0xbc84cf6f; -+ *((int*)& __m128_op0[0]) = 0x76208329; -+ *((int*)& __m128_result[3]) = 0x7fc00000; -+ *((int*)& __m128_result[2]) = 0x297f29fe; -+ *((int*)& __m128_result[1]) = 0x7fc00000; -+ *((int*)& __m128_result[0]) = 0x5acab5a5; -+ __m128_out = __lsx_vfsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00010001000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00010001000100; -+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x7b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x22); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0100004300000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0100004300000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op2[2]) = 0xff00010001000100; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op2[0]) = 0xff00010001000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x01ffff4300ffff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x01ffff4300ffff00; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x80008000ec82ab51; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000800089e08000; -+ int_result = 0xffffffff89e08000; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010511c54440438; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010511c54440438; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ 
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000777777777777; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff7777ffff7777; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000003bbbbbbbbbb; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x45); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000086000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00040ff288000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000086000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00040ff288000000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000777777777777; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffff7777ffff7777; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001b; -+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01ffff4300ffff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01ffff4300ffff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000100; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x3f800000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, 
__m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff00010001000100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00010001000100;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc0800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000008080600;
-+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x3f800000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000086000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00040ff288000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000086000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00040ff288000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000fc300000fc40;
-+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x5555555555555555;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_result[2]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_result[1]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4545454545454545;
-+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x4d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff00010001000100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff00010001000100;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000040004000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000040004000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000000;
-+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x5a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x01ffff4300ffff00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x01ffff4300ffff00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff000003c0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff000003c0;
-+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008080600;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff000003c0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff000003c0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7c030000ffc4;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7c030000ffc4;
-+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x7f800000;
-+ *((int*)& __m128_result[2]) = 0x7f800000;
-+ *((int*)& __m128_result[1]) = 0x7f800000;
-+ *((int*)& __m128_result[0]) = 0x7f800000;
-+ __m128_out = __lsx_vfrsqrt_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7ffeffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7ffeffffffff;
-+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000001;
-+ *((int*)& __m256_op0[5]) = 0x7fff7ffe;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000001;
-+ *((int*)& __m256_op0[1]) = 0x7fff7ffe;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000002;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000002;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000002;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_op2[7]) = 0x00000000;
-+ *((int*)& __m256_op2[6]) = 0x00000000;
-+ *((int*)& __m256_op2[5]) = 0xffffffff;
-+ *((int*)& __m256_op2[4]) = 0xffffffff;
-+ *((int*)& __m256_op2[3]) = 0x00000000;
-+ *((int*)& __m256_op2[2]) = 0x00000000;
-+ *((int*)& __m256_op2[1]) = 0xffffffff;
-+ *((int*)& __m256_op2[0]) = 0xffffffff;
-+ *((int*)& __m256_result[7]) = 0x80000000;
-+ *((int*)& __m256_result[6]) = 0x80000000;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0x80000000;
-+ *((int*)& __m256_result[2]) = 0x80000000;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffeffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7ffeffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x007f007bfffffffb;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x007f007bfffffffb;
-+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x01ffff4300fffeff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfe0000bcff000100;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x01ffff4300fffeff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfe0000bcff000100;
-+ *((unsigned long*)& __m256i_result[3]) = 0x81ff00bd80ff0101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x01ff00bd00ff0101;
-+ *((unsigned long*)& __m256i_result[1]) = 0x81ff00bd80ff0101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x01ff00bd00ff0101;
-+ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b;
-+ int_op1 = 0xffffffff89e08000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000001b0000001b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001b0000001b;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0018;
-+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x0000001b;
-+ *((int*)& __m128_op0[2]) = 0x0000001b;
-+ *((int*)& __m128_op0[1]) = 0x0000001b;
-+ *((int*)& __m128_op0[0]) = 0x0000001b;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x0000001b;
-+ *((int*)& __m128_result[2]) = 0x0000001b;
-+ *((int*)& __m128_result[1]) = 0x0000001b;
-+ *((int*)& __m128_result[0]) = 0x0000001b;
-+ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000040004000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000040004000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x01ffff4300ffff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x01ffff4300ffff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff003f003f00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff0101fd00010100;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff003f003f00;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff0101fd00010100;
-+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff003f003f00;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff0101fd00010100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff003f003f00;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff0101fd00010100;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff003f003f00;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff0101fd00010100;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff003f003f00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff0101fd00010100;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xff00ff00;
-+ *((int*)& __m256_op0[6]) = 0x3f003f00;
-+ *((int*)& __m256_op0[5]) = 0xff0101fd;
-+ *((int*)& __m256_op0[4]) = 0x00010100;
-+ *((int*)& __m256_op0[3]) = 0xff00ff00;
-+ *((int*)& __m256_op0[2]) = 0x3f003f00;
-+ *((int*)& __m256_op0[1]) = 0xff0101fd;
-+ *((int*)& __m256_op0[0]) = 0x00010100;
-+ *((int*)& __m256_op1[7]) = 0x01ffff43;
-+ *((int*)& __m256_op1[6]) = 0x00fffeff;
-+ *((int*)& __m256_op1[5]) = 0xfe0000bc;
-+ *((int*)& __m256_op1[4]) = 0xff000100;
-+ *((int*)& __m256_op1[3]) = 0x01ffff43;
-+ *((int*)& __m256_op1[2]) = 0x00fffeff;
-+ *((int*)& __m256_op1[1]) = 0xfe0000bc;
-+ *((int*)& __m256_op1[0]) = 0xff000100;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fc00fc00;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fc00fc00;
-+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x2c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x01ffff4300ffff00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x01ffff4300ffff00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000040004000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000040004000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100000000;
-+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x2e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffc0800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x6f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4545454545454545;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001b;
-+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000001b0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000001b0000;
-+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000fc300000fc40;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000001b0000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000001b0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000001b001b;
-+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xc0800000;
-+ *((int*)& __m128_op1[3]) = 0x0000001b;
-+ *((int*)& __m128_op1[2]) = 0x0000001b;
-+ *((int*)& __m128_op1[1]) = 0x0000001b;
-+ *((int*)& __m128_op1[0]) = 0x0000001b;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x007f007bfffffffb;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x007f007bfffffffb;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000010000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000010000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvneg_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffc0800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc0800000;
-+ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fff0018;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000003fff800c;
-+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000010000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000010000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffeffff10000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffeffff10000000;
-+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fc00fc00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fc00fc00;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fc00fc00;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fc00fc00;
-+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xfffeffff10000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xfffeffff10000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ long_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xfc003802;
-+ *((int*)& __m256_op0[6]) = 0xfc000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0xfc00fc00;
-+ *((int*)& __m256_op0[3]) = 0xfc003802;
-+ *((int*)& __m256_op0[2]) = 0xfc000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0xfc00fc00;
-+ *((int*)& __m256_result[7]) = 0x82ff902d;
-+ *((int*)& __m256_result[6]) = 0x83000000;
-+ *((int*)& __m256_result[5]) = 0x7f800000;
-+ *((int*)& __m256_result[4]) = 0x82fe0bd9;
-+ *((int*)& __m256_result[3]) = 0x82ff902d;
-+ *((int*)& __m256_result[2]) = 0x83000000;
-+ *((int*)& __m256_result[1]) = 0x7f800000;
-+ *((int*)& __m256_result[0]) = 0x82fe0bd9;
-+ __m256_out = __lasx_xvfrecip_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0018;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004;
-+ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
-+ __m128i_out = __lsx_vclz_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffeffff10000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffeffff10000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7ffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7ffffffffffffffe;
-+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
-+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x03802fc000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x03802fc000000000;
-+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vffint_s_wu(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xd5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000;
-+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xfc003802fc000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7ffffffffffffffe;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x7ffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff00010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff00010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xd2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_result[3]) = 0xc008fa01c0090000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3f804000c008f404;
-+ *((unsigned long*)& __m256i_result[1]) = 0xc008fa01c0090000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3f804000c008f404;
-+ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x03802fc000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x03802fc000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004;
-+ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7ffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7ffffffffffffffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001;
-+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc008fa01c0090000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3f804000c008f404;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc008fa01c0090000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3f804000c008f404;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc0090000c0200060;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc0090000c0200060;
-+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc0090000c0200060;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc0090000c0200060;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f0060;
-+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004;
-+ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc008fa01c0090000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3f804000c008f404;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc008fa01c0090000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3f804000c008f404;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_result[2]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_result[0]) = 0x001fc0200060047a;
-+ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x03802fc000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x03802fc000000000;
-+ *((int*)& __m256_result[7]) = 0x38600000;
-+ *((int*)& __m256_result[6]) = 0x3df80000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x38600000;
-+ *((int*)& __m256_result[2]) = 0x3df80000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000400028000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xd9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
-+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0c6a240000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0c6a240000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_result[2]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_result[0]) = 0x001fc0200060047a;
-+ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f0060;
-+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400028000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000020001c020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000022;
-+ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfee1057c01e10581;
-+ *((unsigned long*)& __m256i_result[2]) = 0x011ec1210161057b;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfee1057c01e10581;
-+ *((unsigned long*)& __m256i_result[0]) = 0x011ec1210161057b;
-+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0002008360500088;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400028000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x047a047a047a047a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x047a047a047a047a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x047a047a047a047a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x047a047a047a047a;
-+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0002008360500088;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008;
-+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000c;
-+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0c6a240000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0c6a240000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00ca0000fff80000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00ca0000fff80000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff3;
-+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0xffffffffffffffff;
-+ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x2);
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vori_b(__m128i_op0,0x55);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00f7000000f70006;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00f7000000f70006;
-+ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000fffe00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000fffe00000000;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xfffffff3;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000008;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000088;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000008;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000088;
-+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00ca0000fff80000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00ca0000fff80000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x386000003df80000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x36); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x5fa0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x5fa0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x386000003df80000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x386000003df80000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ca0000fff80000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ca0000fff80000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x381800007af80000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x381800007af80000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002008300500088; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000088; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ 
*((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0c6a240000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0f00204000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0c6a240000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0f00204000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xf3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x5fa00000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x5fa00000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffe0047d00e00480; -+ *((unsigned long*)& __m256i_op1[2]) = 0x001fc0200060047a; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffe0047d00e00480; -+ *((unsigned long*)& __m256i_op1[0]) = 0x001fc0200060047a; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xe07de0801f20607a; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00f3009500db00ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00f3009500db00ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003cc0; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x6a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0c6a2400; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x0f002040; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x0c6a2400; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x0f002040; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x5fa0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x5fa0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0c6a240000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0f00204000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0c6a240000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0f00204000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x04a3000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x04a3000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0c6a240000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0f00204000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0c6a240000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0f00204000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x52525252; -+ *((int*)& __m128_op0[2]) = 0xadadadad; -+ *((int*)& __m128_op0[1]) = 0x52525252; -+ *((int*)& __m128_op0[0]) = 0xadadadad; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0xadadadad; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0xadadadad; -+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrintrne_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; -+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x5fa0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x5fa0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003cc0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000081f20607a; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000081f20607a; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_result[1]) = 0xfbfbfbfbadadadad; -+ *((unsigned long*)& __m128i_result[0]) = 0xfbfbfbfbadadadad; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,-5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_result[1]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_result[0]) = 0x52525252adadadad; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x800000007fffffff; -+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0df9f8e; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0df9f8e; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe0df9f8e; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffe0df9f8e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00f7000000f70006; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00f7000000f70006; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op0[0]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op1[1]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x5fa00000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x5fa00000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000004; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00007f95; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000004; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00007f95; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; -+ *((unsigned long*)& __m128i_result[1]) = 0xadadadadadadadad; -+ *((unsigned long*)& __m128i_result[0]) = 0xadadadadadadadad; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0df9f8e; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0df9f8e; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0df9f8f; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0df9f8f; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffb; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffb; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op1[0]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op2[1]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00adadad00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00adadad00000000; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op0[0]) = 0x52525252adadadad; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5b5b5b5aa4a4a4a6; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x5b5b5b5aadadadad; -+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x007fffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x002cffacffacffab; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000007f00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffa; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00018069; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffa; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00018069; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff01fffffffeff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff01fffffffaff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff01fffffffeff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff01fffffffaff; -+ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5b5b5b5aadadadad; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000052525253; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffa; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00018069; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0001fffa; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00018069; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000002000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000002000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x64); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xe07de080; -+ *((int*)& __m256_op0[4]) = 0x1f20607a; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; 
-+ *((int*)& __m256_op0[1]) = 0xe07de080; -+ *((int*)& __m256_op0[0]) = 0x1f20607a; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xe07de080; -+ *((int*)& __m256_op1[4]) = 0x1f20607a; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xe07de080; -+ *((int*)& __m256_op1[0]) = 0x1f20607a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000007f00ff00ff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3ff0000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7ffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7ffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7ffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x3fffffff3ffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x3fffffff3ffffffe; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x800000007fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x003f0000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x003f0000ffffffff; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7ffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7ffffffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffe4866c86; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe4866c86; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000002000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000002000000; -+ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xe07de080; -+ *((int*)& __m256_op1[4]) = 0x1f20607a; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xe07de080; -+ *((int*)& __m256_op1[0]) = 0x1f20607a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x207fffff22bd04fb; -+ *((unsigned long*)& __m128i_op0[0]) = 0x207fffff22bd04fb; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000002000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000002000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x207fffff22bd04fa; -+ *((unsigned long*)& __m128i_result[0]) = 0x207fffff22bd04fa; -+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x3ff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe07de080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000001f20607a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe07de080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000001f20607a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfdfdfdfdfdfdfdfd;
-+ *((unsigned long*)& __m256i_result[2]) = 0xe27fe2821d226278;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfdfdfdfdfdfdfdfd;
-+ *((unsigned long*)& __m256i_result[0]) = 0xe27fe2821d226278;
-+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1f831f80e0e09f86;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1f831f80e0e09f86;
-+ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000003effff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000003effff;
-+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7ffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff81010102;
-+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe27fe2821d226278;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe27fe2821d226278;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x401fadf8fbfbfbfb;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1c1f2145fbfbfbfb;
-+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ff0000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff0000ffffffff;
-+ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x38);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x01ff01ff01c0003e;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x01ff01ff01c0003e;
-+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x17);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x01ff01ff01c0003e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x01ff01ff01c0003e;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000100ff000100ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000100c00000003e;
-+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xbbe5560400010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe7e5dabf00010001;
-+ __m128i_out = __lsx_vneg_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xbbe5560400010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe7e5dabf00010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000b000500010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000b000c00010001;
-+ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff81010102;
-+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00ff0000;
-+ *((int*)& __m256_op1[4]) = 0xffffffff;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00ff0000;
-+ *((int*)& __m256_op1[0]) = 0xffffffff;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00ff0000;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00ff0000;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fc0010181020103;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fc0ffff81020103;
-+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xbbe5560400010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe7e5dabf00010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xbbe5560400010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xe7e5dabf00010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe7e5560400010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe7e5dabf00010001;
-+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xf3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xdcec560380000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x08ec7f7f80000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x32d8f0a905b6c59b;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x322a52fc2ba83b96;
-+ *((unsigned long*)& __m128i_result[1]) = 0xaa14efac3bb62636;
-+ *((unsigned long*)& __m128i_result[0]) = 0xd6c22c8353a80d2c;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_result[1]) = 0x03ff0101fc010102;
-+ *((unsigned long*)& __m128i_result[0]) = 0x03fffffffc010102;
-+ __m128i_out = __lsx_vsat_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000fffffffe000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000102020204000;
-+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xaa14efac3bb62636;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xd6c22c8353a80d2c;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000300000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000;
-+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00ff0000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00ff0000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x03ff0101fc010102;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x03fffffffc010102;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
-+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffffffff;
-+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x7fff0101;
-+ *((int*)& __m128_op0[2]) = 0x81010102;
-+ *((int*)& __m128_op0[1]) = 0x7fffffff;
-+ *((int*)& __m128_op0[0]) = 0x81010102;
-+ *((int*)& __m128_op1[3]) = 0x00000fff;
-+ *((int*)& __m128_op1[2]) = 0xffffe000;
-+ *((int*)& __m128_op1[1]) = 0x00001020;
-+ *((int*)& __m128_op1[0]) = 0x20204000;
-+ *((int*)& __m128_result[3]) = 0x7fff0101;
-+ *((int*)& __m128_result[2]) = 0xffffe000;
-+ *((int*)& __m128_result[1]) = 0x7fffffff;
-+ *((int*)& __m128_result[0]) = 0xa0204000;
-+ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70007;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70007;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xe7e5560400010001;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xe7e5dabf00010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000;
-+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00800000;
-+ *((int*)& __m128_op0[0]) = 0x00800000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00800000;
-+ *((int*)& __m128_op1[0]) = 0x00800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xfeffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xfeffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000fff;
-+ *((int*)& __m128_op1[2]) = 0xffffe000;
-+ *((int*)& __m128_op1[1]) = 0x00001020;
-+ *((int*)& __m128_op1[0]) = 0x20204000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xfffefffefffeffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xfffefffefffeffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff;
-+ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000fff;
-+ *((int*)& __m128_op1[2]) = 0xffffe000;
-+ *((int*)& __m128_op1[1]) = 0x00001020;
-+ *((int*)& __m128_op1[0]) = 0x20204000;
-+ *((int*)& __m128_result[3]) = 0x80000fff;
-+ *((int*)& __m128_result[2]) = 0xffffffff;
-+ *((int*)& __m128_result[1]) = 0x80001020;
-+ *((int*)& __m128_result[0]) = 0xffffffff;
-+ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000100010001fffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000100010001fffe;
-+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000700000004e000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0003000000012020;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0038000000051fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003c000000022021;
-+ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff0000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff0000ffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskgez_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000005500000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001005500020000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000005500000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001005500020000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000100010001fffe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000100010001fffe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000005500000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000005400000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000005500000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000005400000002;
-+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0xfdfcfda8;
-+ *((int*)& __m256_op0[5]) = 0x0000e282;
-+ *((int*)& __m256_op0[4]) = 0x1d20ffff;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0xfdfcfda8;
-+ *((int*)& __m256_op0[1]) = 0x0000e282;
-+ *((int*)& __m256_op0[0]) = 0x1d20ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000700000004e000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000012020;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000e00a18f5;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x000000002023dcdc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000700000004e000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0003000000012020;
-+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001f7fc100000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x001f7fff00000000;
-+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0038000000051fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003c000000022021;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f370101ff04ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f3bffffa0226021;
-+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfeffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfeffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000fffffffe000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000102020204000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefff00000001fff;
-+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000100010001fffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000100010001fffe;
-+ *((int*)& __m256_result[7]) = 0x80000000;
-+ *((int*)& __m256_result[6]) = 0x80000000;
-+ *((int*)& __m256_result[5]) = 0x80000000;
-+ *((int*)& __m256_result[4]) = 0x80000000;
-+ *((int*)& __m256_result[3]) = 0x80000000;
-+ *((int*)& __m256_result[2]) = 0x80000000;
-+ *((int*)& __m256_result[1]) = 0x80000000;
-+ *((int*)& __m256_result[0]) = 0x80000000;
-+ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000005500000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001005500020000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000005500000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001005500020000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x7fff0101ffffe000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffffa0204000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x7f370101ff04ffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x7f3bffffa0226021;
-+ *((unsigned long*)& __m128d_result[1]) = 0x7fff0101ffffe000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x7fffffffa0204000;
-+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x003f000400000003;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003f000400000003;
-+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclo_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0effeffefdffa1e0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe6004c5f64284224;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f000400000003;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f000400000003;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000400004;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000003f0004;
-+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001f7fc100000404;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000002a000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffe1ffc100000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000400000;
-+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefff00000001fff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffe1ffc100000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000400000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffe1ffc100000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefff00000401fff;
-+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffe1ffc100000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000400000;
-+ *((int*)& __m128_result[3]) = 0xfffc2000;
-+ *((int*)& __m128_result[2]) = 0xfff82000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003ef89df07f0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003ec0fc0fbfe001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3ff800ff2fe6c00d;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfff40408ece0e0de;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
-+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000045340a6;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000028404044;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffff000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffff000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffff000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffff000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70006;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70006;
-+ *((unsigned long*)& __m256d_result[3]) = 0x416ee00000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x416ee000c0000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x416ee00000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x416ee000c0000000;
-+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fdfcfda8;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000e2821d20ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fdfcfda8;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000e2821d20ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff7f810100001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x001fffc0ffffe001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000002259662;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc4dbe60354005d25;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f01000000f8ff00;
-+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
-+ __m256i_out = __lasx_xvclz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000045340a6;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000028404044;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000fffffffe000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000102020204000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x045340a628404044;
-+ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0xffffe000;
-+ *((int*)& __m128_result[0]) = 0xffffe000;
-+ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff007fff810001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xcc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff007fff810001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xff7f810100001000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x001fffc0ffffe001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff7f810100001000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000400530050ffa6;
-+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff000ff6220c0c1;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffe8081000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000007ff000ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff7f810100001000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff007fff810001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000400530050ffa6;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffff811001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000a1ff4c;
-+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x0002a000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x0002a000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000002a000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000060a3db;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xa70594c000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff9f5c25;
-+ *((unsigned long*)& __m128i_result[0]) = 0x58fa6b4000000000;
-+ __m128i_out = __lsx_vneg_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000007ff000ff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000a1ff4c;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000300037ff000ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0003000300a10003;
-+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x045340a628404044;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1f);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x7ff000ff6220c0c1;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffe8081000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff0001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffff0001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100;
-+ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000300037ff000ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x3c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7ff000ff6220c0c1;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffe8081000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7ff000ff6220c0c1;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffe8081000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xb110606000000000;
-+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff9f5c25;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x58fa6b4000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ff9f5c25;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x58fa6b4000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
-+ __m128i_out = __lsx_vclz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x17);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000080800000808;
-+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0xffffffff;
-+ *((int*)& __m256_op1[6]) = 0xffffffff;
-+ *((int*)& __m256_op1[5]) = 0xffffffff;
-+ *((int*)& __m256_op1[4]) = 0xffffffff;
-+ *((int*)& __m256_op1[3]) = 0xffffffff;
-+ *((int*)& __m256_op1[2]) = 0xffffffff;
-+ *((int*)& __m256_op1[1]) = 0xffffffff;
-+ *((int*)& __m256_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf3f3f3f3f3f3f3f3;
-+ __m256i_out = __lasx_xvmini_b(__m256i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[6]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[5]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[4]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[3]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[2]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[1]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op0[0]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[7]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[6]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[5]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[4]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[3]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[2]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[1]) = 0xf3f3f3f3;
-+ *((int*)& __m256_op1[0]) = 0xf3f3f3f3;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000080800000808;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf3f3f3f3f3f3f4f3;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf3f3f3f3f3f3f4f3;
-+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff800fff01;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff001ffe02;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_w_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x000300037ff000ff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0003000300a10003;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x00000000000000ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = 
__lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff800fff01; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000007ff000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); -+ *((unsigned long*)& __m128d_op0[1]) = 0x000300037ff000ff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0003000300a10003; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000007ff000ff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0003000300000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0003000300a10003; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0003000300000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffd00000000; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfffdfffe80008000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0xffeffff4; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000000007ff000ff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffe80008000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe2; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffe80007fe2; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf3f3f3f3f3f3f4f3; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf3f3f3f3f3f3f4f3; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000f3f3f4f3; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000f3f3f4f3; -+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000300037ff000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000300a10003; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000300037ff000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003; -+ *((unsigned long*)& __m128i_op2[1]) = 0x000000007ff000ff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x26); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ 
__m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000007ff000ff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, 
__m256d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x7ff000ff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrml_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000004000000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000004000000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000040; -+ *((int*)& __m256_op0[6]) = 0x00000020; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& 
__m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000040; -+ *((int*)& __m256_op0[2]) = 0x00000020; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000004000000020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000004000000020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xf8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_du(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x58); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff7fffffff7; -+ __m256i_out = __lasx_xvmini_w(__m256i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 
0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4000400040004000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff7fffffff7; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff7fffffff7; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff700000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff7fffffff7; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvslt_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_d(__m256i_op0,10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
-+ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslei_h(__m128i_op0,-7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvneg_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0808080700000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
-+ __m128i_out = __lsx_vclz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080700000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
-+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffefffe;
-+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffe0004fffe0004;
-+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvneg_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0042003e0042002f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffc0001fffc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0042003e0042002f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001fffc0001fffc;
-+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xfffe0004fffe0004;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x4b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000007070707;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xc1bdceee242070db;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe8c7b756d76aa478;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3f433212dce09025;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffbeffc2ffbeffd1;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0042003e0042002f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffc0001fffc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0707070707070707;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0707070707070707;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000001fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0018001800180018;
-+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d;
-+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xc2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000001fffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000001ffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000001ffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002;
-+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_hu(__m128i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d;
-+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x2c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0018001800180018;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3000300030003000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3000300030003000;
-+ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1e9e1e9e1e9e1e9e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1e9e1e9e1e9e1e9e;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1e9e1e9e1e9e1e9e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1e9e1e9e1e9e1e9e;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d;
-+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0002000000020000;
-+ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrml_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000;
-+ __m256d_out = __lasx_xvflogb_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x27);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001d0000001d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001d0000001d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001d0000001d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001d;
-+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000;
-+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_b(__m256i_op0,3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ long_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,-13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; -+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000555500005555; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000555500005555; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000555500005555; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000555500005555; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) 
= 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrph_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x5a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256d_op1[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256d_op1[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256d_op1[0]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x01000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000555500005555; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000555500005555; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000555500005555; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000555500005555; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe01fe01fe; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256d_result[3]) = 0x437fe01fe01fe020; -+ *((unsigned long*)& __m256d_result[2]) = 0x437fe01fe01fe020; -+ *((unsigned long*)& __m256d_result[1]) = 0x437fe01fe01fe020; -+ *((unsigned long*)& __m256d_result[0]) = 0x437fe01fe01fe020; -+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x8); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x45); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xbf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; -+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x037fe01f001fe020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x037fe01f001fe020; -+ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; -+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000100; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x437fe01fe01fe020; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x437fe01fe01fe020; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x037fe01f001fe020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x037fe01f001fe020; -+ *((unsigned long*)& __m256i_result[3]) = 0x437f201f201f2020; -+ *((unsigned long*)& __m256i_result[2]) = 0x037f201f001f2020; -+ *((unsigned long*)& __m256i_result[1]) = 0x437f201f201f2020; -+ *((unsigned long*)& __m256i_result[0]) = 0x037f201f001f2020; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x437f201f201f2020; -+ *((unsigned long*)& __m256i_op1[2]) = 0x037f201f001f2020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x437f201f201f2020; -+ *((unsigned long*)& __m256i_op1[0]) = 0x037f201f001f2020; -+ *((unsigned long*)& __m256i_op2[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x21bb481000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x01bf481000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x21bb481000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x01bf481000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000086fe0000403e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000403e00004040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000086fe0000403e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000403e00004040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000086fe0000403e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000403e00004040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000086fe0000403e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000403e00004040; -+ *((unsigned long*)& __m256i_result[3]) = 
0x00001bfa000000f9; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000f900004040; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001bfa000000f9; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000f900004040; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000086fe0000403e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000403e00004040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000086fe0000403e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000403e00004040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000437f0000201f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000201f00002020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000437f0000201f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000201f00002020; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00001bfa000000f9; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000f900004040; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00001bfa000000f9; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000f900004040; -+ *((unsigned long*)& __m256d_result[3]) = 0x60183329ceb52cf0; -+ *((unsigned long*)& __m256d_result[2]) = 0x6040392cdaf9b3ff; -+ *((unsigned long*)& __m256d_result[1]) = 0x60183329ceb52cf0; -+ *((unsigned long*)& __m256d_result[0]) = 0x6040392cdaf9b3ff; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x21bb481000ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x01bf481000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x21bb481000ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x01bf481000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xb1b3b1b1b1b7b1b1; -+ *((unsigned long*)& __m256i_result[2]) = 0xb1b7b1b1b1b1b1b1; -+ *((unsigned long*)& __m256i_result[1]) = 0xb1b3b1b1b1b7b1b1; -+ *((unsigned long*)& __m256i_result[0]) = 0xb1b7b1b1b1b1b1b1; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xb7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x5d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x43800000; -+ *((int*)& __m128_result[0]) = 0x43800000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000008e4bfc4eff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000001ffee10000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000008e4bfc4eff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000001ffee10000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d000000000d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0000060d0d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d000000000d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0000060d0d; -+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x1); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; -+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000800000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; -+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; -+ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xff0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256d_op0[1]) = 0xff0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0d0d0d0d0d0d0d0d; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
-+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000;
-+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
-+ __m128i_out = __lsx_vclz_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_h(__m128i_op0,14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000;
-+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x43800000;
-+ *((int*)& __m128_op0[0]) = 0x43800000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100;
-+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd;
-+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
-+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffedffedffedffed;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffedffedffedffed;
-+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800200028;
-+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffefffefffefffef;
-+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
-+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd;
-+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x7e);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffcfffdfffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffcfffdfffc;
-+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_result[3]) = 0x080808000828082f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0808080008280820;
-+ *((unsigned long*)& __m256i_result[1]) = 0x080808000828082f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0808080008280820;
-+ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800000008;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000400100013;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000400100014;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000400100013;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004;
-+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x080808000828082f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0808080008280820;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x080808000828082f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0808080008280820;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00828082f0808080;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf18181818132feea;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00828082f0808080;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf18181818132feea;
-+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x24);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000006040190d;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000006040190d;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000860601934;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000860601934;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800200028;
-+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000800200027;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_result[3]) = 0x006018000000001a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0060401900000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x006018000000001a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0060401900000000;
-+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000860601934;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000860601934;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffcfffdfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffcfffdfffc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a00000000;
-+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0a0a000000000a0a;
-+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000;
-+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x2020080800000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000004044f4f;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0ef11ae55a5a6767;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6040190d20227a78;
-+ *((unsigned long*)& __m256i_result[1]) = 0x132feeabd2d33b38;
-+ *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000;
-+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000400100013;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000400100014;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000400100013;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000400000004;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000020200000202;
-+ *((unsigned long*)& __m256i_result[2]) = 0x4100004141410000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000020200000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x4100004141410000;
-+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000860601934;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000860601934;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800200028;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x000000006040190d;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x000000006040190d;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000006040190c;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff9fbfe6f3;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000006040190c;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff9fbfe6f3;
-+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000008;
-+ *((int*)& __m256_op0[6]) = 0x60601934;
-+ *((int*)& __m256_op0[5]) = 0x00000008;
-+ *((int*)& __m256_op0[4]) = 0x00200028;
-+ *((int*)& __m256_op0[3]) = 0x00000008;
-+ *((int*)& __m256_op0[2]) = 0x60601934;
-+ *((int*)& __m256_op0[1]) = 0x00000008;
-+ *((int*)& __m256_op0[0]) = 0x00200028;
-+ *((int*)& __m256_op1[7]) = 0xffffffff;
-+ *((int*)& __m256_op1[6]) = 0xffffffff;
-+ *((int*)& __m256_op1[5]) = 0xffffffff;
-+ *((int*)& __m256_op1[4]) = 0xffffffff;
-+ *((int*)& __m256_op1[3]) = 0xffffffff;
-+ *((int*)& __m256_op1[2]) = 0xffffffff;
-+ *((int*)& __m256_op1[1]) = 0xffffffff;
-+ *((int*)& __m256_op1[0]) = 0xffffffff;
-+ *((int*)& __m256_result[7]) = 0xffffffff;
-+ *((int*)& __m256_result[6]) = 0xffffffff;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0xffffffff;
-+ *((int*)& __m256_result[2]) = 0xffffffff;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffefffefffefffef;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0004001000100004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0004000400100010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0004001000100004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000400100010;
-+ __m256i_out = __lasx_xvclz_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ long_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x006018000000001a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0060401900000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x006018000000001a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0060401900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000006170;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000006170;
-+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000006170;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000006170;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000030b8;
-+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
-+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_d(__m256i_op0,-14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x19);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffeffff;
-+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000008000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000008000;
-+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000040000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000040000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000040000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000040000000000;
-+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffbfffcffeffff0;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffbfffcffeffff0;
-+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0400100004001000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0400100004001000;
-+ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000f0000000f;
-+ __m128i_out = __lsx_vclz_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d20227a78;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x132feeabd2d33b38;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000c0300000019a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0c08032100004044;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000265ffa5a6767;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0c08032100000000;
-+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x11);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_result[2]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_result[0]) = 0x132feea900000000;
-+ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x4393a0a5bc606060;
-+ *((unsigned long*)& __m256d_result[2]) = 0x43b32feea9000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x4393a0a5bc606060;
-+ *((unsigned long*)& __m256d_result[0]) = 0x43b32feea9000000;
-+ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000100;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000008000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000008000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000008000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000008000;
-+ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((int*)& __m256_result[7]) = 0xffffffff;
-+ *((int*)& __m256_result[6]) = 0xffffffff;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0xffffffff;
-+ *((int*)& __m256_result[2]) = 0xffffffff;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x04e8296f08181818;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x032feea900000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x04e8296f08181818;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x032feea900000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x296e000018170000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x296e000018170000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x04e8296f;
-+ *((int*)& __m256_op0[6]) = 0x18181818;
-+ *((int*)& __m256_op0[5]) = 0x132feea9;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x04e8296f;
-+ *((int*)& __m256_op0[2]) = 0x18181818;
-+ *((int*)& __m256_op0[1]) = 0x132feea9;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x5cbe15f2;
-+ *((int*)& __m256_result[6]) = 0x53261036;
-+ *((int*)& __m256_result[5]) = 0x559a674d;
-+ *((int*)& __m256_result[4]) = 0x7f800000;
-+ *((int*)& __m256_result[3]) = 0x5cbe15f2;
-+ *((int*)& __m256_result[2]) = 0x53261036;
-+ *((int*)& __m256_result[1]) = 0x559a674d;
-+ *((int*)& __m256_result[0]) = 0x7f800000;
-+ __m256_out = __lasx_xvfrsqrt_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000080;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
-+ __m128i_out = __lsx_vftintrph_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d20227a78;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x132feeabd2d33b38;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x9fe7fffffffff32e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x6040190ddfdd8587;
-+ *((unsigned long*)& __m256i_result[1]) = 0xecd011542d2cc4c7;
-+ *((unsigned long*)& __m256i_result[0]) = 0x6040190dffffffff;
-+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000030b8;
-+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
-+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0002000200020002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00020002000230ba;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00020002000230ba;
-+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x9fe7fffffffff32e;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x6040190ddfdd8587;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xecd011542d2cc4c7;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x6040190dffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotri_w(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000080;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x35);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f08181818;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x032feea900000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f08181818;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x032feea900000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000;
-+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000001;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000001;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x00000001;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x00000000;
-+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x4393a0a5;
-+ *((int*)& __m256_op0[6]) = 0xbc606060;
-+ *((int*)& __m256_op0[5]) = 0x43b32fee;
-+ *((int*)& __m256_op0[4]) = 0xa9000000;
-+ *((int*)& __m256_op0[3]) = 0x4393a0a5;
-+ *((int*)& __m256_op0[2]) = 0xbc606060;
-+ *((int*)& __m256_op0[1]) = 0x43b32fee;
-+ *((int*)& __m256_op0[0]) = 0xa9000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000001;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff0000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256d_op1[2]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256d_op1[0]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4393a0a5bc606060; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43b32feea9000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4393a0a5bc606060; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43b32feea9000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256i_op1[2]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256i_op1[0]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x04e8296f3c611818; -+ *((unsigned long*)& __m256i_result[2]) = 0x032eafee29010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x04e8296f3c611818; -+ *((unsigned long*)& __m256i_result[0]) = 0x032eafee29010000; -+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256d_op0[2]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256d_op0[0]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4084800000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x4084800000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_d(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256d_op0[2]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256d_op0[0]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f3c611818; -+ *((unsigned long*)& __m256i_op0[2]) = 0x032eafee29010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f3c611818; -+ *((unsigned long*)& __m256i_op0[0]) = 0x032eafee29010000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000ffffff; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000ffffff; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00b213181dff0607; -+ *((unsigned long*)& __m256i_result[2]) = 0x00e9a80114ff0001; -+ *((unsigned long*)& __m256i_result[1]) = 0x00b213181dff0607; -+ *((unsigned long*)& __m256i_result[0]) = 0x00e9a80114ff0001; -+ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_result[2]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_result[0]) = 0x00e9a80014ff0000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffff0001ffff0001; -+ *((unsigned 
long*)& __m256i_result[3]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff0001ffff0001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0001ffff0001; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256i_op0[2]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256i_op0[0]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0003000300030000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0003000300030000; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00010001; -+ *((int*)& __m128_op0[2]) = 0x00010001; -+ *((int*)& __m128_op0[1]) = 0x00010001; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000080; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_wu(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00010001; -+ *((int*)& __m128_op0[2]) = 0x00010001; -+ *((int*)& __m128_op0[1]) = 0x00010001; -+ *((int*)& __m128_op0[0]) = 0x00010001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0020010101610000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0061200000610000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0020010101610000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0061200000610000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000101000000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00011fff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000101000000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00011fff0000ffff; -+ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256i_op0[2]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3eab77367fff4848; -+ *((unsigned long*)& __m256i_op0[0]) = 0x408480007fff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000008; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000700000008; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00ff00ff; -+ *((int*)& __m256_op0[6]) = 0x00ff00ff; -+ *((int*)& __m256_op0[5]) = 0x00ff00ff; -+ *((int*)& __m256_op0[4]) = 0x00ff00ff; -+ *((int*)& __m256_op0[3]) = 0x00ff00ff; -+ *((int*)& __m256_op0[2]) = 0x00ff00ff; -+ *((int*)& __m256_op0[1]) = 0x00ff00ff; -+ *((int*)& __m256_op0[0]) = 0x00ff00ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000ffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0xff00000000ffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000ffffff; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vneg_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f7fff7f7f7fff7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7fff7f7f7fff7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f7fff7f7f7fff7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7fff7f7f7fff7f; -+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_result[2]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_result[1]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_result[0]) = 0x000408080c111414; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00200101; -+ *((int*)& __m256_op0[6]) = 0x01610000; -+ *((int*)& __m256_op0[5]) = 0x00612000; -+ *((int*)& __m256_op0[4]) = 0x00610000; -+ *((int*)& __m256_op0[3]) = 0x00200101; -+ *((int*)& __m256_op0[2]) = 0x01610000; -+ *((int*)& __m256_op0[1]) = 0x00612000; -+ *((int*)& __m256_op0[0]) = 0x00610000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3f8000003f800000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000408080c111414; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000408080c111414; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000408080c111414; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x24); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3e8000003e800000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3e8000003e800000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3e8000003e800000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3e8000003e800000; -+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00b2fe28e4420609; -+ *((unsigned long*)& __m256i_op0[2]) = 0x028da7fe15020000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00b2fe28e4420609; -+ *((unsigned long*)& __m256i_op0[0]) = 0x028da7fe15020000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000598; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000598; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x6d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000598; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000598; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002cc0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002cc0000; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x31); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xb6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xf039b8c0; -+ *((int*)& __m128_op0[2]) = 0xc61e81ef; -+ *((int*)& __m128_op0[1]) = 0x6db7da53; -+ *((int*)& __m128_op0[0]) = 0xfbd2e34b; -+ *((unsigned long*)& __m128i_result[1]) = 0x80000000ffffd860; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x02020102; -+ *((int*)& __m256_op0[6]) = 0x02020102; -+ *((int*)& __m256_op0[5]) = 0x02020102; -+ *((int*)& __m256_op0[4]) = 0x02020102; -+ *((int*)& __m256_op0[3]) = 0x02020102; -+ *((int*)& __m256_op0[2]) = 0x02020102; -+ *((int*)& __m256_op0[1]) = 0x02020102; -+ *((int*)& __m256_op0[0]) = 0x02020102; -+ *((int*)& __m256_op1[7]) = 0x3e800000; -+ *((int*)& __m256_op1[6]) = 0x3e800000; -+ *((int*)& __m256_op1[5]) = 0x3e800000; -+ *((int*)& __m256_op1[4]) = 0x3e800000; -+ *((int*)& __m256_op1[3]) = 0x3e800000; -+ *((int*)& __m256_op1[2]) = 0x3e800000; -+ *((int*)& __m256_op1[1]) = 0x3e800000; -+ *((int*)& __m256_op1[0]) = 0x3e800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000598; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000598; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x34); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000001c000000134; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000001c000000134; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000001c000000134; -+ *((unsigned long*)& __m256d_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000001c000000134; -+ *((unsigned long*)& __m256d_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000038000000268; -+ *((unsigned long*)& __m256d_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000038000000268; -+ *((unsigned long*)& __m256d_result[0]) = 0x7fff7fff7fff7fff; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x80000000; -+ *((int*)& __m128_op0[2]) = 0xffffd860; -+ *((int*)& __m128_op0[1]) = 0x7fffffff; -+ *((int*)& __m128_op0[0]) = 0x80000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; -+ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; -+ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0002000200010002; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x80000000ffffd860; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000; -+ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000038000000268; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000038000000268; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010101; -+ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x02020102; -+ *((int*)& __m256_op0[6]) = 0x02020102; -+ *((int*)& __m256_op0[5]) = 0x02020102; -+ *((int*)& __m256_op0[4]) = 0x02020102; -+ *((int*)& __m256_op0[3]) = 0x02020102; -+ *((int*)& __m256_op0[2]) = 0x02020102; -+ *((int*)& __m256_op0[1]) = 0x02020102; -+ *((int*)& __m256_op0[0]) = 0x02020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe400000707; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000af100001455; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe400000707; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000af100001455; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000408080c111414; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000408080c111414; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000408080c111414; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000038000000268; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000038000000268; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000408080c111414; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fe363637fe36363; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x63); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000038000000268; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000038000000268; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000001200000011a; -+ *((unsigned long*)& __m256i_result[2]) = 
0x2040204020402040; -+ *((unsigned long*)& __m256i_result[1]) = 0x000001200000011a; -+ *((unsigned long*)& __m256i_result[0]) = 0x2040204020402040; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000009e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000009e; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0101ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_result[2]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_result[1]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_result[0]) = 0xff81001dff9d003b; -+ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010002; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0002000200010002; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f1d7f7f7f1d7f3b; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f1d7f7f7f1d7f3b; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; -+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7f1d7f7f; -+ *((int*)& __m256_op0[6]) = 0x7f1d7f3b; -+ *((int*)& __m256_op0[5]) = 0x02020102; -+ *((int*)& __m256_op0[4]) = 0x02020102; -+ *((int*)& __m256_op0[3]) = 0x7f1d7f7f; -+ *((int*)& __m256_op0[2]) = 0x7f1d7f3b; -+ *((int*)& __m256_op0[1]) = 0x02020102; -+ *((int*)& __m256_op0[0]) = 0x02020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000010; -+ *((int*)& __m256_op0[6]) = 0x00000010; -+ *((int*)& __m256_op0[5]) = 0x00000010; -+ *((int*)& __m256_op0[4]) = 0x00000010; -+ *((int*)& __m256_op0[3]) = 0x00000010; -+ *((int*)& __m256_op0[2]) = 0x00000010; -+ *((int*)& __m256_op0[1]) = 0x00000010; -+ *((int*)& __m256_op0[0]) = 
0x00000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffffff00; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000001; -+ *((int*)& __m128_op1[2]) = 0x00000001; -+ *((int*)& __m128_op1[1]) = 0x00000001; -+ *((int*)& __m128_op1[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001ffff0101ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0103fefd0303fefd; -+ *((unsigned long*)& __m128i_result[0]) = 0x0103fefd0103fefd; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffefff00001000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffefff00001000; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000103030102ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000010102ffff; -+ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) 
= 0x0001000101010001; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; -+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_result[3]) = 0x40f23232330df9c8; -+ *((unsigned long*)& __m256i_result[2]) = 0x40f2323240f23232; -+ *((unsigned long*)& __m256i_result[1]) = 0x40f23232330df9c8; -+ *((unsigned long*)& __m256i_result[0]) = 0x40f2323240f23232; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0xffffffff; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010100000000; -+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vextl_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000101010015; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffed00010001; -+ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; -+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 
0x00000000; -+ *((int*)& __m256_op1[7]) = 0x02020102; -+ *((int*)& __m256_op1[6]) = 0x02020102; -+ *((int*)& __m256_op1[5]) = 0x02020102; -+ *((int*)& __m256_op1[4]) = 0x02020102; -+ *((int*)& __m256_op1[3]) = 0x02020102; -+ *((int*)& __m256_op1[2]) = 0x02020102; -+ *((int*)& __m256_op1[1]) = 0x02020102; -+ *((int*)& __m256_op1[0]) = 0x02020102; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000014; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000001400000000; -+ *((unsigned long*)& __m128d_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x1f81e3779b97f4a8; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7fe363637fe36364; -+ *((unsigned long*)& __m256d_op0[1]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7fe363637fe36364; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fe363637fe36364; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fe363637fe36364; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001ff8d8d8c000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00001ff8d8d90000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001ff8d8d8c000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00001ff8d8d90000; -+ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1f81e3779b97f4a8; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff02000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1f81e3779b97f4a8; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00e30064001a008f; -+ *((unsigned long*)& __m256i_result[2]) = 0x00e3006300e30063; -+ *((unsigned long*)& __m256i_result[1]) = 0x00e30064001a008f; -+ *((unsigned long*)& __m256i_result[0]) = 0x00e3006300e30063; -+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff02000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x1f81e3779b97f4a8; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff02000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000014; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000014; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xc3110000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xc3110000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ int_op0 = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xffffffff02000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000008; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000008; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000008; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000008; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000008; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000008; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000008; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000008; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000001; -+ *((int*)& __m256_op2[4]) = 0x00000001; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000001; -+ *((int*)& __m256_op2[0]) = 0x00000001; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x80000001; -+ *((int*)& __m256_result[4]) = 0x80000001; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x80000001; -+ *((int*)& __m256_result[0]) = 0x80000001; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101000101010001; -+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101000101010001; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fe36364661af18f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101000101010001; -+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0200000202000002; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000400010004; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000400010004; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0200000202000002; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0200000202000002; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0101000101010001; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00001ff8d8d8c000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00001ff8d8d90000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00001ff8d8d8c000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00001ff8d8d90000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001ff800000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xd8d8c00000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001ff800000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xd8d8c00000000000; -+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0101000101010001; -+ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; -+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; -+ *((unsigned long*)& __m256d_result[3]) = 0x4380100810101008; -+ *((unsigned long*)& __m256d_result[2]) = 0x4380100810101008; -+ *((unsigned long*)& __m256d_result[1]) = 0x4380100810101008; -+ *((unsigned long*)& __m256d_result[0]) = 0x4380100810101008; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00001ff800000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xd8d8c00000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00001ff800000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xd8d8c00000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00001ff8; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xd8d8c000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00001ff8; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xd8d8c000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x02020102; -+ *((int*)& __m256_op1[6]) = 0x02020102; -+ *((int*)& __m256_op1[5]) = 0x02020102; -+ *((int*)& __m256_op1[4]) = 0x02020102; -+ *((int*)& __m256_op1[3]) = 0x02020102; -+ *((int*)& __m256_op1[2]) = 0x02020102; -+ *((int*)& __m256_op1[1]) = 0x02020102; -+ *((int*)& __m256_op1[0]) = 0x02020102; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0007fff800000000; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001400000014; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000101010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000000010000; -+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0014001400140000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000554; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0014001400140000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001400000014; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001400000000; -+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00001ff8d8d8c000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00001ff8d8d90000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00001ff8d8d8c000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00001ff8d8d90000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00001ef8d8d8c000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00001ef8d8d80000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00001ef8d8d8c000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00001ef8d8d80000; -+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff2fff2fff2fff2; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff2fff2fff2fff2; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff2fff2fff2fff2; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff2fff2fff2fff2; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001400000014; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000053a4f452; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001400000014; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000001400000000; -+ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001400000014; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff9000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc000400000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0007001400000014; -+ *((unsigned long*)& __m128i_result[0]) = 0x0004001000000000; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000400010004; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000400010004; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000400010004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000400010004; -+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000053a4f452; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000053a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000400010004; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000400010004; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000e0001000e; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000e0001000e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000e0001000e; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0001000e0001000e; -+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000053a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0007001400000014; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0004001000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000053a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000700140000053a; -+ *((unsigned long*)& __m128i_result[0]) = 0x0004001000000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000e0001000e;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000e0001000e;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000e0001000e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000e0001000e;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000053a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfff9000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffc000400000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffc000400000000;
-+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000014;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000014;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0xfffc0004;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
-+ __m128d_out = __lsx_vflogb_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x0000000e;
-+ *((int*)& __m256_op1[6]) = 0x0000000e;
-+ *((int*)& __m256_op1[5]) = 0x0000000e;
-+ *((int*)& __m256_op1[4]) = 0x0000000e;
-+ *((int*)& __m256_op1[3]) = 0x0000000e;
-+ *((int*)& __m256_op1[2]) = 0x0000000e;
-+ *((int*)& __m256_op1[1]) = 0x0000000e;
-+ *((int*)& __m256_op1[0]) = 0x0000000e;
-+ *((int*)& __m256_result[7]) = 0x0000000e;
-+ *((int*)& __m256_result[6]) = 0x0000000e;
-+ *((int*)& __m256_result[5]) = 0x0000000e;
-+ *((int*)& __m256_result[4]) = 0x0000000e;
-+ *((int*)& __m256_result[3]) = 0x0000000e;
-+ *((int*)& __m256_result[2]) = 0x0000000e;
-+ *((int*)& __m256_result[1]) = 0x0000000e;
-+ *((int*)& __m256_result[0]) = 0x0000000e;
-+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvneg_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vneg_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010101;
-+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0080000700000014;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffbffda;
-+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000001;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffc000400000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00003fff00010000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmini_w(__m256i_op0,4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
-+ __m256d_out = __lasx_xvfrint_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00003fff00010000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00123fff00120012;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0012001200120012;
-+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00123fff00120012;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0012001200120012;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00123fff00120012;
-+ *((unsigned long*)& __m128i_result[0]) = 0x001200120017004c;
-+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0007000700070007;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_result[2]) = 0xf2f2f2f2f2f2f2f2;
-+ *((unsigned long*)& __m256i_result[1]) = 0xf3f3f3f3f3f3f3f3;
-+ *((unsigned long*)& __m256i_result[0]) = 0xf2f2f2f2f2f2f2f2;
-+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080;
-+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0xaa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00123fff00120012;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0012001200120012;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00003fff00010000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1200091212121212;
-+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffff03ffffff07;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffff03ffffff07;
-+ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1200091212121212;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1e);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000f0001000f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000f0001000d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000f0001000f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000f0001000d;
-+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000;
-+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000f0001000f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000f0001000d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000f0001000f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000f0001000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000010000000f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000000f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000010000000f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000000d;
-+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x55);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x51);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000008000000080;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x26);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x80000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x80000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskgez_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e000e000e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e000e000e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000e000e000e;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000e000e000e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000e0000000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000dfffffff1;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000cfffffff3;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000dfffffff1;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000cfffffff3;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x16);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
-+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000dfffffff1;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000cfffffff3;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000dfffffff1;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000cfffffff3;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000000d;
-+ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000;
-+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_h(__m256i_op0,2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000dfffffff1;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000cfffffff3;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000dfffffff1;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000cfffffff3;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x17);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ long_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
-+ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
-+ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00003f3f00003f3f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x56);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_h(__m128i_op0,-15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_w(__m128i_op0,-5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000000d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000000d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaxi_d(__m128i_op0,-5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssran_hu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)&
__m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff00ffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff00ffffffffff; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; -+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00008000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00008000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0400000004000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000400; -+ *((unsigned long*)& __m256i_result[1]) = 0x0400000004000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000400; -+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000008000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; -+ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ long_int_result = 0x0000000000000000; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, long_int_result, long_int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000000d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fffe0000000c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffe0000000c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000003ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000003ff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = 
__lasx_xvsle_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; -+ __m128i_out = __lsx_vextl_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_h(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; -+ __m256i_out = __lasx_xvmini_b(__m256i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f900000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f900000002; -+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0607ffff0607; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[3]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff8fffffff8ffff; -+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfff8fffffff8ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00f9f9f900000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00f9f9f900000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000faf3f3f2; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00f9f9f900000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00f9f9f900000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007cfcfd80000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007cfcfd80000001; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0607ffff0607; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff0607ffff0383; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0607ffffc0c1; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff0607ffff0383; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0607ffffc0c1; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000001000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007cfcfd80000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007cfcfd80000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000001000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0607ffff0607; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffb8579f; -+ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffcfffcfffcfffc; -+ 
*((unsigned long*)& __m256i_result[2]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffcfffcfffcfffc; -+ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5); -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffcfffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffcfffc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffcfffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffcfffc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003fff; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op2[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0008000000000000; -+ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001555; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000015554001c003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000001555; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000015554001c003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000304; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000030401010202; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000304; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000030401010202; -+ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0x00030005; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0x00030005; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vmini_w(__m128i_op0,8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffc001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000c000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffc001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000c000; -+ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x6d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffcfffcfffcfffc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x0000ffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x0000ffff; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x0000ffff; -+ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3e25c8317394dae6; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcda585aebbb2836a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xcda585aebbb2836a; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcda585aebbb2836a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; -+ *((unsigned long*)& __m128i_result[1]) = 0xd78cfd70b5f65d76; -+ *((unsigned long*)& __m128i_result[0]) = 0x5779108fdedda7e4; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3e25c8317394dae6; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0xfefeff00fefeff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xfefeff00fefeff00; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff000300030000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffc000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff000300030000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffc000; -+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc4cdfd16; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffcfffcfffc; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& 
__m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffdbbbcf; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffb8579f; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffdbbbcf; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffb8579f; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0xfff8579f; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0xfff8579f; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskgez_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128d_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xd78cfd70b5f65d77; -+ *((unsigned long*)& __m128i_result[0]) = 0x5779108fdedda7e5; -+ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x5b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000003fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003fff; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffdbbbcf; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffb8579f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00bb; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0057; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00bb; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0057; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfefeff00fefeff00; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfefeff00fefeff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[1]) = 0x00007e7e00007e7e; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007e7e00007e7e; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff8579f; -+ __m256i_out = 
__lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xd78cfd70b5f65d77; -+ *((unsigned long*)& __m128d_op1[0]) = 0x5779108fdedda7e5; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080800008; -+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x80808080; -+ *((int*)& __m128_op0[2]) = 0x80808080; -+ *((int*)& __m128_op0[1]) = 0x80808080; -+ *((int*)& __m128_op0[0]) = 0x80800008; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000000; -+ *((int*)& __m128_result[1]) = 0x80000000; -+ *((int*)& __m128_result[0]) = 0x80000000; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000b3a6000067da; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00004e420000c26a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x7a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f9f9f9f9; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000faf3f3f2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00bb; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0057; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff00bb; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0057; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffa003e; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffb009c; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffa003e; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffb009c; -+ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff8579f; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0007a861; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0007a861; -+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000b3a6000067da; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00004e420000c26a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd78cfd70b5f65d76; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5779108fdedda7e4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000b3a6000067da; -+ *((unsigned long*)& __m128i_result[0]) = 0x5779108f0000c26a; -+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vslei_w(__m128i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0000b3a6; -+ *((int*)& __m128_op0[2]) = 0x000067da; -+ *((int*)& __m128_op0[1]) = 0x00004e42; -+ *((int*)& __m128_op0[0]) = 0x0000c26a; -+ *((unsigned long*)& __m128d_result[1]) = 0x379674c000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x3789f68000000000; -+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff0007a861; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0007a861; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; -+ *((unsigned long*)& __m128i_result[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x379674c000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3789f68000000000; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080800008; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x975ca6046e2e4889; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1748c4f9ed1a5870; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080ffffffff8080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00008080ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xff80ffffffffff80; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff80ffffffff; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0x0007a861; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0x0007a861; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; 
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
-+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x379674c000000000;
-+ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfefeff00fefeff00;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfefeff00fefeff00;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00c0000000800000;
-+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff80ffffffffff80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff80ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffffe;
-+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
-+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff0007a861;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0007a861;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x379674c000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x379674c000000000;
-+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xff80ffffffffff80;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000ff80ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0007a861;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0007a861;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x975ca6046e2e4889;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1748c4f9ed1a5870;
-+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x6a);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff960001005b;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffa500010003;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0020000000000000;
-+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x2b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000003;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000003;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000003;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000003;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
-+ __m256i_out = __lasx_xvclz_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffee00ba;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffee00ba;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffee;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffee;
-+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000002001000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000008000020000;
-+ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff80ffffffffff80;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff80ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0800010001ff8000;
-+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffee00ba;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffee00ba;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00fffff500ba;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00fffff500ba;
-+ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_w(__m256i_op0,15);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0x0007a861;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0x0007a861;
-+ *((int*)& __m256_op1[7]) = 0x80008000;
-+ *((int*)& __m256_op1[6]) = 0x80008000;
-+ *((int*)& __m256_op1[5]) = 0x80008000;
-+ *((int*)& __m256_op1[4]) = 0xfff98000;
-+ *((int*)& __m256_op1[3]) = 0x80008000;
-+ *((int*)& __m256_op1[2]) = 0x80008000;
-+ *((int*)& __m256_op1[1]) = 0x80008000;
-+ *((int*)& __m256_op1[0]) = 0xfff98000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffee00ba;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffee00ba;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xefefefefefee00aa;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xefefefefefee00aa;
-+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0800010001ff8000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x1748c4f9ed1a5870;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x1748c4f9ed1a5870;
-+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x1748c4f9ed1a5870;
-+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_b(__m128i_op0,12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0x00000000000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
-+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000;
-+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x21);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000800080008000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x000000ff;
-+ *((int*)& __m256_op1[6]) = 0x000000ff;
-+ *((int*)& __m256_op1[5]) = 0x000000ff;
-+ *((int*)& __m256_op1[4]) = 0x000000ff;
-+ *((int*)& __m256_op1[3]) = 0x000000ff;
-+ *((int*)& __m256_op1[2]) = 0x000000ff;
-+ *((int*)& __m256_op1[1]) = 0x000000ff;
-+ *((int*)& __m256_op1[0]) = 0x000000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xd4bade5e2e902836;
-+ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0017004800c400f9;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ed001a00580070;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x800b7fe38062007b;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0076800d802c0037;
-+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
-+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffa003e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffb009c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffa003e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffb009c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
-+ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x1f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffee;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffee;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000;
-+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0xffffffff;
-+ *((int*)& __m256_op1[6]) = 0xffffffff;
-+ *((int*)& __m256_op1[5]) = 0xffff0000;
-+ *((int*)& __m256_op1[4]) = 0xffff0000;
-+ *((int*)& __m256_op1[3]) = 0xffffffff;
-+ *((int*)& __m256_op1[2]) = 0xffffffff;
-+ *((int*)& __m256_op1[1]) = 0xffff0000;
-+ *((int*)& __m256_op1[0]) = 0xffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6a5d5b056f2f4978;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x17483c07141b5971;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xd4bade5e2e902836;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x345002920f3017d6;
-+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
-+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff;
-+ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x345002920f3017d6;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
-+ __m256i_out = __lasx_xvreplve0_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvclz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x40fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_result[2]) = 0x40fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x40fe00fe00fe00fe;
-+ *((unsigned long*)& __m256i_result[0]) = 0x40fe00fe00fe00fe;
-+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff;
-+ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x2e9028362e902836;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x2e9028362e902836;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x345002920f3017d6;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffff7fffffff7;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff7fffffff7;
-+ __m128i_out = __lsx_vmini_w(__m128i_op0,-9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000002;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000002;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x34500292;
-+ *((int*)& __m128_op1[0]) = 0x0f3017d6;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000;
-+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[6]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[5]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[4]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[3]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[2]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[1]) = 0x00ff00ff;
-+ *((int*)& __m256_op0[0]) = 0x00ff00ff;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffff7fffffff7;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff7fffffff7;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcdcfcfcfcdc;
-+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x80fe80ff80fe00ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff80ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x80fe80ff80fe00ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff80ff;
-+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x67eb85afb2ebb000;
-+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x5252adadadadadad;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xadad52525252adad;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000adad0000adad;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000052520000adad;
-+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
-+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000adad0000adad;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000052520000adad;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff;
-+ __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0xca);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x67eb85afb2ebb000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x67eb85afb2ebb000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff7cffd6ffc700b0;
-+ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000080ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000080ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x08000000000000f8;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x08000000000000f8;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ long_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff;
-+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff7cffd6ffc700b0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x008300290038ff50;
-+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x08000000000000f8;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x08000000000000f8;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0200000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x2000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0200000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x2000000000000000;
-+ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x1e);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x7fff8000;
-+ *((int*)& __m256_op1[6]) = 0x7fff0000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00008000;
-+ *((int*)& __m256_op1[3]) = 0x7fff8000;
-+ *((int*)& __m256_op1[2]) = 0x7fff0000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00830029;
-+ *((int*)& __m128_op0[0]) = 0x0038ff50;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000;
-+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
-+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000000010;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
-+ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x1c);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff800000ff;
-+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0200000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x2000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0200000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x2000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000200000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff7fffffff7fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff7fffffff7fff;
-+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
-+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000080c000c080;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffe00000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffe00000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfefee00000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfefee00000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000014155445;
-+ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d9f5d800;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe4c23ffb002a3a22;
-+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfefee00000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfefee00000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfefee00000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfefee00000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffc0007ffe0002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000400000018002;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffc0007ffe0002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000400000018002;
-+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000;
-+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8100810081008100;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8100810081008100;
-+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff800000ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x800080ff800080ff;
-+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x33f5c2d7d9f5d800;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xe4c23ffb002a3a22;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
-+ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000000010;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
-+ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000400000003fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000400000003fff;
-+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x800080ff800080ff;
-+ __m256i_out = __lasx_xvreplve0_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcdcfcfcfcdc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcdcfcfcfcdc;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85af0000b000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
-+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x800080ff800080ff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000400080ffc080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080ff0080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000400080ffc080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080ff0080;
-+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4000c08000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000080c000c080;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000200000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000200000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000;
-+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x31);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+
*((unsigned long*)& __m128d_op1[1]) = 0x67eb85af0000b000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc07f8000c07f8000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc07f8000c07f8000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff01fe0; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff01fe0; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x2a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01fffe00000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01fffe00000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080ff0080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080ff0080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff000000000080; -+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff000000000080; -+ *((unsigned long*)& __m256d_result[3]) = 0x416fe00000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x4060000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x416fe00000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x4060000000000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128i_result[0]) = 0x387c7e0a133f2000; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080ff0080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080ff0080; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000400080ffc080; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x680485c8b304b019; -+ *((unsigned long*)& __m128i_result[0]) = 0xc89d7f0fed582019; -+ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff01fffe00000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff01fffe00000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x800080ff800080ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x800080ff800080ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x800080ff800080ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x800080ff800080ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfcfcfcdcfcfcfcdc; -+ *((unsigned long*)& __m128d_op0[0]) = 0xfcfcfcdcfcfcfcdc; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000000010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1000000010001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000003ddc5dac; -+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x3ddc5dac; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000001030103; -+ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffc606ec5; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000014155445; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vextrins_h(__m128i_op0,__m128i_op1,0x76); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000200000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000200000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x3fffbfff80000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00004000007f8000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x3fffbfff80000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00004000007f8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x387c7e0a133f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000800080010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000800080010000; -+ __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xfc606ec5; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x14155445; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x01030103; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x3fffbfff80000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00004000007f8000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x3fffbfff80000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00004000007f8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) 
= 0x680485c8b304b019; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc89d7f0fed582019; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_op2[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x387c7e0a133f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x680485c8b304b019; -+ *((unsigned long*)& __m128i_result[0]) = 0xc89d7f0ff90da019; -+ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000a95afc60a5c5; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000b6e414157f84; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000204264602444; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000266404046604; -+ __m128i_out = __lsx_vandi_b(__m128i_op0,0x66); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x8000800080008000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_op1[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x387c7e0a133f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; -+ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x387c7e0a133f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_result[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128i_result[0]) = 0x387c7e0a511b7dac; -+ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x680485c8b304b019; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc89d7f0ff90da019; -+ *((unsigned long*)& __m128i_op1[1]) = 0x680485c8b304b019; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc89d7f0ff90da019; -+ *((unsigned long*)& __m128i_result[1]) = 0x00680486ffffffda; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff913bfffffffd; -+ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f010000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f010000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f010100000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f010100000101; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x007f010000000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x007f010000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x007f010100000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007f010100000101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0008000000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0008000000000010; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x04000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x04000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x04000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x04000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00680486ffffffda; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff913bfffffffd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00680486ffffffda; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff913bfffffffd; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_result[1]) = 0x00680486ffffffda; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff913bb9951901; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0020006000200060; -+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000400080ffc080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff80ff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff80ff; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x67eb85af; -+ *((int*)& __m128_op0[2]) = 0xb2ebb000; -+ *((int*)& __m128_op0[1]) = 0xc8847ef6; -+ *((int*)& __m128_op0[0]) = 0xed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00680486; -+ *((int*)& __m128_op0[2]) = 0xffffffda; -+ *((int*)& __m128_op0[1]) = 0xffff913b; -+ *((int*)& __m128_op0[0]) = 0xb9951901; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x01030103; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000000; -+ *((int*)& __m128_op2[1]) = 0x00200060; -+ *((int*)& __m128_op2[0]) = 0x00200060; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0xffffffda; -+ *((int*)& __m128_result[1]) = 0xffff913b; -+ *((int*)& __m128_result[0]) = 0x001fed4d; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00080000; -+ *((int*)& __m256_op0[4]) = 0x00000010; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00080000; -+ *((int*)& __m256_op0[0]) = 0x00000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x7f010000; -+ *((int*)& __m256_op0[5]) = 0x00010000; -+ *((int*)& __m256_op0[4]) = 0x00007f7f; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x7f010000; -+ *((int*)& __m256_op0[1]) = 0x00010000; -+ *((int*)& __m256_op0[0]) = 0x00007f7f; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00680486ffffffda; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff913bb9951901; -+ *((unsigned long*)& __m128i_op1[1]) = 0x67157b5100005000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x387c7e0a133f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; -+ *((unsigned long*)& __m128i_result[0]) = 0x0c0f000a070f0204; -+ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x478b478b38031779; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6b769e690fa1e119; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001030103; -+ *((unsigned long*)& __m128i_result[1]) = 0x0047004700380017; -+ *((unsigned long*)& __m128i_result[0]) = 0x006bff9e0010ffe2; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x478b478b38031779; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6b769e690fa1e119; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; -+ 
__m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; -+ *((unsigned long*)& __m128i_result[1]) = 0x67ebb2ebc884ed3f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ddc; -+ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000103; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x39); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; -+ long_int_result = 0x000000003ddc5dac; -+ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6b75948a91407a42; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0b5471b633e54fde; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000004870ba0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; -+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000004870ba0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x3f80000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3f80000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x4efffe00; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x47000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x4efffe00; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x47000000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000017fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000017fff; -+ __m256i_out = 
__lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xfffffffc; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xfffffffc; -+ *((int*)& __m128_op1[3]) = 0x00000001; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000103; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000017fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000017fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x04870ba0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x478b478b38031779; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6b769e690fa1e119; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe98c2a0; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x80ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff00ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x80ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff8000fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00017fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff8000fffe; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00017fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f00fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000fe0000007f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f00fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000fe0000007f; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000103; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ 
*((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010; -+ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000103; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000100000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000103; -+ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; -+ *((unsigned long*)& __m128i_op2[1]) = 0x8000000100000000; -+ *((unsigned long*)& __m128i_op2[0]) = 
0x8000000000000103; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010300000103; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010300000000; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000047000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000047000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; -+ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; -+ *((unsigned long*)& __m128i_result[0]) = 0xe4423f7b769f8ffe; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000ff00; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0xfffffffffffffffc; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe4423f7b769f8ffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fe96fe95; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6afc01000001ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fe96fe95; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6afc01000001ff00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000ff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x7e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x33f5c2d7d975d7fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff010000ff01; -+ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff010000ff01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000047000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000047000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000956a00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000956a00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x007fffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xb500000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x007fffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xb500000000000000; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x29); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000956a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000956a; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xb500000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xb500000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x007fffffffff9569; -+ *((unsigned long*)& __m256i_result[2]) = 0xb50000004efffe00; -+ *((unsigned long*)& __m256i_result[1]) = 0x007fffffffff9569; -+ *((unsigned long*)& __m256i_result[0]) = 0xb50000004efffe00; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000956a; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000956a; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000000000956a; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000000000956a; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000000004efffe00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000057348fe3; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000057348fe3; -+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ long_int_result = 0x000000000000ffff; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); -+ *((int*)& __m256_op0[7]) = 0x0000ff01; -+ *((int*)& __m256_op0[6]) = 0x00ff0000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000ff01; -+ *((int*)& __m256_op0[3]) = 0x0000ff01; -+ *((int*)& __m256_op0[2]) = 0x00ff0000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000ff01; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x67eb85b0b2ebb001; -+ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x67eb85b0; -+ *((int*)& __m128_op0[2]) = 0xb2ebb001; -+ *((int*)& __m128_op0[1]) = 0xc8847ef6; -+ *((int*)& __m128_op0[0]) = 
0xed3f2000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; -+ unsigned_int_result = 0x0000000000100010; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x38); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; -+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& 
__m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xa87745dbd93e4ea1; -+ *((unsigned long*)& __m128i_op1[0]) = 0xaa49601e26d39860; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001f0000001f; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000020; -+ *((int*)& __m128_op1[2]) = 0x00000020; -+ *((int*)& __m128_op1[1]) = 0x0000001f; -+ *((int*)& __m128_op1[0]) = 0x0000001f; -+ *((int*)& __m128_result[3]) = 0x00000020; -+ *((int*)& __m128_result[2]) = 0x00000020; -+ *((int*)& __m128_result[1]) = 0x0000001f; -+ *((int*)& __m128_result[0]) = 0x0000001f; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x33eac9fdca42f660; -+ *((unsigned long*)& __m128i_op0[0]) = 0xaa472d26fe867091; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x33eac9fdca42f660; -+ *((unsigned long*)& __m128i_op1[0]) = 0xaa472d26fe867091; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff5; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xc0c0c0c0c0c0c0c0; -+ *((unsigned long*)& __m128i_result[0]) = 0xc0c0c0c0c0c0c0c0; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000008000001e; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff5; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vmskltz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; -+ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000008000001e; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe1; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff7fffffe2; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000008000001e; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000200000001b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000000; -+ __m128i_out = __lsx_vclz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0xd48acbfe13102acf; -+ *((unsigned long*)& __m128i_result[0]) = 0xf4af70d0c4000000; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x67eb8590b2ebafe1; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000001f00000000; -+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00100010; -+ 
*((int*)& __m256_op1[5]) = 0x00100010; -+ *((int*)& __m256_op1[4]) = 0x00100010; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00100010; -+ *((int*)& __m256_op1[1]) = 0x00100010; -+ *((int*)& __m256_op1[0]) = 0x00100010; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000020; -+ *((int*)& __m128_op1[2]) = 0x00000020; -+ *((int*)& __m128_op1[1]) = 0x0000001f; -+ *((int*)& __m128_op1[0]) = 0x0000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000200000001b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd400c02000002acf; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf4000020c4000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x6453f5e01d6e5000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000fdec000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001f0000001f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4000000040000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6453f5e01d6e5000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000fdec000000000; -+ int_result = 0x000000001d6e5000; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x801dd5cb0004e058; -+ *((unsigned long*)& __m128i_op0[0]) = 0x77eb15638eeb5fc2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000200000001b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000004e03d; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000008eeb5fc2; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; -+ __m256i_out = 
__lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0000ff01; -+ *((int*)& __m256_op0[6]) = 0x00ff0000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000ff01; -+ *((int*)& __m256_op0[3]) = 0x0000ff01; -+ *((int*)& __m256_op0[2]) = 0x00ff0000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000ff01; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000808; -+ *((int*)& __m256_op1[4]) = 0x00000808; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000808; -+ *((int*)& __m256_op1[0]) = 0x00000808; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000001ffe00000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000001ffe00000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) 
= 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff010ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff010ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000201; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000201; -+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffac0a000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x801d5de0000559e0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x77eb86788eebafe1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffac00000000; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffac0a000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ac00000000; -+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff010ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff010ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x801d5de0000559e0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x77eb86788eebaf00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x2e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffac0a000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000200000001b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffac0a000000; -+ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fff000000000; -+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff010ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff010ff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ff0100ff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x6f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000ac00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffac0a000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffac0a000000; -+ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffac0a000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000085af0000b000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00017ea200002000; -+ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000001; -+ *((int*)& __m256_op1[6]) = 0xffe00000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000001; -+ *((int*)& __m256_op1[2]) = 0xffe00000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000085af0000b000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00017ea200002000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; -+ *((unsigned long*)& __m128i_result[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_result[0]) = 0x377b810912c0e000; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000007; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffefffe; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00ff00ff; -+ *((int*)& __m256_op0[6]) = 0x00ff00ff; -+ *((int*)& __m256_op0[5]) = 0x00ff00ff; -+ *((int*)& __m256_op0[4]) = 0x00ff00ff; -+ *((int*)& __m256_op0[3]) = 0x00ff00ff; -+ *((int*)& __m256_op0[2]) = 0x00ff00ff; -+ *((int*)& __m256_op0[1]) = 0x00ff00ff; -+ *((int*)& __m256_op0[0]) = 0x00ff00ff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffefffe00000000; -+ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000085af0000b000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00017ea200002000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; -+ __m128i_out = __lsx_vmini_d(__m128i_op0,-9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfffffffffffffff7; -+ *((unsigned long*)& __m128d_result[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128d_result[0]) = 0x377b810912c0e000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_result[0]) = 0x377b810912c0e000; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x98147a4f4d144fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x377b810812c0dfff; -+ *((unsigned long*)& __m128i_result[1]) = 0x98137a4d4d144fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x377a810612c0dfff; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x5a57bacbd7e39680; -+ *((unsigned long*)& __m128i_op2[0]) = 0x6bae051ffed76001; -+ *((unsigned long*)& __m128i_result[1]) = 0xf3eb458161080000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffe9454286c0e000; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5a57bacbd7e39680; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6bae051ffed76001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf3e6586b60d7b152; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf7077b934ac0e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4e3e133738bb47d2; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4e3e133738bb47d2; -+ *((unsigned long*)& __m128i_result[1]) = 0xff98007a004d0050; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff9ff4a0057000e; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; -+ __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080805; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080805; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xf3e6586b; -+ *((int*)& __m128_op0[2]) = 0x60d7b152; -+ *((int*)& __m128_op0[1]) = 0xf7077b93; -+ *((int*)& __m128_op0[0]) = 0x4ac0e000; -+ *((int*)& __m128_op1[3]) = 0x1498507a; -+ *((int*)& __m128_op1[2]) = 0x144d0050; -+ *((int*)& __m128_op1[1]) = 0x7b370981; -+ *((int*)& __m128_op1[0]) = 0xc01200e0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x000001fffdfffdff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000001fffdfffdff; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x43); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0080000000800000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0080000000800000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0080000000800000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x4e3e1337; -+ *((int*)& __m128_op0[0]) = 0x38bb47d2; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0x41e80000; -+ *((int*)& __m128_result[0]) = 0xc1600000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000001fffdfffdff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000001fffdfffdff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010101010101; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010101010101; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000700000004fdff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000300000000fdff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffff7f8c; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x98147a504d145000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x377b810912c0e000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080805; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080805; -+ *((unsigned long*)& __m128i_result[1]) = 0x0020002000200020; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0020002000200014; -+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x7ff80000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x7ff80000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x7ff80000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x7ff80000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4e3e133738bb47d2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x9c7c266e71768fa4; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000700000004fdff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000300000000fdff; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff7fffefffa01ff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffbfffefffe01ff; -+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff6ff4ffff8db8; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffbaf4ffffb805; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfff4ffb800ff0080; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000005; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000005; -+ *((int*)& __m128_op1[3]) = 0xfffefffe; -+ *((int*)& __m128_op1[2]) = 0xfffefffe; -+ *((int*)& __m128_op1[1]) = 0xfffefffe; -+ *((int*)& __m128_op1[0]) = 0xfffefffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 
0x00000040; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000040; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfff7fffe; -+ *((int*)& __m128_op0[2]) = 0xfffa01ff; -+ *((int*)& __m128_op0[1]) = 0xfffbfffe; -+ *((int*)& __m128_op0[0]) = 0xfffe01ff; -+ *((int*)& __m128_result[3]) = 0xfff7fffe; -+ *((int*)& __m128_result[2]) = 0xfffa01ff; -+ *((int*)& __m128_result[1]) = 0xfffbfffe; -+ *((int*)& __m128_result[0]) = 0xfffe01ff; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000700000004fdff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000300000000fdff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0006fff20003fff8; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002fffa00000000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff7fffefffa01ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffbfffefffe01ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0305030203020502; -+ *((unsigned long*)& __m128i_result[0]) = 0x0301030203020502; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4e3e13368c17f6e6; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; -+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefe01010101; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefe01010101; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefe01010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefe01010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefe01010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefe01010101; -+ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcfcfcfcfcfd; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0305030203020502; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0301030203020502; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000003050302; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000003010302; -+ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x03050302; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x03010302; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xfefefefe; -+ *((int*)& __m256_op0[4]) = 0x01010101; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xfefefefe; -+ *((int*)& __m256_op0[0]) = 0x01010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefe3f800000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x40404040; -+ *((int*)& __m256_op1[6]) = 0x40404040; -+ *((int*)& __m256_op1[5]) = 0x40404040; -+ *((int*)& __m256_op1[4]) = 0x40404040; -+ *((int*)& __m256_op1[3]) = 0x40404040; -+ *((int*)& __m256_op1[2]) = 0x40404040; -+ *((int*)& __m256_op1[1]) = 0x40404040; -+ *((int*)& __m256_op1[0]) = 0x40404040; -+ *((int*)& __m256_result[7]) = 0x40404040; -+ *((int*)& __m256_result[6]) = 0x40404040; -+ *((int*)& __m256_result[5]) = 0x40404040; -+ *((int*)& __m256_result[4]) = 0x40404040; -+ *((int*)& __m256_result[3]) = 0x40404040; -+ *((int*)& __m256_result[2]) = 0x40404040; -+ *((int*)& __m256_result[1]) = 0x40404040; -+ *((int*)& __m256_result[0]) = 0x40404040; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000071768fa4; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0404000004040000; -+ __m256i_out = __lasx_xvslli_w(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000040; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000040; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x40404040; -+ *((int*)& __m256_op2[6]) = 0x40404040; -+ *((int*)& __m256_op2[5]) = 0x40404040; -+ *((int*)& __m256_op2[4]) = 0x40404040; -+ *((int*)& __m256_op2[3]) = 0x40404040; -+ *((int*)& __m256_op2[2]) = 0x40404040; -+ 
*((int*)& __m256_op2[1]) = 0x40404040; -+ *((int*)& __m256_op2[0]) = 0x40404040; -+ *((int*)& __m256_result[7]) = 0xc0404040; -+ *((int*)& __m256_result[6]) = 0xc0404040; -+ *((int*)& __m256_result[5]) = 0xc0404040; -+ *((int*)& __m256_result[4]) = 0xc0404040; -+ *((int*)& __m256_result[3]) = 0xc0404040; -+ *((int*)& __m256_result[2]) = 0xc0404040; -+ *((int*)& __m256_result[1]) = 0xc0404040; -+ *((int*)& __m256_result[0]) = 0xc0404040; -+ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_int_result = 0x0000000000000000; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x3); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000fe0000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000fe0000000; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfc0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe0e0e0e0e0e0e0e0; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefeffe0e0e0; -+ *((unsigned long*)& __m256i_result[1]) = 0xe0e0e0e0e0e0e0e0; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefeffe0e0e0; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0xe0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000404; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000404; -+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfcfcfcfcfcfc0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0404000004040000; -+ *((unsigned long*)& __m256i_result[3]) = 0x4000400040004000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4000400040004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x4000400040004000; -+ *((unsigned long*)& __m256i_result[0]) = 0x4000400040004000; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040004000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040004000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000040004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000040004000; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000404; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000404; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000020202000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000020202000; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000404; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000404; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[2]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[0]) = 0x0404040404040404; -+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xe0e0e0e0e0e0e0e0; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfefefefeffe0e0e0; -+ *((unsigned long*)& __m256d_op0[1]) = 0xe0e0e0e0e0e0e0e0; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfefefefeffe0e0e0; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000040004000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000040004000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; -+ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xfcfcfcfcfcfcfcfd; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcfcfcfc0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffcfcfcfc; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffcfc6080; -+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x9c7c266e3faa293c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_b(__m128i_op0,0xe); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e3faa293c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00009c7c; -+ *((int*)& __m128_op0[0]) = 0x00007176; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftinth_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xf3040705; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xf3040705; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0xf3040705; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0404040404040404; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003f800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003f800000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7c7c000000007176; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x3e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x40404040; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x40404040; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0xfefefefe; -+ *((int*)& __m256_op1[4]) = 0x3f800000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0xfefefefe; -+ *((int*)& __m256_op1[0]) = 0x3f800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; -+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000f3040705; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 
0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7c7c000000007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001f1f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfe01fe01fd02fd02; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fc03fc0; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xfe01fe01fd02fd02; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000003fc03fc0; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7c7c9c0000007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7c7c9c0000007176; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f1f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000001f1f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000404; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000404; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7c7c9c0000007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x00ff000000001f1f; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7c7c9c0000007176; -+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); 
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfe01fe01fc01fc01; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe01fe01fc01fc01; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000003fc03bbc; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fd02fd02; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fd02fd02; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; -+ *((unsigned long*)& __m256i_result[3]) = 0xfe01fe017e81fd02; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000003fc001fe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfe01fe017e81fd02; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000003fc001fe; -+ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfc01fc0101fe01dd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfc01fc0101fe01dd; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc5c53492f25acbf2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f3040705; -+ *((unsigned long*)& __m128i_result[1]) = 
0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_result[0]) = 0xc5c534920000c4ed; -+ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x41cfe01dde000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x41cfe01dde000000; -+ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xff000000001f1f00; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; -+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fc01fc01; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fc01fc01; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfc01000000003fc0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfc01000000003fc0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc5c53492f25acbf2; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff000000001f1f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_result[0]) = 0xc5c53492f25acbf2; -+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x30); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfe01fe01; -+ *((int*)& __m256_op0[6]) = 0x7e81fd02; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x3fc001fe; -+ *((int*)& __m256_op0[3]) = 0xfe01fe01; -+ *((int*)& __m256_op0[2]) = 0x7e81fd02; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x3fc001fe; -+ *((int*)& __m256_op1[7]) = 0xfe01fe01; -+ *((int*)& __m256_op1[6]) = 0x7e81fd02; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x3fc001fe; -+ *((int*)& __m256_op1[3]) = 0xfe01fe01; -+ *((int*)& __m256_op1[2]) = 0x7e81fd02; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x3fc001fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfe01fe017e81fd02; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000003fc001fe; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfe01fe017e81fd02; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000003fc001fe; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x41cfe01dde000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x41cfe01dde000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x41cfe01dde000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x41cfe01dde000000; -+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x41cfe01dde000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x41cfe01dde000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000013fc03bbc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000013fc03bbc; -+ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000001010100; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000405; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000001010100; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000405; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000001010100; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000405; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000001010100; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000405; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x01010100; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000405; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x01010100; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000405; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x01010100; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000405; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x01010100; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000405; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x01010100; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0x00000405; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x01010100; -+ *((int*)& __m256_result[1]) = 0x80000000; -+ *((int*)& __m256_result[0]) = 0x00000405; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fd02fd02; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fd02fd02; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3f00c0003f00c000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3f00c0003f00c000; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; -+ *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_result[0]) = 0xc5c534920000c4ed; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffc01fc01; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03bbc; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffe00fe00; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000001fe01dde; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffe00fe00; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000001fe01dde; -+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; -+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000001010100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000405; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000001010100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000405; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe00000ffe00000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe00000ffe00000; -+ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xc2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffe00000ffe00000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffe00000ffe00000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0x8000000001010100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000405; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000001010100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000405; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xf6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000202; -+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xedededededededed; -+ *((unsigned long*)& __m128i_result[0]) = 0xedededededededed; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00800080; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000202; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00800080; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000202; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00800080; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000202; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00800080; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000202; -+ __m256_out = 
__lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000009c007c00; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000071007600; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; -+ __m128i_out = 
__lsx_vbitrevi_w(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000010000000100; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000010000000100; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x1fa0000000080000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000009c007c00; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000071007600; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000009000900; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000009000900; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000009000900; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000009000900; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; -+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xd454545454545454; -+ *((unsigned long*)& __m128i_result[0]) = 0xd454545454545454; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x54); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1f60010000080100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1f60010000080100; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe010000fd02; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe010000fd02; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfe01fe010000fd02; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfe01fe010000fd02; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03fc0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f807f80; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f807f80; -+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_d(__m256i_op0,15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1fa0000000080000; -+ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007fffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007fffff; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000009000900; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000009000900; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000009000900; -+ *((unsigned long*)& 
__m128d_op2[0]) = 0x0000000009000900; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000009000900; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000009000900; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffc000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffeff000c057c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffc000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffeff000c057c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000f0f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000f0f0; -+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000202; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000202; -+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfrint_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0606060606060606; -+ *((unsigned long*)& __m128i_result[0]) = 0x0606060606060606; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000f0f0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000f0f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000f0f0; -+ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000f0f0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000f0f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007878; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007878; -+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1f60010000080100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1f60010000080100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1f60010000080100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1f60010000080100; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007878; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007878; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000107878; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000107878; -+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x80000000; -+ 
*((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xff88ff88; -+ *((int*)& __m256_op0[3]) = 0x80000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xff88ff88; -+ *((int*)& __m256_op1[7]) = 0xfe01fe01; -+ *((int*)& __m256_op1[6]) = 0x0000fd02; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x3fc03fc0; -+ *((int*)& __m256_op1[3]) = 0xfe01fe01; -+ *((int*)& __m256_op1[2]) = 0x0000fd02; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x3fc03fc0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1fa0000000080000; -+ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x1fa0000000080000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x000000003ddc5dac; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0010001000100010; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0010001000107878; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0010001000107878; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00800080; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000202; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00800080; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000202; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0xff88ff88; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0xff88ff88; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& 
__m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80000000; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0xffc8ff88; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000000; -+ *((int*)& __m256_result[1]) = 0x80000000; -+ *((int*)& __m256_result[0]) = 0xffc8ff88; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x56a09e662ab46b31; -+ *((unsigned long*)& __m128d_op1[0]) = 0xb4b8122ef4054bb3; -+ *((unsigned long*)& __m128d_result[1]) = 0xd6a09e662ab46b31; -+ *((unsigned long*)& __m128d_result[0]) = 0x34b8122ef4054bb3; -+ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31; -+ *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3; -+ *((unsigned long*)& __m128i_result[1]) = 0xd6e09e262af46b71; -+ *((unsigned long*)& __m128i_result[0]) = 0x34f8126ef4454bf3; -+ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ffc8ff88; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ffc8ff88; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001ff91ff100000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_result[0]) 
= 0x0001ff91ff100000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x56a09e662ab46b31; -+ *((unsigned long*)& __m128i_op1[0]) = 0xb4b8122ef4054bb3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4b47edd10bfab44d; -+ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001ff91ff100000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001ff91ff100000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000800080; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000202; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffff7fff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001ff91ff0ffdfe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffff7fff80; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0001ff91ff0ffdfe; -+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007f7f; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000a0008; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31; -+ *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xeb504f33155a3598; -+ *((unsigned long*)& __m128i_result[0]) = 0x1a5c0917fa02a5d9; -+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrph_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78; -+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x000000007f807f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff5fff7; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff5fff7; -+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78; -+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x000000007f433c78; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c78; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c79; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c79; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007f8000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007f8000; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f8000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000029; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000029; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000029; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000029; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000029; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000007f8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x7b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c9c9c9c9c; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0x4e4e4e4e00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff00; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x477f0000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x477f0000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; -+ __m256i_out = 
__lasx_xvsubi_bu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000f788f788; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_d(__m256i_op0,14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0xbff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xbff0000000000000; -+ __m128d_out = __lsx_vffint_d_l(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op0[2]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op0[1]) 
= 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op0[0]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6c6c6; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c6c6c6; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op0[2]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op0[1]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op0[0]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ long_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3131313131313131; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3131313131313131; -+ __m128i_out = __lsx_vextl_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31; -+ *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9b509be72f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3513f2e3a1774d2c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000501ffff0005; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xc6c6c6c6; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xc6c6c6c6; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0xc6c6c6c6; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0xc6c6c6c6; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ 
__m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9ca19d509ae734; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd1b09480f2123460; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffeff98; -+ *((unsigned long*)& __m128i_result[0]) = 0x0014ffe4ff76ffc4; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131; -+ *((unsigned long*)& __m128i_result[1]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0313100003131000; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000600000006; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000f788f788; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x007f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f00000000; -+ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffeff98; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0014ffe4ff76ffc4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; -+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000501ffff0005; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000600000001; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffeff98; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0014ffe4ff76ffc4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3131313131313131; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff7fcc; -+ *((unsigned long*)& __m128i_result[0]) = 0x18a3188b9854187b; -+ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000600000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6c6c6; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c6c6c6; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000c6c7; -+ *((unsigned long*)& __m128i_result[0]) = 0x8d8d8d8d8d8cc6c6; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000c6c6c6c6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6c6c6c6; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffeff98; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0014ffe4ff76ffc4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x3131313131313131; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000c6c7; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8d8d8d8d8d8cc6c6; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x3c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x31313131; -+ *((int*)& __m128_op0[0]) = 0x31313131; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x31313131; -+ *((int*)& __m128_op1[0]) = 0x31313131; -+ *((int*)& __m128_op2[3]) = 0x00000000; -+ *((int*)& __m128_op2[2]) = 0x00000008; -+ *((int*)& __m128_op2[1]) = 0x00000000; -+ *((int*)& __m128_op2[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x80000000; -+ *((int*)& __m128_result[2]) = 0x80000008; -+ *((int*)& __m128_result[1]) = 0xa2f54a1e; -+ *((int*)& __m128_result[0]) = 0xa2f54a1e; -+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa2f54a1ea2f54a1e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x00004a1e00004a1e; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000013; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000013; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00004a1e00004a1e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000100; -+ *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_d(__m128i_op0,14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvsrli_b(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x80000000; -+ *((int*)& __m128_op0[2]) = 0x80000008; -+ *((int*)& __m128_op0[1]) = 0xa2f54a1e; -+ *((int*)& __m128_op0[0]) = 0xa2f54a1e; -+ *((int*)& __m128_op1[3]) = 0x80000000; -+ *((int*)& __m128_op1[2]) = 0x80000008; -+ *((int*)& __m128_op1[1]) = 0xa2f54a1e; -+ *((int*)& __m128_op1[0]) = 0xa2f54a1e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000013; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000013; -+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200000; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x6a); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200008; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200000; -+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffed; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000008; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200008; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffff00ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff00ffff; -+ __m128i_out = __lsx_vslei_b(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 
0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; -+ __m256i_out = __lasx_xvfclass_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff88ff88; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff88ff88; -+ __m256i_out = 
__lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffffe; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffff0078ffff0078; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffff0078ffff0078; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ int_result = 0xffffffffffffffff; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x3); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff8;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x8d78336c83652b86;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x39c51f389c0d6112;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffff0001ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff9b0082;
-+ *((unsigned long*)& __m128i_result[0]) = 0x003a0037fff2fff8;
-+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000201fe01fc;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000201fe01fc;
-+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8d78336c83652b86;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x39c51f389c0d6112;
-+ int_result = 0xffffffff9c0d6112;
-+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8d78336c83652b86;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x39c51f389c0d6112;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000001ce28f9c0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000004e06b0890;
-+ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ce28f9c0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000004e06b0890;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfefefefdbffefdfe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefefeeffef7fefe;
-+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff7300000ca00430;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001a00000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200;
-+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0101010240010202;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe00;
-+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7feff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcffbdfcfffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcedfcf5fcfd;
-+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0001fffe0001fffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000f0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff88ff88ff880000;
-+ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffe00;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsat_b(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7fefe;
-+ int_op1 = 0xffffffff9c0d6112;
-+ *((unsigned long*)& __m128i_result[1]) = 0xbffefdfebffefdfe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xbffefdfebffefdfe;
-+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ffff88ff88;
-+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7fefe;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfef7fefebffefdfe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfefefefdfefefeef;
-+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x2d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
-+ __m256i_out = __lasx_xvclz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
-+ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fefc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001fefc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002;
-+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0003000100010001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001fefc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0006000100040001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00010002ffff0105;
-+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffc0;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff0ffc0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffc0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0ffc0;
-+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000040;
-+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffc0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff0ffc0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffc0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff0ffc0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff78ffc0;
-+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0001fffe0001fefc;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0007000000050000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0003000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ffff88ff88;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000800000000000;
-+ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x2f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0006000100040001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00010002ffff0105;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vslti_w(__m128i_op0,15);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x28);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xff88ff88ff880000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000800000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0100010001000101;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0100010001000101;
-+ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100040;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256d_result[3]) = 0x00000000ff890000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x00000000ff790000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x00000000ff890000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x00000000ff790000;
-+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslei_d(__m128i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
-+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
-+ __m256i_out = __lasx_xvclz_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100040;
-+ unsigned_int_result = 0x0000000000000040;
-+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x6);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff890000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff790000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff890000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff790000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff790000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff790000;
-+ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000bffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x6d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0080000100200001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0008000200020002;
-+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000060002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000060002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xe4c8b96e2560afe9;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc001a1867fffa207;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000c0010000a186;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00067fff0002a207;
-+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
-+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000020ff790020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000020ff790020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
-+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffcfffffffc;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffcfffffffc;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffcfffffffc;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffcfffffffc;
-+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe4c8b96e2560afe9;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc001a1867fffa207;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xe4c8b96e2560afe9;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc001a1867fffa207;
-+ *((unsigned long*)& __m128i_result[1]) = 0xe2560afe9c001a18;
-+ *((unsigned long*)& __m128i_result[0]) = 0xe2560afe9c001a18;
-+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x24);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
-+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe2560afe9c001a18;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe2560afe9c001a18;
-+ *((unsigned long*)& __m128i_result[1]) = 0x89582bf870006860;
-+ *((unsigned long*)& __m128i_result[0]) = 0x89582bf870006860;
-+ __m128i_out = __lsx_vslli_w(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000020ff790020;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000020ff790020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
-+ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xa5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
-+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200;
-+ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x89582bf870006860;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x89582bf870006860;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x94);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000087;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000087;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0xff800000;
-+ *((int*)& __m256_result[6]) = 0xff800000;
-+ *((int*)& __m256_result[5]) = 0xc30e0000;
-+ *((int*)& __m256_result[4]) = 0xff800000;
-+ *((int*)& __m256_result[3]) = 0xff800000;
-+ *((int*)& __m256_result[2]) = 0xff800000;
-+ *((int*)& __m256_result[1]) = 0xc30e0000;
-+ *((int*)& __m256_result[0]) = 0xff800000;
-+ __m256_out = __lasx_xvflogb_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
-+ __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000c0010000a186;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00067fff0002a207;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0002;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x05fafe0101fe000e;
-+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100080;
-+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100040;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff8900000040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff8900000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000c0010000a186;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00067fff0002a207;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e;
-+ unsigned_int_result = 0x000000000000857a;
-+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x4);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_result[3]) = 0x001a001a001a009a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x001a001a002a009a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x001a001a001a009a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x001a001a002a009a;
-+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x001a001a001a009a;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x001a001a002a009a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x001a001a001a009a;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x001a001a002a009a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001a000000da;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001a000000da;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001a000000da;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001a000000da;
-+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xe2560afe9c001a18;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xe2560afe9c001a18;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000d82;
-+ *((unsigned long*)& __m128i_result[0]) = 0x046a09ec009c0000;
-+ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xc30e0000ff800000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xc30e0000ff800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xc3030000ff800000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xc3030000ff800000;
-+ __m256i_out = __lasx_xvmini_b(__m256i_op0,3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000d82;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x046a09ec009c0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x046a09ec009c0000;
-+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffff8900000040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff8900000040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
-+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-16);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000600007fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000008ffffa209;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016;
-+ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000016;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000011;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xc);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000600007fff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000008ffffa209;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x046a09ec009c0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslei_d(__m128i_op0,-14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000000;
-+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x1);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000600007fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000008ffffa209;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000600007fff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000008ffffa209;
-+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040;
-+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x046a09ec;
-+ *((int*)& __m128_op0[0]) = 0x009c0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff7a86;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffe01fff2;
-+ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100080;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000006d;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000010006d;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000006d;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000010006d;
-+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040;
-+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_op2[3]) = 0x00000000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x00000000;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x80000000;
-+ *((int*)& __m128_result[2]) = 0x80000000;
-+ *((int*)& __m128_result[1]) = 0x80000000;
-+ *((int*)& __m128_result[0]) = 0x80000000;
-+ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x80000000;
-+ *((int*)& __m128_op0[2]) = 0x80000000;
-+ *((int*)& __m128_op0[1]) = 0x80000000;
-+ *((int*)& __m128_op0[0]) = 0x80000000;
-+ *((int*)& __m128_op1[3]) = 0x000000ff;
-+ *((int*)& __m128_op1[2]) = 0x0000857a;
-+ *((int*)& __m128_op1[1]) = 0x05fafe01;
-+ *((int*)& __m128_op1[0]) = 0x01fe000e;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x000000000000006d;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x000000000010006d;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x000000000000006d;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x000000000010006d;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000040;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256d_result[3]) = 0x00000000000000ad;
-+ *((unsigned long*)& __m256d_result[2]) = 0x00000000001800ad;
-+ *((unsigned long*)& __m256d_result[1]) = 0x00000000000000ad;
-+ *((unsigned long*)& __m256d_result[0]) = 0x00000000001800ad;
-+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0x00000006;
-+ *((int*)& __m128_op1[2]) = 0x00007fff;
-+ *((int*)& __m128_op1[1]) = 0x00000008;
-+ *((int*)& __m128_op1[0]) = 0xffffa209;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x0000006d;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x0010006d;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x0000006d;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x0010006d;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00080040;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00080040;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00080040;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00080040;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00080040;
-+ *((int*)& __m256_result[5]) = 0x00000000;
-+ *((int*)& __m256_result[4]) = 0x0010006d;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00080040;
-+ *((int*)& __m256_result[1]) = 0x00000000;
-+ *((int*)& __m256_result[0]) = 0x0010006d;
-+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000080;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000080;
-+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0xd);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff88ffc0;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff78ffc0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001ff1;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000001ff1;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x53);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xc3030000ff800000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xff800000ff800000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xc3030000ff800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000ff0000857a;
-+ *((unsigned long*)& __m128i_result[0]) = 0x05fafe0101fe000e;
-+ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out =
__lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x010101010101016c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101410128; -+ *((unsigned long*)& __m256i_result[1]) = 0x010101010101016c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101410128; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_op2[3]) = 0xcd636363; -+ *((int*)& __m128_op2[2]) = 0xcd636363; -+ *((int*)& __m128_op2[1]) = 0xcd636363; -+ *((int*)& __m128_op2[0]) = 0xcd636363; -+ 
*((int*)& __m128_result[3]) = 0xcd636363; -+ *((int*)& __m128_result[2]) = 0xcd636363; -+ *((int*)& __m128_result[1]) = 0xcd636363; -+ *((int*)& __m128_result[0]) = 0xcd636363; -+ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0xcd636363; -+ *((int*)& __m128_op1[2]) = 0xcd636363; -+ *((int*)& __m128_op1[1]) = 0xcd636363; -+ *((int*)& __m128_op1[0]) = 0xcd636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ long_op1 = 0x0000000000000040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; -+ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000008002d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000008002d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000000; -+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010000080040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000080040; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000007f00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x2e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x00cd006300cd0063; -+ *((unsigned long*)& __m128i_result[0]) = 0x00cd006300cd0063; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000080040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000080040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010000080040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000080040; -+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000010006d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000010006d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000800400010006d; -+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000000010000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000cd630000cd63; -+ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffcd63ffffcd63; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffd765ffffd765; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffcd63ffffcd63; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffd765ffffd765; -+ *((unsigned long*)& __m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; -+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000100080; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000100080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000080040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000080040; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff8ffc0; -+ 
*((unsigned long*)& __m256i_result[0]) = 0x0000ff00fff8ffc0; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x2d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1f1f1f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1f1f1f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x1f1f1f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_op2[0]) = 0x1f1f1f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_result[1]) = 0x00081f1f1f1f1f1f; -+ *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; -+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_result[1]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128i_result[0]) = 0xf359f359f359f359; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x93); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00010000; -+ *((int*)& __m128_op0[2]) = 0x00010000; -+ *((int*)& __m128_op0[1]) = 0x0000cd63; -+ *((int*)& __m128_op0[0]) = 0x0000cd63; -+ *((int*)& __m128_op1[3]) = 0xffffcd63; -+ *((int*)& __m128_op1[2]) = 0xffffcd63; -+ *((int*)& __m128_op1[1]) = 0xffffd765; -+ *((int*)& __m128_op1[0]) = 0xffffd765; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000048; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000048; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000800000010; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ff40; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff0100090040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ff40; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff0100090040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0001000000010000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000cd630000cd63; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffff00000000ffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffff00000000ffff; -+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800000010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002000000; -+ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00081f1f; -+ 
*((int*)& __m128_op0[2]) = 0x1f1f1f1f; -+ *((int*)& __m128_op0[1]) = 0x1f1f1f1f; -+ *((int*)& __m128_op0[0]) = 0x1f1f1f1f; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000cd630000cd63; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000329d0000329d; -+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff80000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff80000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff80000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff80000; -+ __m256i_out = 
__lasx_xvpackod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fff8ffc0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ff00fff8ffc0; -+ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x82); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x1c083b1f3b1f3b1f; -+ *((unsigned long*)& __m128d_op0[0]) = 0xf244b948a323ab42; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8fff8; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fff8ff40; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff0100090040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fff8ff40; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ff0100090040; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffefff80; -+ __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_b(__m128i_op0,-12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00010000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00010000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x02000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x02000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000002000000; -+ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x43); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc3030000ff800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc3030000ff800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003cfc0000006f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003cfc0000006f; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000800400010006d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0008001c0010001c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0008001c0010001c; -+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000010; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000010; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x86dd8341b164f12b; -+ *((unsigned long*)& __m128i_result[0]) = 0x9611c3985b3159f5; -+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0200000002000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0200000002000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff01fb0408; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff01fb0408; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00003cfc0000006f; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00003cfc0000006f; -+ *((unsigned long*)& __m256i_result[3]) = 0x02007f8002000400; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000c5dc02005f64; -+ *((unsigned long*)& __m256i_result[1]) = 0x02007f8002000400; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000c5dc02005f64; -+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ff40; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff0100090040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ff40; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff0100090040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff02; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff02; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x86dd8341b164f12b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9611c3985b3159f5; -+ *((unsigned long*)& __m128i_result[1]) = 0x86dd8341b164f12b; -+ *((unsigned long*)& __m128i_result[0]) = 0x9611c3985b3159f5; -+ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x86dd8341b164f12b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9611c3985b3159f5; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000035697d4e; -+ 
*((unsigned long*)& __m128i_result[0]) = 0x000000013ecaadf2; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128d_op0[0]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xfff8ff40; -+ *((int*)& __m256_op0[5]) = 0x0000ff01; -+ *((int*)& __m256_op0[4]) = 0x00090040; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xfff8ff40; -+ *((int*)& __m256_op0[1]) = 0x0000ff01; -+ *((int*)& __m256_op0[0]) = 0x00090040; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001700000017; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001700000017; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001700000017; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000010; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x9d9d9d9d9d9d9d8d; -+ *((unsigned long*)& __m256i_result[2]) = 0x9d9d9d9d9d9d9d9d; -+ *((unsigned long*)& __m256i_result[1]) = 0x9d9d9d9d9d9d9d8d; -+ *((unsigned long*)& __m256i_result[0]) = 0x9d9d9d9d9d9d9d9d; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x62); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf359f359f359f359; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff359f358; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffff359f358; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x86dd8341b164f12b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9611c3985b3159f5; 
-+ *((unsigned long*)& __m128i_result[1]) = 0x0021b761002c593c; -+ *((unsigned long*)& __m128i_result[0]) = 0x002584710016cc56; -+ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x86dd8341; -+ *((int*)& __m128_op1[2]) = 0xb164f12b; -+ *((int*)& __m128_op1[1]) = 0x9611c398; -+ *((int*)& __m128_op1[0]) = 0x5b3159f5; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x86dd8341b164f12b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9611c3985b3159f5; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff86dd83ff9611c3; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffefff7f00100080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffefff7f00100080; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff01fb0408; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff01fb0408; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0021b761002c593c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x002584710016cc56; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0200000002000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x02000000fdffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0200000002000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x02000000fdffffff; -+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_result[0]) = 0xf9796558e39953fd; -+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000d3259a; -+ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0200000002000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x02000000fdffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0200000002000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x02000000fdffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000004ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000004ffffffff; -+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff86dd83ff9611c3; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000035697d4e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000013ecaadf2; -+ *((unsigned long*)& __m128i_result[1]) = 0xe280e67f00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f80; -+ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff01fb0408; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffff01fb0408; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0xf2b180c9fc1fefdc; -+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf2b180c9fc1fefdc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000002ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000002ff; -+ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfsqrt_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ef; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ef; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000155b200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000b70000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000016e00; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000035697d4e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000013ecaadf2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000000000155b200; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000b70000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000016e00; -+ __m256i_out = 
__lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x000002ff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x000002ff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x000002ff; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x000002ff; -+ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00016e00; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00016e00; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffff359f358; -+ *((unsigned long*)& __m128d_op1[0]) = 0xfffffffff359f358; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fff000; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000029170; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000029170; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001fff000; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4ee376188658d85f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5728dcc85ac760d2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4e1d76187a58285f; -+ *((unsigned long*)& __m128i_result[0]) = 0x572824385a39602e; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0021b761002c593c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x002584710016cc56; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000001e03; -+ *((unsigned long*)& __m128i_result[1]) = 0x0021b761002c593c; -+ *((unsigned long*)& __m128i_result[0]) = 0x002584710016ea59; -+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_result[0]) = 0xf9796558e39953fd; -+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000001fff000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffdfff80; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffdfff80; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001e03; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000011e04; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffdfff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffdfff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; -+ *((unsigned long*)& __m256i_result[3]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_result[2]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_result[1]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_result[0]) = 0xffdfff80ffdfff80; -+ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xd5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffdfff80ffdfff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x000000000000ff00; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff359f358; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffff359f358; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff00ff00; -+ __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000d3460001518a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000084300000e55f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000016; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000029170; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000029170; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000029170; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fff000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000029170; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff03ff; -+ *((unsigned 
long*)& __m256i_result[2]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000203ff; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000100; -+ *((int*)& __m256_op0[5]) = 0x00000002; -+ *((int*)& __m256_op0[4]) = 0xff910072; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000100; -+ *((int*)& __m256_op0[1]) = 0x00000002; -+ *((int*)& __m256_op0[0]) = 0xff910072; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000001fff0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000feff0001ffb8; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fff0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000feff0001ffb8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; -+ *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; -+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000feff0001ffb8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000feff0001ffb8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000016; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffd5d5ffffd5d6; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffd5d5ffffd5d6; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_result[1]) = 0x001a64b345308091; -+ *((unsigned long*)& __m128i_result[0]) = 0x001f2f2cab1c732a; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4d0a902890b800dc; -+ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff03fe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffec75c2d209f; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff03fe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffec75c2d209f; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff6361; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4d0a902890b800dc; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffb2f600006f48; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000014414104505; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1011050040004101; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000014414104505; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1011050040004101; -+ *((unsigned long*)& __m128i_result[1]) = 0x1010111105050000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4040000041410101; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001a64b345308091; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001f2f2cab1c732a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000014414104505; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1011050040004101; -+ *((unsigned long*)& __m128i_result[1]) = 0x001a323b5430048c; -+ *((unsigned long*)& __m128i_result[0]) = 0x008f792cab1cb915; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1010111105050000; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x4040000041410101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000808000020200; -+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x2d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001ff03ff; -+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1010111105050000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4040000041410101; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000110011; -+ *((unsigned long*)& __m128i_result[0]) = 0x0005000500000000; -+ __m128i_out = __lsx_vexth_h_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001a323b5430048c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x008f792cab1cb915; -+ *((unsigned long*)& __m128i_result[1]) = 0x001a323b00ffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x008f792c00ffffff; -+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000feff0001ffb8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000feff0001ffb8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x11); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff03ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000203ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fafe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fafe; -+ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff03fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffec75c2d209f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff03fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffec75c2d209f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff03fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffec75c2d209f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff03fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffec75c2d209f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000001ff000003fe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000001ff000003fe; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x00000000000000ff; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, 
__m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000808000020200; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff8000020000; -+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffb3430a; -+ *((int*)& __m256_op0[4]) = 0x006ed8b8; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffb3430a; -+ *((int*)& __m256_op0[0]) = 0x006ed8b8; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x000001ff; -+ *((int*)& __m256_op1[4]) = 0x000003fe; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x000001ff; -+ *((int*)& __m256_op1[0]) = 0x000003fe; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x000000ff; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x000000ff; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xfff3430a; -+ *((int*)& __m256_result[4]) = 0x000000ff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xfff3430a; -+ *((int*)& __m256_result[0]) = 0x000000ff; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000001; -+ *((int*)& __m256_op0[6]) = 0x00000001; -+ *((int*)& __m256_op0[5]) = 0xffffb2f6; -+ *((int*)& __m256_op0[4]) = 0x00006f48; -+ *((int*)& __m256_op0[3]) = 0x00000001; -+ *((int*)& __m256_op0[2]) = 0x00000001; -+ *((int*)& __m256_op0[1]) = 0xffffb2f6; -+ *((int*)& __m256_op0[0]) = 0x00006f48; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x000000ff; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x000000ff; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001a64b345308091; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001f2f2cab1c732a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1baf8eabd26bc629; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1c2640b9a8e9fb49; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002dab8746acf8e; -+ *((unsigned long*)& __m128i_result[0]) = 0x00036dd1c5c15856; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256i_result[3]) = 0x4000400140004001; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffff2f640006f48; -+ *((unsigned long*)& __m256i_result[1]) = 0x4000400140004001; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffff2f640006f48; -+ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000aa822a8228222; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03aa558ec8546eb6; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001a64b345308091; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001f2f2cab1c732a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0155ffff754affff; -+ *((unsigned long*)& __m128i_result[0]) = 0x034cffff03e5ffff; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3fd1000000000000; -+ __m256i_out = __lasx_xvldi(-943); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001e03; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001a64b345308091; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001f2f2cab1c732a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000780c00000; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op2[2]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op2[0]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000100000001; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000100000001; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffb2f600006f48; -+ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3fd1000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff000000ff000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff000000ff000000; -+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffb2f600006f48; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000008c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000008c; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1baf8eabd26bc629; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1c2640b9a8e9fb49; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002dab8746acf8e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00036dd1c5c15856; -+ *((unsigned long*)& __m128i_result[1]) = 0x1bb1686346d595b7; -+ *((unsigned long*)& __m128i_result[0]) = 0x1c29ad8a6daa539f; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffffffff; -+ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000006de1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5f9ccf33cf600000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x41f0000000000000; -+ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000001; -+ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000006de1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5f9ccf33cf600000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000007; -+ *((unsigned long*)& __m128i_result[0]) = 0x0007000700070000; -+ __m128i_out = __lsx_vsat_hu(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x000aa822a79308f6; -+ *((unsigned long*)& __m128d_op1[0]) = 0x03aa558e1d37b5a1; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000008c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000008c; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000008b; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff010000008b; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fafe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fafe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 
0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000008c; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000008c; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x0000008c; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x0000008c; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000118; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000118; -+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000008c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000008c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001180000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001180000000; -+ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000aa822a79308f6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03aa558e1d37b5a1; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff80fd820000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000aa822a79308f6; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000084d12ce; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000008b; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff010000008b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x000aa822; -+ *((int*)& __m128_op0[2]) = 0xa79308f6; -+ *((int*)& __m128_op0[1]) = 0x03aa355e; -+ *((int*)& __m128_op0[0]) = 0x1d37b5a1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000118; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000118; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000aa822a79308f6; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000024170000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000118; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000118; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000aa822a79308f6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000aa822a79308f6; -+ *((unsigned long*)& __m128i_op1[0]) = 0x03aa558e1d37b5a1; -+ *((unsigned long*)& __m128i_result[1]) = 0x00155044ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x03aa558e2584c86f; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0002de46; -+ *((int*)& __m128_op0[2]) = 0x682de060; -+ *((int*)& __m128_op0[1]) = 0x09b50da6; -+ 
*((int*)& __m128_op0[0]) = 0xe67b8fc0; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x084d12ce; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x24170000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000020300000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000044470000; -+ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x56); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000118; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000118; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x2e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000044470000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00004dce00004700; -+ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0000fafe; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0000fafe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; 
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vclz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000044470000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000042ab41; -+ *((unsigned long*)& __m128i_result[0]) = 0xb1b1b1b1b16f0670; -+ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0b4c600000000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000042ab41; -+ *((unsigned long*)& __m128i_op0[0]) = 0xb1b1b1b1b16f0670; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000042ab41; -+ *((unsigned long*)& __m128i_result[0]) = 0xb1b1b1b1b16f0670; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000042ab41; -+ *((unsigned long*)& __m128i_op0[0]) = 0xb1b1b1b1b16f0670; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000044470000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0b4c600000000002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; 
-+ *((unsigned long*)& __m128i_result[0]) = 0x0004280808080808; -+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0xa4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x084d12ce; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x24170000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000044470000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001ffff0001ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0004280808080808; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010203030201000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000808080800; -+ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ff0000ffff; -+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000000ff0000ffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x03fbfffc03fc07fc; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x03fbfffc03fc07fc; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000404040; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; -+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x03fbfffc03fc07fc; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x03fbfffc03fc07fc; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff80000000; -+ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000001fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0000ffff; -+ *((int*)& __m256_op0[6]) = 0x0000ffff; -+ *((int*)& __m256_op0[5]) = 0x0000ffff; -+ *((int*)& __m256_op0[4]) = 0x0000ffff; -+ *((int*)& __m256_op0[3]) = 0x0000ffff; -+ *((int*)& __m256_op0[2]) = 0x0000ffff; -+ *((int*)& __m256_op0[1]) = 0x0000ffff; -+ *((int*)& __m256_op0[0]) = 0x0000ffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out 
= __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x03fbfffc03fc07fc; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x03fbfffc03fc07fc; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000ffff0000ffff; -+ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000404040; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x08080807f7f7f7f8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x08080805f5f5f5f8; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; -+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffff00; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffff0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000001ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x000000000001ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x08080807f5f5f5f8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_result[1]) = 0x04040403fafafafc; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff80; -+ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7efefefe80ffffff; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x08080807f5f5f5f8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0202f5f80000ff00; -+ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 
0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xffff0000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffe0000000; -+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ *((unsigned long*)& __m128i_op1[1]) = 0x04040403fafafafc; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff80; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x007efffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff80fffffffffffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x007efffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff80fffffffffffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0202f5f80000ff00; -+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& 
__m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0xffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffbfff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3f7f7f7f407fffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3f7f7f7f407fffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fdfdfe; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe0000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fdfdfe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffe; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7ffe0001fffe0001; -+ *((unsigned long*)& __m256i_result[2]) = 0x7ffe0001fffeffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fdfdfe; -+ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0x34); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; -+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x36); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffff8000; 
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7efefefe80ffffff; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_w(__m256i_op0,4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4079808280057efe; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x007ffcfcfd020202; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x004000800080007e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000fc00fd0002; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100c00000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x26); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe0001fffe0001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ffe0001fffeffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fdfdfe; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000017f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f03030000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f03030000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000017f00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f03030000; -+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x37); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ long_op0 = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; -+ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; -+ int_op1 = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffff0000; -+ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00020006; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00020006; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00020006; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00020006; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x37b0003000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x37b0003000000000; -+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffe045fffffeff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffff7d; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000017f00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f03030000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmskltz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_h(__m128i_op0,3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_result[0]) = 0x5252525252525252; -+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256d_op1[2]) = 0x4079808280057efe; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x007ffcfcfd020202; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; -+ *((unsigned 
long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0fffffff0fffffff; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff7fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x90007fff90008000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0ffffffe90008000; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffff8000; -+ *((int*)& __m256_op0[5]) = 0x7efefefe; -+ *((int*)& __m256_op0[4]) = 0x80ffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x7efefefe; -+ *((int*)& __m256_op0[0]) = 0x80ffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x07ffffff07ffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07ffffff08000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x07ffffff08000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x207f207f207f2000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000207f2000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x207f207f207f2000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000207f2000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[3]) = 0xdf80df80df80dfff; -+ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffdf80dfff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; -+ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80dfff; -+ *((unsigned long*)& __m256i_op1[2]) = 
0x8080808080808080; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffdf80dfff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_b(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000290; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000290; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff003fffc0; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000003fffc0; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x90007fff90008000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0ffffffe90008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x87ffffff87ffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xc880bfffc880c080; -+ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_result[0]) = 0x87ffffffc880c080; -+ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000290; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000290; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; -+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xdf80df80df80dfff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffdf80dfff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffc00fffffc00; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffc00fffffc00; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 
0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x7f800000; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000101; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000010100000101; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ff007f007f00; -+ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2e2b34ca59fa4c88; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3b2c8aefd44be966; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x2e34594c3b000000; -+ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000101; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2e2b34ca59fa4c88; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3b2c8aefd44be966; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00ff007f007f00; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,-5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffc00fffffc00; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffc00fffffc00; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[2]) = 0xc03fc03fc03fc03f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_result[0]) = 0xc03fc03fc03fc03f; -+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x3a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc03fc03fc03fc03f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc03fc03fc03fc03f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000002d; -+ *((unsigned long*)& __m256i_result[2]) = 0xc02dc02dc02dc02d; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000002d; -+ *((unsigned long*)& __m256i_result[0]) = 0xc02dc02dc02dc02d; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xed); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2e34594c3b000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x002e0059003b0000; -+ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256d_op0[2]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256d_op0[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256d_op0[0]) = 
0xff00ff007f007f00; -+ *((unsigned long*)& __m256d_op1[3]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256d_op1[2]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256d_op1[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m256d_op1[0]) = 0xff00ff007f007f00; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x001a001a001a001a; -+ *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a001a; -+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x000000ff; -+ *((int*)& __m256_op0[4]) = 0x000000ff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x000000ff; -+ *((int*)& __m256_op0[0]) = 0x000000ff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000101; -+ *((int*)& __m256_op1[4]) = 0x00000101; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000101; -+ *((int*)& __m256_op1[0]) = 0x00000101; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7ff80000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x7ff80000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x2e34594c; -+ *((int*)& __m128_op0[0]) = 0x3b000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x800000ff000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x800000ff000000ff; -+ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; -+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001a001a001a001a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001a001a001a001a; -+ *((unsigned long*)& __m128i_result[1]) = 
0x001a001a001a000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a000b; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xe9e9e9e9e9e9e9e9; -+ *((unsigned long*)& __m128i_result[0]) = 0x171d423524e9e9e9; -+ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x002e0059003b0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000005c000000b2; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000007600000000; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x017001a002c80260; -+ *((unsigned long*)& __m128i_result[0]) = 0x01d8000000000000; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x000000ff; -+ *((int*)& __m256_op0[4]) = 0x000000ff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x000000ff; -+ *((int*)& __m256_op0[0]) = 0x000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100; -+ __m256i_out = __lasx_xvfclass_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x017001a002c80260; -+ *((unsigned long*)& __m128i_op0[0]) = 0x01d8000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2e34594c3b000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00feff0100feff01; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00feff0100feff01; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; -+ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000005c000000b2; -+ *((unsigned long*)& __m128i_op1[0]) 
= 0x0000007600000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000002e34594c; -+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff000000ff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x90007fff90008000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0ffffffe90008000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x4800408ef07f7f01; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0800000eeffffe02; -+ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000010000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000010000000; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; -+ *((unsigned 
long*)& __m128i_result[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff800000ff800000; -+ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00feff0100feff01; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00feff0100feff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000010000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000010000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff8000010f800000; -+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff8000010f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fff80000; -+ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001a001a001a000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001a001a001a000b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001a001a001a000b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001a001a001a000b; -+ *((unsigned long*)& __m128i_result[1]) = 0x001a001a001a0008; -+ *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a000b; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff8000010f800000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000007; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; -+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000002d; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc02dc02dc02dc02d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000002d; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc02dc02dc02dc02d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; -+ __m256d_out = __lasx_xvflogb_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff8000010f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ff8000010f78; -+ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x002a001a001a000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000002a001a; -+ *((unsigned long*)& __m128i_result[0]) = 0x001a000b00000000; -+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x78); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x001a001a; -+ *((int*)& __m128_op0[2]) = 0x001a0008; -+ *((int*)& __m128_op0[1]) = 0x001a001a; -+ *((int*)& __m128_op0[0]) = 0x001a000b; -+ *((int*)& __m128_op1[3]) = 0xffffffff; -+ *((int*)& __m128_op1[2]) = 0xffffffff; -+ *((int*)& __m128_op1[1]) = 0xff800001; -+ *((int*)& __m128_op1[0]) = 0x0f800000; -+ *((int*)& __m128_op2[3]) = 0xff800000; -+ *((int*)& __m128_op2[2]) = 0xff800000; -+ *((int*)& __m128_op2[1]) = 0xff800000; -+ *((int*)& __m128_op2[0]) = 0xff800000; -+ *((int*)& __m128_result[3]) = 0xffffffff; -+ *((int*)& __m128_result[2]) = 0xffffffff; -+ *((int*)& __m128_result[1]) = 0xffc00001; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000002a001a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001a000b00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe000ffffffffff; -+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff8000010f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; -+ *((unsigned long*)& __m128i_result[0]) = 0xff80000a0f800009; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x002a001a001a000b; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x002a001a001a000b; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xff800001; -+ *((int*)& __m128_op0[0]) = 0x0f800000; -+ *((int*)& __m128_op1[3]) = 0x00000009; -+ *((int*)& __m128_op1[2]) = 0x00000009; -+ *((int*)& __m128_op1[1]) = 0xff80000a; -+ *((int*)& __m128_op1[0]) = 0x0f800009; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xff7fff80; -+ *((int*)& __m128_op0[2]) = 0xff800001; -+ *((int*)& 
__m128_op0[1]) = 0xe593d844; -+ *((int*)& __m128_op0[0]) = 0xe593c8c4; -+ *((int*)& __m128_op1[3]) = 0xff800000; -+ *((int*)& __m128_op1[2]) = 0xff800000; -+ *((int*)& __m128_op1[1]) = 0xe593c8c4; -+ *((int*)& __m128_op1[0]) = 0xe593c8c4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff8000010f78; -+ *((unsigned long*)& __m128i_op1[1]) = 0x002a001a001a000b; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001a0000000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff8000010f78; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff7f0080ff7ef088; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0010001000030000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0010001000030000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0010001000030000; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x0000ffff; -+ *((int*)& __m256_op0[4]) = 0x0000ffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x0000ffff; -+ *((int*)& __m256_op0[0]) = 0x0000ffff; -+ *((int*)& __m256_op1[7]) = 0x00100010; -+ *((int*)& __m256_op1[6]) = 0x00030000; -+ *((int*)& __m256_op1[5]) = 0x00100010; -+ *((int*)& __m256_op1[4]) = 0x00030000; -+ *((int*)& __m256_op1[3]) = 0x00100010; -+ *((int*)& __m256_op1[2]) = 0x00030000; -+ *((int*)& __m256_op1[1]) = 0x00100010; -+ *((int*)& __m256_op1[0]) = 0x00030000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; -+ int_op1 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff800000ff800000; -+ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x002a001a001a000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000002a001a; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000001a000b; -+ __m128i_out = __lsx_vexth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0010001000030000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000030000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0010001000030000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000030000; -+ *((int*)& __m256_result[7]) = 0x49800080; -+ *((int*)& __m256_result[6]) = 0x48400000; -+ *((int*)& __m256_result[5]) = 0x49800080; -+ *((int*)& __m256_result[4]) = 0x48400000; -+ *((int*)& __m256_result[3]) = 0x49800080; -+ *((int*)& __m256_result[2]) = 0x48400000; -+ *((int*)& __m256_result[1]) = 0x49800080; -+ *((int*)& __m256_result[0]) = 0x48400000; -+ __m256_out = __lasx_xvffint_s_w(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001a0000000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_b(__m128i_op0,15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8080000080800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x9380c4009380c400; -+ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000001a0000000b; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000080000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; -+ __m256i_out = __lasx_xvslei_h(__m256i_op0,-8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffc00001ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x003ffffe00800000; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003ffffe00800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff810001ff810002; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f804000ff810001; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x003ffffe00800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000034; -+ __m128i_out = __lsx_vmskltz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000002a001a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000001a000b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xe593c8c4e593c8c4; -+ *((unsigned long*)& __m128d_result[1]) = 0x805ffffe01001fe0; -+ *((unsigned long*)& __m128d_result[0]) = 0x9a49e11102834d70; -+ __m128d_out = __lsx_vfrecip_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffff800300000000; -+ 
*((unsigned long*)& __m256i_op2[1]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffff800300000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800300000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800300000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff800300000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff800300000000; -+ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_d(__m256i_op0,-3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffff801000000010; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffff800300000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffff801000000010; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffff800300000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; -+ *((unsigned long*)& __m256d_result[0]) 
= 0xffffffffe0000000; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x805ffffe01001fe0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9a49e11102834d70; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8144ffff01c820a4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x9b2ee1a4034b4e34; -+ *((unsigned long*)& __m128i_result[1]) = 0xff1affff01001fe0; -+ *((unsigned long*)& __m128i_result[0]) = 0xff1aff6d02834d70; -+ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x841f000fc28f801f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x007c0000003e0080; -+ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x003ffffe00800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8144ffff01c820a4; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x9b2ee1a4034b4e34; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff80c400000148; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff80c1ffffe8de; -+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff800300000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff801000000010; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff800300000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000cc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000cc; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xc1bdceee242070dc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe907b754d7eaa478; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff1affff01001fe0; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff1aff6d02834d70; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f800d007f803680; -+ *((unsigned long*)& __m128i_result[0]) = 0x0100418026803800; -+ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffee; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffef; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffee; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff1affff01001fe0; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff1aff6d02834d70; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000034; -+ *((unsigned long*)& __m128i_result[1]) = 0xfe1bfefe00011ee1; -+ *((unsigned long*)& __m128i_result[0]) = 0xfe1bfe6c03824c60; -+ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000c040c0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000c040c0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000034; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80c400000148; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff80c1ffffe8de; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000148; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000034; -+ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x841f000fc28f801f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x107c003c083c007c; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffe00000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffe00000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000007f8; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000002de; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000007f8; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000002de; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000007f7; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffff808; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000007f7; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffff808; -+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000c040c0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000c040c0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe000ffffffff08; -+ *((unsigned long*)& __m256i_result[1]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe000ffffffff08; -+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff1afffefec0ec85; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff1aff6d48ce567f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80c400000148; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff80c1ffffe8de; -+ *((unsigned long*)& __m128i_result[1]) = 0xffe3ffd8ffe30919; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; -+ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x841f000fc28f801f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x841f000fc28f801f; -+ *((unsigned long*)& __m128i_op2[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xe593c8c4e593c8c4; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x76ecfc8b85ac78db; -+ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x001c001c001c001c; -+ *((unsigned long*)& __m256i_result[2]) = 0x001c001c001c001c; -+ *((unsigned long*)& __m256i_result[1]) = 0x001c001c001c001c; -+ *((unsigned long*)& __m256i_result[0]) = 0x001c001c001d001d; -+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffff08; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffff08; -+ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0fffffff0fffffff; -+ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 
0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff0fffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0fffffff10000006; -+ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01017f3c00000148; -+ *((unsigned long*)& __m128i_op1[0]) = 0x117d7f7b093d187f; -+ *((unsigned long*)& __m128i_result[1]) = 0x117d7f7b093d187f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000034; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x70); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x117d7f7b093d187f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfe1bfefe00011ee1; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfe1bfe6c03824c60; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f0000001a; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f017f7f7f7f7f; -+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffff08; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffff08; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff10000006; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000001fffffff9; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x10ffffff10000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x10ffffff10000006; -+ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000498000000080; -+ *((unsigned long*)& __m256i_result[2]) = 0x00004843ffffffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000498000000080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000684000000000; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; -+ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000126000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x2555205ea7bc4020; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000126000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x2555205ea7bc4020; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op1[2]) = 0x10ffffff10000006; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0fffffff10000006; -+ *((unsigned long*)& __m256i_op1[0]) = 0x10ffffff10000006; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000004980008; -+ *((unsigned long*)& __m256i_result[2]) = 0x003ffffffc400000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000004980008; -+ *((unsigned long*)& __m256i_result[0]) = 0x003ffffffc400000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x46); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x413e276583869d79; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f017f9d8726d3; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7c7cd2eb63637c52; -+ *((unsigned long*)& __m128i_op1[0]) = 0x82ffd2210127add2; -+ *((unsigned long*)& __m128i_result[1]) = 0xffc2007aff230027; -+ *((unsigned long*)& __m128i_result[0]) = 0x0080005eff600001; -+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000498000000080; -+ *((unsigned long*)& __m256i_result[2]) = 0x000048430000ffe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000498000000080; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000684000000000; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffc2007aff230027; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0080005eff600001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01017f3c00000148; -+ *((unsigned long*)& __m128i_op1[0]) = 0x117d7f7b093d187f; -+ *((unsigned long*)& __m128i_result[1]) = 0xff23002700000148; -+ *((unsigned long*)& __m128i_result[0]) = 0xff600001093d187f; -+ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000497fe0000080; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000683fe0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000497fe0000080; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000683fe0000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffb6811fffff80; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff97c120000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffb6811fffff80; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff97c120000000; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x413e276583869d79; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f017f9d8726d3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vclo_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x413e276583869d79; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f017f9d8726d3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffc2007a; -+ *((int*)& __m128_op0[2]) = 0xff230027; -+ *((int*)& __m128_op0[1]) = 0x0080005e; -+ *((int*)& __m128_op0[0]) = 0xff600001; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000117d00007f7b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000093d0000187f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7d7f027f7c7f7c79; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7e7f7e7f027f032f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7d7f13fc7c7ffbf4; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[0]) = 0x4980008068400000; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffb6811fffff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff97c120000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffb6811fffff80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff97c120000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000004843ffdff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00043fff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00043fff00000000; -+ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffb6804cb9; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffb7bbdec0; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffb680489b; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffb7bc02a0; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xfffffffd; -+ *((int*)& __m256_result[4]) = 0xfffffffd; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xfffffffd; -+ *((int*)& __m256_result[0]) = 0xfffffffd; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffb6811fffff80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff97c120000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffb6811fffff80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff97c120000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xdb410010cbe10010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xdb410010cbe10010; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffff00000000f; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; -+ int_op1 = 0x0000000000000040; -+ *((unsigned long*)& __m128i_result[1]) = 0x0fbf0fbf0fbf0fbf; -+ *((unsigned long*)& __m128i_result[0]) = 0x0fbf0fbf0fbf0fbf; -+ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x3de00103153ff5fb; -+ *((unsigned long*)& __m256d_op0[2]) = 0xbffffffe80000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x3de00103153ff5fb; -+ *((unsigned long*)& __m256d_op0[0]) = 0xbffffffe80000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffd27db010d20fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x9727b8499727b849; -+ *((unsigned long*)& __m128i_result[0]) = 0x12755900b653f081; -+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[3]) = 0x40f69fe63c26f4f5; -+ *((unsigned long*)& __m256i_result[2]) = 0x7ff7ffff00000007; -+ *((unsigned long*)& __m256i_result[1]) = 0x40f69fe63c26f4f5; -+ *((unsigned long*)& __m256i_result[0]) = 0x7ff7ffff00000007; -+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0fffffffffffffff; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x40f69fe6; -+ *((int*)& __m256_op0[6]) = 0x3c26f4f5; -+ *((int*)& __m256_op0[5]) = 0x7ff7ffff; -+ *((int*)& __m256_op0[4]) = 0x00000007; -+ *((int*)& __m256_op0[3]) = 0x40f69fe6; -+ *((int*)& __m256_op0[2]) = 0x3c26f4f5; -+ *((int*)& __m256_op0[1]) = 0x7ff7ffff; -+ *((int*)& __m256_op0[0]) = 0x00000007; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9727b8499727b849; -+ *((unsigned long*)& __m128i_op0[0]) = 0x12755900b653f081; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7d7f13fc7c7ffbf4; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff9727ffff9727; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffe79ffffba5f; -+ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffff9727; -+ *((int*)& __m128_op0[2]) = 0xffff9727; -+ *((int*)& __m128_op0[1]) = 0xfffffe79; -+ *((int*)& __m128_op0[0]) = 0xffffba5f; -+ *((int*)& __m128_result[3]) = 0xffff9727; -+ *((int*)& __m128_result[2]) = 0xffff9727; -+ *((int*)& __m128_result[1]) = 0xfffffe79; -+ *((int*)& __m128_result[0]) = 0xffffba5f; -+ __m128_out = __lsx_vfsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x9727b8499727b849; -+ *((unsigned long*)& __m128i_op2[0]) = 0x12755900b653f081; -+ *((unsigned long*)& __m128i_result[1]) = 0x00060fbf00040fbf; -+ *((unsigned long*)& __m128i_result[0]) = 0x00020fbf00000fbf; -+ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x0fffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x0fffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x0fffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x0fffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000555889; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002580f01; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010000000455889; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010000002480f01; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff972700000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffba5f00000000; -+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x20); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x00060fbf02040fbf; -+ *((unsigned long*)& __m128i_result[0]) = 0x00020fbf02000fbf; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000002c21ffeff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc0000000c0000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000002c21ffeff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc0000000c0000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x498100814843ffe1; -+ *((unsigned long*)& __m256i_result[2]) = 0x4981008168410001; -+ *((unsigned long*)& __m256i_result[1]) = 0x498100814843ffe1; -+ *((unsigned long*)& __m256i_result[0]) = 0x4981008168410001; -+ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& 
__m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x49810081; -+ *((int*)& __m256_op1[6]) = 0x4843ffe1; -+ *((int*)& __m256_op1[5]) = 0x49810081; -+ *((int*)& __m256_op1[4]) = 0x68410001; -+ *((int*)& __m256_op1[3]) = 0x49810081; -+ *((int*)& __m256_op1[2]) = 0x4843ffe1; -+ *((int*)& __m256_op1[1]) = 0x49810081; -+ *((int*)& __m256_op1[0]) = 0x68410001; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x80000000; -+ *((int*)& __m256_result[6]) = 0x80000000; -+ *((int*)& __m256_result[5]) = 0x80000000; -+ *((int*)& __m256_result[4]) = 0x80000000; -+ *((int*)& __m256_result[3]) = 0x80000000; -+ *((int*)& __m256_result[2]) = 0x80000000; -+ *((int*)& __m256_result[1]) = 0x80000000; -+ *((int*)& __m256_result[0]) = 0x80000000; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffffff9; -+ *((unsigned long*)& __m256i_result[3]) = 0x9ffffd8020010001; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff9fffffff9; -+ *((unsigned long*)& __m256i_result[1]) = 0x9ffffd8020010001; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff9fffffff9; -+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x9ffffd8020010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff9fffffff9; -+ *((unsigned long*)& __m256i_op0[1]) = 0x9ffffd8020010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff9fffffff9; -+ *((unsigned long*)& __m256i_op1[3]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000018ffff2b13; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x00000018ffff2b13; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffac5cffffac5c; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffac5cffffac5c; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000555889; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002580f01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00060fbf02040fbf; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00020fbf02000fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x00060fbf02596848; -+ *((unsigned long*)& __m128i_result[0]) = 0x00020fbf04581ec0; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x010169d9010169d9; -+ *((unsigned long*)& __m128i_result[0]) = 0x01010287010146a1; -+ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x498100814843ffe1; -+ *((unsigned long*)& __m256i_op0[2]) = 0x4981008168410001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x498100814843ffe1; -+ *((unsigned long*)& __m256i_op0[0]) = 0x4981008168410001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x40f69fe73c26f4ee; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff896099cbdbfff1; -+ *((unsigned long*)& __m256i_result[2]) = 0xc987ffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xff896099cbdbfff1; -+ *((unsigned long*)& __m256i_result[0]) = 0xc987ffffffffffff; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf02596848; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf04581ec0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x010169d9010169d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01010287010146a1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200000001; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvsrli_h(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffac5cffffac5c; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffac5cffffac5c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x010169d9010169d9; -+ *((unsigned long*)& __m128i_op1[0]) = 0x01010287010146a1; -+ *((unsigned long*)& __m128i_result[1]) = 0xff01ff01ac025c87; -+ *((unsigned long*)& __m128i_result[0]) = 0xff01ff01ac465ca1; -+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff896099cbdbfff1; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc987ffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff896099cbdbfff1; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc987ffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00769f673424000f; -+ *((unsigned long*)& __m256i_result[2]) = 0x3678000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x00769f673424000f; -+ *((unsigned long*)& __m256i_result[0]) = 0x3678000100000001; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffd27db010d20fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0xffa4fb6021a41f7e; -+ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_result[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_result[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_result[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_result[0]) = 0xfffffffffffffff8; -+ __m256d_out = __lasx_xvfrint_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x9ffffd8020010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff9fffffff9; -+ *((unsigned long*)& __m256i_op1[1]) = 0x9ffffd8020010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff9fffffff9; -+ *((unsigned long*)& __m256i_result[3]) = 0x00009fff00002001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00009fff00002001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00060fbf; -+ *((int*)& __m128_op0[2]) = 0x02040fbf; -+ *((int*)& __m128_op0[1]) = 0x00020fbf; -+ *((int*)& __m128_op0[0]) = 0x02000fbf; -+ *((int*)& __m128_op1[3]) = 0x63636363; -+ *((int*)& __m128_op1[2]) = 0x63636363; -+ *((int*)& __m128_op1[1]) = 0xffd27db0; -+ *((int*)& __m128_op1[0]) = 0x10d20fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; -+ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00009fff; -+ *((int*)& __m256_op0[6]) = 0x00002001; -+ *((int*)& __m256_op0[5]) = 0x0000ffff; -+ *((int*)& __m256_op0[4]) = 0x0000ffff; -+ *((int*)& __m256_op0[3]) = 0x00009fff; -+ *((int*)& __m256_op0[2]) = 0x00002001; -+ *((int*)& __m256_op0[1]) = 0x0000ffff; -+ *((int*)& __m256_op0[0]) = 0x0000ffff; -+ *((int*)& __m256_op1[7]) = 0xfffeb683; -+ *((int*)& __m256_op1[6]) = 0x9ffffd80; -+ *((int*)& __m256_op1[5]) = 0xfffe97c0; -+ *((int*)& __m256_op1[4]) = 0x20010001; -+ *((int*)& 
__m256_op1[3]) = 0xfffeb683; -+ *((int*)& __m256_op1[2]) = 0x9ffffd80; -+ *((int*)& __m256_op1[1]) = 0xfffe97c0; -+ *((int*)& __m256_op1[0]) = 0x20010001; -+ *((int*)& __m256_op2[7]) = 0x00009fff; -+ *((int*)& __m256_op2[6]) = 0x00002001; -+ *((int*)& __m256_op2[5]) = 0x0000ffff; -+ *((int*)& __m256_op2[4]) = 0x0000ffff; -+ *((int*)& __m256_op2[3]) = 0x00009fff; -+ *((int*)& __m256_op2[2]) = 0x00002001; -+ *((int*)& __m256_op2[1]) = 0x0000ffff; -+ *((int*)& __m256_op2[0]) = 0x0000ffff; -+ *((int*)& __m256_result[7]) = 0xfffeb683; -+ *((int*)& __m256_result[6]) = 0x80002001; -+ *((int*)& __m256_result[5]) = 0xfffe97c0; -+ *((int*)& __m256_result[4]) = 0x8000ffff; -+ *((int*)& __m256_result[3]) = 0xfffeb683; -+ *((int*)& __m256_result[2]) = 0x80002001; -+ *((int*)& __m256_result[1]) = 0xfffe97c0; -+ *((int*)& __m256_result[0]) = 0x8000ffff; -+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb68380002001; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c08000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb68380002001; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c08000ffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000007fff5b41c0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000007fff5b41d0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000007fff5b41c0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000007fff5b41d0; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x59); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00009fff; -+ *((int*)& __m256_op0[6]) = 0x00002001; -+ *((int*)& __m256_op0[5]) = 0x0000ffff; -+ *((int*)& __m256_op0[4]) = 0x0000ffff; -+ *((int*)& __m256_op0[3]) = 0x00009fff; -+ *((int*)& __m256_op0[2]) = 0x00002001; -+ *((int*)& __m256_op0[1]) = 0x0000ffff; -+ *((int*)& __m256_op0[0]) = 0x0000ffff; -+ *((int*)& __m256_op1[7]) = 0xfffeb683; -+ *((int*)& __m256_op1[6]) = 0x9ffffd80; -+ *((int*)& __m256_op1[5]) = 0xfffe97c0; -+ *((int*)& __m256_op1[4]) = 0x20010001; -+ *((int*)& __m256_op1[3]) = 0xfffeb683; -+ *((int*)& __m256_op1[2]) = 0x9ffffd80; -+ *((int*)& __m256_op1[1]) = 0xfffe97c0; -+ *((int*)& __m256_op1[0]) = 0x20010001; -+ *((int*)& __m256_result[7]) = 0x00009fff; -+ *((int*)& __m256_result[6]) = 0x9ffffd80; -+ *((int*)& __m256_result[5]) = 0x0000ffff; -+ *((int*)& __m256_result[4]) = 0x20010001; -+ *((int*)& __m256_result[3]) = 0x00009fff; -+ *((int*)& __m256_result[2]) = 0x9ffffd80; -+ *((int*)& __m256_result[1]) = 0x0000ffff; -+ *((int*)& __m256_result[0]) = 0x20010001; -+ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff01ff01ac025c87; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff01ff01ac465ca1; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff01ff0100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xac465ca100000000; -+ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000004; -+ *((unsigned 
long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00009fff00002001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00009fff00002001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x6363636163636363; -+ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00009fff9ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff20010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00009fff9ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff20010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00002080df5b41cf; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00002080df5b41cf; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000009fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff40a6; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000009fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff40a6; -+ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff01ff01ac025c87; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff01ff01ac465ca1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636163636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffeb683; -+ *((int*)& __m256_op0[6]) = 0x9ffffd80; -+ *((int*)& __m256_op0[5]) = 0xfffe97c0; -+ *((int*)& __m256_op0[4]) = 0x20010001; -+ *((int*)& __m256_op0[3]) = 0xfffeb683; -+ *((int*)& __m256_op0[2]) = 0x9ffffd80; -+ *((int*)& __m256_op0[1]) = 0xfffe97c0; -+ *((int*)& __m256_op0[0]) = 0x20010001; -+ *((int*)& __m256_op1[7]) = 0x00009fff; -+ *((int*)& __m256_op1[6]) = 0x9ffffd80; -+ *((int*)& __m256_op1[5]) = 0x0000ffff; -+ *((int*)& __m256_op1[4]) = 0x20010001; -+ *((int*)& __m256_op1[3]) = 0x00009fff; -+ *((int*)& __m256_op1[2]) = 0x9ffffd80; -+ *((int*)& __m256_op1[1]) = 0x0000ffff; -+ *((int*)& __m256_op1[0]) = 0x20010001; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00002080; -+ *((int*)& __m256_op2[4]) = 0xdf5b41cf; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00002080; -+ *((int*)& __m256_op2[0]) = 0xdf5b41cf; -+ *((int*)& __m256_result[7]) = 0xfffeb683; -+ *((int*)& __m256_result[6]) = 0x007ffd80; -+ *((int*)& __m256_result[5]) = 0xfffe97c0; -+ *((int*)& __m256_result[4]) = 0xdf5b41cf; -+ *((int*)& __m256_result[3]) = 0xfffeb683; -+ *((int*)& __m256_result[2]) = 0x007ffd80; -+ *((int*)& __m256_result[1]) = 0xfffe97c0; -+ *((int*)& __m256_result[0]) = 0xdf5b41cf; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffeb683; -+ *((int*)& __m256_op0[6]) = 0x9ffffd80; -+ *((int*)& __m256_op0[5]) = 0xfffe97c0; -+ *((int*)& __m256_op0[4]) = 0x20010001; -+ *((int*)& __m256_op0[3]) = 0xfffeb683; -+ *((int*)& __m256_op0[2]) = 0x9ffffd80; -+ *((int*)& __m256_op0[1]) = 0xfffe97c0; -+ *((int*)& __m256_op0[0]) = 0x20010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000019ffdf403; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000011ffd97c3; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000019ffdf403; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000011ffd97c3; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffeb8649d0d6250; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffeb8649d0d6250; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op2[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op2[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op2[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op2[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x7f800000; -+ *((int*)& __m256_result[6]) = 0x7f800000; -+ *((int*)& __m256_result[5]) = 0x7f800000; -+ *((int*)& __m256_result[4]) = 0x7f800000; -+ *((int*)& __m256_result[3]) = 0x7f800000; -+ *((int*)& __m256_result[2]) = 0x7f800000; -+ *((int*)& __m256_result[1]) = 0x7f800000; -+ *((int*)& __m256_result[0]) = 0x7f800000; -+ __m256_out = __lasx_xvfrecip_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200000001; -+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; -+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0xff01ff01ac025c87; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff01ff01ac465ca1; -+ *((unsigned long*)& __m128i_result[1]) = 0x64616462b76106dc; -+ *((unsigned long*)& __m128i_result[0]) = 0x64616462b71d06c2; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000019ffdf403; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000011ffd97c3; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000019ffdf403; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000011ffd97c3; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000019ffdf403; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000019ffdf403; -+ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x64616462b76106dc; -+ *((unsigned long*)& __m128i_op1[0]) = 0x64616462b71d06c2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000001; -+ *((int*)& __m256_op1[6]) = 0x9ffdf403; -+ *((int*)& __m256_op1[5]) = 0x00000001; -+ *((int*)& __m256_op1[4]) = 0x1ffd97c3; -+ *((int*)& __m256_op1[3]) = 0x00000001; -+ *((int*)& __m256_op1[2]) = 0x9ffdf403; -+ *((int*)& __m256_op1[1]) = 0x00000001; -+ *((int*)& __m256_op1[0]) = 0x1ffd97c3; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000200a000020020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000200a000020020; -+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00020000ffff0001; -+ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; -+ __m128i_out = 
__lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000001; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00010001; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00060fbf; -+ *((int*)& __m128_op1[2]) = 0x02040fbf; -+ *((int*)& __m128_op1[1]) = 0x00020fbf; -+ *((int*)& __m128_op1[0]) = 0x02000fbf; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000400000001; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00020000; -+ *((int*)& __m128_op0[0]) = 0xffff0001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; -+ __m256i_out 
= __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb683007ffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c0df5b41cf; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb683007ffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c0df5b41cf; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe97a1df5b41b0; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00020000; -+ *((int*)& __m128_op0[0]) = 0xffff0001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff007ffd61; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff007ffd61; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; -+ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x62); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000fffe00009fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fffe00002001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000fffe00009fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fffe00002001; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0002000400000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0003000500000001; -+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x63636363; -+ *((int*)& __m128_op0[2]) = 0x63636363; -+ *((int*)& __m128_op0[1]) = 
0x63636363; -+ *((int*)& __m128_op0[0]) = 0x63636363; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vftint_wu_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffeb664; -+ *((int*)& __m256_op0[6]) = 0x007ffd61; -+ *((int*)& __m256_op0[5]) = 0xfffe97a1; -+ *((int*)& __m256_op0[4]) = 0xdf5b41b0; -+ *((int*)& __m256_op0[3]) = 0xfffeb664; -+ *((int*)& __m256_op0[2]) = 0x007ffd61; -+ *((int*)& __m256_op0[1]) = 0xfffe97a1; -+ *((int*)& __m256_op0[0]) = 0xdf5b41b0; -+ *((int*)& __m256_op1[7]) = 0xfffeb683; -+ *((int*)& __m256_op1[6]) = 0x9ffffd80; -+ *((int*)& __m256_op1[5]) = 0xfffe97c0; -+ *((int*)& __m256_op1[4]) = 0x20010001; -+ *((int*)& __m256_op1[3]) = 0xfffeb683; -+ *((int*)& __m256_op1[2]) = 0x9ffffd80; -+ *((int*)& __m256_op1[1]) = 0xfffe97c0; -+ *((int*)& __m256_op1[0]) = 0x20010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000001faf19b60; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6c2905ae7c14c561; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000001faf19b60; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6c2905ae7c14c561; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x94d7fb5200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x94d7fb5200000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; 
-+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00020004; -+ *((int*)& __m128_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000e3ab0001352b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000e3ab0001352b; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff00007fff0000; -+ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff00007fff0000; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; -+ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x94d7fb5200000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x94d7fb5200000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000038ea4d4a; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7fff00007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x94d7fb5200000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x94d7fb5200000000; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001388928513889; -+ *((unsigned long*)& __m128i_op0[0]) = 0x006938094a013889; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001388928513889; -+ *((unsigned long*)& __m128i_op1[0]) = 0x006938094a013889; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002711250a27112; -+ *((unsigned long*)& __m128i_result[0]) = 0x00d2701294027112; -+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb683007ffd80; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c0df5b41cf; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb683007ffd80; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c0df5b41cf; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001497c98ea4fca; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001497c98ea4fca; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0001497c98ea4fca; -+ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0001497c98ea4fca; -+ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000006715b036; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000006715b036; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xfffeb664; -+ *((int*)& __m256_op1[6]) = 0x007ffd61; -+ *((int*)& __m256_op1[5]) = 0xfffe97a1; -+ *((int*)& __m256_op1[4]) = 0xdf5b41b0; -+ *((int*)& __m256_op1[3]) = 0xfffeb664; -+ *((int*)& __m256_op1[2]) = 0x007ffd61; -+ *((int*)& __m256_op1[1]) = 0xfffe97a1; -+ *((int*)& __m256_op1[0]) = 0xdf5b41b0; -+ *((int*)& __m256_op2[7]) = 0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x94d7fb52; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xfffeb664; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xfffe97a1; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xfffeb664; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xfffe97a1; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000003fffffffd; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffd; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000003fffffffe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000003fffffffd; -+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002711250a27112; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00d2701294027112; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff7112ffff7112; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff7012ffff7112; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002711250a27112; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00d2701294027112; -+ *((unsigned long*)& __m128i_result[1]) = 0x080a791a58aa791a; -+ *((unsigned long*)& __m128i_result[0]) = 0x08da781a9c0a791a; -+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x94d7fb5200000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00020000; -+ *((int*)& __m128_op0[0]) = 0xffff0001; -+ *((int*)& __m128_op1[3]) = 0x63636363; -+ *((int*)& __m128_op1[2]) = 0x63636363; -+ *((int*)& __m128_op1[1]) = 0x63636363; -+ *((int*)& __m128_op1[0]) = 0x63636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; -+ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00020000ffff0001; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00020000ffff0001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000003030000; -+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00020000ffff0001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000001; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb664007ffd61; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97a1df5b41b0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc1f03e1042208410; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x00f0001000000010; -+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ 
*((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf000f000f000f000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf000f010f000f010; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf000f000f000f000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf000f010f000f010; -+ *((unsigned long*)& __m256i_result[3]) = 0x00f0000000f00010; -+ *((unsigned long*)& __m256i_result[2]) = 0xfff0ff00fff0ff10; -+ *((unsigned long*)& __m256i_result[1]) = 0x00f0000000f00010; -+ *((unsigned long*)& __m256i_result[0]) = 0xfff0ff00fff0ff10; -+ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00f0001000000010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x00f0001000000010; -+ __m128i_out = __lsx_vsrai_h(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002711350a27112; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00d5701794027113; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff61010380; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff61010380; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000006; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000006; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000006; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000006; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ef00ff010f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff010f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc1f03e1042208410; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000110; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x00000000431f851f; -+ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_result[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_result[0]) = 0x000020a4ffffbe4f; -+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00027113; -+ *((int*)& __m128_op0[2]) = 0x50a27112; -+ *((int*)& __m128_op0[1]) = 0x00d57017; -+ *((int*)& __m128_op0[0]) = 0x94027113; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002711350a27112; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00d5701794027113; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfff0ff000000000f; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000f00f000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfff0ff000000000f; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000f00f000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00f8000000000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x000800f800000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00f8000000000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x000800f800000000; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000110; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000431f851f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001011010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000043431f1f; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xf0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 
0xf2f444429d96dbe1; -+ *((unsigned long*)& __m128d_op0[0]) = 0xddd76c75f2f44442; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128d_op1[0]) = 0xc1f03e1042208410; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_result[3]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_result[2]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_result[1]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_result[0]) = 0x07fed3c8f7ad28d0; -+ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x400000003fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; -+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x400000003fffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4000000040000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x400000003fffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x400000003fffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0x4000000040000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op2[2]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op2[0]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffdf5b000041b0; -+ __m256i_out = 
__lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00f0000000f00010; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfff0ff00fff0ff10; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00f0000000f00010; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfff0ff00fff0ff10; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_w(__m256i_op0,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffff97a1; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffdf5b000041b0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00f8000000000008; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000800f800000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00f8000000000008; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000800f800000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xe3f7fff7fffcbd08; -+ *((unsigned long*)& __m256i_result[2]) = 0x0dbfa28000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xe3f7fff7fffcbd08; -+ *((unsigned long*)& __m256i_result[0]) = 0x0dbfa28000000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00f0000000f00010; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0ff00fff0ff10; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00f0000000f00010; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0ff00fff0ff10; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0087ff87f807ff87; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0087ff87f807ff87; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x004001be00dc008e; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ffff0100010001; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x004001be00dc008e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000a400ff004f; -+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0087ff87f807ff87; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0087ff87f807ff87; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000000000; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00ff00ef; -+ *((int*)& __m128_op0[2]) = 0x00ff010f; -+ *((int*)& __m128_op0[1]) = 
0x00ff00ff; -+ *((int*)& __m128_op0[0]) = 0x00ff010f; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfrint_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e10; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000400080003fff; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_result[3]) = 0x01c03f8034c03200; -+ *((unsigned long*)& __m256i_result[2]) = 0x3dc02b400a003400; -+ *((unsigned long*)& __m256i_result[1]) = 0x01c03f8034c03200; -+ *((unsigned long*)& __m256i_result[0]) = 0x3dc02b400a003400; -+ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x23); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x01c03f8034c03200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x3dc02b400a003400; -+ *((unsigned long*)& __m256i_op0[1]) = 0x01c03f8034c03200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x3dc02b400a003400; -+ *((unsigned long*)& __m256i_op1[3]) = 0x01c03f8034c03200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x3dc02b400a003400; -+ *((unsigned long*)& __m256i_op1[1]) = 0x01c03f8034c03200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x3dc02b400a003400; -+ *((unsigned long*)& __m256i_op2[3]) = 0x07fee332883f86b0; -+ *((unsigned long*)& __m256i_op2[2]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_op2[1]) = 0x07fee332883f86b0; -+ 
*((unsigned long*)& __m256i_op2[0]) = 0x07fed3c8f7ad28d0; -+ *((unsigned long*)& __m256i_result[3]) = 0x01ce3c0050d32d40; -+ *((unsigned long*)& __m256i_result[2]) = 0x3fadafc013acf600; -+ *((unsigned long*)& __m256i_result[1]) = 0x01ce3c0050d32d40; -+ *((unsigned long*)& __m256i_result[0]) = 0x3fadafc013acf600; -+ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e10; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000400080003fff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000bc2000007e04; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e04; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000400080003fff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000bc2000007e04; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffbfff7fffc000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff43dfffff81fb; -+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_result[3]) = 0x97a297a297a297a2; -+ *((unsigned long*)& __m256i_result[2]) = 0x97a297a297a297a2; -+ *((unsigned long*)& __m256i_result[1]) = 0x97a297a297a297a2; -+ *((unsigned long*)& __m256i_result[0]) = 0x97a297a297a297a2; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000234545b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4d1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000002345454; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c0dec4ca; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffbfff7fffc000; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffff43dfffff81fb; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000010000005e; -+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000002345454; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4ca; -+ *((unsigned long*)& __m128i_result[1]) = 0x000030ebffffffdc; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000203ffffff25; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002345454; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4ca; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000060006; -+ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000a400ff004f; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256d_op0[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0087ff87f807ff87; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0087ff87f807ff87; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xb3b3b3b3b3b3b3b3; -+ *((unsigned long*)& __m256i_result[2]) = 0xb3b3b3b3b3b3b3b3; -+ *((unsigned long*)& __m256i_result[1]) = 0xb3b3b3b3b3b3b3b3; -+ *((unsigned long*)& __m256i_result[0]) = 0xb3b3b3b3b3b3b3b3; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x4c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4d1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff3f213b2f; -+ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op2[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op2[0]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000203000010d0; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc00300000220; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x27); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_result[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_result[0]) = 0x020310edc003023d; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x30eb020302101b03; -+ *((unsigned long*)& __m128i_op1[0]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_result[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_result[0]) = 0x020310edc003023d; -+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4d1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000040223c2e; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256d_op1[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x00000000c0dec4d1; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000003ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001ffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000003ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001ffffffffffff; -+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000003ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001ffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000003ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001ffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000005e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlrni_w_d(__m256i_op0,__m256i_op1,0x3c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000b000b000b000b; -+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_op1[1]) = 0x30eb020302101b03; -+ *((unsigned long*)& __m128i_op1[0]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_result[1]) = 0x020310d0c0030220; -+ *((unsigned long*)& __m128i_result[0]) = 0x020310d0c0030220; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000b000b000b000b; -+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_op1[0]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0xffff97a2; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0xffff97a2; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010000; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010000; -+ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffc3; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000685e; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000020a4ffffbe4f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000040000001b; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000040000001b; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x01010000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x01010000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x101b0330eb022002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x030220020310edc0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0080800080000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080008000; -+ __m128i_out = __lsx_vslli_b(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& 
__m256d_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff97a2; -+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; -+ __m256i_out = __lasx_xvmod_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000040000001b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000008; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000040000001b; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x021b7d24; -+ *((int*)& __m128_op0[2]) = 0x49678a35; -+ *((int*)& __m128_op0[1]) = 0x030298a6; -+ *((int*)& __m128_op0[0]) = 0x21030a49; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000002; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ 
*((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0020004000400040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0020004000400040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0020004000400040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0020004000400040; -+ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xa); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x328e1080889415a0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3960b1a401811060; -+ *((unsigned long*)& __m128i_op1[1]) = 0x328e1080889415a0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3960b1a401811060; -+ *((unsigned long*)& __m128i_op2[1]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x32f3c7a38f9f4b8b; -+ *((unsigned long*)& __m128i_result[0]) = 0x2c9e5069f5d57780; -+ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = 
__lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000027; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000080c43b700; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x56); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefe7f; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefe7f; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x3f); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x30eb022002101b20; -+ *((unsigned long*)& __m128i_op1[0]) = 0x020310edc003023d; -+ *((unsigned long*)& __m128i_result[1]) = 0x022002101b200203; -+ *((unsigned long*)& __m128i_result[0]) = 0x022002101b200203; -+ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x30); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; -+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; -+ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x022002101b200203; -+ *((unsigned long*)& __m128i_op0[0]) = 0x022002101b200203; -+ *((unsigned long*)& __m128i_op1[1]) = 0x022002101b200203; -+ *((unsigned long*)& __m128i_op1[0]) = 0x022002101b200203; -+ *((unsigned long*)& __m128i_op2[1]) = 0x000000080c43b700; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x036caeeca7592703; -+ *((unsigned long*)& __m128i_result[0]) = 0x022002101b200203; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d2449678a35; -+ *((unsigned long*)& __m128i_op0[0]) = 0x030298a621030a49; -+ int_result = 0xffffffffffff8a35; -+ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x4); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x00000000abba7980; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ccf98000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010000; -+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00c0c000c0000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc0000000c000c000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000027; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000027; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000027; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00c0c000c0000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc0000000c000c000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00c0c000c0000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc0000000c000c000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x001e001e001e001e; -+ *((unsigned long*)& __m128i_result[0]) = 0x001e001e001e001e; -+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; -+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; -+ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000010001; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00010001; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00010001; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00010001; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_result[0]) = 0x030298a6a1030a49; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001e001e001e001e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001e001e001e001e; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_op1[0]) = 0x030298a6a1030a49; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff4; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff4; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff4; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff4; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d2449678a35; -+ *((unsigned long*)& __m128i_op0[0]) = 0x030298a621030a49; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_op2[0]) = 0x030298a6a1030a49; -+ *((unsigned long*)& __m128i_result[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_result[1]) = 0xada4808924882588; -+ *((unsigned 
long*)& __m128i_result[0]) = 0xacad25090caca5a4; -+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xada4808924882588; -+ *((unsigned long*)& __m128i_op0[0]) = 0xacad25090caca5a4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_op1[0]) = 0x030298a6a1030a49; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; -+ *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; -+ *((unsigned long*)& __m128i_result[1]) = 0x00197f26cb658837; -+ *((unsigned long*)& __m128i_result[0]) = 0x01009aa4a301084b; -+ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001e001e001e001e; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001e001e001e001e; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffaeffaeffaeffae; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffaeffaeffaeffae; -+ *((unsigned long*)& __m128i_result[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_result[0]) = 0x001effae001effae; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x59f7fd70; -+ *((int*)& __m128_result[0]) = 0x59f7fd70; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& 
__m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00197f26cb658837; -+ *((unsigned long*)& __m128i_op0[0]) = 0x01009aa4a301084b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_result[1]) = 0x0037ffd40083ffe5; -+ *((unsigned long*)& __m128i_result[0]) = 0x001e0052001ffff9; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffaeffaeffaeffae; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffaeffaeffaeffae; -+ *((unsigned long*)& __m128i_result[1]) = 0x0051005200510052; -+ *((unsigned long*)& __m128i_result[0]) = 0x0051005200510052; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0051005200510052; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0051005200510052; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffaeffaeffaeffae; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffaeffaeffaeffae; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffe65ecc1be5bc; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe65ecc1be5bc; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128d_op0[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128d_result[1]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128d_result[0]) = 0x2006454690d3de87; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd7059f7fd70; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m128i_result[0]) = 0x59f7fd8759f7fd87; -+ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001700000017; -+ *((unsigned long*)& 
__m128i_op0[0]) = 0x59f7fd8759f7fd87; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m128i_op1[0]) = 0x59f7fd8759f7fd87; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; -+ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0037ffd40083ffe5; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001e0052001ffff9; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00df020f0078007f; -+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00df020f; -+ *((int*)& __m128_op0[0]) = 0x0078007f; -+ *((int*)& __m128_op1[3]) = 0x0037ffd4; -+ *((int*)& __m128_op1[2]) = 0x0083ffe5; -+ *((int*)& __m128_op1[1]) = 0x001e0052; -+ *((int*)& __m128_op1[0]) = 0x001ffff9; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_result[1]) = 0x2006454652525252; -+ *((unsigned long*)& __m128i_result[0]) = 0x2006454652525252; -+ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffae001effae; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252; 
-+ *((unsigned long*)& __m128i_result[1]) = 0xffaeffadffaeffad; -+ *((unsigned long*)& __m128i_result[0]) = 0xffaeffadffaeffad; -+ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd8759f7fd87; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m128i_op1[0]) = 0x59f7fd8759f7fd87; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000021100000211; -+ *((unsigned long*)& __m128i_result[0]) = 0xfb141d31fb141d31; -+ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd8759f7fd87; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffae001effae; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000001700000017; -+ *((unsigned long*)& __m128i_op2[0]) = 0x59f7fd8759f7fd87; -+ *((unsigned long*)& __m128i_result[1]) = 0xfd200ed2fd370775; -+ *((unsigned long*)& __m128i_result[0]) = 0x96198318780e32c5; -+ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfd200ed2fd370775; -+ *((unsigned long*)& __m128d_op0[0]) = 0x96198318780e32c5; -+ *((unsigned long*)& __m128d_result[1]) = 0xfd200ed2fd370775; -+ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; -+ __m128d_out = __lsx_vfrint_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op1[1]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_op1[0]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_result[1]) = 0x202544f490f2de35; -+ *((unsigned long*)& __m128i_result[0]) = 0x202544f490f2de35; 
-+ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op0[0]) = 0x001effae001effae; -+ unsigned_int_result = 0x000000000000001e; -+ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x3); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_result[0]) = 0x004d004d004d004d; -+ __m128i_out = __lsx_vldi(1101); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfd200ed2fd370775; -+ *((unsigned long*)& __m128i_op0[0]) = 0x96198318780e32c5; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffe65ecc1be5bc; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffe65ecc1be5bc; -+ *((unsigned long*)& __m128i_result[1]) = 0xfe212874311c22b9; -+ *((unsigned long*)& __m128i_result[0]) = 0x971a9dbaacf34d09; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x202544f490f2de35; -+ *((unsigned long*)& __m128i_op0[0]) = 0x202544f490f2de35; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0040000000000000; -+ __m256i_out = __lasx_xvsrlrni_w_d(__m256i_op0,__m256i_op1,0x2a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000021100000211; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfb141d31fb141d31; -+ *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; -+ *((unsigned long*)& __m128i_op2[1]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_op2[0]) = 0x2006454690d3de87; -+ *((unsigned long*)& __m128i_result[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_result[0]) = 0xbbc8ecc5f3ced5f3; -+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0xd1c0c0a5baf8f8d3; -+ *((unsigned long*)& __m128i_result[0]) = 0xecbbbbc5d5f3f3f3; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x7c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xfffbfffb; -+ *((int*)& __m128_op0[2]) = 0xfffbfffb; -+ *((int*)& __m128_op0[1]) = 0xfffbfffb; -+ *((int*)& __m128_op0[0]) = 0xfffbfffb; -+ *((unsigned long*)& __m128i_result[1]) = 0xfffbfffbfffbfffb; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffbfffbfffbfffb; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffaefffbffaefffb; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffaefffbffaefffb; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0005ffff0005; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff000500000004; -+ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = 
__lasx_xvmin_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x13); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2006454652525252; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2006454652525252; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0040000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0040000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0040000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0040000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_result[0]) = 0xbbc8ecc5f3ced5f3; -+ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffaefffbffaefffb; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffaefffbffaefffb; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffc105d1aa; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffbc19ecca; -+ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128d_op0[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; -+ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0xff80ffa2fff0ff74; -+ *((unsigned long*)& __m128i_result[0]) = 0xff76ffd8ffe6ffaa; -+ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd1c0c0a5baf8f8d3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xecbbbbc5d5f3f3f3; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffaefffbffaefffb; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffaefffbffaefffb; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000d16fc0a0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ec6abbc0; -+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; -+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ long_int_result = 0xffffffffffffffff; -+ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffc105d1aa; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbc19ecca; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& 
__m128i_result[1]) = 0xfffffffff9bffbfb; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffdffdfb; -+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x4040404040404040; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x4040404040404040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffc105d1aa; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbc19ecca; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff3efa; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff43e6; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xff80ffa2fff0ff74; -+ *((unsigned long*)& __m128d_op0[0]) = 0xff76ffd8ffe6ffaa; -+ *((unsigned long*)& __m128d_op1[1]) = 0xff80ffa2fff0ff74; -+ *((unsigned long*)& 
__m128d_op1[0]) = 0xff76ffd8ffe6ffaa; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128d_op2[0]) = 0x0303030303030303; -+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; -+ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff80ffa2fff0ff74; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff76ffd8ffe6ffaa; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffc105d1aa; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbc19ecca; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffe03ff63ff9bf; -+ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_result[0]) = 0xbbc8ecc5f3ced5f3; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; -+ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff80ffa2fff0ff74; -+ *((unsigned long*)& __m128i_op0[0]) = 0xff76ffd8ffe6ffaa; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; -+ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; -+ *((unsigned long*)& __m128i_result[1]) = 0xe01ae8a3fc55dd23; -+ *((unsigned long*)& __m128i_result[0]) = 0xdd9ff64ef9daeace; -+ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ unsigned_int_result = 0x00000000ffffffff; -+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x5); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x02f3030303030303; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x06d9090909090909; -+ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x02f3030303030303; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x02f3030303100303; -+ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmskltz_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; -+ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x004d004d004d004d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001340134013401; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001340134013401; -+ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0303030303030303; -+ *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; -+ *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; -+ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x030804010d090107; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1313131313131313; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1313131313131313; -+ *((unsigned long*)& __m128i_result[1]) = 0x0039d21e3229d4e8; -+ *((unsigned long*)& __m128i_result[0]) = 0x6d339b4f3b439885; -+ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x06d9090909090909; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0039d21e3229d4e8; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6d339b4f3b439885; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000db24848; -+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1313131313131313; -+ *((unsigned long*)& __m128i_op0[0]) = 0x1313131313131313; -+ *((unsigned long*)& __m128i_op1[1]) = 0x34947b4b11684f92; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd73691661e5b68b4; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffff000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000d00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffef; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000c; -+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff0100000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff0100000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff0100000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff0100000000; -+ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x06d9090909090909; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x48); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0039d21e3229d4e8; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6d339b4f3b439885; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffff000000000; 
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000000; -+ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x2e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xfffffff000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000d00000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffc0000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000001; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x34947b4b11684f92; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd73691661e5b68b4; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000016f303dff6d2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000016f303dff6d2; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7fffffff00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x34947b4b11684f92; -+ *((unsigned long*)& __m128i_result[0]) = 0xee297a731e5c5f86; -+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; -+ __m256i_out = 
__lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x05f5e2320605e1e2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x03080401; -+ *((int*)& __m128_op0[2]) = 0x0d090107; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; -+ __m256i_out = __lasx_xvldi(1820); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x34947b4b11684f92; -+ *((unsigned long*)& __m128i_op1[0]) = 0xee297a731e5c5f86; -+ *((unsigned long*)& __m128i_result[1]) = 0xff6cffb5ff98ff6e; -+ *((unsigned long*)& __m128i_result[0]) = 0xffd7ff8dffa4ff7a; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff6cffb5ff98ff6e; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffd7ff8dffa4ff7a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x34947b4b11684f92; -+ *((unsigned long*)& __m128i_op1[0]) = 0xee297a731e5c5f86; -+ *((unsigned long*)& __m128i_op2[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffc0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000868686868686; -+ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; 
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000180; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007f80; -+ __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff1b00e4; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_h(__m256i_op0,12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[3]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010003; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010081; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010003; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100018080; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000868686868686; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1c3fc7; -+ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1c3fc7; -+ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; -+ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x000000000000001e; -+ *((unsigned long*)& __m128i_result[1]) = 0x1e1e1e1e1e1e1e1e; -+ *((unsigned long*)& __m128i_result[0]) = 0x1e1e1e1e1e1e1e1e; -+ __m128i_out = __lsx_vreplgr2vr_b(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1c3fc7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1c3fc7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; -+ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xff1cff1c; -+ *((int*)& __m256_op1[6]) = 0xff1cff1c; -+ *((int*)& __m256_op1[5]) = 0xff1cff1c; -+ *((int*)& __m256_op1[4]) = 0xff1cff1c; -+ *((int*)& __m256_op1[3]) = 0xff1cff1c; -+ *((int*)& __m256_op1[2]) = 0xff1cff1c; -+ *((int*)& __m256_op1[1]) = 
0xff1cff1c; -+ *((int*)& __m256_op1[0]) = 0xff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffff1cffffff1c; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffff1cffffff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffff1cffffff1c; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff1cffffff1c; -+ __m256i_out = __lasx_xvexth_w_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; -+ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_d(__m128i_op0,15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x5252525252525252; -+ *((unsigned long*)& __m128d_op0[0]) = 0x5252dcdcdcdcdcdc; -+ *((unsigned long*)& __m128d_result[1]) = 0x2d8bf1f8fc7e3f20; -+ *((unsigned long*)& __m128d_result[0]) = 0x2d8b24b936d1b24d; -+ __m128d_out = __lsx_vfrecip_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001c; -+ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; -+ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf8f8372f752402ee; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffc0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff01; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1e1e1e1e1e1e1e1e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1e1e1e1e1e1e1e1e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0f0f0f0f0f0f0f0f; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f0f525252525252; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_h(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0674c8868a74fc80; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; -+ *((unsigned long*)& __m128d_result[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128d_result[0]) = 0xc3818bffe7b7a7b8; -+ __m128d_out = __lsx_vffint_d_l(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000400000004; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128d_op0[0]) = 0xc3818bffe7b7a7b8; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f1f1f1f1f1; -+ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1f1f1f1f1; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000400000004; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff1cff18; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff1cff18; -+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff1cff18; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff1cff18; -+ *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1f3f06d4fcba4e98; -+ *((unsigned long*)& __m128i_op0[0]) = 0x2e1135681fa8d951; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000007d07fffffff; -+ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1b00e300e4; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1b00e300e4; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1b00e300e4; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1b00e30100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x002000000020ffff; -+ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff1cff1c; -+ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffff1cff1c; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xdc); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc3818bffe7b7a7b8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vmskltz_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op2[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op2[0]) = 0xc3818bffe7b7a7b8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x084d1a0907151a3d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; 
-+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x084d1a0907151a3d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000007d07fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000868600008785; -+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; -+ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0674c8868a74fc80; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; -+ int_result = 0x00000000090b0906; -+ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000008686; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00008e5680008685; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007fff7fff8000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0674c8868a74fc80; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfdce8003090b0906; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0674c8868a74fc80; -+ *((unsigned long*)& __m128i_op2[0]) = 0xfdce8003090b0906; -+ *((unsigned long*)& __m128i_result[1]) = 0x0029aeaca57d74e6; -+ *((unsigned long*)& __m128i_result[0]) = 0xdbe332365392c686; -+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000b000b000b000b; -+ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 
0x0000000000000000; -+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; -+ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; -+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x002000000020ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000056f64adb9464; -+ *((unsigned long*)& __m128i_op1[0]) = 0x29ca096f235819c2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000004399d32; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xff1cff1c; -+ *((int*)& __m256_op0[6]) = 0xff1cff1c; -+ *((int*)& __m256_op0[5]) = 0xff1cff1c; -+ *((int*)& __m256_op0[4]) = 0xff1cff1c; -+ *((int*)& __m256_op0[3]) = 0xff1cff1c; -+ *((int*)& __m256_op0[2]) = 0xff1cff1c; -+ *((int*)& __m256_op0[1]) = 0xff1cff1c; -+ *((int*)& __m256_op0[0]) = 0xff1cff1c; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0674c886fcba4e98; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; -+ *((unsigned long*)& __m128i_result[1]) = 0x003fffc0ffc0003f; -+ *((unsigned long*)& __m128i_result[0]) = 0xffc0ffc0003f003f; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xd3220000d3f20000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8bff0000a7b80000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0909000009090000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0909000009090000; -+ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000b000b000b000b; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0674c886fcba4e98; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfdce8003090b0906; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff001a00000000; -+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0029aeaca57d74e6; -+ *((unsigned long*)& __m128i_op0[0]) = 0xdbe332365392c686; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000056f64adb9464; -+ *((unsigned long*)& __m128i_op1[0]) = 0x29ca096f235819c2; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x002a05a2f059094a; -+ *((unsigned long*)& __m128i_result[0]) = 0x05ad3ba576eae048; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0674c886fcba4e98; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; -+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffc0ffc0003f; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffc0ffc0003f003f; -+ *((unsigned long*)& __m128i_op2[1]) = 0x002a05a2f059094a; -+ *((unsigned long*)& __m128i_op2[0]) = 0x05ad3ba576eae048; -+ *((unsigned long*)& __m128i_result[1]) = 0xd4a6cc27d02397ce; -+ *((unsigned long*)& __m128i_result[0]) = 0x24b85f887e903abe; -+ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; -+ __m256i_out = __lasx_xvori_b(__m256i_op0,0x6b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op0[2]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op0[1]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op0[0]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op1[3]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op1[2]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op1[1]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_op1[0]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff001a00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x003fffc0ffc0003f; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffc0ffc0003f003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff0000000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; -+ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0017001700176d6d; -+ *((unsigned long*)& __m256i_result[2]) = 0x0017001700176d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0017001700176d6d; -+ *((unsigned long*)& __m256i_result[0]) = 0x0017001700176d6d; -+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0909000009090000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0909000009090000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0909000009090000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0909000009090000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x002a05a2f059094a; -+ *((unsigned long*)& __m128i_op2[0]) = 0x05ad3ba576eae048; -+ *((unsigned long*)& __m128i_result[1]) = 0x0909e0480909e048; -+ *((unsigned long*)& __m128i_result[0]) = 0x0909e0480909e048; -+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007770ffff941d; -+ __m128i_out = __lsx_vaddi_du(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000004000000040; -+ 
*((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff9411; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000100000001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x37b951002d81a921; -+ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x003fffc0ffc0003f; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffc0ffc0003f003f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000ffff000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000077529b522400; -+ __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000b81c8382; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000077af9450; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007efe7f7f8000; -+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007efe7f7f8000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000b81c8382; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000077af9450; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000077af9450; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x003fffc0; -+ *((int*)& __m128_op0[2]) = 0xffc0003f; -+ *((int*)& __m128_op0[1]) = 0xffc0ffc0; -+ *((int*)& __m128_op0[0]) = 0x003f003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_result[0]) = 0x000047404f4f040d; -+ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x4f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000047404f4f040d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000214f; -+ *((unsigned long*)& __m128i_result[0]) = 0xc31b63d846ebc810; -+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff941d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007770ffff941d; -+ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x98); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000100000001000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x37b951002d81a921; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000047404f4f040d; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x0000082000000826; -+ *((unsigned long*)& __m128i_result[0]) = 0x1b5c4c203e685617; -+ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000100000001000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x37b951002d81a921; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x3e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000214f; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc31b63d846ebc810; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00ff0000800000ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff941d; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000010a7; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000046ebaa2c; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000b81c8382; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; -+ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1865e65a1; -+ __m128i_out = __lsx_vxori_b(__m128i_op0,0xf1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000077af9450; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3ff0000000000000; -+ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000007f7f7f; -+ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x007f007f00007f7f; -+ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x58); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000010a7; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000046ebaa2c; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf1f1f1f1865e65a1; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000800000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000800000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf1f1f1f1865e65a1; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff941d; -+ *((unsigned long*)& __m128i_op2[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_op2[0]) = 0xf1f1f1f1865e65a1; -+ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_result[0]) = 0x78508ad4ec2ffcde; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffdfdc0d; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3ff0000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffdfdc0d; -+ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[2]) = 
0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_op0[0]) = 0x78508ad4ec2ffcde; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffdfdc0d; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffdfdc0d; -+ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; -+ *((unsigned long*)& __m128i_result[0]) = 0x78508ad4ae70fd87; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; -+ __m256i_out = __lasx_xvclz_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffdfdc0d; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a753500950fa306; -+ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x00000000000d6d6d; -+ int_op1 = 0x00000000090b0906; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000090b0906; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128d_op0[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00007fff7fff8000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x007fffff; -+ *((int*)& __m128_op0[1]) = 0x007fffff; -+ *((int*)& __m128_op0[0]) = 0xff800000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ 
*((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x007f7f7f; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x380fdfdfc0000000; -+ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; -+ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128i_result[1]) = 0xe0001fffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x66); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff8000; -+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; -+ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff8000; -+ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x380fdfdfc0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffc7f100004000; -+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f7f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffc7f100004000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000c7f14000; -+ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x0a752a55; -+ *((int*)& __m128_op0[1]) = 0x0a753500; -+ *((int*)& __m128_op0[0]) = 0x950fa306; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& 
__m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x380fdfdf; -+ *((int*)& __m128_op1[0]) = 0xc0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128d_op0[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128d_op1[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; -+ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff14eb54ab; -+ *((unsigned long*)& __m128i_result[0]) = 0x14ea6a002a406a00; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; -+ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff14eb54ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0x14ea6a002a406a00; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80008a7555aa; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a7535006af05cf9; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; -+ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff80008a7555aa; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a7535006af05cf9; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff758aaa56; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffa9fb0d07; -+ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0a753500950fa306; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff14eb54ab; -+ *((unsigned long*)& __m128i_op1[0]) = 0x14ea6a002a406a00; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x00007fff7fff8000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a753500950fa306; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x68); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x0a752a55; -+ *((int*)& 
__m128_op0[1]) = 0x0a753500; -+ *((int*)& __m128_op0[0]) = 0x950fa306; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x0a752a55; -+ *((int*)& __m128_op1[1]) = 0x0a753500; -+ *((int*)& __m128_op1[0]) = 0x950fa306; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0a753500950fa306; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0a753500950fa306; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a753500a9fa0d06; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a753500a9fa0d06; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf589caff5605f2fa; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000090b0906; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000040; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100002000; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000090b0906; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000005060503; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000073737; -+ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000090b0906; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x00000000000d6d6d; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000005060503; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000073737; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000050007; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000039; -+ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000050007; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000039; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100002000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_w_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0a753500a9fa0d06; -+ *((unsigned long*)& __m128i_result[1]) = 0x0d060d060d060d06; -+ *((unsigned long*)& __m128i_result[0]) = 0x0d060d060d060d06; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf589caff5605f2fa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; -+ *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000eb00ab; -+ *((unsigned long*)& __m128i_result[0]) = 0x017400ff004500fa; -+ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x0a752a55; -+ *((int*)& __m128_op0[1]) = 0x0a753500; -+ *((int*)& __m128_op0[0]) = 0xa9fa0d06; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslei_b(__m256i_op0,-7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000040; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100002000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffc0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; -+ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007fff7fff8000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xce); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000014; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000014; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000014; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf589caff5605f2fa; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000055; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000054; -+ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; -+ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0d060d060d060d06; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0d060d060d060d06; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0d060d060d060d06; -+ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0d060d060d060d06; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0d060d060d060d06; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,-11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; -+ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ff8; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0a7480007fff8000; -+ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7200000072000000; -+ __m256i_out = __lasx_xvldi(-3214); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; 
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7200000072000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x721e001e721e001e; -+ *((unsigned long*)& __m256i_result[2]) = 0x721e001e721e001e; -+ *((unsigned long*)& __m256i_result[1]) = 0x721e001e721e001e; -+ *((unsigned long*)& __m256i_result[0]) = 0x721e001e721e001e; -+ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000055; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000054; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; -+ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000007000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffff0000; -+ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ff8; -+ __m128i_out = __lsx_vsat_w(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00003ff8; -+ *((int*)& __m128_result[3]) = 0xff800000; -+ *((int*)& __m128_result[2]) = 0xff800000; -+ *((int*)& __m128_result[1]) = 0xff800000; -+ *((int*)& __m128_result[0]) = 0xc3080000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0000033a; -+ *((int*)& __m128_op0[2]) = 0x0bde0853; -+ *((int*)& __m128_op0[1]) = 0x0a960e6b; -+ *((int*)& __m128_op0[0]) = 0x0a4f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_result[2]) = 0x2020000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_result[0]) = 0x2020000000000000; -+ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; -+ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0000; -+ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x2020000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x2020000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7fffffffffffffff; -+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x467fe000; -+ __m128_out = __lsx_vffint_s_w(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0xffffff1dffffff1d; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffff1dffffff1d; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffff1dffffff1d; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffff1dffffff1d; -+ __m256i_out = __lasx_xvldi(2845); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001f00000020; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001f00000020; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xff00ffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 
0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xff00ffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x000fffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x000fffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000467fe000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000467fef81; -+ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000467fef81; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x000000ffffff1dff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffff1dffffff1dff; -+ *((unsigned long*)& __m256i_op2[1]) = 0x000000ffffff1dff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffff1dffffff1dff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff0001; -+ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000467fef81; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000013; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ffffff1dff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff1dffffff1dff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ffffff1dff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff1dffffff1dff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff1dffffff1dff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff1dffffff1dff; -+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x2aaaaa85aaaaaa85; -+ *((unsigned long*)& __m256i_op1[2]) = 0x2aaa48f4aaaa48f4; -+ *((unsigned long*)& __m256i_op1[1]) = 0x2aaaaa85aaaaaa85; -+ *((unsigned long*)& __m256i_op1[0]) = 0x2aaa48f4aaaa48f4; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; -+ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128d_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128d_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00ff008000ff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x00ff008000ff0000; -+ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00011; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00011; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00011; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00011; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; -+ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff8001; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff8001; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x6e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x41dffc0000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x41dffc0000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe519ab7e71e33848; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffab7e71e33848; -+ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xbc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff8001ffff8001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff8001ffff8001; -+ *((unsigned long*)& __m256i_result[3]) = 0x7fff800000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffc0017fffc001; -+ *((unsigned long*)& __m256i_result[1]) = 0x7fff800000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffc0017fffc001; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff9dff9dff9dff9d; -+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffab7e71e33848; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x3b5eae24ab7e3848; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff9dff9dff9dff9d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffceffceffcf1fcb; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x00000000090b0906; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x3); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; -+ *((unsigned 
long*)& __m256i_op1[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000c6c60000c6c6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000c6c58000c6b2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000c6c40000c6c6; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000c6c78000c6b2; -+ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x21); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000000000000; -+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x30); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ int_op0 = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x0000ffff; -+ *((int*)& __m128_op0[1]) = 0x3b5eae24; -+ *((int*)& __m128_op0[0]) = 0xab7e3848; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00003f80; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; -+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003f80; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x800fffffffffffff; -+ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); -+ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ int_op0 = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplgr2vr_w(int_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffab7e71e33848; -+ *((unsigned long*)& __m128d_op1[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128d_op1[0]) = 0xffffab7e71e33848; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xffffab7e71e33848; -+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffab7e71e33848; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffe1ffffffe1; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffab5f71e33829; -+ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000075dbe982; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000071e48cca; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0ebb7d300e3c9199; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xfbff0000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0xfbff0000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x800fffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0020; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff0001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff0001; -+ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffab7e71e33848; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256i_op1[1]) = 
0x41dffbffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; -+ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; -+ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x01533b5e7489ae24; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffab7e71e33848; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xce9135c49ffff570; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x23); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000800000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfbff0000ffff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfbff0000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfbff0000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xfbff0000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; -+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f80ffffff808000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f80ffffff808000; -+ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_w(__m128i_op0,-14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f80ffffff808000; -+ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f80ffffff808000; -+ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f7fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f7fff; -+ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffe00000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffe0ffe000000000; -+ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7f80ffffff808000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7f80ffffff808000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffe0ffe000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fa0001fff808000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffe0ffe000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fa0001fff808000; -+ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; -+ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; -+ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x60600000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x6060000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x60600000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x6060000000000000; -+ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x60); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffe0ffe000000001; -+ *((unsigned long*)& 
__m256i_op0[2]) = 0x7fa0001fff808000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffe0ffe000000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fa0001fff808000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[2]) = 0x007f0000ffffff80; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[0]) = 0x007f0000ffffff80; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000ff0000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x001f001fffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffe0ffe000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xce9035c49ffff570; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[0]) = 0xce9035c49ffff574; -+ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xce9035c49ffff570; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op1[0]) = 0xce9035c49ffff574; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000454ffff9573; -+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000004; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000004; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000004; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000004; -+ *((int*)& 
__m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000004; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000004; -+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; -+ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000454ffff9573; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000454ffff9573; -+ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xa4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x41dffbffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffff00ff800000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffe6ffe6e6800001; -+ *((unsigned long*)& __m256d_op1[2]) = 0x19660019ff806680; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffe6ffe6e6800001; -+ *((unsigned long*)& __m256d_op1[0]) = 0x19660019ff806680; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00ff0000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00ff0000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00ff0000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00ff0000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseqi_h(__m128i_op0,-14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffff00;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff8000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffff00;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ff8000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000016;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016;
-+ __m128i_out = __lsx_vaddi_du(__m128i_op0,0x16);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000ff807f81;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000ff807f81;
-+ *((unsigned long*)& __m256i_result[3]) = 0x5d5d5d5d5d22a2a2;
-+ *((unsigned long*)& __m256i_result[2]) = 0xa2dda2a25d22dd23;
-+ *((unsigned long*)& __m256i_result[1]) = 0x5d5d5d5d5d22a2a2;
-+ *((unsigned long*)& __m256i_result[0]) = 0xa2dda2a25d22dd23;
-+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x007f0000ff807f81;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x007f0000ff807f81;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000;
-+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffff8000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffff8000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffff8000;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffff8000;
-+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010;
-+ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000;
-+ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00c00040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000008000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00c00040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000008000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xf);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000001000000048;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128d_result[0]) = 0xc090380000000000;
-+ __m128d_out = __lsx_vflogb_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000048;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffeffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
-+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc090380000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000200000000d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000;
-+ __m128i_out = __lsx_vclo_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmskltz_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001d001d001d001d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x001d001d001d001d;
-+ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000200000000d;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ int_op0 = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xc090380000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xc090380000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffbfffc;
-+ *((unsigned long*)& __m128i_result[0]) = 0xc090380000000000;
-+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1c1c1c1c1c1c1c1c;
-+ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x001d001d001d001d;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x001d001d001d0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x001d001d001d001d;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x001d001d001d0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00fffbfffc;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01ff1100000048;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
-+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004;
-+ __m128i_out = __lsx_vmaxi_h(__m128i_op0,4);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
-+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xffffffff;
-+ *((int*)& __m256_op0[6]) = 0xffffffff;
-+ *((int*)& __m256_op0[5]) = 0xffffffff;
-+ *((int*)& __m256_op0[4]) = 0xffffffff;
-+ *((int*)& __m256_op0[3]) = 0xffffffff;
-+ *((int*)& __m256_op0[2]) = 0xffffffff;
-+ *((int*)& __m256_op0[1]) = 0xffffffff;
-+ *((int*)& __m256_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7070545438381c1c;
-+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7070545438381c1c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffff8000;
-+ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004;
-+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01;
-+ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff00007fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00007fff;
-+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfe03fe01fe01fe01;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe3bfa3ffe3bfb21;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x001d001d001d001d;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x001d001d001d0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x001d001d001d001d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x001d001d001d0000;
-+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001200000012;
-+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x3d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000001;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000001;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000001;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x1a1a1a2c1a1a1a2c;
-+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_b(__m256i_op0,2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe03fe3ffe01fa21;
-+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfe03fe3ffe01fa21;
-+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000;
-+ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x1a1a1a2c1a1a1a2c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff8000;
-+ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001200000012;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000001ffff8000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000001ffff8000;
-+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ unsigned_int_result = 0x00000000ffffffff;
-+ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x4);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x60);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfe3bfb01fe3bfe01;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfe03fe3ffe01fa21;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xfe3bfb01;
-+ *((int*)& __m128_op0[2]) = 0xfe3bfe01;
-+ *((int*)& __m128_op0[1]) = 0xfe03fe3f;
-+ *((int*)& __m128_op0[0]) = 0xfe01fa21;
-+ *((int*)& __m128_op1[3]) = 0xfe3bfb01;
-+ *((int*)& __m128_op1[2]) = 0xfe3bfe01;
-+ *((int*)& __m128_op1[1]) = 0xfe03fe3f;
-+ *((int*)& __m128_op1[0]) = 0xfe01fa21;
-+ *((int*)& __m128_op2[3]) = 0x00000000;
-+ *((int*)& __m128_op2[2]) = 0x00000000;
-+ *((int*)& __m128_op2[1]) = 0x00000000;
-+ *((int*)& __m128_op2[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x7f800000;
-+ *((int*)& __m128_result[2]) = 0x7f800000;
-+ *((int*)& __m128_result[1]) = 0x7f800000;
-+ *((int*)& __m128_result[0]) = 0x7f800000;
-+ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffffff00;
-+ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fe03fe01;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fe01fe01;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000007020701;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000007010701;
-+ __m128i_out = __lsx_vpcnt_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000100;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000100;
-+ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0700f8ff0700f8ff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000007020701;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000007010701;
-+ *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7f8000008680f1ff;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfff9fffefff9ffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0280000000000000;
-+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000100;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0002000200000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0002000200000000;
-+ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000008680f1ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff80ffffff80ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff80ffff8680f1ff;
-+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000008680f1ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0280000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ffffff00000000;
-+ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x1c1c1c1c;
-+ *((int*)& __m256_op0[6]) = 0x1c1c1c1c;
-+ *((int*)& __m256_op0[5]) = 0xfffffffe;
-+ *((int*)& __m256_op0[4]) = 0xffffff00;
-+ *((int*)& __m256_op0[3]) = 0x1c1c1c1c;
-+ *((int*)& __m256_op0[2]) = 0x1c1c1c1c;
-+ *((int*)& __m256_op0[1]) = 0xfffffffe;
-+ *((int*)& __m256_op0[0]) = 0xffffff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3f8000003f800000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffffff00;
-+ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffc0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffc0;
-+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffeffffff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3838383838383838;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffdfffffe00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3838383838383838;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffdfffffe00;
-+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0x0a0000000a000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0a0000000a000000;
-+ __m128i_out = __lsx_vldi(-3318);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff000000;
-+ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x18);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff80ffff7e02;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0280000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff81ffff7f03;
-+ *((unsigned long*)& __m128i_result[0]) = 0x04ffff8101ff81ff;
-+ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff81ffff7f03;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x04ffff8101ff81ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0a0000001e000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0a000000f6000000;
-+ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0002000200000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000020002000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000020002000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x14);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x3838383838383838;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffdfffffe00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x3838383838383838;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffdfffffe00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
-+ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff80ff807e017f01;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7f3b7f3f7f3b7f21;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000001e000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0a000000f6000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0980ff8174017f01;
-+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xff80ff80;
-+ *((int*)& __m128_op0[2]) = 0x7e017f01;
-+ *((int*)& __m128_op0[1]) = 0x7f3b7f3f;
-+ *((int*)& __m128_op0[0]) = 0x7f3b7f21;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
-+ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000020002000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000020002000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffc0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffc0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff0fc00;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff0fc00;
-+ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fc00;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fc00;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000f880f87e;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000f880f87e;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000;
-+ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffff000000;
-+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff80ffff7e02;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xf931fd04f832fe02;
-+ *((unsigned long*)& __m128i_result[1]) = 0x80007fc000003f00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x7d187e427c993f80;
-+ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x0a000000;
-+ *((int*)& __m128_op0[2]) = 0x0a000000;
-+ *((int*)& __m128_op0[1]) = 0x0a000000;
-+ *((int*)& __m128_op0[0]) = 0x0a000000;
-+ *((int*)& __m128_result[3]) = 0x75000000;
-+ *((int*)& __m128_result[2]) = 0x75000000;
-+ *((int*)& __m128_result[1]) = 0x75000000;
-+ *((int*)& __m128_result[0]) = 0x75000000;
-+ __m128_out = __lsx_vfrecip_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f880f87e;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f880f87e;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000;
-+ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000017f7f7f7f;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000017f7f7f7f;
-+ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0700f8ff0700f8ff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0700f8ff0700f8ff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0xffffffff;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0xffffffff;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff8000;
-+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff8000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000004000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000004000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0280000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7500000075000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7500000075000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x3a8000003a800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x3bc000003a800000;
-+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000017f7f7f7f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000017f7f7f7f;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000017fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000017fff;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x80007fc000003f00;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7d187e427c993f80;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x7500000075000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x7500000075000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff800000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00007d1800007c99;
-+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff800000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000017fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000017fff;
-+ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__,
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000017fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000017fff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007d1800007c99; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000001e000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a621b3ebe5e1c02; -+ *((unsigned long*)& __m128i_result[1]) = 0x04ffc0000f000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x05314c2bdf2f4c4e; -+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000017f7f7f7f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000017f7f7f7f; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010080; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7500000075007500; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff800000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007d1800007c99; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000007500; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007e1600007d98; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000007fffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000fe00fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe00fe; -+ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000007500; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007e1600007d98; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000fe00fe; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000fe75fe; -+ *((unsigned long*)& __m128i_result[0]) = 0x00fe7efe00fe7dfe; -+ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; -+ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010080; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0a0000000a000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0a0000000a000000; -+ *((unsigned long*)& __m128i_op1[1]) = 
0x7f7f00007f7f7500; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3b42017f3a7f7f01; -+ *((unsigned long*)& __m128i_result[1]) = 0x04faf60009f5f092; -+ *((unsigned long*)& __m128i_result[0]) = 0x04fafa9200000000; -+ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfff9fffefff9ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x04faf60009f5f092; -+ *((unsigned long*)& __m128i_op1[0]) = 0x04fafa9200000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfc06066e00000000; -+ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x04faf60009f5f092; -+ *((unsigned long*)& __m128i_op0[0]) = 0x04fafa9200000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfff9fffefff9ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000004fa000009f5; -+ *((unsigned long*)& __m128i_result[0]) = 0x000004f3fffffff9; -+ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4480000044800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x45c0000044800000; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000007500; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00007e1600007d98; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000090900000998; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff800000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffc0000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffc0000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; -+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffbfffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffbfffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000800000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0102020202010202; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0102020202010202; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; -+ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xa9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000fe00fe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe00fe; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000007500; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007e1600007d98; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe7fffffff; -+ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4480000044800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x45c0000044800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x4481000144810001; -+ *((unsigned long*)& __m128i_result[0]) = 0x45c04000c4808000; -+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x3fffffff3fffc000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x3fffffff3fffc000; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x04faf60009f5f092; -+ *((unsigned long*)& __m128i_op0[0]) = 0x04fafa9200000000; -+ int_op1 = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x04faf600fff5f092; -+ *((unsigned long*)& __m128i_result[0]) = 0x04fafa9200000000; -+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100010; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000090900000998; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x00007a8000000480; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000485000004cc; -+ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000010000f; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000010000f; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000090900000998; -+ *((unsigned long*)& __m128i_result[1]) = 0xffff00ffffff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; -+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsat_h(__m128i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned 
long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x3a8100013a810001; -+ *((unsigned long*)& __m128i_result[0]) = 0x7bc04000ba808000; -+ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00007a8000000480; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000485000004cc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000090a00000998; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff3a81ffff89fd; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffb3c3ffff51ba; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0802080408060803; -+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000090a00000998; -+ *((unsigned long*)& __m128i_result[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000ef0000000003b; -+ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff00ffffff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000090900000998; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff000900ffff98; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff760386bdae46; -+ *((unsigned long*)& __m128i_op0[0]) = 0xc1fc7941bc7e00ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0802080408060803; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff000086bd; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ca000000c481; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x1d4000001d400000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1e5f007f5d400000; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; -+ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_w(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffff; -+ *((int*)& __m256_op0[6]) = 0xffffffff; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0xffffffff; -+ *((int*)& __m256_op0[2]) = 0xffffffff; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00100010; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00100010; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00100010; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00100010; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff000086bd; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ca000000c481; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff00ffffff00ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000900ffff98; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x7fffffff7fffffff; -+ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000000900; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000090900000998; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; -+ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x20); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000003fffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000003fffff; -+ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff760386bdae46; -+ *((unsigned long*)& __m128i_op1[0]) = 0xc1fc7941bc7e00ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff7603; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xc3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffff7603; -+ *((unsigned long*)& __m128d_op1[1]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7fffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x45000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x44000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x3cb504f3; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x3d3504f3; -+ *((int*)& __m128_result[0]) = 0x7f800000; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4500000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4400000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000; -+ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff010100000001; -+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffff80; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffff80; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000ef0000000003b; -+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0802080408060803; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00001fffe0001fff; -+ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; -+ *((int*)& __m128_result[3]) = 0x577fff00; -+ *((int*)& __m128_result[2]) = 0x577fff00; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x596f0000; -+ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffff0101; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffff0101; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0xffff0101; -+ *((int*)& __m256_result[4]) = 
0x00000001; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0xffff0101; -+ *((int*)& __m256_result[0]) = 0x00000001; -+ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x3fffffff3fffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x3fffffff3fffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000810001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000810001; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000440efffff000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000440efffff000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffff0101; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffff0101; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 
0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000440efffff000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x440ef000440ef000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x4400000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000440efffff000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000003b; -+ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; -+ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x39); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffff0101; -+ *((int*)& __m256_op1[4]) = 0x00000001; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffff0101; -+ *((int*)& __m256_op1[0]) = 0x00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x440ef000440ef000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4400000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0f8d33000f8d3300; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0003b80000000000; -+ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; -+ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff2356fe165486; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5efeb3165bd7653d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseqi_w(__m128i_op0,5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vseqi_h(__m128i_op0,0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0f8d33000f8d3300; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0003b80000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0f8d33000f8d32fd; -+ *((unsigned long*)& __m128i_result[0]) = 0x0003b7fffffffffd; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000007fff9; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff2356fe165486; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5efeb3165bd7653d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000235600005486; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000b31600006544; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vneg_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff2356fe165486; -+ *((unsigned long*)& __m128i_op1[0]) = 0x5efeb3165bd7653d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff2356fe165486; -+ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffff010100000001; -+ *((unsigned long*)& 
__m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffff010100000001; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000235600005486; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000b31600006544; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_wu(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff2356fe165486; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000003b0000ffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff2356fe165486; -+ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x70); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x50); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff0fffffff0; -+ __m256i_out = __lasx_xvmini_w(__m256i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefdfdfdfd; -+ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefdfdfdfd; -+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ 
__m256i_out = __lasx_xvsrari_d(__m256i_op0,0x26); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvmini_w(__m256i_op0,-1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00003a7fc58074ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000eeff1100e; -+ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff2356fe165486; -+ *((unsigned long*)& __m128i_op0[0]) = 0x5efeb3165bd7653d; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000007; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000007; -+ __m128i_out = __lsx_vmini_du(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3a8000003a800000; -+ __m128i_out = __lsx_vexth_q_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffffff0; -+ *((int*)& __m256_op0[6]) = 0xfffffff0; -+ *((int*)& __m256_op0[5]) = 0xfffffff0; -+ *((int*)& __m256_op0[4]) = 0xfffffff0; -+ *((int*)& __m256_op0[3]) = 0xfffffff0; -+ *((int*)& __m256_op0[2]) = 0xfffffff0; -+ *((int*)& __m256_op0[1]) = 0xfffffff0; -+ *((int*)& __m256_op0[0]) = 0xfffffff0; -+ *((unsigned long*)& __m256d_result[3]) = 0xfffffffe00000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xfffffffe00000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xfffffffe00000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xfffffffe00000000; -+ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefdfdfdfd; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefdfdfdfd; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010202020203; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010201010102; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010202020203; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010201010102; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x3a800000; -+ *((int*)& __m128_op0[2]) = 0x3a800000; -+ *((int*)& __m128_op0[1]) = 0x000ef000; -+ *((int*)& __m128_op0[0]) = 0x0000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = 
__lsx_vftintrp_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0xffffffff; -+ *((int*)& __m256_op0[4]) = 0xffffffff; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0xffffffff; -+ *((int*)& __m256_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000feff2356; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fd165486; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000007; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000007; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000246d9755; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000002427c2ee; -+ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000056; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff86; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000eefff; -+ *((unsigned long*)& __m128i_result[0]) = 0xf8e1a03affffe3e2; -+ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x000ef0000000003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000eefff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xf8e1a03affffe3e2; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000efffefff; -+ *((unsigned long*)& __m128i_result[0]) = 0xa03aa03ae3e2e3e2; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op0[0]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ffffff00ffffff; -+ __m128i_out = __lsx_vslei_b(__m128i_op0,15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; -+ unsigned_int_result = 0x00000000000000ff; -+ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xc); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000056000056; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3a8000003a800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffefff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xa03aa03ae3e2e3e2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x75); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffff0fffffff0; -+ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010202020203; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010201010102; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010202020203; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010201010102; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff0fffffff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0101010202020203; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0101010201010102; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0101010202020203; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0101010201010102; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000003e0000003f; -+ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010202020203; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010201010102; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010202020203; -+ *((unsigned long*)& __m256i_op1[0]) 
= 0x0101010201010102; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xffffffff; -+ *((int*)& __m128_op0[2]) = 0xffffffff; -+ *((int*)& __m128_op0[1]) = 0x00ffffff; -+ *((int*)& __m128_op0[0]) = 0x00ffffff; -+ *((int*)& __m128_op1[3]) = 0x0000feff; -+ *((int*)& __m128_op1[2]) = 0x23560000; -+ *((int*)& __m128_op1[1]) = 0x0000fd16; -+ *((int*)& __m128_op1[0]) = 0x54860000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3a80613fda5dcb4a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x93f0b81a914c003b; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000051649b6; -+ *((unsigned long*)& __m128i_result[0]) = 0xd2f005e44bb43416; -+ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000051649b6; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd2f005e44bb43416; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000003e0000003f; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000051649b6; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000003e0000003f; -+ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000760151; -+ *((unsigned long*)& __m128i_op0[0]) = 0x003e0021009a009a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000003e2427c2ee; -+ *((unsigned long*)& __m128i_result[1]) = 0x00001e5410082727; -+ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00107f7f; -+ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000051649b6; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000003e0000003f; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x41945926d8000000; -+ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41945926d8000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00001e5410082727; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00107f7f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001001001000080; -+ *((unsigned long*)& __m128i_result[0]) = 0x4195d926d8018000; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000f0f0f0f0f0f0; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01f010; -+ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01f010; -+ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01f010; -+ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01f010; -+ *((unsigned long*)& __m256i_result[3]) = 0x000078780000f0f1; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000078780000f0f1; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x3a80613fda5dcb4a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x93f0b81a914c003b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fd1654860000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1e242e4d68dc0000; -+ *((unsigned long*)& __m128i_result[0]) = 0x2ff8fddb7ae20000; -+ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee; -+ *((unsigned long*)& __m128i_result[1]) = 0xf8e10000a03a0000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff2427e3e2c2ee; -+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363abdf16; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e08016161198; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; -+ *((unsigned long*)& __m128i_op1[0]) = 
0x000000002427c2ee; -+ *((unsigned long*)& __m128i_result[1]) = 0x636363633f3e47c1; -+ *((unsigned long*)& __m128i_result[0]) = 0x41f8e080f1ef4eaa; -+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363abdf16; -+ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198; -+ *((unsigned long*)& __m128i_result[1]) = 0x9c9d9b9bbfaa20e9; -+ *((unsigned long*)& __m128i_result[0]) = 0xbe081c963e6fee68; -+ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-16); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001001001000080; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4195d926d8018000; -+ *((int*)& __m128_result[3]) = 0x33800000; -+ *((int*)& __m128_result[2]) = 0x35800000; -+ *((int*)& __m128_result[1]) = 0x37800000; -+ *((int*)& __m128_result[0]) = 0x37000000; -+ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; -+ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0fffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x41957fff7fff7fff; -+ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; -+ *((unsigned long*)& __m128i_result[0]) = 0xbf6b810181018101; -+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363abdf16; -+ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000030000; -+ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x63636363; -+ *((int*)& __m128_op0[2]) = 0x63abdf16; -+ *((int*)& __m128_op0[1]) = 0x41f8e080; -+ *((int*)& __m128_op0[0]) = 0x16161198; -+ *((unsigned long*)& __m128i_result[1]) = 0x6363636363abdf16; -+ *((unsigned long*)& __m128i_result[0]) = 0x420000003f800000; -+ __m128i_out = __lsx_vfrintrp_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000080801030000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000080103040000; -+ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x6c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000001e001e001e0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000001e001e001e0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; -+ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; -+ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslti_du(__m128i_op0,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; -+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000001e001e001e0; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000001e001e001e0; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c9d9b9bbfaa20e9; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbe081c963e6fee68; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000feff23560000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fd1654860000; -+ *((unsigned long*)& __m128i_result[1]) = 0x6363636463abdf17; -+ *((unsigned long*)& __m128i_result[0]) = 0x41f8e08016161198; -+ 
__m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7fff7fff; -+ *((int*)& __m128_op0[2]) = 0x7fff7fff; -+ *((int*)& __m128_op0[1]) = 0xbf6b8101; -+ *((int*)& __m128_op0[0]) = 0x81018101; -+ *((int*)& __m128_op1[3]) = 0xe3636363; -+ *((int*)& __m128_op1[2]) = 0x63abdf16; -+ *((int*)& __m128_op1[1]) = 0x41f8e080; -+ *((int*)& __m128_op1[0]) = 0x16161198; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x01010101; -+ *((int*)& __m256_op0[6]) = 0x01010101; -+ *((int*)& __m256_op0[5]) = 0x01010101; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x01010101; -+ *((int*)& __m256_op0[2]) = 0x01010101; -+ *((int*)& __m256_op0[1]) = 0x01010101; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((int*)& __m256_op1[7]) = 0x000001e0; -+ *((int*)& __m256_op1[6]) = 0x01e001e0; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x000001e0; -+ *((int*)& __m256_op1[2]) = 0x01e001e0; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636463abdf17; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e08016161198; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x6363636463abdf17; -+ *((unsigned long*)& __m128i_result[0]) = 0x41f8e08016161198; -+ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x01010101; -+ *((int*)& __m256_op0[6]) = 0x01010101; -+ *((int*)& __m256_op0[5]) = 0x01010101; -+ *((int*)& __m256_op0[4]) = 0x00000001; -+ *((int*)& __m256_op0[3]) = 0x01010101; -+ *((int*)& __m256_op0[2]) = 0x01010101; -+ *((int*)& __m256_op0[1]) = 0x01010101; -+ *((int*)& __m256_op0[0]) = 0x00000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfrintrne_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x5847b72626ce61ef; -+ *((unsigned long*)& __m128d_op0[0]) = 0x110053f401e7cced; -+ *((unsigned long*)& __m128i_result[1]) = 0x5847b72626ce61ef; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000101; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000807bf0a1f80; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000800ecedee68; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5847b72626ce61ef; -+ *((unsigned long*)& __m128i_op1[0]) = 0x110053f401e7cced; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x5847bf2de5d8816f; -+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000010100000101; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000010100000101; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0x000000000000001e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001e00000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_d(__m256i_op0,12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x636363633f3e47c1; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e080f1ef4eaa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000807bf0a1f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000800ecedee68; -+ *((unsigned long*)& __m128i_result[1]) = 0x63636b6afe486741; -+ *((unsigned long*)& __m128i_result[0]) = 0x41f8e880ffffffff; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000010100000101; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x5847b72626ce61ef; -+ *((unsigned long*)& __m128i_op0[0]) = 0x110053f401e7cced; -+ *((unsigned long*)& __m128i_op1[1]) = 0x5847b72626ce61ef; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0005847b00011005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0005847b00000000; -+ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x2c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0005847b00011005; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0005847b00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000807bf0a1f80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000800ecedee68; -+ *((unsigned long*)& __m128i_result[1]) = 0x0005840100000005; -+ *((unsigned long*)& __m128i_result[0]) = 0x0005847b00000000; -+ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x63636b6afe486741; -+ *((unsigned long*)& __m128i_result[0]) = 0x41f8e880ffffffff; -+ __m128i_out = __lsx_vmaxi_d(__m128i_op0,-2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000027; -+ __m128i_out = __lsx_vmskltz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x57f160c4a1750eda; -+ *((unsigned long*)& __m128i_result[1]) = 0x000002bf8b062000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffd0ba876d000; -+ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000807bf0a1f80; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000800ecedee68; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0005840100000005; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0005847b00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001f0a20001cedf; -+ *((unsigned long*)& __m128i_result[0]) = 0x0058000000580000; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010110; -+ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010110; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000002bf8b062000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffd0ba876d000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x63636b6afe486741; -+ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e880ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff110db0; -+ *((unsigned long*)& __m128i_result[0]) = 0x41f7be08ffff578a; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000002bf8b062000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffd0ba876d000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xe363636363abdf16;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0005840100000005;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0005847b00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0004e8f09e99b528;
-+ *((unsigned long*)& __m128i_result[0]) = 0xcf1225129ad22b6e;
-+ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xe363636363abdf16;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000cecd00004657;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000c90000011197;
-+ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x63636b6a;
-+ *((int*)& __m128_op0[2]) = 0xfe486741;
-+ *((int*)& __m128_op0[1]) = 0x41f8e880;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0xe3636363;
-+ *((int*)& __m128_op1[2]) = 0x63abdf16;
-+ *((int*)& __m128_op1[1]) = 0x41f8e080;
-+ *((int*)& __m128_op1[0]) = 0x16161198;
-+ *((int*)& __m128_op2[3]) = 0x00c27580;
-+ *((int*)& __m128_op2[2]) = 0x00bccf42;
-+ *((int*)& __m128_op2[1]) = 0x00a975be;
-+ *((int*)& __m128_op2[0]) = 0x00accf03;
-+ *((int*)& __m128_result[3]) = 0xff800000;
-+ *((int*)& __m128_result[2]) = 0xff800000;
-+ *((int*)& __m128_result[1]) = 0x4471fb84;
-+ *((int*)& __m128_result[0]) = 0xffffffff;
-+ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslti_d(__m128i_op0,-13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010110;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010110;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0005840100000005;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0005847b00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x636363633f3e47c1;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e080f1ef4eaa;
-+ *((unsigned long*)& __m128i_result[1]) = 0xa000308000008002;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0500847b00000000;
-+ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xa000308000008002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0500847b00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vslti_w(__m128i_op0,7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0101010101010110;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0101010101010110;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0101010101010101;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001e00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000f00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x63636363;
-+ *((int*)& __m128_op0[2]) = 0x3f3e47c1;
-+ *((int*)& __m128_op0[1]) = 0x41f8e080;
-+ *((int*)& __m128_op0[0]) = 0xf1ef4eaa;
-+ *((int*)& __m128_op1[3]) = 0x0000cecd;
-+ *((int*)& __m128_op1[2]) = 0x00004657;
-+ *((int*)& __m128_op1[1]) = 0x0000c900;
-+ *((int*)& __m128_op1[0]) = 0x00011197;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0x00000000ffffffff;
-+ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000001e00000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0002000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x0000000c;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x0000000c;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xa000308000008002;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0500847b00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00a975be00accf03;
-+ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffbfffffffb;
-+ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffa7;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000930400008a10;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00006f9100007337;
-+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000930400008a10;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00006f9100007337;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00250023001c001d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x309d2f342a5d2b34;
-+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00c2758000bccf42;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00a975be00accf03;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00250023001c001d;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x309d2f342a5d2b34;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00060eb000000006;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000075c00000cf0;
-+ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x0000cecd;
-+ *((int*)& __m128_op1[2]) = 0x00004657;
-+ *((int*)& __m128_op1[1]) = 0x0000c900;
-+ *((int*)& __m128_op1[0]) = 0x00011197;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256d_op0[1]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefffffffeff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffbfffffffb;
-+ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00060eb000000006;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000075c00000cf0;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffaf1500000fffa;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000f8a40000f310;
-+ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000cecd00004657;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000c90000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00019d9a00008cae;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
-+ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00250023001c001d;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x309d2f342a5d2b34;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslei_du(__m128i_op0,0x13);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000f00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsat_wu(__m128i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000f00000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000f00000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ unsigned_long_int_result = 0x0000000000000000;
-+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
-+ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffaf1500000fffa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a40000f310;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x26);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xe4e4e4e4e4e4e4e4;
-+ *((unsigned long*)& __m256i_result[2]) = 0xe4e4e4e4e4e4e4e4;
-+ *((unsigned long*)& __m256i_result[1]) = 0xe4e4e4e4e4e4e4e4;
-+ *((unsigned long*)& __m256i_result[0]) = 0xe4e4e4e4e4e4e4e4;
-+ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x1c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000cecd00004657;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000c90000011197;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000200000800000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100800000;
-+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000000;
-+ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x25);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000006;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000006;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0008000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0008000000000000;
-+ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xc);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000c;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000c;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseqi_w(__m256i_op0,9);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xfffffffb;
-+ *((int*)& __m256_op0[6]) = 0xfffffffb;
-+ *((int*)& __m256_op0[5]) = 0xfffffffb;
-+ *((int*)& __m256_op0[4]) = 0xfffffffb;
-+ *((int*)& __m256_op0[3]) = 0xfffffffb;
-+ *((int*)& __m256_op0[2]) = 0xfffffffb;
-+ *((int*)& __m256_op0[1]) = 0xfffffffb;
-+ *((int*)& __m256_op0[0]) = 0xfffffffb;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002;
-+ __m256i_out = __lasx_xvfclass_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000003ffe2;
-+ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xfffffffb;
-+ *((int*)& __m256_op0[6]) = 0xfffffffb;
-+ *((int*)& __m256_op0[5]) = 0xfffffffb;
-+ *((int*)& __m256_op0[4]) = 0xfffffffb;
-+ *((int*)& __m256_op0[3]) = 0xfffffffb;
-+ *((int*)& __m256_op0[2]) = 0xfffffffb;
-+ *((int*)& __m256_op0[1]) = 0xfffffffb;
-+ *((int*)& __m256_op0[0]) = 0xfffffffb;
-+ *((int*)& __m256_op1[7]) = 0x0000ffff;
-+ *((int*)& __m256_op1[6]) = 0x0001000e;
-+ *((int*)& __m256_op1[5]) = 0x0000ffff;
-+ *((int*)& __m256_op1[4]) = 0x0000ffff;
-+ *((int*)& __m256_op1[3]) = 0x0000ffff;
-+ *((int*)& __m256_op1[2]) = 0x0000ffff;
-+ *((int*)& __m256_op1[1]) = 0x0000ffff;
-+ *((int*)& __m256_op1[0]) = 0x0000ffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xc7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x1b);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00000000;
-+ *((int*)& __m256_op0[6]) = 0x00000000;
-+ *((int*)& __m256_op0[5]) = 0x00000000;
-+ *((int*)& __m256_op0[4]) = 0x00000000;
-+ *((int*)& __m256_op0[3]) = 0x00000000;
-+ *((int*)& __m256_op0[2]) = 0x00000000;
-+ *((int*)& __m256_op0[1]) = 0x00000000;
-+ *((int*)& __m256_op0[0]) = 0x00000000;
-+ *((int*)& __m256_op1[7]) = 0x00000000;
-+ *((int*)& __m256_op1[6]) = 0x00000000;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000000;
-+ *((int*)& __m256_op1[2]) = 0x00000000;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((int*)& __m256_op2[7]) = 0x001fffff;
-+ *((int*)& __m256_op2[6]) = 0xffffffff;
-+ *((int*)& __m256_op2[5]) = 0xffffffff;
-+ *((int*)& __m256_op2[4]) = 0xffffffff;
-+ *((int*)& __m256_op2[3]) = 0x001fffff;
-+ *((int*)& __m256_op2[2]) = 0xffffffff;
-+ *((int*)& __m256_op2[1]) = 0xffffffff;
-+ *((int*)& __m256_op2[0]) = 0xffffffff;
-+ *((int*)& __m256_result[7]) = 0x001fffff;
-+ *((int*)& __m256_result[6]) = 0xffffffff;
-+ *((int*)& __m256_result[5]) = 0xffffffff;
-+ *((int*)& __m256_result[4]) = 0xffffffff;
-+ *((int*)& __m256_result[3]) = 0x001fffff;
-+ *((int*)& __m256_result[2]) = 0xffffffff;
-+ *((int*)& __m256_result[1]) = 0xffffffff;
-+ *((int*)& __m256_result[0]) = 0xffffffff;
-+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00050eb00000fffa;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000f8a50000f310;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000001;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000f1384;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000004ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000;
-+ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000004ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020;
-+ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002;
-+ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002;
-+ int_op1 = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x003fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x0667ae56;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000020;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffd;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffd;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000700020004;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000700020004;
-+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffe0002;
-+ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000667ae56;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000004ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020;
-+ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0002;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000667ae56;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020;
-+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0020000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0020000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xe);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe;
-+ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000700020004;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000700020004;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000003;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000070002000a;
-+ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000000060002000a;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000060002000a;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0040000000000003;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffbffffffffffffe;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffbffffffffffffe;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffa;
-+ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fffe0001;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0001fffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff;
-+ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff00ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff00ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffd;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe;
-+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000070002000a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000070002000a;
-+ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffbffffffffffffe;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffbffffffffffffe;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffff00ff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffff0000;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0x0000ffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
-+ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000700000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005;
-+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x60);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff00ff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op2[1]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000a;
-+ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x7f800000;
-+ *((int*)& __m128_result[2]) = 0x7f800000;
-+ *((int*)& __m128_result[1]) = 0x7f800000;
-+ *((int*)& __m128_result[0]) = 0x7f800000;
-+ __m128_out = __lsx_vfrecip_s(__m128_op0);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrne_w_s(__m128_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe;
-+ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001fffe00014b41;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001ffde;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff0002ffffb4bf;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff0002ffff0022;
-+ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out =
__lasx_xvpackod_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x32); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000700000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000700000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000700000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000007; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000020000000b; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000007; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000020000000a; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x000000000000000a; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000000a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000032; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000032; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffce; -+ __m256i_out = __lasx_xvneg_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0001fffe00014b41; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001ffde; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000100020002; -+ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f8100017f810001; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8100017f810001; -+ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7f8100017f810001; -+ *((unsigned long*)& __m128d_op0[0]) = 0x7f8100017f810001; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000700000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000700000000; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256d_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; -+ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; -+ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0080000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0080000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000a; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000a; -+ *((unsigned long*)& __m256i_result[3]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[1]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; -+ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f8000017f800001; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f8000017f800001; -+ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 
0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x80000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x80000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x80000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x80000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0040000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000a000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000a000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00020001; -+ *((int*)& __m128_op0[0]) = 0x00020002; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x7f800000; -+ *((int*)& __m128_result[1]) = 0x607fffc0; -+ *((int*)& __m128_result[0]) = 0x607fff80; -+ __m128_out = __lsx_vfrsqrt_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000017f800001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000017f800001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000007f800001; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000007f800001; -+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000100020002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000100020002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000100020002; -+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x21); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f00007f7f0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f80807f7f8080; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffe0000fffe; -+ *((unsigned long*)& __m128i_op2[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; -+ __m256i_out = __lasx_xvclz_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvneg_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((int*)& __m128_result[3]) = 0x7f800000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_result[0]) = 0x6363636463636363; -+ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256d_result[3]) = 0x606a20bd700e59a3; -+ *((unsigned long*)& __m256d_result[2]) = 0x6066a09e66c5f1bb; -+ *((unsigned long*)& __m256d_result[1]) = 0x606a20bd700e59a3; -+ *((unsigned long*)& __m256d_result[0]) = 0x6066a09e66c5f1bb; -+ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffce; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x6b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000e2e36363; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000063636363; -+ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x317fce80317fce80; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000500020002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000700020033; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000500020002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000700020033; -+ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000500020002; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000700020033; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000500020002; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700020033; -+ *((unsigned long*)& __m256i_result[3]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1400080008000000; -+ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x26); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; -+ __m128i_out = __lsx_vmsknz_b(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa2e3a36363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa2e3a36463636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000a2e300006363; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000a2e300006363; -+ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1400080008000000; -+ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xf0000000f0000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslli_h(__m128i_op0,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffe0000fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; -+ __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f80000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007f80; -+ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x7f80000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7f80000000000007; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000700000007; -+ __m128i_out = __lsx_vmaxi_w(__m128i_op0,7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x317fce80317fce80; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; -+ *((unsigned long*)& __m256i_result[3]) = 0x0807f7f80807f7f8; -+ *((unsigned long*)& __m256i_result[2]) = 0x0807f7f80807f7f8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0807f7f80807f7f8; -+ *((unsigned long*)& __m256i_result[0]) = 0x0807f7f80807f7f8; -+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x1400080008000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x1400080008000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x1400080008000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x1400080008000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000501ffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000701ffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000501ffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000701ffffce; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000080000000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; -+ __m128i_out = __lsx_vaddi_du(__m128i_op0,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa2e3a36363636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xa2e3a36463636363; -+ *((unsigned long*)& __m128i_op1[1]) = 0x7f80000000000007; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000700000007; -+ 
*((unsigned long*)& __m128i_result[1]) = 0x000000000e32c50e; -+ *((unsigned long*)& __m128i_result[0]) = 0xf2b2ce330e32c50e; -+ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1400080008000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x7f800000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 
0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000008; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe8440000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe8440000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffe8440000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffe8440000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe8440000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe8440000; -+ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ 
-+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000014; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000014; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000014; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000014; -+ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; -+ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; -+ __m256i_out = __lasx_xvfclass_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffe0001fffe0001; -+ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0xffffffff; -+ *((int*)& __m128_op0[0]) = 0xffffffff; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; -+ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xf5fffc00; -+ *((int*)& __m256_op0[6]) = 0xfc000000; -+ *((int*)& __m256_op0[5]) = 0xf5fffc00; -+ *((int*)& __m256_op0[4]) = 0xfc000000; -+ *((int*)& __m256_op0[3]) = 0xf5fffc00; -+ *((int*)& __m256_op0[2]) = 0xfc000000; -+ *((int*)& __m256_op0[1]) = 0xf5fffc00; -+ *((int*)& __m256_op0[0]) = 0xfc000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xf5fffc00fc000000; -+ 
*((unsigned long*)& __m256i_result[1]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xf5fffc00fc000000; -+ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x4f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe00; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe00; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe00; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe00; -+ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x4f4f4f4f; -+ *((int*)& __m128_op0[2]) = 0x4f4f4f4f; -+ *((int*)& __m128_op0[1]) = 0x4f4f4f4f; -+ *((int*)& __m128_op0[0]) = 0x4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000cf4f4f00; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000cf4f4f00; -+ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000000cf4f4f00; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000cf4f4f00; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x1b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff80ffffff80ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000018080807f; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffff80fe; -+ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_op1 = 0xffffffffffff8a35; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ unsigned_long_int_result = 0x0000000000000000; -+ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffed; -+ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x13); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffce; -+ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op1[3]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; -+ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op2[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op2[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op2[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op2[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000020000010201; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000020000010201; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000020000010201; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000020000010201; -+ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffed; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffed; -+ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ 
*((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffe7ffffffe7; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f00004f4f0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f00004f4f0000; -+ unsigned_int_result = 0x000000004f4f0000; -+ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x0); -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; -+ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x9c83e21a22001818; -+ *((unsigned long*)& __m128i_op1[0]) = 0xdd3b8b02563b2d7b; -+ *((unsigned long*)& __m128i_op2[1]) = 0x000000009c83e21a; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000022001818; -+ *((unsigned long*)& __m128i_result[1]) = 0xf2c97aaa7d8fa270; -+ *((unsigned long*)& __m128i_result[0]) = 0x0b73e427f7cfcb88; -+ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f0000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f007f7f7f00; -+ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); -+ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x9c83e21a22001818; -+ *((unsigned long*)& __m128d_op0[0]) = 0xdd3b8b02563b2d7b; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x7f7f7f007f7f7f00; -+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op2[0]) = 0x7f7f7f007f7f7f00; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; -+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000001c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001de; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000001c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001de; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000060000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000060000000; -+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x44); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xf5fffc00fc000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0001001900010019; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0a02041904010019; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0001001900010019; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0a02041904010019; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007b007e; -+ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffed; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffed; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffeffed; -+ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x10); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffe700000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffe7007b007e; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffe700000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffe7007b007e; -+ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256d_result[3]) = 0xc039000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0xc039000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0xc039000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0xc039000000000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000009c83e21a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000022001818; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; -+ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000400000004000; -+ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffeffed; -+ *((unsigned long*)& __m256i_op2[3]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op2[2]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op2[1]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op2[0]) = 0xc039000000000000; -+ *((unsigned long*)& 
__m256i_result[3]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_result[2]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_result[1]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_result[0]) = 0xbf3ffffffffeffed; -+ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_result[2]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_result[1]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_result[0]) = 0xc03b000200020002; -+ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f7f007f7f7f00; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf2c97aaa7d8fa270; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0b73e427f7cfcb88; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffb1fb1000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf2c97aaa7d8fa270; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0b73e427f7cfcb88; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x3f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x9c83e21a22001818; -+ *((unsigned long*)& __m128i_op0[0]) = 0xdd3b8b02563b2d7b; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_result[0]) = 0x00012c8a0000a58a; -+ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000007b00f9007e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000007b00f9007e; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000007b00f9007e; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000007b00f9007e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000f601f200fc; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000f601f200fc; -+ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffe7ffffffe7; -+ *((unsigned long*)& __m256i_op1[3]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_op1[2]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_op1[1]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_op1[0]) = 0xbf3ffffffffeffed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000009c83e21a; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000022001818; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000e21a00001818; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; -+ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; -+ __m256i_out = __lasx_xvfclass_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x000000009c83e21a; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000022001818; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf2c97aaa7d8fa270; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0b73e427f7cfcb88; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_result[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128i_result[0]) = 
0x27b1b106b8145f50; -+ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b1b106b8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0x0a545374471b7070; -+ *((unsigned long*)& __m128i_result[0]) = 0x274f4f0648145f50; -+ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0a545374471b7070; -+ *((unsigned long*)& __m128i_op0[0]) = 0x274f4f0648145f50; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0xa8a736e19e9e28bf; -+ *((unsigned long*)& __m128i_result[0]) = 0x9e9f9e9f9e9f9e9f; -+ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000400000004000; -+ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; -+ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_result[0]) = 0x00012c8a0000a58a; -+ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc03b000200020002; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000001ec020; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000001ec020; -+ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; -+ *((unsigned long*)& __m128i_result[1]) = 0x09e009e009e009e0; -+ *((unsigned long*)& __m128i_result[0]) = 0x09e009e009e009e0; -+ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xa8a74bff9e9e0070; -+ *((unsigned long*)& __m128i_op0[0]) = 0x9e9e72ff9e9ff9ff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; -+ __m128i_out = __lsx_vsat_du(__m128i_op0,0x2f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0xc039000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128i_op0[0]) = 0x27b1b106b8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000120000000d; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; -+ __m128i_out = __lsx_vpcnt_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; -+ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000120000000d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; -+ unsigned_long_int_result = 0x0000000e0000000e; -+ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op2[1]) = 0x000000120000000d; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0001000cfffffff2; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001000dfffffff1; -+ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0001000c; -+ *((int*)& __m128_op0[2]) = 0xfffffff2; -+ *((int*)& __m128_op0[1]) = 0x0001000d; -+ *((int*)& __m128_op0[0]) = 0xfffffff1; -+ *((int*)& __m128_op1[3]) = 0xffff8a17; -+ *((int*)& 
__m128_op1[2]) = 0xffffc758; -+ *((int*)& __m128_op1[1]) = 0xffff69bb; -+ *((int*)& __m128_op1[0]) = 0xffffad3b; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000120000000d; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000011ffee; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000dfff2; -+ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000120000000d; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; -+ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000007b007e; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000007b007e; -+ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffce; -+ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000000011ffee; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000dfff2; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b1b106b8145f50; -+ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffb81a6f70; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000047eba0b0; -+ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffb81a6f70; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d48eaa1a2; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffb81ae0bf; -+ *((unsigned long*)& __m128i_result[0]) = 0x00012c9748eaffff; -+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000000001de2dc20; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000000001de2dc20; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128d_op1[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128d_op1[0]) = 0x27b1b106b8145f50; -+ *((unsigned long*)& __m128d_result[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128d_result[0]) = 0x27b1b106b8145f50; -+ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0xffffffe7; -+ *((int*)& __m256_op0[6]) = 0xffffffe7; -+ *((int*)& __m256_op0[5]) = 0xffffffe7; -+ *((int*)& __m256_op0[4]) = 0xffffffe7; -+ *((int*)& __m256_op0[3]) = 0xffffffe7; -+ *((int*)& __m256_op0[2]) = 0xffffffe7; -+ *((int*)& __m256_op0[1]) = 0xffffffe7; -+ *((int*)& __m256_op0[0]) = 0xffffffe7; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x00000000; -+ *((int*)& __m256_op1[4]) = 0x00000000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& 
__m256_op1[1]) = 0x00000000; -+ *((int*)& __m256_op1[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000500000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000700000032; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000500000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000700000032; -+ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000040e7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000040e7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000200000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000200000000000; -+ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x21); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = 
__lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000011ffee; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000dfff2; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; -+ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf654ad7447e59090; -+ *((unsigned long*)& __m128i_op0[0]) = 0x27b1b106b8145f50; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x3f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; -+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; -+ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000e7; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001ce; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001ce; -+ 
__m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001ce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001ce; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001fd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fd; -+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fff00003fff; -+ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x12); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8145f50; -+ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000005; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000400100004001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000400100004001; -+ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000019001c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000019001c; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001fe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0xb9); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8145f50; -+ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000047e59090; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffb8145f50; -+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256i_result[2]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256i_result[1]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256i_result[0]) = 0xd3d3d3d3d3d3d3d3; -+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xd3); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; -+ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256d_op0[2]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256d_op0[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256d_op0[0]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op2[3]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256d_op2[2]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256d_op2[1]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256d_op2[0]) = 0xd3d3d3d3d3d3d3d3; -+ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; -+ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; -+ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x17c64aaef639f093; -+ *((unsigned long*)& __m128d_op0[0]) = 0xdb8f439722ec502d; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x17c64aaef639f093; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m128_op0[3]) = 0xf6548a17; -+ *((int*)& __m128_op0[2]) = 0x47e59090; -+ *((int*)& __m128_op0[1]) = 0x27b169bb; -+ *((int*)& __m128_op0[0]) = 0xb8145f50; -+ *((int*)& __m128_op1[3]) = 0x004eff62; -+ *((int*)& __m128_op1[2]) = 0x00d2ff76; -+ *((int*)& __m128_op1[1]) = 0xff700028; -+ *((int*)& __m128_op1[0]) = 0x00be00a0; -+ 
*((int*)& __m128_result[3]) = 0xb7032c34; -+ *((int*)& __m128_result[2]) = 0x093d35ab; -+ *((int*)& __m128_result[1]) = 0xe7a6533b; -+ *((int*)& __m128_result[0]) = 0x800001b8; -+ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xb7032c34093d35ab; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe7a6533b800001b8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; -+ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100003ffe; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100003fcd; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100003ffe; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100003fcd; -+ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000900000009; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000900000009; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000090; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000090; -+ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000400100004001; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000400100004001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000400100004001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000400100004001; -+ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0xfffefffe; -+ *((int*)& __m256_op0[6]) = 0xfffefffe; -+ *((int*)& __m256_op0[5]) = 0xfffefffe; -+ *((int*)& __m256_op0[4]) = 0xfffefffe; -+ *((int*)& __m256_op0[3]) = 0xfffefffe; -+ *((int*)& __m256_op0[2]) = 0xfffefffe; -+ *((int*)& __m256_op0[1]) = 0xfffefffe; -+ *((int*)& __m256_op0[0]) = 0xfffefffe; -+ *((int*)& __m256_op1[7]) = 0x000023a3; -+ *((int*)& __m256_op1[6]) = 0x00003fff; -+ *((int*)& __m256_op1[5]) = 0x000023a3; -+ *((int*)& __m256_op1[4]) = 0x00003fef; -+ *((int*)& __m256_op1[3]) = 0x000023a3; -+ *((int*)& __m256_op1[2]) = 0x00003fff; -+ *((int*)& __m256_op1[1]) = 0x000023a3; -+ *((int*)& __m256_op1[0]) = 0x00003fef; -+ *((int*)& __m256_result[7]) = 0xfffefffe; -+ *((int*)& __m256_result[6]) = 0xfffefffe; -+ *((int*)& __m256_result[5]) = 0xfffefffe; -+ *((int*)& 
__m256_result[4]) = 0xfffefffe; -+ *((int*)& __m256_result[3]) = 0xfffefffe; -+ *((int*)& __m256_result[2]) = 0xfffefffe; -+ *((int*)& __m256_result[1]) = 0xfffefffe; -+ *((int*)& __m256_result[0]) = 0xfffefffe; -+ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x004eff6200d2ff76; -+ *((unsigned long*)& __m128i_op1[0]) = 0xff70002800be00a0; -+ *((unsigned long*)& __m128i_result[1]) = 0x004eff6200d2ff76; -+ *((unsigned long*)& __m128i_result[0]) = 0xff70002800be00a0; -+ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fff00003fff; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffebffffffebfff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffebffffffebfff; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000090; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000090; -+ *((unsigned long*)& __m128d_op1[1]) = 0x004eff6200d2ff76; -+ *((unsigned long*)& __m128d_op1[0]) = 0xff70002800be00a0; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0xff800000; -+ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000400100004001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000003ffb; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000400100004001; -+ *((unsigned long*)& __m256i_result[3]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003ff000003ff0; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x17c64aaef639f093; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0xf6548a1747e59090; -+ *((unsigned long*)& __m128i_op2[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0x10f881a20ffd02b0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; -+ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vclo_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x00003fea00013feb; -+ *((unsigned long*)& __m256i_result[2]) = 0x00003fe900014022; -+ *((unsigned long*)& __m256i_result[1]) = 0x00003fea00013feb; -+ *((unsigned long*)& __m256i_result[0]) = 0x00003fe900014022; -+ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000005858585a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000005858585a; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000005858585a; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000005858585a; -+ *((unsigned long*)& __m256i_op1[3]) = 0x000023a300003fff; -+ *((unsigned long*)& __m256i_op1[2]) = 0x000023a300003fef; -+ *((unsigned long*)& __m256i_op1[1]) = 0x000023a300003fff; -+ *((unsigned long*)& __m256i_op1[0]) = 0x000023a300003fef; -+ *((unsigned long*)& __m256i_result[3]) = 0x000011d1ac2c4c2d; -+ *((unsigned long*)& __m256i_result[2]) = 0x000011d1ac2c4c25; -+ *((unsigned long*)& __m256i_result[1]) = 0x000011d1ac2c4c2d; -+ *((unsigned long*)& __m256i_result[0]) = 0x000011d1ac2c4c25; -+ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000e29e; -+ *((unsigned long*)& __m128i_result[0]) = 0x000259140000ffff; -+ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000008e8c000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x000000000fffc000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000008e8c000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x000000000fffc000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[0]) = 
0x0000000100000001; -+ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op1[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256d_op1[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256d_op1[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256d_op1[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00003feec0108022; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe9c015802c; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00003feec0108022; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe9c015802c; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007f124010c022; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007f174015c02c; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007f124010c022; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007f174015c02c; -+ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x08e8c000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x0fffc000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x08e8c000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x0fffc000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00003fea; -+ *((int*)& __m256_op0[6]) = 0x00013feb; -+ *((int*)& __m256_op0[5]) = 0x00003fe9; -+ *((int*)& __m256_op0[4]) = 0x00014022; -+ *((int*)& __m256_op0[3]) = 0x00003fea; -+ *((int*)& __m256_op0[2]) = 0x00013feb; -+ *((int*)& __m256_op0[1]) = 0x00003fe9; -+ *((int*)& __m256_op0[0]) = 0x00014022; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvfrint_s(__m256_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 
0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0xffffffff; -+ *((int*)& __m256_op1[6]) = 0xffffffff; -+ *((int*)& __m256_op1[5]) = 0xffffffff; -+ *((int*)& __m256_op1[4]) = 0xffffffff; -+ *((int*)& __m256_op1[3]) = 0xffffffff; -+ *((int*)& __m256_op1[2]) = 0xffffffff; -+ *((int*)& __m256_op1[1]) = 0xffffffff; -+ *((int*)& __m256_op1[0]) = 0xffffffff; -+ *((int*)& __m256_op2[7]) = 
0x00000000; -+ *((int*)& __m256_op2[6]) = 0x00000000; -+ *((int*)& __m256_op2[5]) = 0x00000000; -+ *((int*)& __m256_op2[4]) = 0x00000000; -+ *((int*)& __m256_op2[3]) = 0x00000000; -+ *((int*)& __m256_op2[2]) = 0x00000000; -+ *((int*)& __m256_op2[1]) = 0x00000000; -+ *((int*)& __m256_op2[0]) = 0x00000000; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffffff; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffffff; -+ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x02b010f881a281a2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; -+ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; -+ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvslti_w(__m256i_op0,11); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200020002; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x02b010f881a281a2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x02b010f881a281a2; -+ *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8140001; -+ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8140001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000010f8000081a2; -+ *((unsigned long*)& __m128i_result[0]) = 0x000069bb00000001; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000010f8000081a2; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000069bb00000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001000010f8; -+ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x10f881a20ffd02b0; -+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x10f881a20ffd02b0; -+ *((unsigned long*)& __m128d_result[0]) = 0x00000000ff800000; -+ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; -+ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x10f881a20ffd02b0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xf1f181a2f1f1f1b0; -+ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1f180f1f1; -+ __m128i_out = __lsx_vmini_b(__m128i_op0,-15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc8027; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc7ff1; -+ 
*((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc8027; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc7ff1; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000014; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000014; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000014; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000014; -+ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x14); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x10f881a20ffd02b0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0xfff8ffa2fffdffb0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; -+ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1e1d1d1d1e; -+ *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1e1d1d1d1e; -+ *((unsigned long*)& __m256i_result[1]) = 0x1d1d1d1e1d1d1d1e; -+ *((unsigned long*)& __m256i_result[0]) = 0x1d1d1d1e1d1d1d1e; -+ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1d); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x10f8000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x10f8000100000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000001000010f8; -+ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001000010f8; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff8ffa2fffdffb0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0f0f0f0f00000f00; -+ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0xf1f181a2; -+ *((int*)& __m128_op0[2]) = 0xf1f1f1b0; -+ *((int*)& __m128_op0[1]) = 0xf1f1f1f1; -+ *((int*)& __m128_op0[0]) = 0xf180f1f1; -+ *((int*)& __m128_result[3]) = 0x7fc00000; -+ *((int*)& __m128_result[2]) = 0x7fc00000; -+ *((int*)& 
__m128_result[1]) = 0x7fc00000; -+ *((int*)& __m128_result[0]) = 0x7fc00000; -+ __m128_out = __lsx_vflogb_s(__m128_op0); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x00007fde00007fd4; -+ *((unsigned long*)& __m256i_result[2]) = 0x00007fe000007fe0; -+ *((unsigned long*)& __m256i_result[1]) = 0x00007fde00007fd4; -+ *((unsigned long*)& __m256i_result[0]) = 0x00007fe000007fe0; -+ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00007fde00007fd4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00007fe000007fe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00007fde00007fd4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fe000007fe0; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0xffff7eddffff7ed3; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff7edfffff7edf; -+ *((unsigned long*)& __m256i_result[1]) = 0xffff7eddffff7ed3; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff7edfffff7edf; -+ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; -+ __m128i_out = __lsx_vslti_h(__m128i_op0,15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff7eddffff7ed3; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff7edfffff7edf; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff7eddffff7ed3; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff7edfffff7edf; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00007edd; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00007ed3; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00007edf; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00007edf; -+ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x10f8000000000000; -+ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1e); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xfff8ffa2fffdffb0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0108015e01030150; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000017f0000; -+ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x10f8000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0xfff8ffa2fffdffb0; -+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128d_result[1]) = 0x10f8000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x00000000ff800000; -+ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff8ffa2fffdffb0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x50); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00007fde00007fd4; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00007fe000007fe0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00007fde00007fd4; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00007fe000007fe0; -+ *((unsigned long*)& __m256i_result[3]) = 0x000081220000812c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000812000008120; -+ *((unsigned long*)& __m256i_result[1]) = 0x000081220000812c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000812000008120; -+ __m256i_out = __lasx_xvneg_b(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffff7eddffff7ed3; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff7edfffff7edf; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffff7eddffff7ed3; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff7edfffff7edf; -+ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; -+ 
*((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; -+ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff3eedffff3ee3; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff3eedffff3ee3; -+ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x10f8000100000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000001000010f8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x087c000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000087c; -+ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00003fea00013fec; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe50001c013; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00003fea00013fec; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe50001c013; -+ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000ff0000ff00; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000ff0000ff00; -+ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x10f881a20ffd02b0; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0ff780a10efc01af; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe7f0000; -+ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x0ff780a1; -+ *((int*)& __m128_op0[2]) = 0x0efc01af; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0xfe7f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe7f0000; -+ __m128i_out = __lsx_vfrintrne_s(__m128_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x087c000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000087c; -+ *((unsigned long*)& __m128i_result[1]) = 0xf784000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffff784; -+ __m128i_out = __lsx_vneg_d(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; -+ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x087c000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000087c; -+ *((unsigned long*)& __m128i_op1[1]) = 0x10f8000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffff784; -+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000081220000812c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000812000008120; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000081220000812c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000812000008120; -+ *((unsigned long*)& __m256i_result[3]) = 0xe9e968c9e9e968c1; -+ *((unsigned long*)& __m256i_result[2]) = 0xe9e968c9e9e968c9; -+ *((unsigned long*)& __m256i_result[1]) = 0xe9e968c9e9e968c1; -+ *((unsigned long*)& __m256i_result[0]) = 0xe9e968c9e9e968c9; -+ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x16); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000081220000812c; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000812000008120; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000081220000812c; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000812000008120; -+ *((unsigned long*)& __m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; -+ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; -+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffefffff784; -+ *((unsigned long*)& __m128i_op1[1]) = 0x10f8000100000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; -+ *((unsigned long*)& __m128i_result[1]) = 0x0177fff0fffffff0; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff8bc; -+ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0005000500050005; -+ *((unsigned long*)& __m256d_op0[2]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0005000500050005; -+ *((unsigned long*)& __m256d_op0[0]) = 0xfefefefefefefefe; -+ *((unsigned long*)& __m256d_op1[3]) = 0x00003fea00013fec; -+ *((unsigned long*)& __m256d_op1[2]) = 0x00003fe50001c013; -+ *((unsigned long*)& __m256d_op1[1]) = 0x00003fea00013fec; -+ *((unsigned long*)& __m256d_op1[0]) = 0x00003fe50001c013; -+ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000180000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000180000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
-+ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe013fcf2e015fc38;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe013fd00dff78420;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xe013fcf2e015fc38;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe013fd00dff78420;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00003fea0014734d;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe900140d85;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00003fea0014734d;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe900140d85;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff0000ff00;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff0000ff00;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x17);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffefffff784;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00bbfff7fffffff7;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff008ff820;
-+ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0005000500050005;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000050005;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe;
-+ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xfffffffe;
-+ *((int*)& __m128_op0[0]) = 0xfffff784;
-+ *((int*)& __m128_op1[3]) = 0x0177fff0;
-+ *((int*)& __m128_op1[2]) = 0xfffffff0;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x011ff8bc;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[6]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[5]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[4]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[3]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[2]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[1]) = 0xfd02fd02;
-+ *((int*)& __m256_op0[0]) = 0xfd02fd02;
-+ *((int*)& __m256_result[7]) = 0x81fa28e4;
-+ *((int*)& __m256_result[6]) = 0x81fa28e4;
-+ *((int*)& __m256_result[5]) = 0x81fa28e4;
-+ *((int*)& __m256_result[4]) = 0x81fa28e4;
-+ *((int*)& __m256_result[3]) = 0x81fa28e4;
-+ *((int*)& __m256_result[2]) = 0x81fa28e4;
-+ *((int*)& __m256_result[1]) = 0x81fa28e4;
-+ *((int*)& __m256_result[0]) = 0x81fa28e4;
-+ __m256_out = __lasx_xvfrecip_s(__m256_op0);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x00bbfff7fffffff7;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff008ff820;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffee;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff040;
-+ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000050005;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xf007fe76f008fe19;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xf08aff01f07cc291;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xf007fe76f008fe19;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xf08aff01f07cc291;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001400;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000003c01ff9;
-+ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x66);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0177fff0fffffff0;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000011ff8bc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000011f0000f040;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0177fff0fffffff0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff8bc;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001400;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffff08a7de0;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffff07c4170;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff08a7de0;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffff07c4170;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffff08a7de0;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffff07c4170;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffff08a7de0;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffff07c4170;
-+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000001400;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffec00;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffc3fe007;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffc3fe007;
-+ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffee;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff040;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe0000ff18;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000;
-+ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0177fff0fffffff0;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff8bc;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x00bbfff7fffffff7;
-+ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff008ff820;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffe8008fffe7008f;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00010001f1153780;
-+ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffee;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x00000000011ff040;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvslti_h(__m256i_op0,-11);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffee;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe1ffc0;
-+ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffff009ff83f;
-+ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff040;
-+ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[6]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[5]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[4]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[3]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[2]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[1]) = 0x81fa28e4;
-+ *((int*)& __m256_op0[0]) = 0x81fa28e4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x0);
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000001400;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001400;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000003c01ff9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000003c01ff9;
-+ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffe1ffc0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe1ffc0;
-+ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfd02fd02fd02fd02;
-+ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffe4ffe4ffe4ffe4;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffe4ffe4ffe4ffe4;
-+ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x1b);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffe4ffe4ffe4ffe4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffe4ffe4ffe4ffe4;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff01e41ffff0e440;
-+ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfd12fd12fd12fd12;
-+ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x4);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfa15fa15fa15fa14;
-+ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[0]) = 0x05ea05ea05ea05ec;
-+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0xffffffffffffffff;
-+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x2);
-+ *((int*)& __m128_op0[3]) = 0xffffffff;
-+ *((int*)& __m128_op0[2]) = 0xffffffff;
-+ *((int*)& __m128_op0[1]) = 0xffffffff;
-+ *((int*)& __m128_op0[0]) = 0xffffffff;
-+ *((int*)& __m128_op1[3]) = 0xffffffff;
-+ *((int*)& __m128_op1[2]) = 0xffffffff;
-+ *((int*)& __m128_op1[1]) = 0xffffffff;
-+ *((int*)& __m128_op1[0]) = 0xffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc;
-+ *((unsigned long*)& __m128i_result[1]) = 0x05dfffc3ffffffc0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000047fe2f0;
-+ __m128i_out = __lsx_vslli_d(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x05dfffc3ffffffc0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000047fe2f0;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000047fe2f0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000047fe2f0;
-+ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvreplve0_b(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fec20704;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000012;
-+ __m128i_out = __lsx_vexth_wu_hu(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xdfdfdfdfdfdfdfdf;
-+ *((unsigned long*)& __m128i_result[0]) = 0xdfdfdfdfdfdfdfdf;
-+ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000047fe2f0;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000047fe2f0;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fec20704;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000000043fe2fc;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000000001fffff;
-+ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe011df03e;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xf03ef03ef03ef03e;
-+ *((unsigned long*)& __m128i_result[0]) = 0xf03ef03ef03ef03e;
-+ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x00000000047fe2f0;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x00000000047fe2f0;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xf03ef03ef03ef03e;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xf03ef03ef03ef03e;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vslei_d(__m128i_op0,-9);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[6]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[5]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[4]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[3]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[2]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[1]) = 0xfd12fd12;
-+ *((int*)& __m256_op0[0]) = 0xfd12fd12;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000;
-+ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_result[2]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_result[1]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
-+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x49);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010012;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fec20704;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vclo_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_result[3]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000a000a000a000a;
-+ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,10);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe;
-+ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0e440;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffe4ffffffe4ff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffe4fffff0e4ff;
-+ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_h(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xfffefffefffefffe;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xfffefffe011df03e;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128d_result[0]) = 0xfffffffefffffffe;
-+ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0e440;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffefffffffe;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff01e420fff0e442;
-+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff80000000;
-+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x04f104f104f104f1;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x04f104f104f104f1;
-+ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000001;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000808ff821;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256d_op1[2]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256d_op1[1]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256d_op1[0]) = 0xfd02fd02fd02fd02;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xfd12fd12fd12fd12;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x7fffffff;
-+ *((int*)& __m256_op0[6]) = 0x80000000;
-+ *((int*)& __m256_op0[5]) = 0x7fffffff;
-+ *((int*)& __m256_op0[4]) = 0x80000000;
-+ *((int*)& __m256_op0[3]) = 0x7fffffff;
-+ *((int*)& __m256_op0[2]) = 0x80000000;
-+ *((int*)& __m256_op0[1]) = 0x7fffffff;
-+ *((int*)& __m256_op0[0]) = 0x80000000;
-+ *((int*)& __m256_op1[7]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[6]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[5]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[4]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[3]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[2]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[1]) = 0xfd02fd02;
-+ *((int*)& __m256_op1[0]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[7]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[6]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[5]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[4]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[3]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[2]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[1]) = 0xfd02fd02;
-+ *((int*)& __m256_op2[0]) = 0xfd02fd02;
-+ *((int*)& __m256_result[7]) = 0x7fffffff;
-+ *((int*)& __m256_result[6]) = 0x7d02fd02;
-+ *((int*)& __m256_result[5]) = 0x7fffffff;
-+ *((int*)& __m256_result[4]) = 0x7d02fd02;
-+ *((int*)& __m256_result[3]) = 0x7fffffff;
-+ *((int*)& __m256_result[2]) = 0x7d02fd02;
-+ *((int*)& __m256_result[1]) = 0x7fffffff;
-+ *((int*)& __m256_result[0]) = 0x7d02fd02;
-+ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0c0c0c0c0c0c0c0c;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0c0c0c0c0c0c0c0c;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0c0c0c0c0c0c0c0c;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0c0c0c0c0c0c0c0c;
-+ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,12);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffff7fffffff7f;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffff7fffffff7f;
-+ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ int_op0 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff01fe03ff01fe03;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01fe03ff01fe03;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff01fe03ff01fe03;
-+ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff4;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,-12);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x05ea05ea05ea05ec;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfa15fa15fa15fa14;
-+ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xff01fe03ff01fe03;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000;
-+ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x000a000a000a000a;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000500040005;
-+ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0001000300000004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000300000004;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0001000300000004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000300000004;
-+ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
-+ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128d_op1[0]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfa15fa15fa15fa14;
-+ *((unsigned long*)& __m256i_result[3]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_result[2]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_result[1]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_result[0]) = 0x8768876887688769;
-+ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x7d);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff4;
-+ *((unsigned long*)& __m128i_result[1]) = 0x000000200000001c;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000200000001c;
-+ __m128i_out = __lsx_vclo_w(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000200000001c;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000200000001c;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x000000200000001c;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x000000200000001c;
-+ *((unsigned long*)& __m128i_result[1]) = 0x00000020000000c0;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00000020000000c0;
-+ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0ffff;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128i_result[0]) = 0xff01e41ffff0ffff;
-+ __m128i_out = __lsx_vmini_d(__m128i_op0,14);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0001000104000200;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0001000104000200;
-+ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fff00017fff0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff00017fff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fff00017fff0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff00017fff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00007fff00007fff;
-+ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000104000200;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000104000200;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0004000500040005;
-+ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128d_op0[0]) = 0xff01e41ffff0ffff;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x5555000054100000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x5555000154100155;
-+ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
-+ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
-+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsrari_h(__m128i_op0,0x8);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0001000000000000;
-+ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x1a);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x9f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00000000003fffc0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00000000003fffc0;
-+ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8282828282828282;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x8768876887688769;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x00000000003fffc0;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x00000000003fffc0;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffc00040;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffc00040;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0004000400040004;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0004000500040005;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x00007fff00000000;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x00007fff00007fff;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x00007fff00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10;
-+ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x5555000054100000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x5555000154100155;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000155;
-+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0ffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xff01ffffe41f0000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfff00000ffff0000;
-+ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7fff80007fff0000;
-+ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xfff0008000000080;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfff0008000000080;
-+ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x7);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+ __m128i_out = __lsx_vrotri_h(__m128i_op0,0xa);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffc0;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffc0;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff80007fff0000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffffffff;
-+ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000155;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
-+ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x00040004;
-+ *((int*)& __m256_op0[6]) = 0x00040004;
-+ *((int*)& __m256_op0[5]) = 0x00040005;
-+ *((int*)& __m256_op0[4]) = 0x00040005;
-+ *((int*)& __m256_op0[3]) = 0x00040004;
-+ *((int*)& __m256_op0[2]) = 0x00040004;
-+ *((int*)& __m256_op0[1]) = 0x00040005;
-+ *((int*)& __m256_op0[0]) = 0x00040005;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xff01ffffe41f0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfff00000ffff0000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000155;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x000000000000002b; -+ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfee1f6f18800ff7f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffe4ffffffe4; -+ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1c); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x7fff80007fff0000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x7fff80007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0xf); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000155; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000155; -+ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff80007fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff80007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffc00040; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffc00040; -+ *((unsigned long*)& __m256i_op1[3]) = 
0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x1080108010060002; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x1080108010060002; -+ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_op1[2]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_op1[1]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_op1[0]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000001d0000001c; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000001d0000001c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000001d0000001c; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001c; -+ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x1080108010060002; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x1080108010060002; -+ *((unsigned long*)& __m256d_op1[3]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256d_op1[2]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256d_op1[1]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256d_op1[0]) = 0xffffffe4ffffffe4; -+ *((unsigned long*)& __m256d_op2[3]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_op2[1]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_result[3]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_result[2]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_result[1]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256d_result[0]) = 0x7fff00017fff0000; -+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x545501550001113a; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xd45501550001113a; -+ __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; -+ __m128i_out = __lsx_vclz_h(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000155; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xffff100000000000; -+ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfefe000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000155; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff100000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x000f000000000000; -+ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff00017fff0000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_result[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_result[0]) = 0x04f104f104f504ed; -+ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x7e); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op0[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0002ffff00020002; -+ *((unsigned long*)& __m256i_result[2]) = 0x04f504f104f504f5; -+ *((unsigned long*)& __m256i_result[1]) = 0x0002ffff00020002; -+ *((unsigned long*)& __m256i_result[0]) = 0x04f504f104f504f5; -+ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x65); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; 
-+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((int*)& __m256_op1[7]) = 0x00000000; -+ *((int*)& __m256_op1[6]) = 0x00000000; -+ *((int*)& __m256_op1[5]) = 0x7fff8000; -+ *((int*)& __m256_op1[4]) = 0x7fff0000; -+ *((int*)& __m256_op1[3]) = 0x00000000; -+ *((int*)& __m256_op1[2]) = 0x00000000; -+ *((int*)& __m256_op1[1]) = 0x7fff8000; -+ *((int*)& __m256_op1[0]) = 0x7fff0000; -+ *((int*)& __m256_op2[7]) = 0xffffffff; -+ *((int*)& __m256_op2[6]) = 0xffffffff; -+ *((int*)& __m256_op2[5]) = 0xffffffff; -+ *((int*)& __m256_op2[4]) = 0xffffff10; -+ *((int*)& __m256_op2[3]) = 0xffffffff; -+ *((int*)& __m256_op2[2]) = 0xffffffff; -+ *((int*)& __m256_op2[1]) = 0xffffffff; -+ *((int*)& __m256_op2[0]) = 0xffffff10; -+ *((int*)& __m256_result[7]) = 0xffffffff; -+ *((int*)& __m256_result[6]) = 0xffffffff; -+ *((int*)& __m256_result[5]) = 0xffffffff; -+ *((int*)& __m256_result[4]) = 0xffffff10; -+ *((int*)& __m256_result[3]) = 0xffffffff; -+ *((int*)& __m256_result[2]) = 0xffffffff; -+ *((int*)& __m256_result[1]) = 0xffffffff; -+ *((int*)& __m256_result[0]) = 0xffffff10; -+ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000155; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffff10000; -+ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff80007fff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff80007fff0000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_result[2]) = 0x7fff81007fff0100; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_result[0]) = 0x7fff81007fff0100; -+ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff81007fff0100; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000010000000100; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff81007fff0100; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000008000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0003fffc0803fff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000008000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0003fffc0803fff8; -+ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0xd); -+ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0003fffc0803fff8; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc0803fff8; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000fffc0000fff8; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000fffc0000fff8; -+ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_h(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffff100000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000f0000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[2]) = 0x0004000400040004; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004; -+ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; -+ *((int*)& __m256_result[7]) = 0x00000000; -+ *((int*)& __m256_result[6]) = 0x00000000; -+ *((int*)& __m256_result[5]) = 0x00000000; -+ *((int*)& __m256_result[4]) = 0x00000000; -+ *((int*)& __m256_result[3]) = 0x00000000; -+ *((int*)& __m256_result[2]) = 0x00000000; -+ *((int*)& __m256_result[1]) = 0x00000000; -+ *((int*)& __m256_result[0]) = 0x00000000; -+ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); -+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffff10000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0002ffff00020002; -+ *((unsigned long*)& __m256i_op0[2]) = 0x04f504f104f504f5; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0002ffff00020002; -+ *((unsigned long*)& __m256i_op0[0]) = 0x04f504f104f504f5; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x000200ff00020002; -+ *((unsigned long*)& __m256i_result[2]) = 0x00f500f100f500f5; -+ *((unsigned long*)& __m256i_result[1]) = 0x000200ff00020002; -+ *((unsigned long*)& __m256i_result[0]) = 0x00f500f100f500f5; -+ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0004000400040004; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0004000500040005; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0004000400040004; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0004000500040005; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; -+ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x8a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0002fffc; -+ *((unsigned long*)& __m256i_result[2]) = 0xffff0000fffd0003; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffc; -+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffd0003; -+ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffc; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000fffd0003; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffc; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000fffd0003; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0001fffe0005fff9; -+ *((unsigned long*)& __m256i_result[2]) = 0x04f004f204f204f0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0001fffe0005fff9; -+ *((unsigned long*)& __m256i_result[0]) = 0x04f004f204f204f0; -+ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0005fff9; -+ *((unsigned long*)& __m256i_op0[2]) = 0x04f004f204f204f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0005fff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0x04f004f204f204f0; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000900000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000900000009; -+ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x17); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0005fff9; -+ *((unsigned 
long*)& __m256i_op0[2]) = 0x04f004f204f204f0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0005fff9; -+ *((unsigned long*)& __m256i_op0[0]) = 0x04f004f204f204f0; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000002780; -+ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffd880; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffd880; -+ __m256i_out = __lasx_xvneg_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0001000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x03fc03fc03fc03fc; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffd880; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffd880; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x2); -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vneg_w(__m128i_op0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op0[2]) = 0x03acfc5303260e80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op0[0]) = 0x03acfc5303260e80; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_result[3]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_result[2]) = 0x03acfc5303260e81; -+ *((unsigned long*)& __m256i_result[1]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_result[0]) = 0x03acfc5303260e81; -+ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002780; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000010100020103; -+ *((unsigned long*)& __m256i_result[2]) = 0x040f040f040b236d; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000010100020103; -+ *((unsigned long*)& __m256i_result[0]) = 0x040f040f040b236d; -+ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op0[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op0[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[3]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op1[2]) = 0x03acfc5303260e80; -+ *((unsigned long*)& __m256i_op1[1]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op1[0]) = 0x03acfc5303260e80; -+ *((unsigned long*)& __m256i_result[3]) = 0x00000b0cfffff4f3; -+ *((unsigned long*)& __m256i_result[2]) = 0x000f9bb562f56c80; -+ *((unsigned long*)& __m256i_result[1]) = 0x00000b0cfffff4f3; -+ *((unsigned long*)& __m256i_result[0]) = 0x000f9bb562f56c80; -+ __m256i_out = 
__lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvclo_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((int*)& __m128_result[3]) = 0x00000000; -+ *((int*)& __m128_result[2]) = 0x00000000; -+ *((int*)& __m128_result[1]) = 0x00000000; -+ *((int*)& __m128_result[0]) = 0x00000000; -+ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); -+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); -+ -+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; -+ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); -+ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; -+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op0[2]) = 0x03acfc5303260e81; -+ *((unsigned long*)& __m256i_op0[1]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op0[0]) = 0x03acfc5303260e81; -+ *((unsigned long*)& __m256i_op1[3]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op1[2]) = 0x03acfc5303260e81; -+ *((unsigned long*)& __m256i_op1[1]) = 0x03af03af03af03af; -+ *((unsigned long*)& __m256i_op1[0]) = 0x03acfc5303260e81; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x1b); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
-+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256d_op0[3]) = 0x1716151417161514; -+ *((unsigned long*)& __m256d_op0[2]) = 0x1716151417161514; -+ *((unsigned long*)& __m256d_op0[1]) = 0x1716151417161514; -+ *((unsigned long*)& __m256d_op0[0]) = 0x1716151417161514; -+ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000002780; -+ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000002780; -+ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000002780; -+ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000002780; -+ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[2]) = 0x8000000000002780; -+ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256d_result[0]) = 0x8000000000002780; -+ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); -+ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x8a8a8a8a8a8a8a8a; -+ *((unsigned long*)& __m128i_result[0]) = 
0x8a8a8a8a8a8a8a8a; -+ __m128i_out = __lsx_vori_b(__m128i_op0,0x8a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8a8a8a8a8a8a8a8a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x8a8a8a8a8a8a8a8a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x8a8a8a8a; -+ *((int*)& __m128_op1[2]) = 0x8a8a8a8a; -+ *((int*)& __m128_op1[1]) = 0x8a8a8a8a; -+ *((int*)& __m128_op1[0]) = 0x8a8a8a8a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vslei_h(__m128i_op0,-10); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m128_op0[3]) = 0x00000000; -+ *((int*)& __m128_op0[2]) = 0x00000000; -+ *((int*)& __m128_op0[1]) = 0x00000000; -+ *((int*)& __m128_op0[0]) = 0x00000000; -+ *((int*)& __m128_op1[3]) = 0x00000000; -+ *((int*)& __m128_op1[2]) = 0x00000000; -+ *((int*)& __m128_op1[1]) = 0x00000000; -+ *((int*)& __m128_op1[0]) = 0x00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((int*)& __m256_op0[7]) = 0x00000000; -+ *((int*)& __m256_op0[6]) = 0x00000000; -+ *((int*)& __m256_op0[5]) = 0x00000000; -+ *((int*)& __m256_op0[4]) = 0x00000000; -+ *((int*)& __m256_op0[3]) = 0x00000000; -+ *((int*)& __m256_op0[2]) = 0x00000000; -+ *((int*)& __m256_op0[1]) = 0x00000000; -+ *((int*)& __m256_op0[0]) = 0x00000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; -+ *((unsigned long*)& __m256i_op1[3]) = 0x1716151417161514; -+ *((unsigned long*)& __m256i_op1[2]) = 0x1716151417161514; -+ *((unsigned long*)& __m256i_op1[1]) = 0x1716151417161514; -+ *((unsigned long*)& __m256i_op1[0]) = 0x1716151417161514; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0fff0fff0fff0fff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0fff0fff0fff0fff;
-+ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000002780;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000002780;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10;
-+ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fc4;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fc4;
-+ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed;
-+ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op2[2]) = 0x04f104f104f504ed;
-+ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op2[0]) = 0x04f104f104f504ed;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0018761ed60b5d7f;
-+ *((unsigned long*)& __m256i_result[2]) = 0xabdcdc9938afafe9;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0018761ed60b5d7f;
-+ *((unsigned long*)& __m256i_result[0]) = 0xabdcdc9938afafe9;
-+ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((int*)& __m128_op0[3]) = 0x00000000;
-+ *((int*)& __m128_op0[2]) = 0x00000000;
-+ *((int*)& __m128_op0[1]) = 0x00000000;
-+ *((int*)& __m128_op0[0]) = 0x00000000;
-+ *((int*)& __m128_op1[3]) = 0x00000000;
-+ *((int*)& __m128_op1[2]) = 0x00000000;
-+ *((int*)& __m128_op1[1]) = 0x00000000;
-+ *((int*)& __m128_op1[0]) = 0x00000000;
-+ *((int*)& __m128_result[3]) = 0x00000000;
-+ *((int*)& __m128_result[2]) = 0x00000000;
-+ *((int*)& __m128_result[1]) = 0x00000000;
-+ *((int*)& __m128_result[0]) = 0x00000000;
-+ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
-+ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x3fff3fff3fff3fc4;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x3fff3fff3fff3fff;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x3fff3fff3fff3fc4;
-+ *((int*)& __m256_result[7]) = 0x00000000;
-+ *((int*)& __m256_result[6]) = 0x00000000;
-+ *((int*)& __m256_result[5]) = 0x3ff9fffa;
-+ *((int*)& __m256_result[4]) = 0x3ff9fffa;
-+ *((int*)& __m256_result[3]) = 0x00000000;
-+ *((int*)& __m256_result[2]) = 0x00000000;
-+ *((int*)& __m256_result[1]) = 0x3ff9fffa;
-+ *((int*)& __m256_result[0]) = 0x3ff9fffa;
-+ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0fff0fff0fff0fff;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000009;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000009;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x3ff9fffa3ff9fffa;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x3ff9fffa3ff9fffa;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007ff3;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007ff3;
-+ __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0x2f);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((int*)& __m256_op0[7]) = 0x003f0200;
-+ *((int*)& __m256_op0[6]) = 0x01400200;
-+ *((int*)& __m256_op0[5]) = 0x003f00ff;
-+ *((int*)& __m256_op0[4]) = 0x003f00c4;
-+ *((int*)& __m256_op0[3]) = 0x003f0200;
-+ *((int*)& __m256_op0[2]) = 0x01400200;
-+ *((int*)& __m256_op0[1]) = 0x003f00ff;
-+ *((int*)& __m256_op0[0]) = 0x003f00c4;
-+ *((int*)& __m256_op1[7]) = 0x00000101;
-+ *((int*)& __m256_op1[6]) = 0x01010101;
-+ *((int*)& __m256_op1[5]) = 0x00000000;
-+ *((int*)& __m256_op1[4]) = 0x00000000;
-+ *((int*)& __m256_op1[3]) = 0x00000101;
-+ *((int*)& __m256_op1[2]) = 0x01010101;
-+ *((int*)& __m256_op1[1]) = 0x00000000;
-+ *((int*)& __m256_op1[0]) = 0x00000000;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xb);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vmskltz_d(__m128i_op0);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffdbff980038ffaf;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffafffe80004fff1;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffdbff980038ffaf;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffafffe80004fff1;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffc;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000fffd0003;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffc;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000fffd0003;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0002fffd;
-+ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000020202020202;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000020202020202;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0101000000010000;
-+ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x761ed60b5d7f0000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xdc9938afafe904f1;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x761ed60b5d7f0000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xdc9938afafe904f1;
-+ *((unsigned long*)& __m256i_result[3]) = 0x03b0feb002eb0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfee401c5fd7f0027;
-+ *((unsigned long*)& __m256i_result[1]) = 0x03b0feb002eb0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfee401c5fd7f0027;
-+ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x5);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffdbff980038ffaf;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffafffe80004fff1;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffdbff980038ffaf;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffafffe80004fff1;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000020202020202;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000020202020202;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x000e3fec0004fff1;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000e3fec0004fff1;
-+ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffd0004;
-+ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xcb);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff01ff68;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000070ff017de6;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff01ff68;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000070ff017de6;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x761ed60b5d7f0000;
-+ *((unsigned long*)& __m256i_op1[2]) = 0xdc9938afafe904f1;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x761ed60b5d7f0000;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xdc9938afafe904f1;
-+ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x00004c9000e9d886;
-+ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f0000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x00004c9000e9d886;
-+ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_op1 = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x5);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_op1[3]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd;
-+ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000fffd0004;
-+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff0;
-+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000f;
-+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f;
-+ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x6c);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0002fffc;
-+ *((unsigned long*)& __m256d_op0[2]) = 0xffff0000fffd0003;
-+ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0002fffc;
-+ *((unsigned long*)& __m256d_op0[0]) = 0xffff0000fffd0003;
-+ *((unsigned long*)& __m256d_op1[3]) = 0x003f020001400200;
-+ *((unsigned long*)& __m256d_op1[2]) = 0x003f00ff003f00c4;
-+ *((unsigned long*)& __m256d_op1[1]) = 0x003f020001400200;
-+ *((unsigned long*)& __m256d_op1[0]) = 0x003f00ff003f00c4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000260a378;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000d02317;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000260a378;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000d02317;
-+ *((unsigned long*)& __m256i_op1[3]) = 0x003f020001400200;
-+ *((unsigned long*)& __m256i_op1[2]) = 0x003f00ff003f00c4;
-+ *((unsigned long*)& __m256i_op1[1]) = 0x003f020001400200;
-+ *((unsigned long*)& __m256i_op1[0]) = 0x003f00ff003f00c4;
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0x00a300a300a300a3;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00a300a300a300a3;
-+ __m128i_out = __lsx_vrepli_h(0xa3);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
-+ __m256i_out = __lasx_xvldi(-4080);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe15;
-+ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe15;
-+ __m128i_out = __lsx_vrepli_d(-491);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0xfebcfebcfebcfebc;
-+ *((unsigned long*)& __m256i_result[2]) = 0xfebcfebcfebcfebc;
-+ *((unsigned long*)& __m256i_result[1]) = 0xfebcfebcfebcfebc;
-+ *((unsigned long*)& __m256i_result[0]) = 0xfebcfebcfebcfebc;
-+ __m256i_out = __lasx_xvrepli_h(-324);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0xecececececececec;
-+ *((unsigned long*)& __m128i_result[0]) = 0xecececececececec;
-+ __m128i_out = __lsx_vrepli_b(-20);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0x00ffff00ff00ff00;
-+ *((unsigned long*)& __m128i_result[0]) = 0x00ffff00ff00ff00;
-+ __m128i_out = __lsx_vldi(-1686);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0x3fd1000000000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x3fd1000000000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x3fd1000000000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x3fd1000000000000;
-+ __m256i_out = __lasx_xvldi(-943);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0x004d004d004d004d;
-+ *((unsigned long*)& __m128i_result[0]) = 0x004d004d004d004d;
-+ __m128i_out = __lsx_vrepli_h(0x4d);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c;
-+ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c;
-+ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c;
-+ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c;
-+ __m256i_out = __lasx_xvrepli_h(-228);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0x7200000072000000;
-+ *((unsigned long*)& __m256i_result[2]) = 0x7200000072000000;
-+ *((unsigned long*)& __m256i_result[1]) = 0x7200000072000000;
-+ *((unsigned long*)& __m256i_result[0]) = 0x7200000072000000;
-+ __m256i_out = __lasx_xvldi(-3214);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m256i_result[3]) = 0xffffff1dffffff1d;
-+ *((unsigned long*)& __m256i_result[2]) = 0xffffff1dffffff1d;
-+ *((unsigned long*)& __m256i_result[1]) = 0xffffff1dffffff1d;
-+ *((unsigned long*)& __m256i_result[0]) = 0xffffff1dffffff1d;
-+ __m256i_out = __lasx_xvrepli_w(-227);
-+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+ *((unsigned long*)& __m128i_result[1]) = 0x0a0000000a000000;
-+ *((unsigned long*)& __m128i_result[0]) = 0x0a0000000a000000;
-+ __m128i_out = __lsx_vldi(-3318);
-+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fff8;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000f0000000f;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000f0000000f;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000808081;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000808081;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000808081;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000808081;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff1ffca0011feca;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff1ffca0011feca;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080008000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080008000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0404040404040404;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xec68e3ef5a98ed54;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xefff000100000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xefff000100000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xc600000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000001000000010;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000000010;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffe03;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffe03;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xe1616161e1614e60;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xe1616161e1614e60;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xe1616161e1614e60;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xe1616161e1614e60;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00005555aaabfffe;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfbba01c0003f7e3f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfbd884e7003f7e3f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010183f95466;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x01010101d58efe94;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000400;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000400;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x7f0101070101010f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000127f010116;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xff80ff80ff80ff80;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xff80ff80ff80ff80;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xefffdffff0009d3d;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000010000c;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x006ffffefff0000d;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000001f0000001f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000001f0000ffff;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ca0200000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ca0200000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0003000300030003;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0003000300030003;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xfff082f000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x003f000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0202fe02fd020102;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x2c2c2c2c2c2c2c2c;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x2c2c2c2c2c2c2c2c;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0a0aa9890a0ac5f3;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000b0b100015d1e;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001bfff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000b0b100015d1e;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001bfff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x6368d2cd63636363;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x1f001f00000007ef;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00001fff200007ef;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_w(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808081;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0038d800ff000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00fffe00fffffe00;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x8000008000008080;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x8080800000800080;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002e8b164;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x199714a038478040;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x007f008000ea007f;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fe01fe;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0100;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fe01fe;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0100;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_d(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbnz_v(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_d(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0xffff0020001d001f;
-+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[1]) = 0xffff0020001d001f;
-+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lasx_xbnz_b(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_v(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x687a8373f249bc44;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x7861145d9241a14a;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bz_b(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080;
-+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080;
-+ int_result = 0x0000000000000001;
-+ int_out = __lasx_xbz_h(__m256i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000;
-+ int_result = 0x0000000000000001;
-+ int_out = __lsx_bnz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_w(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
-+
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0018;
-+ int_result = 0x0000000000000000;
-+ int_out = __lsx_bnz_h(__m128i_op0);
-+ ASSERTEQ_int(__LINE__, int_result, int_out);
ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd; -+ *((unsigned long*)& __m256i_op0[2]) = 0xe27fe2821d226278; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd; -+ *((unsigned long*)& __m256i_op0[0]) = 0xe27fe2821d226278; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_d(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70007; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70007; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_h(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_w(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_h(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_h(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_w(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ 
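-+ /* Branch-condition builtins exercised in this group (semantics as the
-+    expected values in these cases indicate): __lsx_bz_v/__lasx_xbz_v
-+    return 1 only when every bit of the vector is zero, while
-+    __lsx_bnz_v/__lasx_xbnz_v return 1 when any bit is set.  The
-+    per-element forms differ: bz_{b,h,w,d} returns 1 if at least one
-+    element is zero, and bnz_{b,h,w,d} returns 1 only if every element
-+    is non-zero, which is what the mix of all-zero and partly-zero
-+    operands above and below is probing.  */
-+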
-+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_d(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x7ff8000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_w(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_h(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffe00fe00; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000000001fe01dde; -+ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffe00fe00; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000000001fe01dde; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_h(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_w(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000a0008; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000a0008; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fffe; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffc0; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff0ffc0; -+ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffc0; -+ *((unsigned long*)& 
__m256i_op0[0]) = 0x00000000fff0ffc0; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_w(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; -+ *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bnz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xff808000ff808000; -+ *((unsigned long*)& __m256i_op0[2]) = 0xc3038000ff808000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xff808000ff808000; -+ *((unsigned long*)& __m256i_op0[0]) = 0xc3038000ff808000; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_w(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bnz_w(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff60000280; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000f64fab372db5; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff60000280; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000f64fab372db5; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_h(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff0000; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_d(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_w(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_w(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000021; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000080801030000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000080103040000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbz_d(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001f4; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001f4; -+ int_result = 0x0000000000000001; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ 
int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_v(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000011ffee; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000dfff2; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bnz_b(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf784000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff784; -+ int_result = 0x0000000000000000; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000180000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000180000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbz_w(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xf784000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff784; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff009ff83f; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bnz_h(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ int_result = 0x0000000000000001; -+ int_out = __lsx_bz_v(__m128i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80; -+ int_result = 0x0000000000000000; -+ int_out = __lasx_xbnz_b(__m256i_op0); -+ ASSERTEQ_int(__LINE__, int_result, int_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; -+ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x000201220001011c; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x000201220001011c; -+ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 
0x0101010101010101; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; -+ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000083f95466; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0101010100005400; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; -+ 
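-+ /* __lasx_xvextl_q_d widens the low 64-bit element of each 128-bit lane
-+    to a full 128-bit value (sign-extending, per the _q_d naming), and
-+    __lasx_xvextl_qu_du is the zero-extending variant: even for this
-+    all-ones operand the upper 64 bits of each lane are expected to
-+    come out zero.  */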
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; -+ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& 
__m128i_result[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; -+ __m128i_out = __lsx_vld((unsigned long *)&__m128i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_result[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_result[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; -+ __m256i_out = __lasx_xvld((unsigned long *)&__m256i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0; -+ *((unsigned long*)& __m128i_result[0]) = 0x0; -+ __lsx_vst(__m128i_op0, (unsigned long *)&__m128i_result, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_op0, __m128i_result); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0; -+ __lasx_xvst(__m256i_op0, (unsigned long *)&__m256i_result, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_op0, __m256i_result); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; -+ __m128i_out = __lsx_vldx((unsigned long *)&__m128i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_result[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_result[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; -+ __m256i_out = __lasx_xvldx((unsigned long *)&__m256i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0; -+ *((unsigned long*)& __m128i_result[0]) = 0x0; -+ __lsx_vstx(__m128i_op0, (unsigned long *)&__m128i_result, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_op0, __m128i_result); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0; -+ *((unsigned long*)& __m256i_result[0]) = 0x0; -+ 
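-+ /* Memory-access coverage: __lsx_vld/__lasx_xvld and the register-indexed
-+    __lsx_vldx/__lasx_xvldx load a full vector, and the vst/vstx forms
-+    store one -- the destination buffers are pre-zeroed above precisely so
-+    that ASSERTEQ_64 detects whether the store really happened.  The
-+    vldrepl_{b,h,w,d} cases further on load a single element and replicate
-+    it across every lane, vstelm_* stores one selected element, and
-+    vrepli_* splats an immediate truncated to the element width.  */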
__lasx_xvstx(__m256i_op0, (unsigned long *)&__m256i_result, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_op0, __m256i_result); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0xc3c3c3c3c3c3c3c3; -+ *((unsigned long*)& __m128i_result[0]) = 0xc3c3c3c3c3c3c3c3; -+ __m128i_out = __lsx_vldrepl_b((unsigned long *)&__m128i_op0, 0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; -+ *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; -+ __m256i_out = __lasx_xvldrepl_b((unsigned long *)&__m256i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0xc31ac31ac31ac31a; -+ *((unsigned long*)& __m128i_result[0]) = 0xc31ac31ac31ac31a; -+ __m128i_out = __lsx_vldrepl_h((unsigned long *)&__m128i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0xfeebfeebfeebfeeb; -+ *((unsigned long*)& __m256i_result[2]) = 0xfeebfeebfeebfeeb; -+ *((unsigned long*)& __m256i_result[1]) = 0xfeebfeebfeebfeeb; -+ *((unsigned long*)& __m256i_result[0]) = 0xfeebfeebfeebfeeb; -+ __m256i_out = __lasx_xvldrepl_h((unsigned long *)&__m256i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x47a5c31a47a5c31a; -+ *((unsigned long*)& __m128i_result[0]) = 0x47a5c31a47a5c31a; -+ __m128i_out = __lsx_vldrepl_w((unsigned long *)&__m128i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0xad72feebad72feeb; -+ *((unsigned long*)& __m256i_result[2]) = 0xad72feebad72feeb; -+ *((unsigned long*)& __m256i_result[1]) = 0xad72feebad72feeb; -+ *((unsigned long*)& __m256i_result[0]) = 0xad72feebad72feeb; -+ __m256i_out = __lasx_xvldrepl_w((unsigned long *)&__m256i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; -+ __m128i_out = __lsx_vldrepl_d((unsigned long *)&__m128i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& 
__m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[2]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[1]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; -+ __m256i_out = __lasx_xvldrepl_d((unsigned long *)&__m256i_op0, 0x0); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 0x000001a8000001a8; -+ *((unsigned long*)& __m128i_result[0]) = 0x000001a8000001a8; -+ __m128i_out = __lsx_vrepli_w(424); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 0x0000011300000113; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000011300000113; -+ __m128i_out = __lsx_vrepli_w(275); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_result[1]) = 0xfffffee2fffffee2; -+ *((unsigned long*)& __m128i_result[0]) = 0xfffffee2fffffee2; -+ __m128i_out = __lsx_vrepli_w(-286); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0; -+ *((unsigned long*)& __m128i_result[0]) = 0x05; -+ *((unsigned long*)& __m128i_out[1]) = 0x0; -+ *((unsigned long*)& __m128i_out[0]) = 0x0; -+ __lsx_vstelm_b(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0; -+ *((unsigned long*)& __m128i_result[0]) = 0x5c05; -+ *((unsigned long*)& __m128i_out[1]) = 0x0; -+ *((unsigned long*)& __m128i_out[0]) = 0x0; -+ __lsx_vstelm_h(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0; -+ *((unsigned long*)& __m128i_result[0]) = 0xc9d85c05; -+ *((unsigned long*)& __m128i_out[1]) = 0x0; -+ *((unsigned long*)& __m128i_out[0]) = 0x0; -+ __lsx_vstelm_w(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; -+ *((unsigned long*)& __m128i_result[1]) = 0x0; -+ *((unsigned long*)& __m128i_result[0]) = 0x1dcc4255c9d85c05; -+ *((unsigned long*)& __m128i_out[1]) = 0x0; -+ *((unsigned long*)& __m128i_out[0]) = 0x0; -+ __lsx_vstelm_d(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0; -+ *((unsigned long*)& __m256i_result[0]) = 0x8d; -+ *((unsigned long*)& 
__m256i_out[3]) = 0x0; -+ *((unsigned long*)& __m256i_out[2]) = 0x0; -+ *((unsigned long*)& __m256i_out[1]) = 0x0; -+ *((unsigned long*)& __m256i_out[0]) = 0x0; -+ __lasx_xvstelm_b(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0xe); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0; -+ *((unsigned long*)& __m256i_result[0]) = 0x9100; -+ *((unsigned long*)& __m256i_out[3]) = 0x0; -+ *((unsigned long*)& __m256i_out[2]) = 0x0; -+ *((unsigned long*)& __m256i_out[1]) = 0x0; -+ *((unsigned long*)& __m256i_out[0]) = 0x0; -+ __lasx_xvstelm_h(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x8); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0; -+ *((unsigned long*)& __m256i_result[0]) = 0xe9179100; -+ *((unsigned long*)& __m256i_out[3]) = 0x0; -+ *((unsigned long*)& __m256i_out[2]) = 0x0; -+ *((unsigned long*)& __m256i_out[1]) = 0x0; -+ *((unsigned long*)& __m256i_out[0]) = 0x0; -+ __lasx_xvstelm_w(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x4); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; -+ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; -+ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; -+ *((unsigned long*)& __m256i_result[3]) = 0x0; -+ *((unsigned long*)& __m256i_result[2]) = 0x0; -+ *((unsigned long*)& __m256i_result[1]) = 0x0; -+ *((unsigned long*)& __m256i_result[0]) = 0x58569d7be9179100; -+ *((unsigned long*)& __m256i_out[3]) = 0x0; -+ *((unsigned long*)& __m256i_out[2]) = 0x0; -+ *((unsigned long*)& __m256i_out[1]) = 0x0; -+ *((unsigned long*)& __m256i_out[0]) = 0x0; -+ __lasx_xvstelm_d(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x2); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; -+ *((unsigned long*)& __m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; -+ __m256i_out = __lasx_xvrepli_b(-149); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe69; -+ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe69; -+ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe69; -+ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe69; -+ __m256i_out = __lasx_xvrepli_d(-407); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff76; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff76; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff76; -+ *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffff76; -+ __m256i_out = __lasx_xvrepli_d(-138); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffa1; -+ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffa1; -+ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffa1; -+ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffa1; -+ __m256i_out = __lasx_xvrepli_d(-95); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000019; -+ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000019; -+ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000019; -+ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000019; -+ __m256i_out = __lasx_xvrepli_d(25); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001e; -+ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001e; -+ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001e; -+ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001e; -+ __m256i_out = __lasx_xvrepli_d(30); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m256i_result[3]) = 0x8d8d8d8d8d8d8d8d; -+ *((unsigned long*)& __m256i_result[2]) = 0x8d8d8d8d8d8d8d8d; -+ *((unsigned long*)& __m256i_result[1]) = 0x8d8d8d8d8d8d8d8d; -+ *((unsigned long*)& __m256i_result[0]) = 0x8d8d8d8d8d8d8d8d; -+ __m256i_out = __lasx_xvrepli_b(-371); -+ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffff8969ffffd7e2; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000d688ffffbd95; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xf12dfafc1ad1f7b3; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x4000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x34); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000200000002000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) 
= 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x2f); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000c0002000c0002; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000400c600700153; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000c0002000c0002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000400c600700153; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x000000010000007f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0800000400000800; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000001515151500; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001515151500; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001515000015150; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fdfd0404; -+ *((unsigned long*)& __m128i_op1[1]) = 0x3fffffff3fffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3fffffff3fffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fc08; -+ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fc08; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000800080008000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffba420000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x000007e044000400; -+ *((unsigned long*)& __m128i_result[0]) = 0xfdd2100000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x25); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000081e003f3f3f; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3f3f3f0e00000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000081e003f3f3f; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3f3f3f0e00000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000103c007e7e8; -+ *((unsigned long*)& __m128i_result[0]) = 0x00000103c007e7e8; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x43); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0202022302023212; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0202ff3f02022212; -+ 
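-+ /* __lsx_vsrlrni_{b_h,h_w,w_d,d_q}: each double-width element of both
-+    operands is shifted right logically by the immediate with rounding
-+    (1 << (imm-1) is added before the shift), then narrowed; the narrowed
-+    op0 elements land in the high half of the result and the op1 elements
-+    in the low half, as in the 0x00e3000e pattern checked further below.  */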
*((unsigned long*)& __m128i_op1[1]) = 0x0000002100003010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff3f00002010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x79); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x1a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff7fff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xe2bb5ff00e20aceb; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe2bb5ff00e20aceb; -+ *((unsigned long*)& __m128i_result[1]) = 0x0100010000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x00e3000e00e3000e; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf58df7841423142a; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3f7477f8ff4e2152; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x3d3e0505101e4008; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x2bd5d429e34a1efb; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfc0203fccbedbba7; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc9f66947f077afd0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x89fed7c07fdf5d00; -+ *((unsigned long*)& __m128i_result[1]) = 0x14f1a50ffe65f6de; -+ *((unsigned long*)& __m128i_result[0]) = 0xa3f83bd8e03fefaf; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6ed694e00e0355db; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000010600000106; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0xe00e035606000001; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xe739e7ade77ae725; -+ *((unsigned long*)& __m128i_op0[0]) = 0xbb9013bd049bc9ec; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x56aca41400000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x7ade77ae3bd049bd; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000041400000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1010101010101010; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1010101010101010; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x8081808180818081; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000006ff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0037f80000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x15); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x69); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0020202020202020; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0080808080c04040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0101010001808080; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000202000008081; -+ *((unsigned long*)& __m128i_result[0]) = 0x0001010100010101; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x00fff00000001000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x28); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x6b); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000adf0000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001e00; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0040000000400040; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000020002020; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808102; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010102; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x7); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x001000100010000b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x03fc03fc03fc03fc; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x04000400ff01ff01; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xa); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ 
__m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x1010101010101010; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000fff800000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001ed68; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1ff6a09e667f3bd8; -+ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007b5a; -+ *((unsigned long*)& __m128i_result[0]) = 0x999fcef600000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffe5c8000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x91f80badc162a0c4; -+ *((unsigned long*)& __m128i_op1[0]) = 0x99d1ffff0101ff01; -+ *((unsigned long*)& __m128i_result[1]) = 0x00ff400000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x905d0b06cf0008f8; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x3802f4fd025800f7; -+ *((unsigned long*)& __m128i_op1[1]) = 0xc8ff0bffff00ffae; -+ *((unsigned long*)& __m128i_op1[0]) = 0x91ff40fffff8ff50; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000200000000700; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000192000001240; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x33); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0ffd0ffd; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff0ffc0001; -+ *((unsigned long*)& __m128i_op1[1]) = 0xbb7743ca4c78461f; -+ *((unsigned long*)& __m128i_op1[0]) = 0xd9743eb5fb4deb3a; -+ *((unsigned long*)& __m128i_result[1]) = 0x003fffffffc3ff44; -+ *((unsigned long*)& __m128i_result[0]) = 0x002eddd0f2931e12; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x4a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xbb7743ca4c78461f; -+ *((unsigned long*)& __m128i_op0[0]) = 0xd9743eb5fb4deb3a; -+ *((unsigned long*)& __m128i_op1[1]) = 0x22445e1ad9c3e4f0; -+ *((unsigned long*)& __m128i_op1[0]) = 0x1b43e8a30a570a63; -+ *((unsigned long*)& __m128i_result[1]) = 0x743ca4c843eb5fb5; -+ *((unsigned long*)& __m128i_result[0]) = 0x45e1ad9c3e8a30a5; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x14); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x1204900f62f72565; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x4901725600000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x4); -+ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x6a); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000400000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x12); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000300000003; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x32); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x2); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x3f3f3f7fbf3fffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x47); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000040804080; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000020100000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xe); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffe8ffff28fc; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00007fff0000803e; -+ *((unsigned long*)& __m128i_op1[0]) = 0x00000006ffff81e1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0ffffffe8ffff290; -+ *((unsigned long*)& __m128i_result[0]) = 0x000007fff0000804; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x44); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000418200000008e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000002100047; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x1); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636362; -+ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636362; -+ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636362; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636362; -+ *((unsigned long*)& __m128i_result[1]) = 0x0032003200320032; -+ *((unsigned long*)& __m128i_result[0]) = 0x0032003200320032; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x19); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff01010102; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7ffdf87f0b0c7f7f; -+ *((unsigned long*)& __m128i_op1[1]) = 0xf6b3eb63f6b3f6b3; -+ *((unsigned long*)& __m128i_op1[0]) = 0x363953e42b56432e; -+ *((unsigned long*)& __m128i_result[1]) = 0x010000010080000b; -+ *((unsigned long*)& __m128i_result[0]) = 0x00f700f70036002b; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x18); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xed67d6c7ed67ed67; -+ *((unsigned long*)& __m128i_op1[0]) = 0x6c72a7c856ac865c; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000700000003; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x3d); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff40ff83; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x1010101010101010; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xc); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000003030103; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000003030103; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000006060; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000006060; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000002408beb26c8; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000706e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000028c27; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000070; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x80000b0b80000b0b; -+ *((unsigned long*)& __m128i_op0[0]) = 0x8000101080001010; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffefefffffeff0; -+ *((unsigned long*)& __m128i_result[1]) = 0x0061006100020002; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00fe; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x3); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000078087f08; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000078087f08; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000e0fc0000e0fc; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x6); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff0bff76; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x75); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x33); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff00ff0000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00ffffff; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8282828282828282; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000828282828282; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0008000800000008; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00f7000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000005150; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000005150; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000000f7000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x24); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x41afddcb1c000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xd09e1bd99a2c6eb1; -+ *((unsigned long*)& __m128i_op1[0]) = 0xe82f7c27bb0778af; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040002; -+ *((unsigned long*)& __m128i_result[0]) = 0x000d000a000f000c; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x1c); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff8000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffdff0; -+ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0144329880000000; -+ *((unsigned long*)& __m128i_result[1]) = 0x007fffc0007ffff0; -+ *((unsigned long*)& __m128i_result[0]) = 0x004000004c400000; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x17); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000001e0000001e; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op0[0]) = 0xfffafff0fff9ff01; -+ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; -+ *((unsigned long*)& __m128i_result[1]) = 0x00000000d800cff8; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; -+ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x5); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; -+ *((unsigned long*)& __m128i_op1[1]) = 0x00000002000007d7; -+ *((unsigned long*)& __m128i_op1[0]) = 0x0000000300000ff1; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; -+ *((unsigned long*)& __m128i_result[0]) = 0x000007d700000ff1; -+ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x0); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000ff8; -+ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; -+ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x74); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; -+ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000f08; -+ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; -+ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; -+ *((unsigned long*)& __m128i_result[0]) = 0x2020202020202020; -+ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xb); -+ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); -+ -+ return 0; -+} -diff --git a/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c b/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c -new file mode 100644 -index 000000000..1f563ec81 ---- /dev/null -+++ b/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c -@@ -0,0 +1,1509 @@ -+/* Test builtins for LOONGARCH LASX ASE instructions */ -+/* { dg-do compile } */ -+/* { dg-options "-mlasx" } */ -+/* { dg-final { scan-assembler-times "lasx_xvsll_b:.*xvsll\\.b.*lasx_xvsll_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsll_h:.*xvsll\\.h.*lasx_xvsll_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsll_w:.*xvsll\\.w.*lasx_xvsll_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsll_d:.*xvsll\\.d.*lasx_xvsll_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslli_b:.*xvslli\\.b.*lasx_xvslli_b" 1 } } */ -+/* { dg-final { 
scan-assembler-times "lasx_xvslli_h:.*xvslli\\.h.*lasx_xvslli_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslli_w:.*xvslli\\.w.*lasx_xvslli_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslli_d:.*xvslli\\.d.*lasx_xvslli_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsra_b:.*xvsra\\.b.*lasx_xvsra_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsra_h:.*xvsra\\.h.*lasx_xvsra_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsra_w:.*xvsra\\.w.*lasx_xvsra_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsra_d:.*xvsra\\.d.*lasx_xvsra_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrai_b:.*xvsrai\\.b.*lasx_xvsrai_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrai_h:.*xvsrai\\.h.*lasx_xvsrai_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrai_w:.*xvsrai\\.w.*lasx_xvsrai_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrai_d:.*xvsrai\\.d.*lasx_xvsrai_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrar_b:.*xvsrar\\.b.*lasx_xvsrar_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrar_h:.*xvsrar\\.h.*lasx_xvsrar_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrar_w:.*xvsrar\\.w.*lasx_xvsrar_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrar_d:.*xvsrar\\.d.*lasx_xvsrar_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrari_b:.*xvsrari\\.b.*lasx_xvsrari_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrari_h:.*xvsrari\\.h.*lasx_xvsrari_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrari_w:.*xvsrari\\.w.*lasx_xvsrari_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrari_d:.*xvsrari\\.d.*lasx_xvsrari_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrl_b:.*xvsrl\\.b.*lasx_xvsrl_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrl_h:.*xvsrl\\.h.*lasx_xvsrl_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrl_w:.*xvsrl\\.w.*lasx_xvsrl_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrl_d:.*xvsrl\\.d.*lasx_xvsrl_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrli_b:.*xvsrli\\.b.*lasx_xvsrli_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrli_h:.*xvsrli\\.h.*lasx_xvsrli_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrli_w:.*xvsrli\\.w.*lasx_xvsrli_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrli_d:.*xvsrli\\.d.*lasx_xvsrli_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlr_b:.*xvsrlr\\.b.*lasx_xvsrlr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlr_h:.*xvsrlr\\.h.*lasx_xvsrlr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlr_w:.*xvsrlr\\.w.*lasx_xvsrlr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlr_d:.*xvsrlr\\.d.*lasx_xvsrlr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlri_b:.*xvsrlri\\.b.*lasx_xvsrlri_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlri_h:.*xvsrlri\\.h.*lasx_xvsrlri_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlri_w:.*xvsrlri\\.w.*lasx_xvsrlri_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlri_d:.*xvsrlri\\.d.*lasx_xvsrlri_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclr_b:.*xvbitclr\\.b.*lasx_xvbitclr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclr_h:.*xvbitclr\\.h.*lasx_xvbitclr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclr_w:.*xvbitclr\\.w.*lasx_xvbitclr_w" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvbitclr_d:.*xvbitclr\\.d.*lasx_xvbitclr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclri_b:.*xvbitclri\\.b.*lasx_xvbitclri_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclri_h:.*xvbitclri\\.h.*lasx_xvbitclri_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclri_w:.*xvbitclri\\.w.*lasx_xvbitclri_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitclri_d:.*xvbitclri\\.d.*lasx_xvbitclri_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitset_b:.*xvbitset\\.b.*lasx_xvbitset_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitset_h:.*xvbitset\\.h.*lasx_xvbitset_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitset_w:.*xvbitset\\.w.*lasx_xvbitset_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitset_d:.*xvbitset\\.d.*lasx_xvbitset_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitseti_b:.*xvbitseti\\.b.*lasx_xvbitseti_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitseti_h:.*xvbitseti\\.h.*lasx_xvbitseti_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitseti_w:.*xvbitseti\\.w.*lasx_xvbitseti_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitseti_d:.*xvbitseti\\.d.*lasx_xvbitseti_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrev_b:.*xvbitrev\\.b.*lasx_xvbitrev_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrev_h:.*xvbitrev\\.h.*lasx_xvbitrev_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrev_w:.*xvbitrev\\.w.*lasx_xvbitrev_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrev_d:.*xvbitrev\\.d.*lasx_xvbitrev_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrevi_b:.*xvbitrevi\\.b.*lasx_xvbitrevi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrevi_h:.*xvbitrevi\\.h.*lasx_xvbitrevi_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrevi_w:.*xvbitrevi\\.w.*lasx_xvbitrevi_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitrevi_d:.*xvbitrevi\\.d.*lasx_xvbitrevi_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadd_b:.*xvadd\\.b.*lasx_xvadd_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadd_h:.*xvadd\\.h.*lasx_xvadd_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadd_w:.*xvadd\\.w.*lasx_xvadd_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadd_d:.*xvadd\\.d.*lasx_xvadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddi_bu:.*xvaddi\\.bu.*lasx_xvaddi_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddi_hu:.*xvaddi\\.hu.*lasx_xvaddi_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddi_wu:.*xvaddi\\.wu.*lasx_xvaddi_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddi_du:.*xvaddi\\.du.*lasx_xvaddi_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsub_b:.*xvsub\\.b.*lasx_xvsub_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsub_h:.*xvsub\\.h.*lasx_xvsub_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsub_w:.*xvsub\\.w.*lasx_xvsub_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsub_d:.*xvsub\\.d.*lasx_xvsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubi_bu:.*xvsubi\\.bu.*lasx_xvsubi_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubi_hu:.*xvsubi\\.hu.*lasx_xvsubi_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubi_wu:.*xvsubi\\.wu.*lasx_xvsubi_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubi_du:.*xvsubi\\.du.*lasx_xvsubi_du" 1 } } */ -+/* { dg-final { 
scan-assembler-times "lasx_xvmax_b:.*xvmax\\.b.*lasx_xvmax_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_h:.*xvmax\\.h.*lasx_xvmax_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_w:.*xvmax\\.w.*lasx_xvmax_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_d:.*xvmax\\.d.*lasx_xvmax_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_b:.*xvmaxi\\.b.*lasx_xvmaxi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_h:.*xvmaxi\\.h.*lasx_xvmaxi_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_w:.*xvmaxi\\.w.*lasx_xvmaxi_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_d:.*xvmaxi\\.d.*lasx_xvmaxi_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_bu:.*xvmax\\.bu.*lasx_xvmax_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_hu:.*xvmax\\.hu.*lasx_xvmax_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_wu:.*xvmax\\.wu.*lasx_xvmax_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmax_du:.*xvmax\\.du.*lasx_xvmax_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_bu:.*xvmaxi\\.bu.*lasx_xvmaxi_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_hu:.*xvmaxi\\.hu.*lasx_xvmaxi_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_wu:.*xvmaxi\\.wu.*lasx_xvmaxi_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaxi_du:.*xvmaxi\\.du.*lasx_xvmaxi_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_b:.*xvmin\\.b.*lasx_xvmin_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_h:.*xvmin\\.h.*lasx_xvmin_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_w:.*xvmin\\.w.*lasx_xvmin_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_d:.*xvmin\\.d.*lasx_xvmin_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_b:.*xvmini\\.b.*lasx_xvmini_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_h:.*xvmini\\.h.*lasx_xvmini_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_w:.*xvmini\\.w.*lasx_xvmini_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_d:.*xvmini\\.d.*lasx_xvmini_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_bu:.*xvmin\\.bu.*lasx_xvmin_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_hu:.*xvmin\\.hu.*lasx_xvmin_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_wu:.*xvmin\\.wu.*lasx_xvmin_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmin_du:.*xvmin\\.du.*lasx_xvmin_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_bu:.*xvmini\\.bu.*lasx_xvmini_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_hu:.*xvmini\\.hu.*lasx_xvmini_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_wu:.*xvmini\\.wu.*lasx_xvmini_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmini_du:.*xvmini\\.du.*lasx_xvmini_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseq_b:.*xvseq\\.b.*lasx_xvseq_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseq_h:.*xvseq\\.h.*lasx_xvseq_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseq_w:.*xvseq\\.w.*lasx_xvseq_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseq_d:.*xvseq\\.d.*lasx_xvseq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseqi_b:.*xvseqi\\.b.*lasx_xvseqi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseqi_h:.*xvseqi\\.h.*lasx_xvseqi_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvseqi_w:.*xvseqi\\.w.*lasx_xvseqi_w" 1 } } 
*/ -+/* { dg-final { scan-assembler-times "lasx_xvseqi_d:.*xvseqi\\.d.*lasx_xvseqi_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_b:.*xvslt\\.b.*lasx_xvslt_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_h:.*xvslt\\.h.*lasx_xvslt_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_w:.*xvslt\\.w.*lasx_xvslt_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_d:.*xvslt\\.d.*lasx_xvslt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_b:.*xvslti\\.b.*lasx_xvslti_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_h:.*xvslti\\.h.*lasx_xvslti_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_w:.*xvslti\\.w.*lasx_xvslti_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_d:.*xvslti\\.d.*lasx_xvslti_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_bu:.*xvslt\\.bu.*lasx_xvslt_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_hu:.*xvslt\\.hu.*lasx_xvslt_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_wu:.*xvslt\\.wu.*lasx_xvslt_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslt_du:.*xvslt\\.du.*lasx_xvslt_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_bu:.*xvslti\\.bu.*lasx_xvslti_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_hu:.*xvslti\\.hu.*lasx_xvslti_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_wu:.*xvslti\\.wu.*lasx_xvslti_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslti_du:.*xvslti\\.du.*lasx_xvslti_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_b:.*xvsle\\.b.*lasx_xvsle_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_h:.*xvsle\\.h.*lasx_xvsle_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_w:.*xvsle\\.w.*lasx_xvsle_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_d:.*xvsle\\.d.*lasx_xvsle_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_b:.*xvslei\\.b.*lasx_xvslei_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_h:.*xvslei\\.h.*lasx_xvslei_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_w:.*xvslei\\.w.*lasx_xvslei_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_d:.*xvslei\\.d.*lasx_xvslei_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_bu:.*xvsle\\.bu.*lasx_xvsle_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_hu:.*xvsle\\.hu.*lasx_xvsle_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_wu:.*xvsle\\.wu.*lasx_xvsle_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsle_du:.*xvsle\\.du.*lasx_xvsle_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_bu:.*xvslei\\.bu.*lasx_xvslei_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_hu:.*xvslei\\.hu.*lasx_xvslei_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_wu:.*xvslei\\.wu.*lasx_xvslei_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvslei_du:.*xvslei\\.du.*lasx_xvslei_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_b:.*xvsat\\.b.*lasx_xvsat_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_h:.*xvsat\\.h.*lasx_xvsat_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_w:.*xvsat\\.w.*lasx_xvsat_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_d:.*xvsat\\.d.*lasx_xvsat_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_bu:.*xvsat\\.bu.*lasx_xvsat_bu" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvsat_hu:.*xvsat\\.hu.*lasx_xvsat_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_wu:.*xvsat\\.wu.*lasx_xvsat_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsat_du:.*xvsat\\.du.*lasx_xvsat_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadda_b:.*xvadda\\.b.*lasx_xvadda_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadda_h:.*xvadda\\.h.*lasx_xvadda_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadda_w:.*xvadda\\.w.*lasx_xvadda_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadda_d:.*xvadda\\.d.*lasx_xvadda_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_b:.*xvsadd\\.b.*lasx_xvsadd_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_h:.*xvsadd\\.h.*lasx_xvsadd_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_w:.*xvsadd\\.w.*lasx_xvsadd_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_d:.*xvsadd\\.d.*lasx_xvsadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_bu:.*xvsadd\\.bu.*lasx_xvsadd_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_hu:.*xvsadd\\.hu.*lasx_xvsadd_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_wu:.*xvsadd\\.wu.*lasx_xvsadd_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsadd_du:.*xvsadd\\.du.*lasx_xvsadd_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_b:.*xvavg\\.b.*lasx_xvavg_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_h:.*xvavg\\.h.*lasx_xvavg_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_w:.*xvavg\\.w.*lasx_xvavg_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_d:.*xvavg\\.d.*lasx_xvavg_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_bu:.*xvavg\\.bu.*lasx_xvavg_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_hu:.*xvavg\\.hu.*lasx_xvavg_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_wu:.*xvavg\\.wu.*lasx_xvavg_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavg_du:.*xvavg\\.du.*lasx_xvavg_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_b:.*xvavgr\\.b.*lasx_xvavgr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_h:.*xvavgr\\.h.*lasx_xvavgr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_w:.*xvavgr\\.w.*lasx_xvavgr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_d:.*xvavgr\\.d.*lasx_xvavgr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_bu:.*xvavgr\\.bu.*lasx_xvavgr_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_hu:.*xvavgr\\.hu.*lasx_xvavgr_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_wu:.*xvavgr\\.wu.*lasx_xvavgr_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvavgr_du:.*xvavgr\\.du.*lasx_xvavgr_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_b:.*xvssub\\.b.*lasx_xvssub_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_h:.*xvssub\\.h.*lasx_xvssub_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_w:.*xvssub\\.w.*lasx_xvssub_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_d:.*xvssub\\.d.*lasx_xvssub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_bu:.*xvssub\\.bu.*lasx_xvssub_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_hu:.*xvssub\\.hu.*lasx_xvssub_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssub_wu:.*xvssub\\.wu.*lasx_xvssub_wu" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvssub_du:.*xvssub\\.du.*lasx_xvssub_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_b:.*xvabsd\\.b.*lasx_xvabsd_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_h:.*xvabsd\\.h.*lasx_xvabsd_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_w:.*xvabsd\\.w.*lasx_xvabsd_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_d:.*xvabsd\\.d.*lasx_xvabsd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_bu:.*xvabsd\\.bu.*lasx_xvabsd_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_hu:.*xvabsd\\.hu.*lasx_xvabsd_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_wu:.*xvabsd\\.wu.*lasx_xvabsd_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvabsd_du:.*xvabsd\\.du.*lasx_xvabsd_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmul_b:.*xvmul\\.b.*lasx_xvmul_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmul_h:.*xvmul\\.h.*lasx_xvmul_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmul_w:.*xvmul\\.w.*lasx_xvmul_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmul_d:.*xvmul\\.d.*lasx_xvmul_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmadd_b:.*xvmadd\\.b.*lasx_xvmadd_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmadd_h:.*xvmadd\\.h.*lasx_xvmadd_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmadd_w:.*xvmadd\\.w.*lasx_xvmadd_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmadd_d:.*xvmadd\\.d.*lasx_xvmadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmsub_b:.*xvmsub\\.b.*lasx_xvmsub_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmsub_h:.*xvmsub\\.h.*lasx_xvmsub_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmsub_w:.*xvmsub\\.w.*lasx_xvmsub_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmsub_d:.*xvmsub\\.d.*lasx_xvmsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_b:.*xvdiv\\.b.*lasx_xvdiv_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_h:.*xvdiv\\.h.*lasx_xvdiv_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_w:.*xvdiv\\.w.*lasx_xvdiv_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_d:.*xvdiv\\.d.*lasx_xvdiv_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_bu:.*xvdiv\\.bu.*lasx_xvdiv_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_hu:.*xvdiv\\.hu.*lasx_xvdiv_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_wu:.*xvdiv\\.wu.*lasx_xvdiv_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvdiv_du:.*xvdiv\\.du.*lasx_xvdiv_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_h_b:.*xvhaddw\\.h\\.b.*lasx_xvhaddw_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_w_h:.*xvhaddw\\.w\\.h.*lasx_xvhaddw_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_d_w:.*xvhaddw\\.d\\.w.*lasx_xvhaddw_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_hu_bu:.*xvhaddw\\.hu\\.bu.*lasx_xvhaddw_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_wu_hu:.*xvhaddw\\.wu\\.hu.*lasx_xvhaddw_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_du_wu:.*xvhaddw\\.du\\.wu.*lasx_xvhaddw_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_h_b:.*xvhsubw\\.h\\.b.*lasx_xvhsubw_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_w_h:.*xvhsubw\\.w\\.h.*lasx_xvhsubw_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvhsubw_d_w:.*xvhsubw\\.d\\.w.*lasx_xvhsubw_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_hu_bu:.*xvhsubw\\.hu\\.bu.*lasx_xvhsubw_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_wu_hu:.*xvhsubw\\.wu\\.hu.*lasx_xvhsubw_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_du_wu:.*xvhsubw\\.du\\.wu.*lasx_xvhsubw_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_b:.*xvmod\\.b.*lasx_xvmod_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_h:.*xvmod\\.h.*lasx_xvmod_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_w:.*xvmod\\.w.*lasx_xvmod_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_d:.*xvmod\\.d.*lasx_xvmod_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_bu:.*xvmod\\.bu.*lasx_xvmod_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_hu:.*xvmod\\.hu.*lasx_xvmod_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_wu:.*xvmod\\.wu.*lasx_xvmod_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmod_du:.*xvmod\\.du.*lasx_xvmod_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_b:.*xvrepl128vei\\.b.*lasx_xvrepl128vei_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_h:.*xvrepl128vei\\.h.*lasx_xvrepl128vei_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_w:.*xvrepl128vei\\.w.*lasx_xvrepl128vei_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_d:.*xvrepl128vei\\.d.*lasx_xvrepl128vei_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickev_b:.*xvpickev\\.b.*lasx_xvpickev_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickev_h:.*xvpickev\\.h.*lasx_xvpickev_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickev_w:.*xvpickev\\.w.*lasx_xvpickev_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickev_d:.*xvilvl\\.d.*lasx_xvpickev_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickod_b:.*xvpickod\\.b.*lasx_xvpickod_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickod_h:.*xvpickod\\.h.*lasx_xvpickod_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickod_w:.*xvpickod\\.w.*lasx_xvpickod_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickod_d:.*xvilvh\\.d.*lasx_xvpickod_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvh_b:.*xvilvh\\.b.*lasx_xvilvh_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvh_h:.*xvilvh\\.h.*lasx_xvilvh_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvh_w:.*xvilvh\\.w.*lasx_xvilvh_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvh_d:.*xvilvh\\.d.*lasx_xvilvh_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvl_b:.*xvilvl\\.b.*lasx_xvilvl_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvl_h:.*xvilvl\\.h.*lasx_xvilvl_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvl_w:.*xvilvl\\.w.*lasx_xvilvl_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvilvl_d:.*xvilvl\\.d.*lasx_xvilvl_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackev_b:.*xvpackev\\.b.*lasx_xvpackev_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackev_h:.*xvpackev\\.h.*lasx_xvpackev_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackev_w:.*xvpackev\\.w.*lasx_xvpackev_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackev_d:.*xvilvl\\.d.*lasx_xvpackev_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackod_b:.*xvpackod\\.b.*lasx_xvpackod_b" 1 } 
} */ -+/* { dg-final { scan-assembler-times "lasx_xvpackod_h:.*xvpackod\\.h.*lasx_xvpackod_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackod_w:.*xvpackod\\.w.*lasx_xvpackod_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpackod_d:.*xvilvh\\.d.*lasx_xvpackod_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf_b:.*xvshuf\\.b.*lasx_xvshuf_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf_h:.*xvshuf\\.h.*lasx_xvshuf_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf_w:.*xvshuf\\.w.*lasx_xvshuf_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf_d:.*xvshuf\\.d.*lasx_xvshuf_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvand_v:.*xvand\\.v.*lasx_xvand_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvandi_b:.*xvandi\\.b.*lasx_xvandi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvor_v:.*xvor\\.v.*lasx_xvor_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvori_b:.*xvbitseti\\.b.*lasx_xvori_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvnor_v:.*xvnor\\.v.*lasx_xvnor_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvnori_b:.*xvnori\\.b.*lasx_xvnori_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvxor_v:.*xvxor\\.v.*lasx_xvxor_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvxori_b:.*xvbitrevi\\.b.*lasx_xvxori_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitsel_v:.*xvbitsel\\.v.*lasx_xvbitsel_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbitseli_b:.*xvbitseli\\.b.*lasx_xvbitseli_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf4i_b:.*xvshuf4i\\.b.*lasx_xvshuf4i_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf4i_h:.*xvshuf4i\\.h.*lasx_xvshuf4i_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf4i_w:.*xvshuf4i\\.w.*lasx_xvshuf4i_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_b:.*xvreplgr2vr\\.b.*lasx_xvreplgr2vr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_h:.*xvreplgr2vr\\.h.*lasx_xvreplgr2vr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_w:.*xvreplgr2vr\\.w.*lasx_xvreplgr2vr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_d:.*xvreplgr2vr\\.d.*lasx_xvreplgr2vr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpcnt_b:.*xvpcnt\\.b.*lasx_xvpcnt_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpcnt_h:.*xvpcnt\\.h.*lasx_xvpcnt_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpcnt_w:.*xvpcnt\\.w.*lasx_xvpcnt_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpcnt_d:.*xvpcnt\\.d.*lasx_xvpcnt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclo_b:.*xvclo\\.b.*lasx_xvclo_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclo_h:.*xvclo\\.h.*lasx_xvclo_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclo_w:.*xvclo\\.w.*lasx_xvclo_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclo_d:.*xvclo\\.d.*lasx_xvclo_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclz_b:.*xvclz\\.b.*lasx_xvclz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclz_h:.*xvclz\\.h.*lasx_xvclz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclz_w:.*xvclz\\.w.*lasx_xvclz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvclz_d:.*xvclz\\.d.*lasx_xvclz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfadd_s:.*xvfadd\\.s.*lasx_xvfadd_s" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvfadd_d:.*xvfadd\\.d.*lasx_xvfadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfsub_s:.*xvfsub\\.s.*lasx_xvfsub_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfsub_d:.*xvfsub\\.d.*lasx_xvfsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmul_s:.*xvfmul\\.s.*lasx_xvfmul_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmul_d:.*xvfmul\\.d.*lasx_xvfmul_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfdiv_s:.*xvfdiv\\.s.*lasx_xvfdiv_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfdiv_d:.*xvfdiv\\.d.*lasx_xvfdiv_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcvt_h_s:.*xvfcvt\\.h\\.s.*lasx_xvfcvt_h_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcvt_s_d:.*xvfcvt\\.s\\.d.*lasx_xvfcvt_s_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmin_s:.*xvfmin\\.s.*lasx_xvfmin_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmin_d:.*xvfmin\\.d.*lasx_xvfmin_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmina_s:.*xvfmina\\.s.*lasx_xvfmina_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmina_d:.*xvfmina\\.d.*lasx_xvfmina_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmax_s:.*xvfmax\\.s.*lasx_xvfmax_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmax_d:.*xvfmax\\.d.*lasx_xvfmax_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmaxa_s:.*xvfmaxa\\.s.*lasx_xvfmaxa_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmaxa_d:.*xvfmaxa\\.d.*lasx_xvfmaxa_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfclass_s:.*xvfclass\\.s.*lasx_xvfclass_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfclass_d:.*xvfclass\\.d.*lasx_xvfclass_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfsqrt_s:.*xvfsqrt\\.s.*lasx_xvfsqrt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfsqrt_d:.*xvfsqrt\\.d.*lasx_xvfsqrt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrecip_s:.*xvfrecip\\.s.*lasx_xvfrecip_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrecip_d:.*xvfrecip\\.d.*lasx_xvfrecip_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrint_s:.*xvfrint\\.s.*lasx_xvfrint_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrint_d:.*xvfrint\\.d.*lasx_xvfrint_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_s:.*xvfrsqrt\\.s.*lasx_xvfrsqrt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_d:.*xvfrsqrt\\.d.*lasx_xvfrsqrt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvflogb_s:.*xvflogb\\.s.*lasx_xvflogb_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvflogb_d:.*xvflogb\\.d.*lasx_xvflogb_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcvth_s_h:.*xvfcvth\\.s\\.h.*lasx_xvfcvth_s_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcvth_d_s:.*xvfcvth\\.d\\.s.*lasx_xvfcvth_d_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcvtl_s_h:.*xvfcvtl\\.s\\.h.*lasx_xvfcvtl_s_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcvtl_d_s:.*xvfcvtl\\.d\\.s.*lasx_xvfcvtl_d_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftint_w_s:.*xvftint\\.w\\.s.*lasx_xvftint_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftint_l_d:.*xvftint\\.l\\.d.*lasx_xvftint_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftint_wu_s:.*xvftint\\.wu\\.s.*lasx_xvftint_wu_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftint_lu_d:.*xvftint\\.lu\\.d.*lasx_xvftint_lu_d" 1 } } */ -+/* { 
dg-final { scan-assembler-times "lasx_xvftintrz_w_s:.*xvftintrz\\.w\\.s.*lasx_xvftintrz_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrz_l_d:.*xvftintrz\\.l\\.d.*lasx_xvftintrz_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrz_wu_s:.*xvftintrz\\.wu\\.s.*lasx_xvftintrz_wu_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrz_lu_d:.*xvftintrz\\.lu\\.d.*lasx_xvftintrz_lu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvffint_s_w:.*xvffint\\.s\\.w.*lasx_xvffint_s_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvffint_d_l:.*xvffint\\.d\\.l.*lasx_xvffint_d_l" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvffint_s_wu:.*xvffint\\.s\\.wu.*lasx_xvffint_s_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvffint_d_lu:.*xvffint\\.d\\.lu.*lasx_xvffint_d_lu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve_b:.*xvreplve\\.b.*lasx_xvreplve_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve_h:.*xvreplve\\.h.*lasx_xvreplve_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve_w:.*xvreplve\\.w.*lasx_xvreplve_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve_d:.*xvreplve\\.d.*lasx_xvreplve_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpermi_w:.*xvpermi\\.w.*lasx_xvpermi_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvandn_v:.*xvandn\\.v.*lasx_xvandn_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvneg_b:.*xvneg\\.b.*lasx_xvneg_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvneg_h:.*xvneg\\.h.*lasx_xvneg_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvneg_w:.*xvneg\\.w.*lasx_xvneg_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvneg_d:.*xvneg\\.d.*lasx_xvneg_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_b:.*xvmuh\\.b.*lasx_xvmuh_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_h:.*xvmuh\\.h.*lasx_xvmuh_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_w:.*xvmuh\\.w.*lasx_xvmuh_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_d:.*xvmuh\\.d.*lasx_xvmuh_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_bu:.*xvmuh\\.bu.*lasx_xvmuh_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_hu:.*xvmuh\\.hu.*lasx_xvmuh_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_wu:.*xvmuh\\.wu.*lasx_xvmuh_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmuh_du:.*xvmuh\\.du.*lasx_xvmuh_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsllwil_h_b:.*xvsllwil\\.h\\.b.*lasx_xvsllwil_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsllwil_w_h:.*xvsllwil\\.w\\.h.*lasx_xvsllwil_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsllwil_d_w:.*xvsllwil\\.d\\.w.*lasx_xvsllwil_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsllwil_hu_bu:.*xvsllwil\\.hu\\.bu.*lasx_xvsllwil_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsllwil_wu_hu:.*xvsllwil\\.wu\\.hu.*lasx_xvsllwil_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsllwil_du_wu:.*xvsllwil\\.du\\.wu.*lasx_xvsllwil_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsran_b_h:.*xvsran\\.b\\.h.*lasx_xvsran_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsran_h_w:.*xvsran\\.h\\.w.*lasx_xvsran_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsran_w_d:.*xvsran\\.w\\.d.*lasx_xvsran_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvssran_b_h:.*xvssran\\.b\\.h.*lasx_xvssran_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssran_h_w:.*xvssran\\.h\\.w.*lasx_xvssran_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssran_w_d:.*xvssran\\.w\\.d.*lasx_xvssran_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssran_bu_h:.*xvssran\\.bu\\.h.*lasx_xvssran_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssran_hu_w:.*xvssran\\.hu\\.w.*lasx_xvssran_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssran_wu_d:.*xvssran\\.wu\\.d.*lasx_xvssran_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarn_b_h:.*xvsrarn\\.b\\.h.*lasx_xvsrarn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarn_h_w:.*xvsrarn\\.h\\.w.*lasx_xvsrarn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarn_w_d:.*xvsrarn\\.w\\.d.*lasx_xvsrarn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarn_b_h:.*xvssrarn\\.b\\.h.*lasx_xvssrarn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarn_h_w:.*xvssrarn\\.h\\.w.*lasx_xvssrarn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarn_w_d:.*xvssrarn\\.w\\.d.*lasx_xvssrarn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarn_bu_h:.*xvssrarn\\.bu\\.h.*lasx_xvssrarn_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarn_hu_w:.*xvssrarn\\.hu\\.w.*lasx_xvssrarn_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarn_wu_d:.*xvssrarn\\.wu\\.d.*lasx_xvssrarn_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrln_b_h:.*xvsrln\\.b\\.h.*lasx_xvsrln_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrln_h_w:.*xvsrln\\.h\\.w.*lasx_xvsrln_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrln_w_d:.*xvsrln\\.w\\.d.*lasx_xvsrln_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrln_bu_h:.*xvssrln\\.bu\\.h.*lasx_xvssrln_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrln_hu_w:.*xvssrln\\.hu\\.w.*lasx_xvssrln_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrln_wu_d:.*xvssrln\\.wu\\.d.*lasx_xvssrln_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrn_b_h:.*xvsrlrn\\.b\\.h.*lasx_xvsrlrn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrn_h_w:.*xvsrlrn\\.h\\.w.*lasx_xvsrlrn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrn_w_d:.*xvsrlrn\\.w\\.d.*lasx_xvsrlrn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrn_bu_h:.*xvssrlrn\\.bu\\.h.*lasx_xvssrlrn_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrn_hu_w:.*xvssrlrn\\.hu\\.w.*lasx_xvssrlrn_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrn_wu_d:.*xvssrlrn\\.wu\\.d.*lasx_xvssrlrn_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrstpi_b:.*xvfrstpi\\.b.*lasx_xvfrstpi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrstpi_h:.*xvfrstpi\\.h.*lasx_xvfrstpi_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrstp_b:.*xvfrstp\\.b.*lasx_xvfrstp_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrstp_h:.*xvfrstp\\.h.*lasx_xvfrstp_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvshuf4i_d:.*xvshuf4i\\.d.*lasx_xvshuf4i_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbsrl_v:.*xvbsrl\\.v.*lasx_xvbsrl_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvbsll_v:.*xvbsll\\.v.*lasx_xvbsll_v" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvextrins_b:.*xvextrins\\.b.*lasx_xvextrins_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvextrins_h:.*xvextrins\\.h.*lasx_xvextrins_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvextrins_w:.*xvextrins\\.w.*lasx_xvextrins_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvextrins_d:.*xvextrins\\.d.*lasx_xvextrins_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmskltz_b:.*xvmskltz\\.b.*lasx_xvmskltz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmskltz_h:.*xvmskltz\\.h.*lasx_xvmskltz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmskltz_w:.*xvmskltz\\.w.*lasx_xvmskltz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmskltz_d:.*xvmskltz\\.d.*lasx_xvmskltz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsigncov_b:.*xvsigncov\\.b.*lasx_xvsigncov_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsigncov_h:.*xvsigncov\\.h.*lasx_xvsigncov_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsigncov_w:.*xvsigncov\\.w.*lasx_xvsigncov_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsigncov_d:.*xvsigncov\\.d.*lasx_xvsigncov_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmadd_s:.*xvfmadd\\.s.*lasx_xvfmadd_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmadd_d:.*xvfmadd\\.d.*lasx_xvfmadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmsub_s:.*xvfmsub\\.s.*lasx_xvfmsub_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfmsub_d:.*xvfmsub\\.d.*lasx_xvfmsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfnmadd_s:.*xvfnmadd\\.s.*lasx_xvfnmadd_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfnmadd_d:.*xvfnmadd\\.d.*lasx_xvfnmadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfnmsub_s:.*xvfnmsub\\.s.*lasx_xvfnmsub_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfnmsub_d:.*xvfnmsub\\.d.*lasx_xvfnmsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_s:.*xvftintrne\\.w\\.s.*lasx_xvftintrne_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrne_l_d:.*xvftintrne\\.l\\.d.*lasx_xvftintrne_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_s:.*xvftintrp\\.w\\.s.*lasx_xvftintrp_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrp_l_d:.*xvftintrp\\.l\\.d.*lasx_xvftintrp_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_s:.*xvftintrm\\.w\\.s.*lasx_xvftintrm_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrm_l_d:.*xvftintrm\\.l\\.d.*lasx_xvftintrm_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftint_w_d:.*xvftint\\.w\\.d.*lasx_xvftint_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvffint_s_l:.*xvffint\\.s\\.l.*lasx_xvffint_s_l" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_d:.*xvftintrz\\.w\\.d.*lasx_xvftintrz_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_d:.*xvftintrp\\.w\\.d.*lasx_xvftintrp_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_d:.*xvftintrm\\.w\\.d.*lasx_xvftintrm_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_d:.*xvftintrne\\.w\\.d.*lasx_xvftintrne_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftinth_l_s:.*xvftinth\\.l\\.s.*lasx_xvftinth_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintl_l_s:.*xvftintl\\.l\\.s.*lasx_xvftintl_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvffinth_d_w:.*xvffinth\\.d\\.w.*lasx_xvffinth_d_w" 1 } } 
*/ -+/* { dg-final { scan-assembler-times "lasx_xvffintl_d_w:.*xvffintl\\.d\\.w.*lasx_xvffintl_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrzh_l_s:.*xvftintrzh\\.l\\.s.*lasx_xvftintrzh_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrzl_l_s:.*xvftintrzl\\.l\\.s.*lasx_xvftintrzl_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrph_l_s:.*xvftintrph\\.l\\.s.*lasx_xvftintrph_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrpl_l_s:.*xvftintrpl\\.l\\.s.*lasx_xvftintrpl_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrmh_l_s:.*xvftintrmh\\.l\\.s.*lasx_xvftintrmh_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrml_l_s:.*xvftintrml\\.l\\.s.*lasx_xvftintrml_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrneh_l_s:.*xvftintrneh\\.l\\.s.*lasx_xvftintrneh_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvftintrnel_l_s:.*xvftintrnel\\.l\\.s.*lasx_xvftintrnel_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrne_s:.*xvfrintrne\\.s.*lasx_xvfrintrne_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrne_d:.*xvfrintrne\\.d.*lasx_xvfrintrne_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrz_s:.*xvfrintrz\\.s.*lasx_xvfrintrz_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrz_d:.*xvfrintrz\\.d.*lasx_xvfrintrz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrp_s:.*xvfrintrp\\.s.*lasx_xvfrintrp_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrp_d:.*xvfrintrp\\.d.*lasx_xvfrintrp_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrm_s:.*xvfrintrm\\.s.*lasx_xvfrintrm_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfrintrm_d:.*xvfrintrm\\.d.*lasx_xvfrintrm_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvld:.*xvld.*lasx_xvld" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvst:.*xvst.*lasx_xvst" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvstelm_b:.*xvstelm\\.b.*lasx_xvstelm_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvstelm_h:.*xvstelm\\.h.*lasx_xvstelm_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvstelm_w:.*xvstelm\\.w.*lasx_xvstelm_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvstelm_d:.*xvstelm\\.d.*lasx_xvstelm_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvinsve0_w:.*xvinsve0\\.w.*lasx_xvinsve0_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvinsve0_d:.*xvinsve0\\.d.*lasx_xvinsve0_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve_w:.*xvpickve\\.w.*lasx_xvpickve_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve_d:.*xvpickve\\.d.*lasx_xvpickve_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrn_b_h:.*xvssrlrn\\.b\\.h.*lasx_xvssrlrn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrn_h_w:.*xvssrlrn\\.h\\.w.*lasx_xvssrlrn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrn_w_d:.*xvssrlrn\\.w\\.d.*lasx_xvssrlrn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrln_b_h:.*xvssrln\\.b\\.h.*lasx_xvssrln_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrln_h_w:.*xvssrln\\.h\\.w.*lasx_xvssrln_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrln_w_d:.*xvssrln\\.w\\.d.*lasx_xvssrln_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvorn_v:.*xvorn\\.v.*lasx_xvorn_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvldi:.*xvldi.*lasx_xvldi" 1 } } */ -+/* 
{ dg-final { scan-assembler-times "lasx_xvldx:.*xvldx.*lasx_xvldx" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvstx:.*xvstx.*lasx_xvstx" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvextl_qu_du:.*xvextl\\.qu\\.du.*lasx_xvextl_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_w:.*xvinsgr2vr\\.w.*lasx_xvinsgr2vr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_d:.*xvinsgr2vr\\.d.*lasx_xvinsgr2vr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve0_b:.*xvreplve0\\.b.*lasx_xvreplve0_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve0_h:.*xvreplve0\\.h.*lasx_xvreplve0_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve0_w:.*xvreplve0\\.w.*lasx_xvreplve0_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve0_d:.*xvreplve0\\.d.*lasx_xvreplve0_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvreplve0_q:.*xvreplve0\\.q.*lasx_xvreplve0_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_h_b:.*vext2xv\\.h\\.b.*lasx_vext2xv_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_w_h:.*vext2xv\\.w\\.h.*lasx_vext2xv_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_d_w:.*vext2xv\\.d\\.w.*lasx_vext2xv_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_w_b:.*vext2xv\\.w\\.b.*lasx_vext2xv_w_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_d_h:.*vext2xv\\.d\\.h.*lasx_vext2xv_d_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_d_b:.*vext2xv\\.d\\.b.*lasx_vext2xv_d_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_hu_bu:.*vext2xv\\.hu\\.bu.*lasx_vext2xv_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_hu:.*vext2xv\\.wu\\.hu.*lasx_vext2xv_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_du_wu:.*vext2xv\\.du\\.wu.*lasx_vext2xv_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_bu:.*vext2xv\\.wu\\.bu.*lasx_vext2xv_wu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_du_hu:.*vext2xv\\.du\\.hu.*lasx_vext2xv_du_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_vext2xv_du_bu:.*vext2xv\\.du\\.bu.*lasx_vext2xv_du_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpermi_q:.*xvpermi\\.q.*lasx_xvpermi_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpermi_d:.*xvpermi\\.d.*lasx_xvpermi_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvperm_w:.*xvperm\\.w.*lasx_xvperm_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvldrepl_b:.*xvldrepl\\.b.*lasx_xvldrepl_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvldrepl_h:.*xvldrepl\\.h.*lasx_xvldrepl_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvldrepl_w:.*xvldrepl\\.w.*lasx_xvldrepl_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvldrepl_d:.*xvldrepl\\.d.*lasx_xvldrepl_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_w:.*xvpickve2gr\\.w.*lasx_xvpickve2gr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_wu:.*xvpickve2gr\\.wu.*lasx_xvpickve2gr_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_d:.*xvpickve2gr\\.d.*lasx_xvpickve2gr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_du:.*xvpickve2gr\\.du.*lasx_xvpickve2gr_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_d:.*xvaddwev\\.q\\.d.*lasx_xvaddwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvaddwev_d_w:.*xvaddwev\\.d\\.w.*lasx_xvaddwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_h:.*xvaddwev\\.w\\.h.*lasx_xvaddwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_b:.*xvaddwev\\.h\\.b.*lasx_xvaddwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du:.*xvaddwev\\.q\\.du.*lasx_xvaddwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu:.*xvaddwev\\.d\\.wu.*lasx_xvaddwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu:.*xvaddwev\\.w\\.hu.*lasx_xvaddwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu:.*xvaddwev\\.h\\.bu.*lasx_xvaddwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_d:.*xvsubwev\\.q\\.d.*lasx_xvsubwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_w:.*xvsubwev\\.d\\.w.*lasx_xvsubwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_h:.*xvsubwev\\.w\\.h.*lasx_xvsubwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_b:.*xvsubwev\\.h\\.b.*lasx_xvsubwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_du:.*xvsubwev\\.q\\.du.*lasx_xvsubwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_wu:.*xvsubwev\\.d\\.wu.*lasx_xvsubwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_hu:.*xvsubwev\\.w\\.hu.*lasx_xvsubwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_bu:.*xvsubwev\\.h\\.bu.*lasx_xvsubwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_d:.*xvmulwev\\.q\\.d.*lasx_xvmulwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_w:.*xvmulwev\\.d\\.w.*lasx_xvmulwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_h:.*xvmulwev\\.w\\.h.*lasx_xvmulwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_b:.*xvmulwev\\.h\\.b.*lasx_xvmulwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du:.*xvmulwev\\.q\\.du.*lasx_xvmulwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu:.*xvmulwev\\.d\\.wu.*lasx_xvmulwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu:.*xvmulwev\\.w\\.hu.*lasx_xvmulwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu:.*xvmulwev\\.h\\.bu.*lasx_xvmulwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_d:.*xvaddwod\\.q\\.d.*lasx_xvaddwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_w:.*xvaddwod\\.d\\.w.*lasx_xvaddwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_h:.*xvaddwod\\.w\\.h.*lasx_xvaddwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_b:.*xvaddwod\\.h\\.b.*lasx_xvaddwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du:.*xvaddwod\\.q\\.du.*lasx_xvaddwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu:.*xvaddwod\\.d\\.wu.*lasx_xvaddwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu:.*xvaddwod\\.w\\.hu.*lasx_xvaddwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu:.*xvaddwod\\.h\\.bu.*lasx_xvaddwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_d:.*xvsubwod\\.q\\.d.*lasx_xvsubwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_w:.*xvsubwod\\.d\\.w.*lasx_xvsubwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvsubwod_w_h:.*xvsubwod\\.w\\.h.*lasx_xvsubwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_b:.*xvsubwod\\.h\\.b.*lasx_xvsubwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_du:.*xvsubwod\\.q\\.du.*lasx_xvsubwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_wu:.*xvsubwod\\.d\\.wu.*lasx_xvsubwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_w_hu:.*xvsubwod\\.w\\.hu.*lasx_xvsubwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_bu:.*xvsubwod\\.h\\.bu.*lasx_xvsubwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_d:.*xvmulwod\\.q\\.d.*lasx_xvmulwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_w:.*xvmulwod\\.d\\.w.*lasx_xvmulwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_h:.*xvmulwod\\.w\\.h.*lasx_xvmulwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_b:.*xvmulwod\\.h\\.b.*lasx_xvmulwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du:.*xvmulwod\\.q\\.du.*lasx_xvmulwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu:.*xvmulwod\\.d\\.wu.*lasx_xvmulwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu:.*xvmulwod\\.w\\.hu.*lasx_xvmulwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu:.*xvmulwod\\.h\\.bu.*lasx_xvmulwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu_w:.*xvaddwev\\.d\\.wu\\.w.*lasx_xvaddwev_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu_h:.*xvaddwev\\.w\\.hu\\.h.*lasx_xvaddwev_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu_b:.*xvaddwev\\.h\\.bu\\.b.*lasx_xvaddwev_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu_w:.*xvmulwev\\.d\\.wu\\.w.*lasx_xvmulwev_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu_h:.*xvmulwev\\.w\\.hu\\.h.*lasx_xvmulwev_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu_b:.*xvmulwev\\.h\\.bu\\.b.*lasx_xvmulwev_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu_w:.*xvaddwod\\.d\\.wu\\.w.*lasx_xvaddwod_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu_h:.*xvaddwod\\.w\\.hu\\.h.*lasx_xvaddwod_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu_b:.*xvaddwod\\.h\\.bu\\.b.*lasx_xvaddwod_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu_w:.*xvmulwod\\.d\\.wu\\.w.*lasx_xvmulwod_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu_h:.*xvmulwod\\.w\\.hu\\.h.*lasx_xvmulwod_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu_b:.*xvmulwod\\.h\\.bu\\.b.*lasx_xvmulwod_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_q_d:.*xvhaddw\\.q\\.d.*lasx_xvhaddw_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhaddw_qu_du:.*xvhaddw\\.qu\\.du.*lasx_xvhaddw_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_q_d:.*xvhsubw\\.q\\.d.*lasx_xvhsubw_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvhsubw_qu_du:.*xvhsubw\\.qu\\.du.*lasx_xvhsubw_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_d:.*xvmaddwev\\.q\\.d.*lasx_xvmaddwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_w:.*xvmaddwev\\.d\\.w.*lasx_xvmaddwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvmaddwev_w_h:.*xvmaddwev\\.w\\.h.*lasx_xvmaddwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_b:.*xvmaddwev\\.h\\.b.*lasx_xvmaddwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du:.*xvmaddwev\\.q\\.du.*lasx_xvmaddwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu:.*xvmaddwev\\.d\\.wu.*lasx_xvmaddwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu:.*xvmaddwev\\.w\\.hu.*lasx_xvmaddwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu:.*xvmaddwev\\.h\\.bu.*lasx_xvmaddwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_d:.*xvmaddwod\\.q\\.d.*lasx_xvmaddwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_w:.*xvmaddwod\\.d\\.w.*lasx_xvmaddwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_h:.*xvmaddwod\\.w\\.h.*lasx_xvmaddwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_b:.*xvmaddwod\\.h\\.b.*lasx_xvmaddwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du:.*xvmaddwod\\.q\\.du.*lasx_xvmaddwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu:.*xvmaddwod\\.d\\.wu.*lasx_xvmaddwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu:.*xvmaddwod\\.w\\.hu.*lasx_xvmaddwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu:.*xvmaddwod\\.h\\.bu.*lasx_xvmaddwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du_d:.*xvmaddwev\\.q\\.du\\.d.*lasx_xvmaddwev_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu_w:.*xvmaddwev\\.d\\.wu\\.w.*lasx_xvmaddwev_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu_h:.*xvmaddwev\\.w\\.hu\\.h.*lasx_xvmaddwev_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu_b:.*xvmaddwev\\.h\\.bu\\.b.*lasx_xvmaddwev_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du_d:.*xvmaddwod\\.q\\.du\\.d.*lasx_xvmaddwod_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu_w:.*xvmaddwod\\.d\\.wu\\.w.*lasx_xvmaddwod_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu_h:.*xvmaddwod\\.w\\.hu\\.h.*lasx_xvmaddwod_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu_b:.*xvmaddwod\\.h\\.bu\\.b.*lasx_xvmaddwod_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotr_b:.*xvrotr\\.b.*lasx_xvrotr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotr_h:.*xvrotr\\.h.*lasx_xvrotr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotr_w:.*xvrotr\\.w.*lasx_xvrotr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotr_d:.*xvrotr\\.d.*lasx_xvrotr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvadd_q:.*xvadd\\.q.*lasx_xvadd_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsub_q:.*xvsub\\.q.*lasx_xvsub_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du_d:.*xvaddwev\\.q\\.du\\.d.*lasx_xvaddwev_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du_d:.*xvaddwod\\.q\\.du\\.d.*lasx_xvaddwod_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du_d:.*xvmulwev\\.q\\.du\\.d.*lasx_xvmulwev_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du_d:.*xvmulwod\\.q\\.du\\.d.*lasx_xvmulwod_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvmskgez_b:.*xvmskgez\\.b.*lasx_xvmskgez_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvmsknz_b:.*xvmsknz\\.b.*lasx_xvmsknz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_h_b:.*xvexth\\.h\\.b.*lasx_xvexth_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_w_h:.*xvexth\\.w\\.h.*lasx_xvexth_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_d_w:.*xvexth\\.d\\.w.*lasx_xvexth_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_q_d:.*xvexth\\.q\\.d.*lasx_xvexth_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_hu_bu:.*xvexth\\.hu\\.bu.*lasx_xvexth_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_wu_hu:.*xvexth\\.wu\\.hu.*lasx_xvexth_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_du_wu:.*xvexth\\.du\\.wu.*lasx_xvexth_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvexth_qu_du:.*xvexth\\.qu\\.du.*lasx_xvexth_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotri_b:.*xvrotri\\.b.*lasx_xvrotri_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotri_h:.*xvrotri\\.h.*lasx_xvrotri_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotri_w:.*xvrotri\\.w.*lasx_xvrotri_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrotri_d:.*xvrotri\\.d.*lasx_xvrotri_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvextl_q_d:.*xvextl\\.q\\.d.*lasx_xvextl_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlni_b_h:.*xvsrlni\\.b\\.h.*lasx_xvsrlni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlni_h_w:.*xvsrlni\\.h\\.w.*lasx_xvsrlni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlni_w_d:.*xvsrlni\\.w\\.d.*lasx_xvsrlni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlni_d_q:.*xvsrlni\\.d\\.q.*lasx_xvsrlni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrni_b_h:.*xvsrlrni\\.b\\.h.*lasx_xvsrlrni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrni_h_w:.*xvsrlrni\\.h\\.w.*lasx_xvsrlrni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrni_w_d:.*xvsrlrni\\.w\\.d.*lasx_xvsrlrni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrlrni_d_q:.*xvsrlrni\\.d\\.q.*lasx_xvsrlrni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_b_h:.*xvssrlni\\.b\\.h.*lasx_xvssrlni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_h_w:.*xvssrlni\\.h\\.w.*lasx_xvssrlni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_w_d:.*xvssrlni\\.w\\.d.*lasx_xvssrlni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_d_q:.*xvssrlni\\.d\\.q.*lasx_xvssrlni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_bu_h:.*xvssrlni\\.bu\\.h.*lasx_xvssrlni_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_hu_w:.*xvssrlni\\.hu\\.w.*lasx_xvssrlni_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_wu_d:.*xvssrlni\\.wu\\.d.*lasx_xvssrlni_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlni_du_q:.*xvssrlni\\.du\\.q.*lasx_xvssrlni_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_b_h:.*xvssrlrni\\.b\\.h.*lasx_xvssrlrni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_h_w:.*xvssrlrni\\.h\\.w.*lasx_xvssrlrni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_w_d:.*xvssrlrni\\.w\\.d.*lasx_xvssrlrni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lasx_xvssrlrni_d_q:.*xvssrlrni\\.d\\.q.*lasx_xvssrlrni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_bu_h:.*xvssrlrni\\.bu\\.h.*lasx_xvssrlrni_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_hu_w:.*xvssrlrni\\.hu\\.w.*lasx_xvssrlrni_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_wu_d:.*xvssrlrni\\.wu\\.d.*lasx_xvssrlrni_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrlrni_du_q:.*xvssrlrni\\.du\\.q.*lasx_xvssrlrni_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrani_b_h:.*xvsrani\\.b\\.h.*lasx_xvsrani_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrani_h_w:.*xvsrani\\.h\\.w.*lasx_xvsrani_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrani_w_d:.*xvsrani\\.w\\.d.*lasx_xvsrani_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrani_d_q:.*xvsrani\\.d\\.q.*lasx_xvsrani_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarni_b_h:.*xvsrarni\\.b\\.h.*lasx_xvsrarni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarni_h_w:.*xvsrarni\\.h\\.w.*lasx_xvsrarni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarni_w_d:.*xvsrarni\\.w\\.d.*lasx_xvsrarni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvsrarni_d_q:.*xvsrarni\\.d\\.q.*lasx_xvsrarni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_b_h:.*xvssrani\\.b\\.h.*lasx_xvssrani_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_h_w:.*xvssrani\\.h\\.w.*lasx_xvssrani_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_w_d:.*xvssrani\\.w\\.d.*lasx_xvssrani_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_d_q:.*xvssrani\\.d\\.q.*lasx_xvssrani_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_bu_h:.*xvssrani\\.bu\\.h.*lasx_xvssrani_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_hu_w:.*xvssrani\\.hu\\.w.*lasx_xvssrani_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_wu_d:.*xvssrani\\.wu\\.d.*lasx_xvssrani_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrani_du_q:.*xvssrani\\.du\\.q.*lasx_xvssrani_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_b_h:.*xvssrarni\\.b\\.h.*lasx_xvssrarni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_h_w:.*xvssrarni\\.h\\.w.*lasx_xvssrarni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_w_d:.*xvssrarni\\.w\\.d.*lasx_xvssrarni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_d_q:.*xvssrarni\\.d\\.q.*lasx_xvssrarni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_bu_h:.*xvssrarni\\.bu\\.h.*lasx_xvssrarni_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_hu_w:.*xvssrarni\\.hu\\.w.*lasx_xvssrarni_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_wu_d:.*xvssrarni\\.wu\\.d.*lasx_xvssrarni_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvssrarni_du_q:.*xvssrarni\\.du\\.q.*lasx_xvssrarni_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbnz_b:.*xvsetanyeqz\\.b.*lasx_xbnz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbnz_d:.*xvsetanyeqz\\.d.*lasx_xbnz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbnz_h:.*xvsetanyeqz\\.h.*lasx_xbnz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbnz_v:.*xvseteqz\\.v.*lasx_xbnz_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbnz_w:.*xvsetanyeqz\\.w.*lasx_xbnz_w" 1 } 
} */ -+/* { dg-final { scan-assembler-times "lasx_xbz_b:.*xvsetallnez\\.b.*lasx_xbz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbz_d:.*xvsetallnez\\.d.*lasx_xbz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbz_h:.*xvsetallnez\\.h.*lasx_xbz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbz_v:.*xvsetnez\\.v.*lasx_xbz_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xbz_w:.*xvsetallnez\\.w.*lasx_xbz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_d:.*xvfcmp\\.caf\\.d.*lasx_xvfcmp_caf_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_s:.*xvfcmp\\.caf\\.s.*lasx_xvfcmp_caf_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_d:.*xvfcmp\\.ceq\\.d.*lasx_xvfcmp_ceq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_s:.*xvfcmp\\.ceq\\.s.*lasx_xvfcmp_ceq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_d:.*xvfcmp\\.cle\\.d.*lasx_xvfcmp_cle_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_s:.*xvfcmp\\.cle\\.s.*lasx_xvfcmp_cle_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_d:.*xvfcmp\\.clt\\.d.*lasx_xvfcmp_clt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_s:.*xvfcmp\\.clt\\.s.*lasx_xvfcmp_clt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_d:.*xvfcmp\\.cne\\.d.*lasx_xvfcmp_cne_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_s:.*xvfcmp\\.cne\\.s.*lasx_xvfcmp_cne_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_d:.*xvfcmp\\.cor\\.d.*lasx_xvfcmp_cor_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_s:.*xvfcmp\\.cor\\.s.*lasx_xvfcmp_cor_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_d:.*xvfcmp\\.cueq\\.d.*lasx_xvfcmp_cueq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_s:.*xvfcmp\\.cueq\\.s.*lasx_xvfcmp_cueq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_d:.*xvfcmp\\.cule\\.d.*lasx_xvfcmp_cule_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_s:.*xvfcmp\\.cule\\.s.*lasx_xvfcmp_cule_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_d:.*xvfcmp\\.cult\\.d.*lasx_xvfcmp_cult_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_s:.*xvfcmp\\.cult\\.s.*lasx_xvfcmp_cult_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_d:.*xvfcmp\\.cun\\.d.*lasx_xvfcmp_cun_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_d:.*xvfcmp\\.cune\\.d.*lasx_xvfcmp_cune_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_s:.*xvfcmp\\.cune\\.s.*lasx_xvfcmp_cune_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_s:.*xvfcmp\\.cun\\.s.*lasx_xvfcmp_cun_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_d:.*xvfcmp\\.saf\\.d.*lasx_xvfcmp_saf_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_s:.*xvfcmp\\.saf\\.s.*lasx_xvfcmp_saf_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_d:.*xvfcmp\\.seq\\.d.*lasx_xvfcmp_seq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_s:.*xvfcmp\\.seq\\.s.*lasx_xvfcmp_seq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_d:.*xvfcmp\\.sle\\.d.*lasx_xvfcmp_sle_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_s:.*xvfcmp\\.sle\\.s.*lasx_xvfcmp_sle_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_d:.*xvfcmp\\.slt\\.d.*lasx_xvfcmp_slt_d" 1 } } */ 
-+/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_s:.*xvfcmp\\.slt\\.s.*lasx_xvfcmp_slt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_d:.*xvfcmp\\.sne\\.d.*lasx_xvfcmp_sne_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_s:.*xvfcmp\\.sne\\.s.*lasx_xvfcmp_sne_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_d:.*xvfcmp\\.sor\\.d.*lasx_xvfcmp_sor_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_s:.*xvfcmp\\.sor\\.s.*lasx_xvfcmp_sor_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_d:.*xvfcmp\\.sueq\\.d.*lasx_xvfcmp_sueq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_s:.*xvfcmp\\.sueq\\.s.*lasx_xvfcmp_sueq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_d:.*xvfcmp\\.sule\\.d.*lasx_xvfcmp_sule_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_s:.*xvfcmp\\.sule\\.s.*lasx_xvfcmp_sule_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_d:.*xvfcmp\\.sult\\.d.*lasx_xvfcmp_sult_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_s:.*xvfcmp\\.sult\\.s.*lasx_xvfcmp_sult_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_d:.*xvfcmp\\.sun\\.d.*lasx_xvfcmp_sun_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_d:.*xvfcmp\\.sune\\.d.*lasx_xvfcmp_sune_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_s:.*xvfcmp\\.sune\\.s.*lasx_xvfcmp_sune_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_s:.*xvfcmp\\.sun\\.s.*lasx_xvfcmp_sun_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve_d_f:.*xvpickve\\.d.*lasx_xvpickve_d_f" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvpickve_w_f:.*xvpickve\\.w.*lasx_xvpickve_w_f" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepli_b:.*xvrepli\\.b.*lasx_xvrepli_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepli_d:.*xvrepli\\.d.*lasx_xvrepli_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepli_h:.*xvrepli\\.h.*lasx_xvrepli_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lasx_xvrepli_w:.*xvrepli\\.w.*lasx_xvrepli_w" 1 } } */ -+ -+typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); -+typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); -+typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); -+typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); -+typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); -+typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); -+typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); -+typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); -+typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); -+typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32))); -+typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); -+typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); -+typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); -+typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); -+typedef float v8f32_w __attribute__ ((vector_size(32), 
aligned(4))); -+typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); -+typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); -+ -+typedef float __m256 __attribute__ ((__vector_size__ (32), __may_alias__)); -+typedef long long __m256i __attribute__ ((__vector_size__ (32), __may_alias__)); -+typedef double __m256d __attribute__ ((__vector_size__ (32), __may_alias__)); -+ -+/* Unaligned version of the same types. */ -+typedef float __m256_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); -+typedef long long __m256i_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); -+typedef double __m256d_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); -+ -+v32i8 __lasx_xvsll_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsll_b(_1, _2);} -+v16i16 __lasx_xvsll_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsll_h(_1, _2);} -+v8i32 __lasx_xvsll_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsll_w(_1, _2);} -+v4i64 __lasx_xvsll_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsll_d(_1, _2);} -+v32i8 __lasx_xvslli_b(v32i8 _1){return __builtin_lasx_xvslli_b(_1, 1);} -+v16i16 __lasx_xvslli_h(v16i16 _1){return __builtin_lasx_xvslli_h(_1, 1);} -+v8i32 __lasx_xvslli_w(v8i32 _1){return __builtin_lasx_xvslli_w(_1, 1);} -+v4i64 __lasx_xvslli_d(v4i64 _1){return __builtin_lasx_xvslli_d(_1, 1);} -+v32i8 __lasx_xvsra_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsra_b(_1, _2);} -+v16i16 __lasx_xvsra_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsra_h(_1, _2);} -+v8i32 __lasx_xvsra_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsra_w(_1, _2);} -+v4i64 __lasx_xvsra_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsra_d(_1, _2);} -+v32i8 __lasx_xvsrai_b(v32i8 _1){return __builtin_lasx_xvsrai_b(_1, 1);} -+v16i16 __lasx_xvsrai_h(v16i16 _1){return __builtin_lasx_xvsrai_h(_1, 1);} -+v8i32 __lasx_xvsrai_w(v8i32 _1){return __builtin_lasx_xvsrai_w(_1, 1);} -+v4i64 __lasx_xvsrai_d(v4i64 _1){return __builtin_lasx_xvsrai_d(_1, 1);} -+v32i8 __lasx_xvsrar_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrar_b(_1, _2);} -+v16i16 __lasx_xvsrar_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrar_h(_1, _2);} -+v8i32 __lasx_xvsrar_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrar_w(_1, _2);} -+v4i64 __lasx_xvsrar_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrar_d(_1, _2);} -+v32i8 __lasx_xvsrari_b(v32i8 _1){return __builtin_lasx_xvsrari_b(_1, 1);} -+v16i16 __lasx_xvsrari_h(v16i16 _1){return __builtin_lasx_xvsrari_h(_1, 1);} -+v8i32 __lasx_xvsrari_w(v8i32 _1){return __builtin_lasx_xvsrari_w(_1, 1);} -+v4i64 __lasx_xvsrari_d(v4i64 _1){return __builtin_lasx_xvsrari_d(_1, 1);} -+v32i8 __lasx_xvsrl_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrl_b(_1, _2);} -+v16i16 __lasx_xvsrl_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrl_h(_1, _2);} -+v8i32 __lasx_xvsrl_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrl_w(_1, _2);} -+v4i64 __lasx_xvsrl_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrl_d(_1, _2);} -+v32i8 __lasx_xvsrli_b(v32i8 _1){return __builtin_lasx_xvsrli_b(_1, 1);} -+v16i16 __lasx_xvsrli_h(v16i16 _1){return __builtin_lasx_xvsrli_h(_1, 1);} -+v8i32 __lasx_xvsrli_w(v8i32 _1){return __builtin_lasx_xvsrli_w(_1, 1);} -+v4i64 __lasx_xvsrli_d(v4i64 _1){return __builtin_lasx_xvsrli_d(_1, 1);} -+v32i8 __lasx_xvsrlr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlr_b(_1, _2);} -+v16i16 
__lasx_xvsrlr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlr_h(_1, _2);} -+v8i32 __lasx_xvsrlr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlr_w(_1, _2);} -+v4i64 __lasx_xvsrlr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlr_d(_1, _2);} -+v32i8 __lasx_xvsrlri_b(v32i8 _1){return __builtin_lasx_xvsrlri_b(_1, 1);} -+v16i16 __lasx_xvsrlri_h(v16i16 _1){return __builtin_lasx_xvsrlri_h(_1, 1);} -+v8i32 __lasx_xvsrlri_w(v8i32 _1){return __builtin_lasx_xvsrlri_w(_1, 1);} -+v4i64 __lasx_xvsrlri_d(v4i64 _1){return __builtin_lasx_xvsrlri_d(_1, 1);} -+v32u8 __lasx_xvbitclr_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitclr_b(_1, _2);} -+v16u16 __lasx_xvbitclr_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitclr_h(_1, _2);} -+v8u32 __lasx_xvbitclr_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitclr_w(_1, _2);} -+v4u64 __lasx_xvbitclr_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitclr_d(_1, _2);} -+v32u8 __lasx_xvbitclri_b(v32u8 _1){return __builtin_lasx_xvbitclri_b(_1, 1);} -+v16u16 __lasx_xvbitclri_h(v16u16 _1){return __builtin_lasx_xvbitclri_h(_1, 1);} -+v8u32 __lasx_xvbitclri_w(v8u32 _1){return __builtin_lasx_xvbitclri_w(_1, 1);} -+v4u64 __lasx_xvbitclri_d(v4u64 _1){return __builtin_lasx_xvbitclri_d(_1, 1);} -+v32u8 __lasx_xvbitset_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitset_b(_1, _2);} -+v16u16 __lasx_xvbitset_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitset_h(_1, _2);} -+v8u32 __lasx_xvbitset_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitset_w(_1, _2);} -+v4u64 __lasx_xvbitset_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitset_d(_1, _2);} -+v32u8 __lasx_xvbitseti_b(v32u8 _1){return __builtin_lasx_xvbitseti_b(_1, 1);} -+v16u16 __lasx_xvbitseti_h(v16u16 _1){return __builtin_lasx_xvbitseti_h(_1, 1);} -+v8u32 __lasx_xvbitseti_w(v8u32 _1){return __builtin_lasx_xvbitseti_w(_1, 1);} -+v4u64 __lasx_xvbitseti_d(v4u64 _1){return __builtin_lasx_xvbitseti_d(_1, 1);} -+v32u8 __lasx_xvbitrev_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitrev_b(_1, _2);} -+v16u16 __lasx_xvbitrev_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitrev_h(_1, _2);} -+v8u32 __lasx_xvbitrev_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitrev_w(_1, _2);} -+v4u64 __lasx_xvbitrev_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitrev_d(_1, _2);} -+v32u8 __lasx_xvbitrevi_b(v32u8 _1){return __builtin_lasx_xvbitrevi_b(_1, 1);} -+v16u16 __lasx_xvbitrevi_h(v16u16 _1){return __builtin_lasx_xvbitrevi_h(_1, 1);} -+v8u32 __lasx_xvbitrevi_w(v8u32 _1){return __builtin_lasx_xvbitrevi_w(_1, 1);} -+v4u64 __lasx_xvbitrevi_d(v4u64 _1){return __builtin_lasx_xvbitrevi_d(_1, 1);} -+v32i8 __lasx_xvadd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvadd_b(_1, _2);} -+v16i16 __lasx_xvadd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvadd_h(_1, _2);} -+v8i32 __lasx_xvadd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvadd_w(_1, _2);} -+v4i64 __lasx_xvadd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadd_d(_1, _2);} -+v32i8 __lasx_xvaddi_bu(v32i8 _1){return __builtin_lasx_xvaddi_bu(_1, 1);} -+v16i16 __lasx_xvaddi_hu(v16i16 _1){return __builtin_lasx_xvaddi_hu(_1, 1);} -+v8i32 __lasx_xvaddi_wu(v8i32 _1){return __builtin_lasx_xvaddi_wu(_1, 1);} -+v4i64 __lasx_xvaddi_du(v4i64 _1){return __builtin_lasx_xvaddi_du(_1, 1);} -+v32i8 __lasx_xvsub_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsub_b(_1, _2);} -+v16i16 __lasx_xvsub_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsub_h(_1, _2);} -+v8i32 __lasx_xvsub_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsub_w(_1, _2);} -+v4i64 __lasx_xvsub_d(v4i64 _1, v4i64 _2){return 
__builtin_lasx_xvsub_d(_1, _2);} -+v32i8 __lasx_xvsubi_bu(v32i8 _1){return __builtin_lasx_xvsubi_bu(_1, 1);} -+v16i16 __lasx_xvsubi_hu(v16i16 _1){return __builtin_lasx_xvsubi_hu(_1, 1);} -+v8i32 __lasx_xvsubi_wu(v8i32 _1){return __builtin_lasx_xvsubi_wu(_1, 1);} -+v4i64 __lasx_xvsubi_du(v4i64 _1){return __builtin_lasx_xvsubi_du(_1, 1);} -+v32i8 __lasx_xvmax_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmax_b(_1, _2);} -+v16i16 __lasx_xvmax_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmax_h(_1, _2);} -+v8i32 __lasx_xvmax_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmax_w(_1, _2);} -+v4i64 __lasx_xvmax_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmax_d(_1, _2);} -+v32i8 __lasx_xvmaxi_b(v32i8 _1){return __builtin_lasx_xvmaxi_b(_1, 1);} -+v16i16 __lasx_xvmaxi_h(v16i16 _1){return __builtin_lasx_xvmaxi_h(_1, 1);} -+v8i32 __lasx_xvmaxi_w(v8i32 _1){return __builtin_lasx_xvmaxi_w(_1, 1);} -+v4i64 __lasx_xvmaxi_d(v4i64 _1){return __builtin_lasx_xvmaxi_d(_1, 1);} -+v32u8 __lasx_xvmax_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmax_bu(_1, _2);} -+v16u16 __lasx_xvmax_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmax_hu(_1, _2);} -+v8u32 __lasx_xvmax_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmax_wu(_1, _2);} -+v4u64 __lasx_xvmax_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmax_du(_1, _2);} -+v32u8 __lasx_xvmaxi_bu(v32u8 _1){return __builtin_lasx_xvmaxi_bu(_1, 1);} -+v16u16 __lasx_xvmaxi_hu(v16u16 _1){return __builtin_lasx_xvmaxi_hu(_1, 1);} -+v8u32 __lasx_xvmaxi_wu(v8u32 _1){return __builtin_lasx_xvmaxi_wu(_1, 1);} -+v4u64 __lasx_xvmaxi_du(v4u64 _1){return __builtin_lasx_xvmaxi_du(_1, 1);} -+v32i8 __lasx_xvmin_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmin_b(_1, _2);} -+v16i16 __lasx_xvmin_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmin_h(_1, _2);} -+v8i32 __lasx_xvmin_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmin_w(_1, _2);} -+v4i64 __lasx_xvmin_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmin_d(_1, _2);} -+v32i8 __lasx_xvmini_b(v32i8 _1){return __builtin_lasx_xvmini_b(_1, 1);} -+v16i16 __lasx_xvmini_h(v16i16 _1){return __builtin_lasx_xvmini_h(_1, 1);} -+v8i32 __lasx_xvmini_w(v8i32 _1){return __builtin_lasx_xvmini_w(_1, 1);} -+v4i64 __lasx_xvmini_d(v4i64 _1){return __builtin_lasx_xvmini_d(_1, 1);} -+v32u8 __lasx_xvmin_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmin_bu(_1, _2);} -+v16u16 __lasx_xvmin_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmin_hu(_1, _2);} -+v8u32 __lasx_xvmin_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmin_wu(_1, _2);} -+v4u64 __lasx_xvmin_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmin_du(_1, _2);} -+v32u8 __lasx_xvmini_bu(v32u8 _1){return __builtin_lasx_xvmini_bu(_1, 1);} -+v16u16 __lasx_xvmini_hu(v16u16 _1){return __builtin_lasx_xvmini_hu(_1, 1);} -+v8u32 __lasx_xvmini_wu(v8u32 _1){return __builtin_lasx_xvmini_wu(_1, 1);} -+v4u64 __lasx_xvmini_du(v4u64 _1){return __builtin_lasx_xvmini_du(_1, 1);} -+v32i8 __lasx_xvseq_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvseq_b(_1, _2);} -+v16i16 __lasx_xvseq_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvseq_h(_1, _2);} -+v8i32 __lasx_xvseq_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvseq_w(_1, _2);} -+v4i64 __lasx_xvseq_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvseq_d(_1, _2);} -+v32i8 __lasx_xvseqi_b(v32i8 _1){return __builtin_lasx_xvseqi_b(_1, 1);} -+v16i16 __lasx_xvseqi_h(v16i16 _1){return __builtin_lasx_xvseqi_h(_1, 1);} -+v8i32 __lasx_xvseqi_w(v8i32 _1){return __builtin_lasx_xvseqi_w(_1, 1);} -+v4i64 __lasx_xvseqi_d(v4i64 _1){return __builtin_lasx_xvseqi_d(_1, 1);} -+v32i8 
__lasx_xvslt_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvslt_b(_1, _2);} -+v16i16 __lasx_xvslt_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvslt_h(_1, _2);} -+v8i32 __lasx_xvslt_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvslt_w(_1, _2);} -+v4i64 __lasx_xvslt_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvslt_d(_1, _2);} -+v32i8 __lasx_xvslti_b(v32i8 _1){return __builtin_lasx_xvslti_b(_1, 1);} -+v16i16 __lasx_xvslti_h(v16i16 _1){return __builtin_lasx_xvslti_h(_1, 1);} -+v8i32 __lasx_xvslti_w(v8i32 _1){return __builtin_lasx_xvslti_w(_1, 1);} -+v4i64 __lasx_xvslti_d(v4i64 _1){return __builtin_lasx_xvslti_d(_1, 1);} -+v32i8 __lasx_xvslt_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvslt_bu(_1, _2);} -+v16i16 __lasx_xvslt_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvslt_hu(_1, _2);} -+v8i32 __lasx_xvslt_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvslt_wu(_1, _2);} -+v4i64 __lasx_xvslt_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvslt_du(_1, _2);} -+v32i8 __lasx_xvslti_bu(v32u8 _1){return __builtin_lasx_xvslti_bu(_1, 1);} -+v16i16 __lasx_xvslti_hu(v16u16 _1){return __builtin_lasx_xvslti_hu(_1, 1);} -+v8i32 __lasx_xvslti_wu(v8u32 _1){return __builtin_lasx_xvslti_wu(_1, 1);} -+v4i64 __lasx_xvslti_du(v4u64 _1){return __builtin_lasx_xvslti_du(_1, 1);} -+v32i8 __lasx_xvsle_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsle_b(_1, _2);} -+v16i16 __lasx_xvsle_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsle_h(_1, _2);} -+v8i32 __lasx_xvsle_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsle_w(_1, _2);} -+v4i64 __lasx_xvsle_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsle_d(_1, _2);} -+v32i8 __lasx_xvslei_b(v32i8 _1){return __builtin_lasx_xvslei_b(_1, 1);} -+v16i16 __lasx_xvslei_h(v16i16 _1){return __builtin_lasx_xvslei_h(_1, 1);} -+v8i32 __lasx_xvslei_w(v8i32 _1){return __builtin_lasx_xvslei_w(_1, 1);} -+v4i64 __lasx_xvslei_d(v4i64 _1){return __builtin_lasx_xvslei_d(_1, 1);} -+v32i8 __lasx_xvsle_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsle_bu(_1, _2);} -+v16i16 __lasx_xvsle_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsle_hu(_1, _2);} -+v8i32 __lasx_xvsle_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsle_wu(_1, _2);} -+v4i64 __lasx_xvsle_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsle_du(_1, _2);} -+v32i8 __lasx_xvslei_bu(v32u8 _1){return __builtin_lasx_xvslei_bu(_1, 1);} -+v16i16 __lasx_xvslei_hu(v16u16 _1){return __builtin_lasx_xvslei_hu(_1, 1);} -+v8i32 __lasx_xvslei_wu(v8u32 _1){return __builtin_lasx_xvslei_wu(_1, 1);} -+v4i64 __lasx_xvslei_du(v4u64 _1){return __builtin_lasx_xvslei_du(_1, 1);} -+v32i8 __lasx_xvsat_b(v32i8 _1){return __builtin_lasx_xvsat_b(_1, 1);} -+v16i16 __lasx_xvsat_h(v16i16 _1){return __builtin_lasx_xvsat_h(_1, 1);} -+v8i32 __lasx_xvsat_w(v8i32 _1){return __builtin_lasx_xvsat_w(_1, 1);} -+v4i64 __lasx_xvsat_d(v4i64 _1){return __builtin_lasx_xvsat_d(_1, 1);} -+v32u8 __lasx_xvsat_bu(v32u8 _1){return __builtin_lasx_xvsat_bu(_1, 1);} -+v16u16 __lasx_xvsat_hu(v16u16 _1){return __builtin_lasx_xvsat_hu(_1, 1);} -+v8u32 __lasx_xvsat_wu(v8u32 _1){return __builtin_lasx_xvsat_wu(_1, 1);} -+v4u64 __lasx_xvsat_du(v4u64 _1){return __builtin_lasx_xvsat_du(_1, 1);} -+v32i8 __lasx_xvadda_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvadda_b(_1, _2);} -+v16i16 __lasx_xvadda_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvadda_h(_1, _2);} -+v8i32 __lasx_xvadda_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvadda_w(_1, _2);} -+v4i64 __lasx_xvadda_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadda_d(_1, _2);} -+v32i8 __lasx_xvsadd_b(v32i8 _1, v32i8 _2){return 
__builtin_lasx_xvsadd_b(_1, _2);} -+v16i16 __lasx_xvsadd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsadd_h(_1, _2);} -+v8i32 __lasx_xvsadd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsadd_w(_1, _2);} -+v4i64 __lasx_xvsadd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsadd_d(_1, _2);} -+v32u8 __lasx_xvsadd_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsadd_bu(_1, _2);} -+v16u16 __lasx_xvsadd_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsadd_hu(_1, _2);} -+v8u32 __lasx_xvsadd_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsadd_wu(_1, _2);} -+v4u64 __lasx_xvsadd_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsadd_du(_1, _2);} -+v32i8 __lasx_xvavg_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvavg_b(_1, _2);} -+v16i16 __lasx_xvavg_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvavg_h(_1, _2);} -+v8i32 __lasx_xvavg_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvavg_w(_1, _2);} -+v4i64 __lasx_xvavg_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvavg_d(_1, _2);} -+v32u8 __lasx_xvavg_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvavg_bu(_1, _2);} -+v16u16 __lasx_xvavg_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvavg_hu(_1, _2);} -+v8u32 __lasx_xvavg_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvavg_wu(_1, _2);} -+v4u64 __lasx_xvavg_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvavg_du(_1, _2);} -+v32i8 __lasx_xvavgr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvavgr_b(_1, _2);} -+v16i16 __lasx_xvavgr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvavgr_h(_1, _2);} -+v8i32 __lasx_xvavgr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvavgr_w(_1, _2);} -+v4i64 __lasx_xvavgr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvavgr_d(_1, _2);} -+v32u8 __lasx_xvavgr_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvavgr_bu(_1, _2);} -+v16u16 __lasx_xvavgr_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvavgr_hu(_1, _2);} -+v8u32 __lasx_xvavgr_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvavgr_wu(_1, _2);} -+v4u64 __lasx_xvavgr_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvavgr_du(_1, _2);} -+v32i8 __lasx_xvssub_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssub_b(_1, _2);} -+v16i16 __lasx_xvssub_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssub_h(_1, _2);} -+v8i32 __lasx_xvssub_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssub_w(_1, _2);} -+v4i64 __lasx_xvssub_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssub_d(_1, _2);} -+v32u8 __lasx_xvssub_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvssub_bu(_1, _2);} -+v16u16 __lasx_xvssub_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssub_hu(_1, _2);} -+v8u32 __lasx_xvssub_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssub_wu(_1, _2);} -+v4u64 __lasx_xvssub_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssub_du(_1, _2);} -+v32i8 __lasx_xvabsd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvabsd_b(_1, _2);} -+v16i16 __lasx_xvabsd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvabsd_h(_1, _2);} -+v8i32 __lasx_xvabsd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvabsd_w(_1, _2);} -+v4i64 __lasx_xvabsd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvabsd_d(_1, _2);} -+v32u8 __lasx_xvabsd_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvabsd_bu(_1, _2);} -+v16u16 __lasx_xvabsd_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvabsd_hu(_1, _2);} -+v8u32 __lasx_xvabsd_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvabsd_wu(_1, _2);} -+v4u64 __lasx_xvabsd_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvabsd_du(_1, _2);} -+v32i8 __lasx_xvmul_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmul_b(_1, _2);} -+v16i16 __lasx_xvmul_h(v16i16 _1, v16i16 _2){return 
__builtin_lasx_xvmul_h(_1, _2);}
-+v8i32 __lasx_xvmul_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmul_w(_1, _2);}
-+v4i64 __lasx_xvmul_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmul_d(_1, _2);}
-+v32i8 __lasx_xvmadd_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmadd_b(_1, _2, _3);}
-+v16i16 __lasx_xvmadd_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmadd_h(_1, _2, _3);}
-+v8i32 __lasx_xvmadd_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmadd_w(_1, _2, _3);}
-+v4i64 __lasx_xvmadd_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmadd_d(_1, _2, _3);}
-+v32i8 __lasx_xvmsub_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmsub_b(_1, _2, _3);}
-+v16i16 __lasx_xvmsub_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmsub_h(_1, _2, _3);}
-+v8i32 __lasx_xvmsub_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmsub_w(_1, _2, _3);}
-+v4i64 __lasx_xvmsub_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmsub_d(_1, _2, _3);}
-+v32i8 __lasx_xvdiv_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvdiv_b(_1, _2);}
-+v16i16 __lasx_xvdiv_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvdiv_h(_1, _2);}
-+v8i32 __lasx_xvdiv_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvdiv_w(_1, _2);}
-+v4i64 __lasx_xvdiv_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvdiv_d(_1, _2);}
-+v32u8 __lasx_xvdiv_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvdiv_bu(_1, _2);}
-+v16u16 __lasx_xvdiv_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvdiv_hu(_1, _2);}
-+v8u32 __lasx_xvdiv_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvdiv_wu(_1, _2);}
-+v4u64 __lasx_xvdiv_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvdiv_du(_1, _2);}
-+v16i16 __lasx_xvhaddw_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvhaddw_h_b(_1, _2);}
-+v8i32 __lasx_xvhaddw_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvhaddw_w_h(_1, _2);}
-+v4i64 __lasx_xvhaddw_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvhaddw_d_w(_1, _2);}
-+v16u16 __lasx_xvhaddw_hu_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvhaddw_hu_bu(_1, _2);}
-+v8u32 __lasx_xvhaddw_wu_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvhaddw_wu_hu(_1, _2);}
-+v4u64 __lasx_xvhaddw_du_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvhaddw_du_wu(_1, _2);}
-+v16i16 __lasx_xvhsubw_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvhsubw_h_b(_1, _2);}
-+v8i32 __lasx_xvhsubw_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvhsubw_w_h(_1, _2);}
-+v4i64 __lasx_xvhsubw_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvhsubw_d_w(_1, _2);}
-+v16i16 __lasx_xvhsubw_hu_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvhsubw_hu_bu(_1, _2);}
-+v8i32 __lasx_xvhsubw_wu_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvhsubw_wu_hu(_1, _2);}
-+v4i64 __lasx_xvhsubw_du_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvhsubw_du_wu(_1, _2);}
-+v32i8 __lasx_xvmod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmod_b(_1, _2);}
-+v16i16 __lasx_xvmod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmod_h(_1, _2);}
-+v8i32 __lasx_xvmod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmod_w(_1, _2);}
-+v4i64 __lasx_xvmod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmod_d(_1, _2);}
-+v32u8 __lasx_xvmod_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmod_bu(_1, _2);}
-+v16u16 __lasx_xvmod_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmod_hu(_1, _2);}
-+v8u32 __lasx_xvmod_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmod_wu(_1, _2);}
-+v4u64 __lasx_xvmod_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmod_du(_1, _2);}
-+v32i8 __lasx_xvrepl128vei_b(v32i8 _1){return __builtin_lasx_xvrepl128vei_b(_1, 1);}
-+v16i16 __lasx_xvrepl128vei_h(v16i16 _1){return __builtin_lasx_xvrepl128vei_h(_1, 1);}
-+v8i32 __lasx_xvrepl128vei_w(v8i32 _1){return __builtin_lasx_xvrepl128vei_w(_1, 1);}
-+v4i64 __lasx_xvrepl128vei_d(v4i64 _1){return __builtin_lasx_xvrepl128vei_d(_1, 1);}
-+v32i8 __lasx_xvpickev_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpickev_b(_1, _2);}
-+v16i16 __lasx_xvpickev_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpickev_h(_1, _2);}
-+v8i32 __lasx_xvpickev_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpickev_w(_1, _2);}
-+v4i64 __lasx_xvpickev_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpickev_d(_1, _2);}
-+v32i8 __lasx_xvpickod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpickod_b(_1, _2);}
-+v16i16 __lasx_xvpickod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpickod_h(_1, _2);}
-+v8i32 __lasx_xvpickod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpickod_w(_1, _2);}
-+v4i64 __lasx_xvpickod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpickod_d(_1, _2);}
-+v32i8 __lasx_xvilvh_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvilvh_b(_1, _2);}
-+v16i16 __lasx_xvilvh_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvilvh_h(_1, _2);}
-+v8i32 __lasx_xvilvh_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvilvh_w(_1, _2);}
-+v4i64 __lasx_xvilvh_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvilvh_d(_1, _2);}
-+v32i8 __lasx_xvilvl_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvilvl_b(_1, _2);}
-+v16i16 __lasx_xvilvl_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvilvl_h(_1, _2);}
-+v8i32 __lasx_xvilvl_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvilvl_w(_1, _2);}
-+v4i64 __lasx_xvilvl_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvilvl_d(_1, _2);}
-+v32i8 __lasx_xvpackev_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpackev_b(_1, _2);}
-+v16i16 __lasx_xvpackev_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpackev_h(_1, _2);}
-+v8i32 __lasx_xvpackev_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpackev_w(_1, _2);}
-+v4i64 __lasx_xvpackev_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpackev_d(_1, _2);}
-+v32i8 __lasx_xvpackod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpackod_b(_1, _2);}
-+v16i16 __lasx_xvpackod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpackod_h(_1, _2);}
-+v8i32 __lasx_xvpackod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpackod_w(_1, _2);}
-+v4i64 __lasx_xvpackod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpackod_d(_1, _2);}
-+v32i8 __lasx_xvshuf_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvshuf_b(_1, _2, _3);}
-+v16i16 __lasx_xvshuf_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvshuf_h(_1, _2, _3);}
-+v8i32 __lasx_xvshuf_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvshuf_w(_1, _2, _3);}
-+v4i64 __lasx_xvshuf_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvshuf_d(_1, _2, _3);}
-+v32u8 __lasx_xvand_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvand_v(_1, _2);}
-+v32u8 __lasx_xvandi_b(v32u8 _1){return __builtin_lasx_xvandi_b(_1, 1);}
-+v32u8 __lasx_xvor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvor_v(_1, _2);}
-+v32u8 __lasx_xvori_b(v32u8 _1){return __builtin_lasx_xvori_b(_1, 1);}
-+v32u8 __lasx_xvnor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvnor_v(_1, _2);}
-+v32u8 __lasx_xvnori_b(v32u8 _1){return __builtin_lasx_xvnori_b(_1, 1);}
-+v32u8 __lasx_xvxor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvxor_v(_1, _2);}
-+v32u8 __lasx_xvxori_b(v32u8 _1){return __builtin_lasx_xvxori_b(_1, 1);}
-+v32u8 __lasx_xvbitsel_v(v32u8 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvbitsel_v(_1, _2, _3);}
-+v32u8 __lasx_xvbitseli_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitseli_b(_1, _2, 1);}
-+v32i8 __lasx_xvshuf4i_b(v32i8 _1){return __builtin_lasx_xvshuf4i_b(_1, 1);}
-+v16i16 __lasx_xvshuf4i_h(v16i16 _1){return __builtin_lasx_xvshuf4i_h(_1, 1);}
-+v8i32 __lasx_xvshuf4i_w(v8i32 _1){return __builtin_lasx_xvshuf4i_w(_1, 1);}
-+v32i8 __lasx_xvreplgr2vr_b(int _1){return __builtin_lasx_xvreplgr2vr_b(_1);}
-+v16i16 __lasx_xvreplgr2vr_h(int _1){return __builtin_lasx_xvreplgr2vr_h(_1);}
-+v8i32 __lasx_xvreplgr2vr_w(int _1){return __builtin_lasx_xvreplgr2vr_w(_1);}
-+v4i64 __lasx_xvreplgr2vr_d(int _1){return __builtin_lasx_xvreplgr2vr_d(_1);}
-+v32i8 __lasx_xvpcnt_b(v32i8 _1){return __builtin_lasx_xvpcnt_b(_1);}
-+v16i16 __lasx_xvpcnt_h(v16i16 _1){return __builtin_lasx_xvpcnt_h(_1);}
-+v8i32 __lasx_xvpcnt_w(v8i32 _1){return __builtin_lasx_xvpcnt_w(_1);}
-+v4i64 __lasx_xvpcnt_d(v4i64 _1){return __builtin_lasx_xvpcnt_d(_1);}
-+v32i8 __lasx_xvclo_b(v32i8 _1){return __builtin_lasx_xvclo_b(_1);}
-+v16i16 __lasx_xvclo_h(v16i16 _1){return __builtin_lasx_xvclo_h(_1);}
-+v8i32 __lasx_xvclo_w(v8i32 _1){return __builtin_lasx_xvclo_w(_1);}
-+v4i64 __lasx_xvclo_d(v4i64 _1){return __builtin_lasx_xvclo_d(_1);}
-+v32i8 __lasx_xvclz_b(v32i8 _1){return __builtin_lasx_xvclz_b(_1);}
-+v16i16 __lasx_xvclz_h(v16i16 _1){return __builtin_lasx_xvclz_h(_1);}
-+v8i32 __lasx_xvclz_w(v8i32 _1){return __builtin_lasx_xvclz_w(_1);}
-+v4i64 __lasx_xvclz_d(v4i64 _1){return __builtin_lasx_xvclz_d(_1);}
-+v8f32 __lasx_xvfadd_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfadd_s(_1, _2);}
-+v4f64 __lasx_xvfadd_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfadd_d(_1, _2);}
-+v8f32 __lasx_xvfsub_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfsub_s(_1, _2);}
-+v4f64 __lasx_xvfsub_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfsub_d(_1, _2);}
-+v8f32 __lasx_xvfmul_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmul_s(_1, _2);}
-+v4f64 __lasx_xvfmul_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmul_d(_1, _2);}
-+v8f32 __lasx_xvfdiv_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfdiv_s(_1, _2);}
-+v4f64 __lasx_xvfdiv_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfdiv_d(_1, _2);}
-+v16i16 __lasx_xvfcvt_h_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcvt_h_s(_1, _2);}
-+v8f32 __lasx_xvfcvt_s_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcvt_s_d(_1, _2);}
-+v8f32 __lasx_xvfmin_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmin_s(_1, _2);}
-+v4f64 __lasx_xvfmin_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmin_d(_1, _2);}
-+v8f32 __lasx_xvfmina_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmina_s(_1, _2);}
-+v4f64 __lasx_xvfmina_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmina_d(_1, _2);}
-+v8f32 __lasx_xvfmax_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmax_s(_1, _2);}
-+v4f64 __lasx_xvfmax_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmax_d(_1, _2);}
-+v8f32 __lasx_xvfmaxa_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmaxa_s(_1, _2);}
-+v4f64 __lasx_xvfmaxa_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmaxa_d(_1, _2);}
-+v8i32 __lasx_xvfclass_s(v8f32 _1){return __builtin_lasx_xvfclass_s(_1);}
-+v4i64 __lasx_xvfclass_d(v4f64 _1){return __builtin_lasx_xvfclass_d(_1);}
-+v8f32 __lasx_xvfsqrt_s(v8f32 _1){return __builtin_lasx_xvfsqrt_s(_1);}
-+v4f64 __lasx_xvfsqrt_d(v4f64 _1){return __builtin_lasx_xvfsqrt_d(_1);}
-+v8f32 __lasx_xvfrecip_s(v8f32 _1){return __builtin_lasx_xvfrecip_s(_1);}
-+v4f64 __lasx_xvfrecip_d(v4f64 _1){return __builtin_lasx_xvfrecip_d(_1);}
-+v8f32 __lasx_xvfrint_s(v8f32 _1){return __builtin_lasx_xvfrint_s(_1);}
-+v4f64 __lasx_xvfrint_d(v4f64 _1){return __builtin_lasx_xvfrint_d(_1);}
-+v8f32 __lasx_xvfrsqrt_s(v8f32 _1){return __builtin_lasx_xvfrsqrt_s(_1);}
-+v4f64 __lasx_xvfrsqrt_d(v4f64 _1){return __builtin_lasx_xvfrsqrt_d(_1);}
-+v8f32 __lasx_xvflogb_s(v8f32 _1){return __builtin_lasx_xvflogb_s(_1);}
-+v4f64 __lasx_xvflogb_d(v4f64 _1){return __builtin_lasx_xvflogb_d(_1);}
-+v8f32 __lasx_xvfcvth_s_h(v16i16 _1){return __builtin_lasx_xvfcvth_s_h(_1);}
-+v4f64 __lasx_xvfcvth_d_s(v8f32 _1){return __builtin_lasx_xvfcvth_d_s(_1);}
-+v8f32 __lasx_xvfcvtl_s_h(v16i16 _1){return __builtin_lasx_xvfcvtl_s_h(_1);}
-+v4f64 __lasx_xvfcvtl_d_s(v8f32 _1){return __builtin_lasx_xvfcvtl_d_s(_1);}
-+v8i32 __lasx_xvftint_w_s(v8f32 _1){return __builtin_lasx_xvftint_w_s(_1);}
-+v4i64 __lasx_xvftint_l_d(v4f64 _1){return __builtin_lasx_xvftint_l_d(_1);}
-+v8u32 __lasx_xvftint_wu_s(v8f32 _1){return __builtin_lasx_xvftint_wu_s(_1);}
-+v4u64 __lasx_xvftint_lu_d(v4f64 _1){return __builtin_lasx_xvftint_lu_d(_1);}
-+v8i32 __lasx_xvftintrz_w_s(v8f32 _1){return __builtin_lasx_xvftintrz_w_s(_1);}
-+v4i64 __lasx_xvftintrz_l_d(v4f64 _1){return __builtin_lasx_xvftintrz_l_d(_1);}
-+v8u32 __lasx_xvftintrz_wu_s(v8f32 _1){return __builtin_lasx_xvftintrz_wu_s(_1);}
-+v4u64 __lasx_xvftintrz_lu_d(v4f64 _1){return __builtin_lasx_xvftintrz_lu_d(_1);}
-+v8f32 __lasx_xvffint_s_w(v8i32 _1){return __builtin_lasx_xvffint_s_w(_1);}
-+v4f64 __lasx_xvffint_d_l(v4i64 _1){return __builtin_lasx_xvffint_d_l(_1);}
-+v8f32 __lasx_xvffint_s_wu(v8u32 _1){return __builtin_lasx_xvffint_s_wu(_1);}
-+v4f64 __lasx_xvffint_d_lu(v4u64 _1){return __builtin_lasx_xvffint_d_lu(_1);}
-+v32i8 __lasx_xvreplve_b(v32i8 _1, int _2){return __builtin_lasx_xvreplve_b(_1, _2);}
-+v16i16 __lasx_xvreplve_h(v16i16 _1, int _2){return __builtin_lasx_xvreplve_h(_1, _2);}
-+v8i32 __lasx_xvreplve_w(v8i32 _1, int _2){return __builtin_lasx_xvreplve_w(_1, _2);}
-+v4i64 __lasx_xvreplve_d(v4i64 _1, int _2){return __builtin_lasx_xvreplve_d(_1, _2);}
-+v8i32 __lasx_xvpermi_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpermi_w(_1, _2, 1);}
-+v32u8 __lasx_xvandn_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvandn_v(_1, _2);}
-+v32i8 __lasx_xvneg_b(v32i8 _1){return __builtin_lasx_xvneg_b(_1);}
-+v16i16 __lasx_xvneg_h(v16i16 _1){return __builtin_lasx_xvneg_h(_1);}
-+v8i32 __lasx_xvneg_w(v8i32 _1){return __builtin_lasx_xvneg_w(_1);}
-+v4i64 __lasx_xvneg_d(v4i64 _1){return __builtin_lasx_xvneg_d(_1);}
-+v32i8 __lasx_xvmuh_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmuh_b(_1, _2);}
-+v16i16 __lasx_xvmuh_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmuh_h(_1, _2);}
-+v8i32 __lasx_xvmuh_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmuh_w(_1, _2);}
-+v4i64 __lasx_xvmuh_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmuh_d(_1, _2);}
-+v32u8 __lasx_xvmuh_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmuh_bu(_1, _2);}
-+v16u16 __lasx_xvmuh_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmuh_hu(_1, _2);}
-+v8u32 __lasx_xvmuh_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmuh_wu(_1, _2);}
-+v4u64 __lasx_xvmuh_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmuh_du(_1, _2);}
-+v16i16 __lasx_xvsllwil_h_b(v32i8 _1){return __builtin_lasx_xvsllwil_h_b(_1, 1);}
-+v8i32 __lasx_xvsllwil_w_h(v16i16 _1){return __builtin_lasx_xvsllwil_w_h(_1, 1);}
-+v4i64 __lasx_xvsllwil_d_w(v8i32 _1){return __builtin_lasx_xvsllwil_d_w(_1, 1);}
-+v16u16 __lasx_xvsllwil_hu_bu(v32u8 _1){return __builtin_lasx_xvsllwil_hu_bu(_1, 1);}
-+v8u32 __lasx_xvsllwil_wu_hu(v16u16 _1){return __builtin_lasx_xvsllwil_wu_hu(_1, 1);}
-+v4u64 __lasx_xvsllwil_du_wu(v8u32 _1){return __builtin_lasx_xvsllwil_du_wu(_1, 1);}
-+v32i8 __lasx_xvsran_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsran_b_h(_1, _2);}
-+v16i16 __lasx_xvsran_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsran_h_w(_1, _2);}
-+v8i32 __lasx_xvsran_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsran_w_d(_1, _2);}
-+v32i8 __lasx_xvssran_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssran_b_h(_1, _2);}
-+v16i16 __lasx_xvssran_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssran_h_w(_1, _2);}
-+v8i32 __lasx_xvssran_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssran_w_d(_1, _2);}
-+v32u8 __lasx_xvssran_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssran_bu_h(_1, _2);}
-+v16u16 __lasx_xvssran_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssran_hu_w(_1, _2);}
-+v8u32 __lasx_xvssran_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssran_wu_d(_1, _2);}
-+v32i8 __lasx_xvsrarn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrarn_b_h(_1, _2);}
-+v16i16 __lasx_xvsrarn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrarn_h_w(_1, _2);}
-+v8i32 __lasx_xvsrarn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrarn_w_d(_1, _2);}
-+v32i8 __lasx_xvssrarn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrarn_b_h(_1, _2);}
-+v16i16 __lasx_xvssrarn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrarn_h_w(_1, _2);}
-+v8i32 __lasx_xvssrarn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrarn_w_d(_1, _2);}
-+v32u8 __lasx_xvssrarn_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrarn_bu_h(_1, _2);}
-+v16u16 __lasx_xvssrarn_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrarn_hu_w(_1, _2);}
-+v8u32 __lasx_xvssrarn_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrarn_wu_d(_1, _2);}
-+v32i8 __lasx_xvsrln_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrln_b_h(_1, _2);}
-+v16i16 __lasx_xvsrln_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrln_h_w(_1, _2);}
-+v8i32 __lasx_xvsrln_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrln_w_d(_1, _2);}
-+v32u8 __lasx_xvssrln_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrln_bu_h(_1, _2);}
-+v16u16 __lasx_xvssrln_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrln_hu_w(_1, _2);}
-+v8u32 __lasx_xvssrln_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrln_wu_d(_1, _2);}
-+v32i8 __lasx_xvsrlrn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlrn_b_h(_1, _2);}
-+v16i16 __lasx_xvsrlrn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlrn_h_w(_1, _2);}
-+v8i32 __lasx_xvsrlrn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlrn_w_d(_1, _2);}
-+v32u8 __lasx_xvssrlrn_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrlrn_bu_h(_1, _2);}
-+v16u16 __lasx_xvssrlrn_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrlrn_hu_w(_1, _2);}
-+v8u32 __lasx_xvssrlrn_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrlrn_wu_d(_1, _2);}
-+v32i8 __lasx_xvfrstpi_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvfrstpi_b(_1, _2, 1);}
-+v16i16 __lasx_xvfrstpi_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvfrstpi_h(_1, _2, 1);}
-+v32i8 __lasx_xvfrstp_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvfrstp_b(_1, _2, _3);}
-+v16i16 __lasx_xvfrstp_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvfrstp_h(_1, _2, _3);}
-+v4i64 __lasx_xvshuf4i_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvshuf4i_d(_1, _2, 1);}
-+v32i8 __lasx_xvbsrl_v(v32i8 _1){return __builtin_lasx_xvbsrl_v(_1, 1);}
-+v32i8 __lasx_xvbsll_v(v32i8 _1){return __builtin_lasx_xvbsll_v(_1, 1);}
-+v32i8 __lasx_xvextrins_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvextrins_b(_1, _2, 1);}
-+v16i16 __lasx_xvextrins_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvextrins_h(_1, _2, 1);}
-+v8i32 __lasx_xvextrins_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvextrins_w(_1, _2, 1);}
-+v4i64 __lasx_xvextrins_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvextrins_d(_1, _2, 1);}
-+v32i8 __lasx_xvmskltz_b(v32i8 _1){return __builtin_lasx_xvmskltz_b(_1);}
-+v16i16 __lasx_xvmskltz_h(v16i16 _1){return __builtin_lasx_xvmskltz_h(_1);}
-+v8i32 __lasx_xvmskltz_w(v8i32 _1){return __builtin_lasx_xvmskltz_w(_1);}
-+v4i64 __lasx_xvmskltz_d(v4i64 _1){return __builtin_lasx_xvmskltz_d(_1);}
-+v32i8 __lasx_xvsigncov_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsigncov_b(_1, _2);}
-+v16i16 __lasx_xvsigncov_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsigncov_h(_1, _2);}
-+v8i32 __lasx_xvsigncov_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsigncov_w(_1, _2);}
-+v4i64 __lasx_xvsigncov_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsigncov_d(_1, _2);}
-+v8f32 __lasx_xvfmadd_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfmadd_s(_1, _2, _3);}
-+v4f64 __lasx_xvfmadd_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfmadd_d(_1, _2, _3);}
-+v8f32 __lasx_xvfmsub_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfmsub_s(_1, _2, _3);}
-+v4f64 __lasx_xvfmsub_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfmsub_d(_1, _2, _3);}
-+v8f32 __lasx_xvfnmadd_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfnmadd_s(_1, _2, _3);}
-+v4f64 __lasx_xvfnmadd_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfnmadd_d(_1, _2, _3);}
-+v8f32 __lasx_xvfnmsub_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfnmsub_s(_1, _2, _3);}
-+v4f64 __lasx_xvfnmsub_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfnmsub_d(_1, _2, _3);}
-+v8i32 __lasx_xvftintrne_w_s(v8f32 _1){return __builtin_lasx_xvftintrne_w_s(_1);}
-+v4i64 __lasx_xvftintrne_l_d(v4f64 _1){return __builtin_lasx_xvftintrne_l_d(_1);}
-+v8i32 __lasx_xvftintrp_w_s(v8f32 _1){return __builtin_lasx_xvftintrp_w_s(_1);}
-+v4i64 __lasx_xvftintrp_l_d(v4f64 _1){return __builtin_lasx_xvftintrp_l_d(_1);}
-+v8i32 __lasx_xvftintrm_w_s(v8f32 _1){return __builtin_lasx_xvftintrm_w_s(_1);}
-+v4i64 __lasx_xvftintrm_l_d(v4f64 _1){return __builtin_lasx_xvftintrm_l_d(_1);}
-+v8i32 __lasx_xvftint_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftint_w_d(_1, _2);}
-+v8f32 __lasx_xvffint_s_l(v4i64 _1, v4i64 _2){return __builtin_lasx_xvffint_s_l(_1, _2);}
-+v8i32 __lasx_xvftintrz_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrz_w_d(_1, _2);}
-+v8i32 __lasx_xvftintrp_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrp_w_d(_1, _2);}
-+v8i32 __lasx_xvftintrm_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrm_w_d(_1, _2);}
-+v8i32 __lasx_xvftintrne_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrne_w_d(_1, _2);}
-+v4i64 __lasx_xvftinth_l_s(v8f32 _1){return __builtin_lasx_xvftinth_l_s(_1);}
-+v4i64 __lasx_xvftintl_l_s(v8f32 _1){return __builtin_lasx_xvftintl_l_s(_1);}
-+v4f64 __lasx_xvffinth_d_w(v8i32 _1){return __builtin_lasx_xvffinth_d_w(_1);}
-+v4f64 __lasx_xvffintl_d_w(v8i32 _1){return __builtin_lasx_xvffintl_d_w(_1);}
-+v4i64 __lasx_xvftintrzh_l_s(v8f32 _1){return __builtin_lasx_xvftintrzh_l_s(_1);}
-+v4i64 __lasx_xvftintrzl_l_s(v8f32 _1){return __builtin_lasx_xvftintrzl_l_s(_1);}
-+v4i64 __lasx_xvftintrph_l_s(v8f32 _1){return __builtin_lasx_xvftintrph_l_s(_1);}
-+v4i64 __lasx_xvftintrpl_l_s(v8f32 _1){return __builtin_lasx_xvftintrpl_l_s(_1);}
-+v4i64 __lasx_xvftintrmh_l_s(v8f32 _1){return __builtin_lasx_xvftintrmh_l_s(_1);}
-+v4i64 __lasx_xvftintrml_l_s(v8f32 _1){return __builtin_lasx_xvftintrml_l_s(_1);}
-+v4i64 __lasx_xvftintrneh_l_s(v8f32 _1){return __builtin_lasx_xvftintrneh_l_s(_1);}
-+v4i64 __lasx_xvftintrnel_l_s(v8f32 _1){return __builtin_lasx_xvftintrnel_l_s(_1);}
-+v8i32 __lasx_xvfrintrne_s(v8f32 _1){return __builtin_lasx_xvfrintrne_s(_1);}
-+v4i64 __lasx_xvfrintrne_d(v4f64 _1){return __builtin_lasx_xvfrintrne_d(_1);}
-+v8i32 __lasx_xvfrintrz_s(v8f32 _1){return __builtin_lasx_xvfrintrz_s(_1);}
-+v4i64 __lasx_xvfrintrz_d(v4f64 _1){return __builtin_lasx_xvfrintrz_d(_1);}
-+v8i32 __lasx_xvfrintrp_s(v8f32 _1){return __builtin_lasx_xvfrintrp_s(_1);}
-+v4i64 __lasx_xvfrintrp_d(v4f64 _1){return __builtin_lasx_xvfrintrp_d(_1);}
-+v8i32 __lasx_xvfrintrm_s(v8f32 _1){return __builtin_lasx_xvfrintrm_s(_1);}
-+v4i64 __lasx_xvfrintrm_d(v4f64 _1){return __builtin_lasx_xvfrintrm_d(_1);}
-+v32i8 __lasx_xvld(void * _1){return __builtin_lasx_xvld(_1, 1);}
-+void __lasx_xvst(v32i8 _1, void * _2){return __builtin_lasx_xvst(_1, _2, 1);}
-+void __lasx_xvstelm_b(v32i8 _1, void * _2){return __builtin_lasx_xvstelm_b(_1, _2, 1, 1);}
-+void __lasx_xvstelm_h(v16i16 _1, void * _2){return __builtin_lasx_xvstelm_h(_1, _2, 2, 1);}
-+void __lasx_xvstelm_w(v8i32 _1, void * _2){return __builtin_lasx_xvstelm_w(_1, _2, 4, 1);}
-+void __lasx_xvstelm_d(v4i64 _1, void * _2){return __builtin_lasx_xvstelm_d(_1, _2, 8, 1);}
-+v8i32 __lasx_xvinsve0_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvinsve0_w(_1, _2, 1);}
-+v4i64 __lasx_xvinsve0_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvinsve0_d(_1, _2, 1);}
-+v8i32 __lasx_xvpickve_w(v8i32 _1){return __builtin_lasx_xvpickve_w(_1, 1);}
-+v4i64 __lasx_xvpickve_d(v4i64 _1){return __builtin_lasx_xvpickve_d(_1, 1);}
-+v32i8 __lasx_xvssrlrn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlrn_b_h(_1, _2);}
-+v16i16 __lasx_xvssrlrn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlrn_h_w(_1, _2);}
-+v8i32 __lasx_xvssrlrn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlrn_w_d(_1, _2);}
-+v32i8 __lasx_xvssrln_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrln_b_h(_1, _2);}
-+v16i16 __lasx_xvssrln_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrln_h_w(_1, _2);}
-+v8i32 __lasx_xvssrln_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrln_w_d(_1, _2);}
-+v32i8 __lasx_xvorn_v(v32i8 _1, v32i8 _2){return __builtin_lasx_xvorn_v(_1, _2);}
-+v4i64 __lasx_xvldi(){return __builtin_lasx_xvldi(1);}
-+v32i8 __lasx_xvldx(void * _1){return __builtin_lasx_xvldx(_1, 1);}
-+void __lasx_xvstx(v32i8 _1, void * _2){return __builtin_lasx_xvstx(_1, _2, 1);}
-+v4u64 __lasx_xvextl_qu_du(v4u64 _1){return __builtin_lasx_xvextl_qu_du(_1);}
-+v8i32 __lasx_xvinsgr2vr_w(v8i32 _1){return __builtin_lasx_xvinsgr2vr_w(_1, 1, 1);}
-+v4i64 __lasx_xvinsgr2vr_d(v4i64 _1){return __builtin_lasx_xvinsgr2vr_d(_1, 1, 1);}
-+v32i8 __lasx_xvreplve0_b(v32i8 _1){return __builtin_lasx_xvreplve0_b(_1);}
-+v16i16 __lasx_xvreplve0_h(v16i16 _1){return __builtin_lasx_xvreplve0_h(_1);}
-+v8i32 __lasx_xvreplve0_w(v8i32 _1){return __builtin_lasx_xvreplve0_w(_1);}
-+v4i64 __lasx_xvreplve0_d(v4i64 _1){return __builtin_lasx_xvreplve0_d(_1);}
-+v32i8 __lasx_xvreplve0_q(v32i8 _1){return __builtin_lasx_xvreplve0_q(_1);}
-+v16i16 __lasx_vext2xv_h_b(v32i8 _1){return __builtin_lasx_vext2xv_h_b(_1);}
-+v8i32 __lasx_vext2xv_w_h(v16i16 _1){return __builtin_lasx_vext2xv_w_h(_1);}
-+v4i64 __lasx_vext2xv_d_w(v8i32 _1){return __builtin_lasx_vext2xv_d_w(_1);}
-+v8i32 __lasx_vext2xv_w_b(v32i8 _1){return __builtin_lasx_vext2xv_w_b(_1);}
-+v4i64 __lasx_vext2xv_d_h(v16i16 _1){return __builtin_lasx_vext2xv_d_h(_1);}
-+v4i64 __lasx_vext2xv_d_b(v32i8 _1){return __builtin_lasx_vext2xv_d_b(_1);}
-+v16i16 __lasx_vext2xv_hu_bu(v32i8 _1){return __builtin_lasx_vext2xv_hu_bu(_1);}
-+v8i32 __lasx_vext2xv_wu_hu(v16i16 _1){return __builtin_lasx_vext2xv_wu_hu(_1);}
-+v4i64 __lasx_vext2xv_du_wu(v8i32 _1){return __builtin_lasx_vext2xv_du_wu(_1);}
-+v8i32 __lasx_vext2xv_wu_bu(v32i8 _1){return __builtin_lasx_vext2xv_wu_bu(_1);}
-+v4i64 __lasx_vext2xv_du_hu(v16i16 _1){return __builtin_lasx_vext2xv_du_hu(_1);}
-+v4i64 __lasx_vext2xv_du_bu(v32i8 _1){return __builtin_lasx_vext2xv_du_bu(_1);}
-+v32i8 __lasx_xvpermi_q(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpermi_q(_1, _2, 1);}
-+v4i64 __lasx_xvpermi_d(v4i64 _1){return __builtin_lasx_xvpermi_d(_1, 1);}
-+v8i32 __lasx_xvperm_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvperm_w(_1, _2);}
-+v32i8 __lasx_xvldrepl_b(void * _1){return __builtin_lasx_xvldrepl_b(_1, 1);}
-+v16i16 __lasx_xvldrepl_h(void * _1){return __builtin_lasx_xvldrepl_h(_1, 2);}
-+v8i32 __lasx_xvldrepl_w(void * _1){return __builtin_lasx_xvldrepl_w(_1, 4);}
-+v4i64 __lasx_xvldrepl_d(void * _1){return __builtin_lasx_xvldrepl_d(_1, 8);}
-+int __lasx_xvpickve2gr_w(v8i32 _1){return __builtin_lasx_xvpickve2gr_w(_1, 1);}
-+unsigned int __lasx_xvpickve2gr_wu(v8i32 _1){return __builtin_lasx_xvpickve2gr_wu(_1, 1);}
-+long __lasx_xvpickve2gr_d(v4i64 _1){return __builtin_lasx_xvpickve2gr_d(_1, 1);}
-+unsigned long int __lasx_xvpickve2gr_du(v4i64 _1){return __builtin_lasx_xvpickve2gr_du(_1, 1);}
-+v4i64 __lasx_xvaddwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvaddwev_q_d(_1, _2);}
-+v4i64 __lasx_xvaddwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvaddwev_d_w(_1, _2);}
-+v8i32 __lasx_xvaddwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvaddwev_w_h(_1, _2);}
-+v16i16 __lasx_xvaddwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvaddwev_h_b(_1, _2);}
-+v4i64 __lasx_xvaddwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvaddwev_q_du(_1, _2);}
-+v4i64 __lasx_xvaddwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvaddwev_d_wu(_1, _2);}
-+v8i32 __lasx_xvaddwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvaddwev_w_hu(_1, _2);}
-+v16i16 __lasx_xvaddwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvaddwev_h_bu(_1, _2);}
-+v4i64 __lasx_xvsubwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsubwev_q_d(_1, _2);}
-+v4i64 __lasx_xvsubwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsubwev_d_w(_1, _2);}
-+v8i32 __lasx_xvsubwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsubwev_w_h(_1, _2);}
-+v16i16 __lasx_xvsubwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsubwev_h_b(_1, _2);}
-+v4i64 __lasx_xvsubwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsubwev_q_du(_1, _2);}
-+v4i64 __lasx_xvsubwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsubwev_d_wu(_1, _2);}
-+v8i32 __lasx_xvsubwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsubwev_w_hu(_1, _2);}
-+v16i16 __lasx_xvsubwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsubwev_h_bu(_1, _2);}
-+v4i64 __lasx_xvmulwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmulwev_q_d(_1, _2);}
-+v4i64 __lasx_xvmulwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmulwev_d_w(_1, _2);}
-+v8i32 __lasx_xvmulwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmulwev_w_h(_1, _2);}
-+v16i16 __lasx_xvmulwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmulwev_h_b(_1, _2);}
-+v4i64 __lasx_xvmulwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmulwev_q_du(_1, _2);}
-+v4i64 __lasx_xvmulwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmulwev_d_wu(_1, _2);}
-+v8i32 __lasx_xvmulwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmulwev_w_hu(_1, _2);}
-+v16i16 __lasx_xvmulwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmulwev_h_bu(_1, _2);}
-+v4i64 __lasx_xvaddwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvaddwod_q_d(_1, _2);}
-+v4i64 __lasx_xvaddwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvaddwod_d_w(_1, _2);}
-+v8i32 __lasx_xvaddwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvaddwod_w_h(_1, _2);}
-+v16i16 __lasx_xvaddwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvaddwod_h_b(_1, _2);}
-+v4i64 __lasx_xvaddwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvaddwod_q_du(_1, _2);}
-+v4i64 __lasx_xvaddwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvaddwod_d_wu(_1, _2);}
-+v8i32 __lasx_xvaddwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvaddwod_w_hu(_1, _2);}
-+v16i16 __lasx_xvaddwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvaddwod_h_bu(_1, _2);}
-+v4i64 __lasx_xvsubwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsubwod_q_d(_1, _2);}
-+v4i64 __lasx_xvsubwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsubwod_d_w(_1, _2);}
-+v8i32 __lasx_xvsubwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsubwod_w_h(_1, _2);}
-+v16i16 __lasx_xvsubwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsubwod_h_b(_1, _2);}
-+v4i64 __lasx_xvsubwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsubwod_q_du(_1, _2);}
-+v4i64 __lasx_xvsubwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsubwod_d_wu(_1, _2);}
-+v8i32 __lasx_xvsubwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsubwod_w_hu(_1, _2);}
-+v16i16 __lasx_xvsubwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsubwod_h_bu(_1, _2);}
-+v4i64 __lasx_xvmulwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmulwod_q_d(_1, _2);}
-+v4i64 __lasx_xvmulwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmulwod_d_w(_1, _2);}
-+v8i32 __lasx_xvmulwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmulwod_w_h(_1, _2);}
-+v16i16 __lasx_xvmulwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmulwod_h_b(_1, _2);}
-+v4i64 __lasx_xvmulwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmulwod_q_du(_1, _2);}
-+v4i64 __lasx_xvmulwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmulwod_d_wu(_1, _2);}
-+v8i32 __lasx_xvmulwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmulwod_w_hu(_1, _2);}
-+v16i16 __lasx_xvmulwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmulwod_h_bu(_1, _2);}
-+v4i64 __lasx_xvaddwev_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvaddwev_d_wu_w(_1, _2);}
-+v8i32 __lasx_xvaddwev_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvaddwev_w_hu_h(_1, _2);}
-+v16i16 __lasx_xvaddwev_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvaddwev_h_bu_b(_1, _2);}
-+v4i64 __lasx_xvmulwev_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvmulwev_d_wu_w(_1, _2);}
-+v8i32 __lasx_xvmulwev_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvmulwev_w_hu_h(_1, _2);}
-+v16i16 __lasx_xvmulwev_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvmulwev_h_bu_b(_1, _2);}
-+v4i64 __lasx_xvaddwod_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvaddwod_d_wu_w(_1, _2);}
-+v8i32 __lasx_xvaddwod_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvaddwod_w_hu_h(_1, _2);}
-+v16i16 __lasx_xvaddwod_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvaddwod_h_bu_b(_1, _2);}
-+v4i64 __lasx_xvmulwod_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvmulwod_d_wu_w(_1, _2);}
-+v8i32 __lasx_xvmulwod_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvmulwod_w_hu_h(_1, _2);}
-+v16i16 __lasx_xvmulwod_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvmulwod_h_bu_b(_1, _2);}
-+v4i64 __lasx_xvhaddw_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvhaddw_q_d(_1, _2);}
-+v4u64 __lasx_xvhaddw_qu_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvhaddw_qu_du(_1, _2);}
-+v4i64 __lasx_xvhsubw_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvhsubw_q_d(_1, _2);}
-+v4u64 __lasx_xvhsubw_qu_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvhsubw_qu_du(_1, _2);}
-+v4i64 __lasx_xvmaddwev_q_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmaddwev_q_d(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwev_d_w(v4i64 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmaddwev_d_w(_1, _2, _3);}
-+v8i32 __lasx_xvmaddwev_w_h(v8i32 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmaddwev_w_h(_1, _2, _3);}
-+v16i16 __lasx_xvmaddwev_h_b(v16i16 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmaddwev_h_b(_1, _2, _3);}
-+v4u64 __lasx_xvmaddwev_q_du(v4u64 _1, v4u64 _2, v4u64 _3){return __builtin_lasx_xvmaddwev_q_du(_1, _2, _3);}
-+v4u64 __lasx_xvmaddwev_d_wu(v4u64 _1, v8u32 _2, v8u32 _3){return __builtin_lasx_xvmaddwev_d_wu(_1, _2, _3);}
-+v8u32 __lasx_xvmaddwev_w_hu(v8u32 _1, v16u16 _2, v16u16 _3){return __builtin_lasx_xvmaddwev_w_hu(_1, _2, _3);}
-+v16u16 __lasx_xvmaddwev_h_bu(v16u16 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvmaddwev_h_bu(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwod_q_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmaddwod_q_d(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwod_d_w(v4i64 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmaddwod_d_w(_1, _2, _3);}
-+v8i32 __lasx_xvmaddwod_w_h(v8i32 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmaddwod_w_h(_1, _2, _3);}
-+v16i16 __lasx_xvmaddwod_h_b(v16i16 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmaddwod_h_b(_1, _2, _3);}
-+v4u64 __lasx_xvmaddwod_q_du(v4u64 _1, v4u64 _2, v4u64 _3){return __builtin_lasx_xvmaddwod_q_du(_1, _2, _3);}
-+v4u64 __lasx_xvmaddwod_d_wu(v4u64 _1, v8u32 _2, v8u32 _3){return __builtin_lasx_xvmaddwod_d_wu(_1, _2, _3);}
-+v8u32 __lasx_xvmaddwod_w_hu(v8u32 _1, v16u16 _2, v16u16 _3){return __builtin_lasx_xvmaddwod_w_hu(_1, _2, _3);}
-+v16u16 __lasx_xvmaddwod_h_bu(v16u16 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvmaddwod_h_bu(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwev_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3){return __builtin_lasx_xvmaddwev_q_du_d(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwev_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3){return __builtin_lasx_xvmaddwev_d_wu_w(_1, _2, _3);}
-+v8i32 __lasx_xvmaddwev_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3){return __builtin_lasx_xvmaddwev_w_hu_h(_1, _2, _3);}
-+v16i16 __lasx_xvmaddwev_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3){return __builtin_lasx_xvmaddwev_h_bu_b(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwod_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3){return __builtin_lasx_xvmaddwod_q_du_d(_1, _2, _3);}
-+v4i64 __lasx_xvmaddwod_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3){return __builtin_lasx_xvmaddwod_d_wu_w(_1, _2, _3);}
-+v8i32 __lasx_xvmaddwod_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3){return __builtin_lasx_xvmaddwod_w_hu_h(_1, _2, _3);}
-+v16i16 __lasx_xvmaddwod_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3){return __builtin_lasx_xvmaddwod_h_bu_b(_1, _2, _3);}
-+v32i8 __lasx_xvrotr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvrotr_b(_1, _2);}
-+v16i16 __lasx_xvrotr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvrotr_h(_1, _2);}
-+v8i32 __lasx_xvrotr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvrotr_w(_1, _2);}
-+v4i64 __lasx_xvrotr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvrotr_d(_1, _2);}
-+v4i64 __lasx_xvadd_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadd_q(_1, _2);}
-+v4i64 __lasx_xvsub_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsub_q(_1, _2);}
-+v4i64 __lasx_xvaddwev_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvaddwev_q_du_d(_1, _2);}
-+v4i64 __lasx_xvaddwod_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvaddwod_q_du_d(_1, _2);}
-+v4i64 __lasx_xvmulwev_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvmulwev_q_du_d(_1, _2);}
-+v4i64 __lasx_xvmulwod_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvmulwod_q_du_d(_1, _2);}
-+v32i8 __lasx_xvmskgez_b(v32i8 _1){return __builtin_lasx_xvmskgez_b(_1);}
-+v32i8 __lasx_xvmsknz_b(v32i8 _1){return __builtin_lasx_xvmsknz_b(_1);}
-+v16i16 __lasx_xvexth_h_b(v32i8 _1){return __builtin_lasx_xvexth_h_b(_1);}
-+v8i32 __lasx_xvexth_w_h(v16i16 _1){return __builtin_lasx_xvexth_w_h(_1);}
-+v4i64 __lasx_xvexth_d_w(v8i32 _1){return __builtin_lasx_xvexth_d_w(_1);}
-+v4i64 __lasx_xvexth_q_d(v4i64 _1){return __builtin_lasx_xvexth_q_d(_1);}
-+v16u16 __lasx_xvexth_hu_bu(v32u8 _1){return __builtin_lasx_xvexth_hu_bu(_1);}
-+v8u32 __lasx_xvexth_wu_hu(v16u16 _1){return __builtin_lasx_xvexth_wu_hu(_1);}
-+v4u64 __lasx_xvexth_du_wu(v8u32 _1){return __builtin_lasx_xvexth_du_wu(_1);}
-+v4u64 __lasx_xvexth_qu_du(v4u64 _1){return __builtin_lasx_xvexth_qu_du(_1);}
-+v32i8 __lasx_xvrotri_b(v32i8 _1){return __builtin_lasx_xvrotri_b(_1, 1);}
-+v16i16 __lasx_xvrotri_h(v16i16 _1){return __builtin_lasx_xvrotri_h(_1, 1);}
-+v8i32 __lasx_xvrotri_w(v8i32 _1){return __builtin_lasx_xvrotri_w(_1, 1);}
-+v4i64 __lasx_xvrotri_d(v4i64 _1){return __builtin_lasx_xvrotri_d(_1, 1);}
-+v4i64 __lasx_xvextl_q_d(v4i64 _1){return __builtin_lasx_xvextl_q_d(_1);}
-+v32i8 __lasx_xvsrlni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlni_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvsrlni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlni_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvsrlni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlni_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvsrlni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlni_d_q(_1, _2, 1);}
-+v32i8 __lasx_xvsrlrni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlrni_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvsrlrni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlrni_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvsrlrni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlrni_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvsrlrni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlrni_d_q(_1, _2, 1);}
-+v32i8 __lasx_xvssrlni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrlni_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvssrlni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlni_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvssrlni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlni_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvssrlni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlni_d_q(_1, _2, 1);}
-+v32u8 __lasx_xvssrlni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrlni_bu_h(_1, _2, 1);}
-+v16u16 __lasx_xvssrlni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrlni_hu_w(_1, _2, 1);}
-+v8u32 __lasx_xvssrlni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrlni_wu_d(_1, _2, 1);}
-+v4u64 __lasx_xvssrlni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrlni_du_q(_1, _2, 1);}
-+v32i8 __lasx_xvssrlrni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrlrni_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvssrlrni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlrni_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvssrlrni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlrni_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvssrlrni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlrni_d_q(_1, _2, 1);}
-+v32u8 __lasx_xvssrlrni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrlrni_bu_h(_1, _2, 1);}
-+v16u16 __lasx_xvssrlrni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrlrni_hu_w(_1, _2, 1);}
-+v8u32 __lasx_xvssrlrni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrlrni_wu_d(_1, _2, 1);}
-+v4u64 __lasx_xvssrlrni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrlrni_du_q(_1, _2, 1);}
-+v32i8 __lasx_xvsrani_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrani_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvsrani_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrani_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvsrani_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrani_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvsrani_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrani_d_q(_1, _2, 1);}
-+v32i8 __lasx_xvsrarni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrarni_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvsrarni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrarni_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvsrarni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrarni_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvsrarni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrarni_d_q(_1, _2, 1);}
-+v32i8 __lasx_xvssrani_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrani_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvssrani_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrani_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvssrani_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrani_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvssrani_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrani_d_q(_1, _2, 1);}
-+v32u8 __lasx_xvssrani_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrani_bu_h(_1, _2, 1);}
-+v16u16 __lasx_xvssrani_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrani_hu_w(_1, _2, 1);}
-+v8u32 __lasx_xvssrani_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrani_wu_d(_1, _2, 1);}
-+v4u64 __lasx_xvssrani_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrani_du_q(_1, _2, 1);}
-+v32i8 __lasx_xvssrarni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrarni_b_h(_1, _2, 1);}
-+v16i16 __lasx_xvssrarni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrarni_h_w(_1, _2, 1);}
-+v8i32 __lasx_xvssrarni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrarni_w_d(_1, _2, 1);}
-+v4i64 __lasx_xvssrarni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrarni_d_q(_1, _2, 1);}
-+v32u8 __lasx_xvssrarni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrarni_bu_h(_1, _2, 1);}
-+v16u16 __lasx_xvssrarni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrarni_hu_w(_1, _2, 1);}
-+v8u32 __lasx_xvssrarni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrarni_wu_d(_1, _2, 1);}
-+v4u64 __lasx_xvssrarni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrarni_du_q(_1, _2, 1);}
-+int __lasx_xbnz_b(v32u8 _1){return __builtin_lasx_xbnz_b(_1);}
-+int __lasx_xbnz_d(v4u64 _1){return __builtin_lasx_xbnz_d(_1);}
-+int __lasx_xbnz_h(v16u16 _1){return __builtin_lasx_xbnz_h(_1);}
-+int __lasx_xbnz_v(v32u8 _1){return __builtin_lasx_xbnz_v(_1);}
-+int __lasx_xbnz_w(v8u32 _1){return __builtin_lasx_xbnz_w(_1);}
-+int __lasx_xbz_b(v32u8 _1){return __builtin_lasx_xbz_b(_1);}
-+int __lasx_xbz_d(v4u64 _1){return __builtin_lasx_xbz_d(_1);}
-+int __lasx_xbz_h(v16u16 _1){return __builtin_lasx_xbz_h(_1);}
-+int __lasx_xbz_v(v32u8 _1){return __builtin_lasx_xbz_v(_1);}
-+int __lasx_xbz_w(v8u32 _1){return __builtin_lasx_xbz_w(_1);}
-+v4i64 __lasx_xvfcmp_caf_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_caf_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_caf_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_caf_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_ceq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_ceq_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_ceq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_ceq_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cle_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cle_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cle_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cle_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_clt_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_clt_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_clt_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_clt_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cne_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cne_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cne_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cne_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cor_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cor_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cor_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cor_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cueq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cueq_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cueq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cueq_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cule_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cule_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cule_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cule_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cult_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cult_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cult_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cult_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_cun_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cun_d(_1, _2);}
-+v4i64 __lasx_xvfcmp_cune_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cune_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_cune_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cune_s(_1, _2);}
-+v8i32 __lasx_xvfcmp_cun_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cun_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_saf_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_saf_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_saf_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_saf_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_seq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_seq_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_seq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_seq_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sle_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sle_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sle_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sle_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_slt_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_slt_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_slt_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_slt_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sne_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sne_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sne_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sne_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sor_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sor_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sor_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sor_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sueq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sueq_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sueq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sueq_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sule_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sule_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sule_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sule_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sult_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sult_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sult_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sult_s(_1, _2);}
-+v4i64 __lasx_xvfcmp_sun_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sun_d(_1, _2);}
-+v4i64 __lasx_xvfcmp_sune_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sune_d(_1, _2);}
-+v8i32 __lasx_xvfcmp_sune_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sune_s(_1, _2);}
-+v8i32 __lasx_xvfcmp_sun_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sun_s(_1, _2);}
-+v4f64 __lasx_xvpickve_d_f(v4f64 _1){return __builtin_lasx_xvpickve_d_f(_1, 1);}
-+v8f32 __lasx_xvpickve_w_f(v8f32 _1){return __builtin_lasx_xvpickve_w_f(_1, 1);}
-+v32i8 __lasx_xvrepli_b(){return __builtin_lasx_xvrepli_b(1);}
-+v4i64 __lasx_xvrepli_d(){return __builtin_lasx_xvrepli_d(1);}
-+v16i16 __lasx_xvrepli_h(){return __builtin_lasx_xvrepli_h(1);}
-+v8i32 __lasx_xvrepli_w(){return __builtin_lasx_xvrepli_w(1);}
-diff --git a/gcc/testsuite/gcc.target/loongarch/loongarch.exp b/gcc/testsuite/gcc.target/loongarch/loongarch.exp
-new file mode 100644
-index 000000000..be9543d38
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/loongarch/loongarch.exp
-@@ -0,0 +1,40 @@
-+# Copyright (C) 2017-2018 Free Software Foundation, Inc.
-+
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 3 of the License, or
-+# (at your option) any later version.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with GCC; see the file COPYING3.  If not see
-+# <http://www.gnu.org/licenses/>.
-+
-+# GCC testsuite that uses the `dg.exp' driver.
-+
-+# Exit immediately if this isn't a Loongarch target.
-+if ![istarget loongarch*-*-*] then {
-+ return
-+}
-+
-+# Load support procs.
-+load_lib gcc-dg.exp
-+
-+# If a testcase doesn't have special options, use these.
-+global DEFAULT_CFLAGS
-+if ![info exists DEFAULT_CFLAGS] then {
-+ set DEFAULT_CFLAGS " "
-+}
-+
-+# Initialize `dg'.
-+dg-init
-+
-+# Main loop.
-+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] \
-+ "" $DEFAULT_CFLAGS
-+# All done.
-+dg-finish
-diff --git a/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c b/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c
-new file mode 100644
-index 000000000..296869dc5
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c
-@@ -0,0 +1,1461 @@
-+/* Test builtins for LOONGARCH LSX ASE instructions */
-+/* { dg-do compile } */
-+/* { dg-options "-mlsx" } */
-+/* { dg-final { scan-assembler-times "lsx_vsll_b:.*vsll\\.b.*lsx_vsll_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsll_h:.*vsll\\.h.*lsx_vsll_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsll_w:.*vsll\\.w.*lsx_vsll_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsll_d:.*vsll\\.d.*lsx_vsll_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslli_b:.*vslli\\.b.*lsx_vslli_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslli_h:.*vslli\\.h.*lsx_vslli_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslli_w:.*vslli\\.w.*lsx_vslli_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslli_d:.*vslli\\.d.*lsx_vslli_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsra_b:.*vsra\\.b.*lsx_vsra_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsra_h:.*vsra\\.h.*lsx_vsra_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsra_w:.*vsra\\.w.*lsx_vsra_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsra_d:.*vsra\\.d.*lsx_vsra_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrai_b:.*vsrai\\.b.*lsx_vsrai_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrai_h:.*vsrai\\.h.*lsx_vsrai_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrai_w:.*vsrai\\.w.*lsx_vsrai_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrai_d:.*vsrai\\.d.*lsx_vsrai_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrar_b:.*vsrar\\.b.*lsx_vsrar_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrar_h:.*vsrar\\.h.*lsx_vsrar_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrar_w:.*vsrar\\.w.*lsx_vsrar_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrar_d:.*vsrar\\.d.*lsx_vsrar_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrari_b:.*vsrari\\.b.*lsx_vsrari_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrari_h:.*vsrari\\.h.*lsx_vsrari_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrari_w:.*vsrari\\.w.*lsx_vsrari_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrari_d:.*vsrari\\.d.*lsx_vsrari_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrl_b:.*vsrl\\.b.*lsx_vsrl_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrl_h:.*vsrl\\.h.*lsx_vsrl_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrl_w:.*vsrl\\.w.*lsx_vsrl_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrl_d:.*vsrl\\.d.*lsx_vsrl_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrli_b:.*vsrli\\.b.*lsx_vsrli_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrli_h:.*vsrli\\.h.*lsx_vsrli_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrli_w:.*vsrli\\.w.*lsx_vsrli_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrli_d:.*vsrli\\.d.*lsx_vsrli_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlr_b:.*vsrlr\\.b.*lsx_vsrlr_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlr_h:.*vsrlr\\.h.*lsx_vsrlr_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlr_w:.*vsrlr\\.w.*lsx_vsrlr_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlr_d:.*vsrlr\\.d.*lsx_vsrlr_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlri_b:.*vsrlri\\.b.*lsx_vsrlri_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlri_h:.*vsrlri\\.h.*lsx_vsrlri_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlri_w:.*vsrlri\\.w.*lsx_vsrlri_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsrlri_d:.*vsrlri\\.d.*lsx_vsrlri_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclr_b:.*vbitclr\\.b.*lsx_vbitclr_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclr_h:.*vbitclr\\.h.*lsx_vbitclr_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclr_w:.*vbitclr\\.w.*lsx_vbitclr_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclr_d:.*vbitclr\\.d.*lsx_vbitclr_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclri_b:.*vbitclri\\.b.*lsx_vbitclri_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclri_h:.*vbitclri\\.h.*lsx_vbitclri_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclri_w:.*vbitclri\\.w.*lsx_vbitclri_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitclri_d:.*vbitclri\\.d.*lsx_vbitclri_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitset_b:.*vbitset\\.b.*lsx_vbitset_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitset_h:.*vbitset\\.h.*lsx_vbitset_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitset_w:.*vbitset\\.w.*lsx_vbitset_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitset_d:.*vbitset\\.d.*lsx_vbitset_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitseti_b:.*vbitseti\\.b.*lsx_vbitseti_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitseti_h:.*vbitseti\\.h.*lsx_vbitseti_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitseti_w:.*vbitseti\\.w.*lsx_vbitseti_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitseti_d:.*vbitseti\\.d.*lsx_vbitseti_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrev_b:.*vbitrev\\.b.*lsx_vbitrev_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrev_h:.*vbitrev\\.h.*lsx_vbitrev_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrev_w:.*vbitrev\\.w.*lsx_vbitrev_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrev_d:.*vbitrev\\.d.*lsx_vbitrev_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrevi_b:.*vbitrevi\\.b.*lsx_vbitrevi_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrevi_h:.*vbitrevi\\.h.*lsx_vbitrevi_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrevi_w:.*vbitrevi\\.w.*lsx_vbitrevi_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vbitrevi_d:.*vbitrevi\\.d.*lsx_vbitrevi_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadd_b:.*vadd\\.b.*lsx_vadd_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadd_h:.*vadd\\.h.*lsx_vadd_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadd_w:.*vadd\\.w.*lsx_vadd_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadd_d:.*vadd\\.d.*lsx_vadd_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vaddi_bu:.*vaddi\\.bu.*lsx_vaddi_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vaddi_hu:.*vaddi\\.hu.*lsx_vaddi_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vaddi_wu:.*vaddi\\.wu.*lsx_vaddi_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vaddi_du:.*vaddi\\.du.*lsx_vaddi_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsub_b:.*vsub\\.b.*lsx_vsub_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsub_h:.*vsub\\.h.*lsx_vsub_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsub_w:.*vsub\\.w.*lsx_vsub_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsub_d:.*vsub\\.d.*lsx_vsub_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsubi_bu:.*vsubi\\.bu.*lsx_vsubi_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsubi_hu:.*vsubi\\.hu.*lsx_vsubi_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsubi_wu:.*vsubi\\.wu.*lsx_vsubi_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsubi_du:.*vsubi\\.du.*lsx_vsubi_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_b:.*vmax\\.b.*lsx_vmax_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_h:.*vmax\\.h.*lsx_vmax_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_w:.*vmax\\.w.*lsx_vmax_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_d:.*vmax\\.d.*lsx_vmax_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_b:.*vmaxi\\.b.*lsx_vmaxi_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_h:.*vmaxi\\.h.*lsx_vmaxi_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_w:.*vmaxi\\.w.*lsx_vmaxi_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_d:.*vmaxi\\.d.*lsx_vmaxi_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_bu:.*vmax\\.bu.*lsx_vmax_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_hu:.*vmax\\.hu.*lsx_vmax_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_wu:.*vmax\\.wu.*lsx_vmax_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmax_du:.*vmax\\.du.*lsx_vmax_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_bu:.*vmaxi\\.bu.*lsx_vmaxi_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_hu:.*vmaxi\\.hu.*lsx_vmaxi_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_wu:.*vmaxi\\.wu.*lsx_vmaxi_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmaxi_du:.*vmaxi\\.du.*lsx_vmaxi_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_b:.*vmin\\.b.*lsx_vmin_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_h:.*vmin\\.h.*lsx_vmin_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_w:.*vmin\\.w.*lsx_vmin_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_d:.*vmin\\.d.*lsx_vmin_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_b:.*vmini\\.b.*lsx_vmini_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_h:.*vmini\\.h.*lsx_vmini_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_w:.*vmini\\.w.*lsx_vmini_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_d:.*vmini\\.d.*lsx_vmini_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_bu:.*vmin\\.bu.*lsx_vmin_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_hu:.*vmin\\.hu.*lsx_vmin_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_wu:.*vmin\\.wu.*lsx_vmin_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmin_du:.*vmin\\.du.*lsx_vmin_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_bu:.*vmini\\.bu.*lsx_vmini_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_hu:.*vmini\\.hu.*lsx_vmini_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_wu:.*vmini\\.wu.*lsx_vmini_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmini_du:.*vmini\\.du.*lsx_vmini_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseq_b:.*vseq\\.b.*lsx_vseq_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseq_h:.*vseq\\.h.*lsx_vseq_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseq_w:.*vseq\\.w.*lsx_vseq_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseq_d:.*vseq\\.d.*lsx_vseq_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseqi_b:.*vseqi\\.b.*lsx_vseqi_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseqi_h:.*vseqi\\.h.*lsx_vseqi_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseqi_w:.*vseqi\\.w.*lsx_vseqi_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vseqi_d:.*vseqi\\.d.*lsx_vseqi_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_b:.*vslti\\.b.*lsx_vslti_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_b:.*vslt\\.b.*lsx_vslt_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_h:.*vslt\\.h.*lsx_vslt_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_w:.*vslt\\.w.*lsx_vslt_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_d:.*vslt\\.d.*lsx_vslt_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_h:.*vslti\\.h.*lsx_vslti_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_w:.*vslti\\.w.*lsx_vslti_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_d:.*vslti\\.d.*lsx_vslti_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_bu:.*vslt\\.bu.*lsx_vslt_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_hu:.*vslt\\.hu.*lsx_vslt_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_wu:.*vslt\\.wu.*lsx_vslt_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslt_du:.*vslt\\.du.*lsx_vslt_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_bu:.*vslti\\.bu.*lsx_vslti_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_hu:.*vslti\\.hu.*lsx_vslti_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_wu:.*vslti\\.wu.*lsx_vslti_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslti_du:.*vslti\\.du.*lsx_vslti_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_b:.*vsle\\.b.*lsx_vsle_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_h:.*vsle\\.h.*lsx_vsle_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_w:.*vsle\\.w.*lsx_vsle_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_d:.*vsle\\.d.*lsx_vsle_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_b:.*vslei\\.b.*lsx_vslei_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_h:.*vslei\\.h.*lsx_vslei_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_w:.*vslei\\.w.*lsx_vslei_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_d:.*vslei\\.d.*lsx_vslei_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_bu:.*vsle\\.bu.*lsx_vsle_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_hu:.*vsle\\.hu.*lsx_vsle_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_wu:.*vsle\\.wu.*lsx_vsle_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsle_du:.*vsle\\.du.*lsx_vsle_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_bu:.*vslei\\.bu.*lsx_vslei_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_hu:.*vslei\\.hu.*lsx_vslei_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_wu:.*vslei\\.wu.*lsx_vslei_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vslei_du:.*vslei\\.du.*lsx_vslei_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_b:.*vsat\\.b.*lsx_vsat_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_h:.*vsat\\.h.*lsx_vsat_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_w:.*vsat\\.w.*lsx_vsat_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_d:.*vsat\\.d.*lsx_vsat_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_bu:.*vsat\\.bu.*lsx_vsat_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_hu:.*vsat\\.hu.*lsx_vsat_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_wu:.*vsat\\.wu.*lsx_vsat_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsat_du:.*vsat\\.du.*lsx_vsat_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadda_b:.*vadda\\.b.*lsx_vadda_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadda_h:.*vadda\\.h.*lsx_vadda_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadda_w:.*vadda\\.w.*lsx_vadda_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vadda_d:.*vadda\\.d.*lsx_vadda_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_b:.*vsadd\\.b.*lsx_vsadd_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_h:.*vsadd\\.h.*lsx_vsadd_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_w:.*vsadd\\.w.*lsx_vsadd_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_d:.*vsadd\\.d.*lsx_vsadd_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_bu:.*vsadd\\.bu.*lsx_vsadd_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_hu:.*vsadd\\.hu.*lsx_vsadd_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_wu:.*vsadd\\.wu.*lsx_vsadd_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vsadd_du:.*vsadd\\.du.*lsx_vsadd_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_b:.*vavg\\.b.*lsx_vavg_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_h:.*vavg\\.h.*lsx_vavg_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_w:.*vavg\\.w.*lsx_vavg_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_d:.*vavg\\.d.*lsx_vavg_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_bu:.*vavg\\.bu.*lsx_vavg_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_hu:.*vavg\\.hu.*lsx_vavg_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_wu:.*vavg\\.wu.*lsx_vavg_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavg_du:.*vavg\\.du.*lsx_vavg_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_b:.*vavgr\\.b.*lsx_vavgr_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_h:.*vavgr\\.h.*lsx_vavgr_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_w:.*vavgr\\.w.*lsx_vavgr_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_d:.*vavgr\\.d.*lsx_vavgr_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_bu:.*vavgr\\.bu.*lsx_vavgr_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_hu:.*vavgr\\.hu.*lsx_vavgr_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_wu:.*vavgr\\.wu.*lsx_vavgr_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vavgr_du:.*vavgr\\.du.*lsx_vavgr_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_b:.*vssub\\.b.*lsx_vssub_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_h:.*vssub\\.h.*lsx_vssub_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_w:.*vssub\\.w.*lsx_vssub_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_d:.*vssub\\.d.*lsx_vssub_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_bu:.*vssub\\.bu.*lsx_vssub_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_hu:.*vssub\\.hu.*lsx_vssub_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_wu:.*vssub\\.wu.*lsx_vssub_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vssub_du:.*vssub\\.du.*lsx_vssub_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_b:.*vabsd\\.b.*lsx_vabsd_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_h:.*vabsd\\.h.*lsx_vabsd_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_w:.*vabsd\\.w.*lsx_vabsd_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_d:.*vabsd\\.d.*lsx_vabsd_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_bu:.*vabsd\\.bu.*lsx_vabsd_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_hu:.*vabsd\\.hu.*lsx_vabsd_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_wu:.*vabsd\\.wu.*lsx_vabsd_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vabsd_du:.*vabsd\\.du.*lsx_vabsd_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmul_b:.*vmul\\.b.*lsx_vmul_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmul_h:.*vmul\\.h.*lsx_vmul_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmul_w:.*vmul\\.w.*lsx_vmul_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmul_d:.*vmul\\.d.*lsx_vmul_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmadd_b:.*vmadd\\.b.*lsx_vmadd_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmadd_h:.*vmadd\\.h.*lsx_vmadd_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmadd_w:.*vmadd\\.w.*lsx_vmadd_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmadd_d:.*vmadd\\.d.*lsx_vmadd_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmsub_b:.*vmsub\\.b.*lsx_vmsub_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmsub_h:.*vmsub\\.h.*lsx_vmsub_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmsub_w:.*vmsub\\.w.*lsx_vmsub_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmsub_d:.*vmsub\\.d.*lsx_vmsub_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_b:.*vdiv\\.b.*lsx_vdiv_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_h:.*vdiv\\.h.*lsx_vdiv_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_w:.*vdiv\\.w.*lsx_vdiv_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_d:.*vdiv\\.d.*lsx_vdiv_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_bu:.*vdiv\\.bu.*lsx_vdiv_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_hu:.*vdiv\\.hu.*lsx_vdiv_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_wu:.*vdiv\\.wu.*lsx_vdiv_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vdiv_du:.*vdiv\\.du.*lsx_vdiv_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhaddw_h_b:.*vhaddw\\.h\\.b.*lsx_vhaddw_h_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhaddw_w_h:.*vhaddw\\.w\\.h.*lsx_vhaddw_w_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhaddw_d_w:.*vhaddw\\.d\\.w.*lsx_vhaddw_d_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhaddw_hu_bu:.*vhaddw\\.hu\\.bu.*lsx_vhaddw_hu_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhaddw_wu_hu:.*vhaddw\\.wu\\.hu.*lsx_vhaddw_wu_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhaddw_du_wu:.*vhaddw\\.du\\.wu.*lsx_vhaddw_du_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhsubw_h_b:.*vhsubw\\.h\\.b.*lsx_vhsubw_h_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhsubw_w_h:.*vhsubw\\.w\\.h.*lsx_vhsubw_w_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhsubw_d_w:.*vhsubw\\.d\\.w.*lsx_vhsubw_d_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhsubw_hu_bu:.*vhsubw\\.hu\\.bu.*lsx_vhsubw_hu_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhsubw_wu_hu:.*vhsubw\\.wu\\.hu.*lsx_vhsubw_wu_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vhsubw_du_wu:.*vhsubw\\.du\\.wu.*lsx_vhsubw_du_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_b:.*vmod\\.b.*lsx_vmod_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_h:.*vmod\\.h.*lsx_vmod_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_w:.*vmod\\.w.*lsx_vmod_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_d:.*vmod\\.d.*lsx_vmod_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_bu:.*vmod\\.bu.*lsx_vmod_bu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_hu:.*vmod\\.hu.*lsx_vmod_hu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_wu:.*vmod\\.wu.*lsx_vmod_wu" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vmod_du:.*vmod\\.du.*lsx_vmod_du" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplve_b:.*vreplve\\.b.*lsx_vreplve_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplve_h:.*vreplve\\.h.*lsx_vreplve_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplve_w:.*vreplve\\.w.*lsx_vreplve_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplve_d:.*vreplve\\.d.*lsx_vreplve_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplvei_b:.*vreplvei\\.b.*lsx_vreplvei_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplvei_h:.*vreplvei\\.h.*lsx_vreplvei_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplvei_w:.*vreplvei\\.w.*lsx_vreplvei_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vreplvei_d:.*vreplvei\\.d.*lsx_vreplvei_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickev_b:.*vpickev\\.b.*lsx_vpickev_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickev_h:.*vpickev\\.h.*lsx_vpickev_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickev_w:.*vpickev\\.w.*lsx_vpickev_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickev_d:.*vilvl\\.d.*lsx_vpickev_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickod_b:.*vpickod\\.b.*lsx_vpickod_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickod_h:.*vpickod\\.h.*lsx_vpickod_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickod_w:.*vpickod\\.w.*lsx_vpickod_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpickod_d:.*vilvh\\.d.*lsx_vpickod_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvh_b:.*vilvh\\.b.*lsx_vilvh_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvh_h:.*vilvh\\.h.*lsx_vilvh_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvh_w:.*vilvh\\.w.*lsx_vilvh_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvh_d:.*vilvh\\.d.*lsx_vilvh_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvl_b:.*vilvl\\.b.*lsx_vilvl_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvl_h:.*vilvl\\.h.*lsx_vilvl_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvl_w:.*vilvl\\.w.*lsx_vilvl_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vilvl_d:.*vilvl\\.d.*lsx_vilvl_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackev_b:.*vpackev\\.b.*lsx_vpackev_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackev_h:.*vpackev\\.h.*lsx_vpackev_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackev_w:.*vpackev\\.w.*lsx_vpackev_w" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackev_d:.*vilvl\\.d.*lsx_vpackev_d" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackod_b:.*vpackod\\.b.*lsx_vpackod_b" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackod_h:.*vpackod\\.h.*lsx_vpackod_h" 1 } } */
-+/* { dg-final { scan-assembler-times "lsx_vpackod_w:.*vpackod\\.w.*lsx_vpackod_w" 1 } } */
-+/* { dg-final { scan-assembler-times
"lsx_vpackod_d:.*vilvh\\.d.*lsx_vpackod_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf_h:.*vshuf\\.h.*lsx_vshuf_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf_w:.*vshuf\\.w.*lsx_vshuf_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf_d:.*vshuf\\.d.*lsx_vshuf_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vand_v:.*vand\\.v.*lsx_vand_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vandi_b:.*vandi\\.b.*lsx_vandi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vor_v:.*vor\\.v.*lsx_vor_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vori_b:.*vbitseti\\.b.*lsx_vori_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vnor_v:.*vnor\\.v.*lsx_vnor_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vnori_b:.*vnori\\.b.*lsx_vnori_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vxor_v:.*vxor\\.v.*lsx_vxor_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vxori_b:.*vbitrevi\\.b.*lsx_vxori_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vbitsel_v:.*vbitsel\\.v.*lsx_vbitsel_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vbitseli_b:.*vbitseli\\.b.*lsx_vbitseli_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf4i_b:.*vshuf4i\\.b.*lsx_vshuf4i_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf4i_h:.*vshuf4i\\.h.*lsx_vshuf4i_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf4i_w:.*vshuf4i\\.w.*lsx_vshuf4i_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_b:.*vreplgr2vr\\.b.*lsx_vreplgr2vr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_h:.*vreplgr2vr\\.h.*lsx_vreplgr2vr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_w:.*vreplgr2vr\\.w.*lsx_vreplgr2vr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_d:.*vreplgr2vr\\.d.*lsx_vreplgr2vr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpcnt_b:.*vpcnt\\.b.*lsx_vpcnt_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpcnt_h:.*vpcnt\\.h.*lsx_vpcnt_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpcnt_w:.*vpcnt\\.w.*lsx_vpcnt_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpcnt_d:.*vpcnt\\.d.*lsx_vpcnt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclo_b:.*vclo\\.b.*lsx_vclo_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclo_h:.*vclo\\.h.*lsx_vclo_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclo_w:.*vclo\\.w.*lsx_vclo_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclo_d:.*vclo\\.d.*lsx_vclo_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclz_b:.*vclz\\.b.*lsx_vclz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclz_h:.*vclz\\.h.*lsx_vclz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclz_w:.*vclz\\.w.*lsx_vclz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vclz_d:.*vclz\\.d.*lsx_vclz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_b:.*vpickve2gr\\.b.*lsx_vpickve2gr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_h:.*vpickve2gr\\.h.*lsx_vpickve2gr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_w:.*vpickve2gr\\.w.*lsx_vpickve2gr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_d:.*vpickve2gr\\.d.*lsx_vpickve2gr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_bu:.*vpickve2gr\\.bu.*lsx_vpickve2gr_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_hu:.*vpickve2gr\\.hu.*lsx_vpickve2gr_hu" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lsx_vpickve2gr_wu:.*vpickve2gr\\.wu.*lsx_vpickve2gr_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpickve2gr_du:.*vpickve2gr\\.du.*lsx_vpickve2gr_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_b:.*vinsgr2vr\\.b.*lsx_vinsgr2vr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_h:.*vinsgr2vr\\.h.*lsx_vinsgr2vr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_w:.*vinsgr2vr\\.w.*lsx_vinsgr2vr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_d:.*vinsgr2vr\\.d.*lsx_vinsgr2vr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfadd_s:.*vfadd\\.s.*lsx_vfadd_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfadd_d:.*vfadd\\.d.*lsx_vfadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfsub_s:.*vfsub\\.s.*lsx_vfsub_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfsub_d:.*vfsub\\.d.*lsx_vfsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmul_s:.*vfmul\\.s.*lsx_vfmul_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmul_d:.*vfmul\\.d.*lsx_vfmul_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfdiv_s:.*vfdiv\\.s.*lsx_vfdiv_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfdiv_d:.*vfdiv\\.d.*lsx_vfdiv_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcvt_h_s:.*vfcvt\\.h\\.s.*lsx_vfcvt_h_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcvt_s_d:.*vfcvt\\.s\\.d.*lsx_vfcvt_s_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmin_s:.*vfmin\\.s.*lsx_vfmin_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmin_d:.*vfmin\\.d.*lsx_vfmin_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmina_s:.*vfmina\\.s.*lsx_vfmina_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmina_d:.*vfmina\\.d.*lsx_vfmina_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmax_s:.*vfmax\\.s.*lsx_vfmax_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmax_d:.*vfmax\\.d.*lsx_vfmax_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmaxa_s:.*vfmaxa\\.s.*lsx_vfmaxa_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmaxa_d:.*vfmaxa\\.d.*lsx_vfmaxa_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfclass_s:.*vfclass\\.s.*lsx_vfclass_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfclass_d:.*vfclass\\.d.*lsx_vfclass_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfsqrt_s:.*vfsqrt\\.s.*lsx_vfsqrt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfsqrt_d:.*vfsqrt\\.d.*lsx_vfsqrt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrecip_s:.*vfrecip\\.s.*lsx_vfrecip_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrecip_d:.*vfrecip\\.d.*lsx_vfrecip_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrint_s:.*vfrint\\.s.*lsx_vfrint_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrint_d:.*vfrint\\.d.*lsx_vfrint_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrsqrt_s:.*vfrsqrt\\.s.*lsx_vfrsqrt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrsqrt_d:.*vfrsqrt\\.d.*lsx_vfrsqrt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vflogb_s:.*vflogb\\.s.*lsx_vflogb_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vflogb_d:.*vflogb\\.d.*lsx_vflogb_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcvth_s_h:.*vfcvth\\.s\\.h.*lsx_vfcvth_s_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcvth_d_s:.*vfcvth\\.d\\.s.*lsx_vfcvth_d_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcvtl_s_h:.*vfcvtl\\.s\\.h.*lsx_vfcvtl_s_h" 1 } } */ 
-+/* { dg-final { scan-assembler-times "lsx_vfcvtl_d_s:.*vfcvtl\\.d\\.s.*lsx_vfcvtl_d_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftint_w_s:.*vftint\\.w\\.s.*lsx_vftint_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftint_l_d:.*vftint\\.l\\.d.*lsx_vftint_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftint_wu_s:.*vftint\\.wu\\.s.*lsx_vftint_wu_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftint_lu_d:.*vftint\\.lu\\.d.*lsx_vftint_lu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrz_w_s:.*vftintrz\\.w\\.s.*lsx_vftintrz_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrz_l_d:.*vftintrz\\.l\\.d.*lsx_vftintrz_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrz_wu_s:.*vftintrz\\.wu\\.s.*lsx_vftintrz_wu_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrz_lu_d:.*vftintrz\\.lu\\.d.*lsx_vftintrz_lu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffint_s_w:.*vffint\\.s\\.w.*lsx_vffint_s_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffint_d_l:.*vffint\\.d\\.l.*lsx_vffint_d_l" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffint_s_wu:.*vffint\\.s\\.wu.*lsx_vffint_s_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffint_d_lu:.*vffint\\.d\\.lu.*lsx_vffint_d_lu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vandn_v:.*vandn\\.v.*lsx_vandn_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vneg_b:.*vneg\\.b.*lsx_vneg_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vneg_h:.*vneg\\.h.*lsx_vneg_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vneg_w:.*vneg\\.w.*lsx_vneg_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vneg_d:.*vneg\\.d.*lsx_vneg_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_b:.*vmuh\\.b.*lsx_vmuh_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_h:.*vmuh\\.h.*lsx_vmuh_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_w:.*vmuh\\.w.*lsx_vmuh_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_d:.*vmuh\\.d.*lsx_vmuh_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_bu:.*vmuh\\.bu.*lsx_vmuh_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_hu:.*vmuh\\.hu.*lsx_vmuh_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_wu:.*vmuh\\.wu.*lsx_vmuh_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmuh_du:.*vmuh\\.du.*lsx_vmuh_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsllwil_h_b:.*vsllwil\\.h\\.b.*lsx_vsllwil_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsllwil_w_h:.*vsllwil\\.w\\.h.*lsx_vsllwil_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsllwil_d_w:.*vsllwil\\.d\\.w.*lsx_vsllwil_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsllwil_hu_bu:.*vsllwil\\.hu\\.bu.*lsx_vsllwil_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsllwil_wu_hu:.*vsllwil\\.wu\\.hu.*lsx_vsllwil_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsllwil_du_wu:.*vsllwil\\.du\\.wu.*lsx_vsllwil_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsran_b_h:.*vsran\\.b\\.h.*lsx_vsran_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsran_h_w:.*vsran\\.h\\.w.*lsx_vsran_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsran_w_d:.*vsran\\.w\\.d.*lsx_vsran_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssran_b_h:.*vssran\\.b\\.h.*lsx_vssran_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssran_h_w:.*vssran\\.h\\.w.*lsx_vssran_h_w" 1 } } */ -+/* { dg-final { 
scan-assembler-times "lsx_vssran_w_d:.*vssran\\.w\\.d.*lsx_vssran_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssran_bu_h:.*vssran\\.bu\\.h.*lsx_vssran_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssran_hu_w:.*vssran\\.hu\\.w.*lsx_vssran_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssran_wu_d:.*vssran\\.wu\\.d.*lsx_vssran_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarn_b_h:.*vsrarn\\.b\\.h.*lsx_vsrarn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarn_h_w:.*vsrarn\\.h\\.w.*lsx_vsrarn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarn_w_d:.*vsrarn\\.w\\.d.*lsx_vsrarn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarn_b_h:.*vssrarn\\.b\\.h.*lsx_vssrarn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarn_h_w:.*vssrarn\\.h\\.w.*lsx_vssrarn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarn_w_d:.*vssrarn\\.w\\.d.*lsx_vssrarn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarn_bu_h:.*vssrarn\\.bu\\.h.*lsx_vssrarn_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarn_hu_w:.*vssrarn\\.hu\\.w.*lsx_vssrarn_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarn_wu_d:.*vssrarn\\.wu\\.d.*lsx_vssrarn_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrln_b_h:.*vsrln\\.b\\.h.*lsx_vsrln_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrln_h_w:.*vsrln\\.h\\.w.*lsx_vsrln_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrln_w_d:.*vsrln\\.w\\.d.*lsx_vsrln_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrln_bu_h:.*vssrln\\.bu\\.h.*lsx_vssrln_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrln_hu_w:.*vssrln\\.hu\\.w.*lsx_vssrln_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrln_wu_d:.*vssrln\\.wu\\.d.*lsx_vssrln_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlrn_b_h:.*vsrlrn\\.b\\.h.*lsx_vsrlrn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlrn_h_w:.*vsrlrn\\.h\\.w.*lsx_vsrlrn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlrn_w_d:.*vsrlrn\\.w\\.d.*lsx_vsrlrn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrn_bu_h:.*vssrlrn\\.bu\\.h.*lsx_vssrlrn_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrn_hu_w:.*vssrlrn\\.hu\\.w.*lsx_vssrlrn_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrn_wu_d:.*vssrlrn\\.wu\\.d.*lsx_vssrlrn_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrstpi_b:.*vfrstpi\\.b.*lsx_vfrstpi_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrstpi_h:.*vfrstpi\\.h.*lsx_vfrstpi_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrstp_b:.*vfrstp\\.b.*lsx_vfrstp_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrstp_h:.*vfrstp\\.h.*lsx_vfrstp_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf4i_d:.*vshuf4i\\.d.*lsx_vshuf4i_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vbsrl_v:.*vbsrl\\.v.*lsx_vbsrl_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vbsll_v:.*vbsll\\.v.*lsx_vbsll_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vextrins_b:.*vextrins\\.b.*lsx_vextrins_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vextrins_h:.*vextrins\\.h.*lsx_vextrins_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vextrins_w:.*vextrins\\.w.*lsx_vextrins_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vextrins_d:.*vextrins\\.d.*lsx_vextrins_d" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lsx_vmskltz_b:.*vmskltz\\.b.*lsx_vmskltz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmskltz_h:.*vmskltz\\.h.*lsx_vmskltz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmskltz_w:.*vmskltz\\.w.*lsx_vmskltz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmskltz_d:.*vmskltz\\.d.*lsx_vmskltz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsigncov_b:.*vsigncov\\.b.*lsx_vsigncov_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsigncov_h:.*vsigncov\\.h.*lsx_vsigncov_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsigncov_w:.*vsigncov\\.w.*lsx_vsigncov_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsigncov_d:.*vsigncov\\.d.*lsx_vsigncov_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmadd_s:.*vfmadd\\.s.*lsx_vfmadd_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmadd_d:.*vfmadd\\.d.*lsx_vfmadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmsub_s:.*vfmsub\\.s.*lsx_vfmsub_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfmsub_d:.*vfmsub\\.d.*lsx_vfmsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfnmadd_s:.*vfnmadd\\.s.*lsx_vfnmadd_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfnmadd_d:.*vfnmadd\\.d.*lsx_vfnmadd_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfnmsub_s:.*vfnmsub\\.s.*lsx_vfnmsub_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfnmsub_d:.*vfnmsub\\.d.*lsx_vfnmsub_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrne_w_s:.*vftintrne\\.w\\.s.*lsx_vftintrne_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrne_l_d:.*vftintrne\\.l\\.d.*lsx_vftintrne_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrp_w_s:.*vftintrp\\.w\\.s.*lsx_vftintrp_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrp_l_d:.*vftintrp\\.l\\.d.*lsx_vftintrp_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrm_w_s:.*vftintrm\\.w\\.s.*lsx_vftintrm_w_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrm_l_d:.*vftintrm\\.l\\.d.*lsx_vftintrm_l_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftint_w_d:.*vftint\\.w\\.d.*lsx_vftint_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffint_s_l:.*vffint\\.s\\.l.*lsx_vffint_s_l" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrz_w_d:.*vftintrz\\.w\\.d.*lsx_vftintrz_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrp_w_d:.*vftintrp\\.w\\.d.*lsx_vftintrp_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrm_w_d:.*vftintrm\\.w\\.d.*lsx_vftintrm_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrne_w_d:.*vftintrne\\.w\\.d.*lsx_vftintrne_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintl_l_s:.*vftintl\\.l\\.s.*lsx_vftintl_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftinth_l_s:.*vftinth\\.l\\.s.*lsx_vftinth_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffinth_d_w:.*vffinth\\.d\\.w.*lsx_vffinth_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vffintl_d_w:.*vffintl\\.d\\.w.*lsx_vffintl_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrzl_l_s:.*vftintrzl\\.l\\.s.*lsx_vftintrzl_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrzh_l_s:.*vftintrzh\\.l\\.s.*lsx_vftintrzh_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrpl_l_s:.*vftintrpl\\.l\\.s.*lsx_vftintrpl_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrph_l_s:.*vftintrph\\.l\\.s.*lsx_vftintrph_l_s" 1 } } */ -+/* { dg-final { 
scan-assembler-times "lsx_vftintrml_l_s:.*vftintrml\\.l\\.s.*lsx_vftintrml_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrmh_l_s:.*vftintrmh\\.l\\.s.*lsx_vftintrmh_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrnel_l_s:.*vftintrnel\\.l\\.s.*lsx_vftintrnel_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vftintrneh_l_s:.*vftintrneh\\.l\\.s.*lsx_vftintrneh_l_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrne_s:.*vfrintrne\\.s.*lsx_vfrintrne_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrne_d:.*vfrintrne\\.d.*lsx_vfrintrne_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrz_s:.*vfrintrz\\.s.*lsx_vfrintrz_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrz_d:.*vfrintrz\\.d.*lsx_vfrintrz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrp_s:.*vfrintrp\\.s.*lsx_vfrintrp_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrp_d:.*vfrintrp\\.d.*lsx_vfrintrp_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrm_s:.*vfrintrm\\.s.*lsx_vfrintrm_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfrintrm_d:.*vfrintrm\\.d.*lsx_vfrintrm_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vstelm_b:.*vstelm\\.b.*lsx_vstelm_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vstelm_h:.*vstelm\\.h.*lsx_vstelm_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vstelm_w:.*vstelm\\.w.*lsx_vstelm_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vstelm_d:.*vstelm\\.d.*lsx_vstelm_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_d_w:.*vaddwev\\.d\\.w.*lsx_vaddwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_w_h:.*vaddwev\\.w\\.h.*lsx_vaddwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_h_b:.*vaddwev\\.h\\.b.*lsx_vaddwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_d_w:.*vaddwod\\.d\\.w.*lsx_vaddwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_w_h:.*vaddwod\\.w\\.h.*lsx_vaddwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_h_b:.*vaddwod\\.h\\.b.*lsx_vaddwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu:.*vaddwev\\.d\\.wu.*lsx_vaddwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu:.*vaddwev\\.w\\.hu.*lsx_vaddwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu:.*vaddwev\\.h\\.bu.*lsx_vaddwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu:.*vaddwod\\.d\\.wu.*lsx_vaddwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu:.*vaddwod\\.w\\.hu.*lsx_vaddwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu:.*vaddwod\\.h\\.bu.*lsx_vaddwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu_w:.*vaddwev\\.d\\.wu\\.w.*lsx_vaddwev_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu_h:.*vaddwev\\.w\\.hu\\.h.*lsx_vaddwev_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu_b:.*vaddwev\\.h\\.bu\\.b.*lsx_vaddwev_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu_w:.*vaddwod\\.d\\.wu\\.w.*lsx_vaddwod_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu_h:.*vaddwod\\.w\\.hu\\.h.*lsx_vaddwod_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu_b:.*vaddwod\\.h\\.bu\\.b.*lsx_vaddwod_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_d_w:.*vsubwev\\.d\\.w.*lsx_vsubwev_d_w" 1 } } */ 
-+/* { dg-final { scan-assembler-times "lsx_vsubwev_w_h:.*vsubwev\\.w\\.h.*lsx_vsubwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_h_b:.*vsubwev\\.h\\.b.*lsx_vsubwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_d_w:.*vsubwod\\.d\\.w.*lsx_vsubwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_w_h:.*vsubwod\\.w\\.h.*lsx_vsubwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_h_b:.*vsubwod\\.h\\.b.*lsx_vsubwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_d_wu:.*vsubwev\\.d\\.wu.*lsx_vsubwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_w_hu:.*vsubwev\\.w\\.hu.*lsx_vsubwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_h_bu:.*vsubwev\\.h\\.bu.*lsx_vsubwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_d_wu:.*vsubwod\\.d\\.wu.*lsx_vsubwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_w_hu:.*vsubwod\\.w\\.hu.*lsx_vsubwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_h_bu:.*vsubwod\\.h\\.bu.*lsx_vsubwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_q_d:.*vaddwev\\.q\\.d.*lsx_vaddwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_q_d:.*vaddwod\\.q\\.d.*lsx_vaddwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du:.*vaddwev\\.q\\.du.*lsx_vaddwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du:.*vaddwod\\.q\\.du.*lsx_vaddwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_q_d:.*vsubwev\\.q\\.d.*lsx_vsubwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_q_d:.*vsubwod\\.q\\.d.*lsx_vsubwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwev_q_du:.*vsubwev\\.q\\.du.*lsx_vsubwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsubwod_q_du:.*vsubwod\\.q\\.du.*lsx_vsubwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du_d:.*vaddwev\\.q\\.du\\.d.*lsx_vaddwev_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du_d:.*vaddwod\\.q\\.du\\.d.*lsx_vaddwod_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_d_w:.*vmulwev\\.d\\.w.*lsx_vmulwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_w_h:.*vmulwev\\.w\\.h.*lsx_vmulwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_h_b:.*vmulwev\\.h\\.b.*lsx_vmulwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_d_w:.*vmulwod\\.d\\.w.*lsx_vmulwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_w_h:.*vmulwod\\.w\\.h.*lsx_vmulwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_h_b:.*vmulwod\\.h\\.b.*lsx_vmulwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu:.*vmulwev\\.d\\.wu.*lsx_vmulwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_w_hu:.*vmulwev\\.w\\.hu.*lsx_vmulwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu:.*vmulwev\\.h\\.bu.*lsx_vmulwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu:.*vmulwod\\.d\\.wu.*lsx_vmulwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu:.*vmulwod\\.w\\.hu.*lsx_vmulwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu:.*vmulwod\\.h\\.bu.*lsx_vmulwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu_w:.*vmulwev\\.d\\.wu\\.w.*lsx_vmulwev_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lsx_vmulwev_w_hu_h:.*vmulwev\\.w\\.hu\\.h.*lsx_vmulwev_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu_b:.*vmulwev\\.h\\.bu\\.b.*lsx_vmulwev_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu_w:.*vmulwod\\.d\\.wu\\.w.*lsx_vmulwod_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu_h:.*vmulwod\\.w\\.hu\\.h.*lsx_vmulwod_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu_b:.*vmulwod\\.h\\.bu\\.b.*lsx_vmulwod_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_q_d:.*vmulwev\\.q\\.d.*lsx_vmulwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_q_d:.*vmulwod\\.q\\.d.*lsx_vmulwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du:.*vmulwev\\.q\\.du.*lsx_vmulwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du:.*vmulwod\\.q\\.du.*lsx_vmulwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du_d:.*vmulwev\\.q\\.du\\.d.*lsx_vmulwev_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du_d:.*vmulwod\\.q\\.du\\.d.*lsx_vmulwod_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vhaddw_q_d:.*vhaddw\\.q\\.d.*lsx_vhaddw_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vhaddw_qu_du:.*vhaddw\\.qu\\.du.*lsx_vhaddw_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vhsubw_q_d:.*vhsubw\\.q\\.d.*lsx_vhsubw_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vhsubw_qu_du:.*vhsubw\\.qu\\.du.*lsx_vhsubw_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_w:.*vmaddwev\\.d\\.w.*lsx_vmaddwev_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_h:.*vmaddwev\\.w\\.h.*lsx_vmaddwev_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_b:.*vmaddwev\\.h\\.b.*lsx_vmaddwev_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu:.*vmaddwev\\.d\\.wu.*lsx_vmaddwev_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu:.*vmaddwev\\.w\\.hu.*lsx_vmaddwev_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu:.*vmaddwev\\.h\\.bu.*lsx_vmaddwev_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_w:.*vmaddwod\\.d\\.w.*lsx_vmaddwod_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_h:.*vmaddwod\\.w\\.h.*lsx_vmaddwod_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_b:.*vmaddwod\\.h\\.b.*lsx_vmaddwod_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu:.*vmaddwod\\.d\\.wu.*lsx_vmaddwod_d_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu:.*vmaddwod\\.w\\.hu.*lsx_vmaddwod_w_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu:.*vmaddwod\\.h\\.bu.*lsx_vmaddwod_h_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu_w:.*vmaddwev\\.d\\.wu\\.w.*lsx_vmaddwev_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu_h:.*vmaddwev\\.w\\.hu\\.h.*lsx_vmaddwev_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu_b:.*vmaddwev\\.h\\.bu\\.b.*lsx_vmaddwev_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu_w:.*vmaddwod\\.d\\.wu\\.w.*lsx_vmaddwod_d_wu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu_h:.*vmaddwod\\.w\\.hu\\.h.*lsx_vmaddwod_w_hu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu_b:.*vmaddwod\\.h\\.bu\\.b.*lsx_vmaddwod_h_bu_b" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lsx_vmaddwev_q_d:.*vmaddwev\\.q\\.d.*lsx_vmaddwev_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_d:.*vmaddwod\\.q\\.d.*lsx_vmaddwod_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du:.*vmaddwev\\.q\\.du.*lsx_vmaddwev_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du:.*vmaddwod\\.q\\.du.*lsx_vmaddwod_q_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du_d:.*vmaddwev\\.q\\.du\\.d.*lsx_vmaddwev_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du_d:.*vmaddwod\\.q\\.du\\.d.*lsx_vmaddwod_q_du_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotr_b:.*vrotr\\.b.*lsx_vrotr_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotr_h:.*vrotr\\.h.*lsx_vrotr_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotr_w:.*vrotr\\.w.*lsx_vrotr_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotr_d:.*vrotr\\.d.*lsx_vrotr_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vadd_q:.*vadd\\.q.*lsx_vadd_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsub_q:.*vsub\\.q.*lsx_vsub_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vldrepl_b:.*vldrepl\\.b.*lsx_vldrepl_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vldrepl_h:.*vldrepl\\.h.*lsx_vldrepl_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vldrepl_w:.*vldrepl\\.w.*lsx_vldrepl_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vldrepl_d:.*vldrepl\\.d.*lsx_vldrepl_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmskgez_b:.*vmskgez\\.b.*lsx_vmskgez_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vmsknz_b:.*vmsknz\\.b.*lsx_vmsknz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_h_b:.*vexth\\.h\\.b.*lsx_vexth_h_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_w_h:.*vexth\\.w\\.h.*lsx_vexth_w_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_d_w:.*vexth\\.d\\.w.*lsx_vexth_d_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_q_d:.*vexth\\.q\\.d.*lsx_vexth_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_hu_bu:.*vexth\\.hu\\.bu.*lsx_vexth_hu_bu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_wu_hu:.*vexth\\.wu\\.hu.*lsx_vexth_wu_hu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_du_wu:.*vexth\\.du\\.wu.*lsx_vexth_du_wu" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vexth_qu_du:.*vexth\\.qu\\.du.*lsx_vexth_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotri_b:.*vrotri\\.b.*lsx_vrotri_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotri_h:.*vrotri\\.h.*lsx_vrotri_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotri_w:.*vrotri\\.w.*lsx_vrotri_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrotri_d:.*vrotri\\.d.*lsx_vrotri_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vextl_q_d:.*vextl\\.q\\.d.*lsx_vextl_q_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlni_b_h:.*vsrlni\\.b\\.h.*lsx_vsrlni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlni_h_w:.*vsrlni\\.h\\.w.*lsx_vsrlni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlni_w_d:.*vsrlni\\.w\\.d.*lsx_vsrlni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlni_d_q:.*vsrlni\\.d\\.q.*lsx_vsrlni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlrni_b_h:.*vsrlrni\\.b\\.h.*lsx_vsrlrni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlrni_h_w:.*vsrlrni\\.h\\.w.*lsx_vsrlrni_h_w" 1 } } */ -+/* { dg-final { 
scan-assembler-times "lsx_vsrlrni_w_d:.*vsrlrni\\.w\\.d.*lsx_vsrlrni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrlrni_d_q:.*vsrlrni\\.d\\.q.*lsx_vsrlrni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_b_h:.*vssrlni\\.b\\.h.*lsx_vssrlni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_h_w:.*vssrlni\\.h\\.w.*lsx_vssrlni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_w_d:.*vssrlni\\.w\\.d.*lsx_vssrlni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_d_q:.*vssrlni\\.d\\.q.*lsx_vssrlni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_bu_h:.*vssrlni\\.bu\\.h.*lsx_vssrlni_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_hu_w:.*vssrlni\\.hu\\.w.*lsx_vssrlni_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_wu_d:.*vssrlni\\.wu\\.d.*lsx_vssrlni_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlni_du_q:.*vssrlni\\.du\\.q.*lsx_vssrlni_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_b_h:.*vssrlrni\\.b\\.h.*lsx_vssrlrni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_h_w:.*vssrlrni\\.h\\.w.*lsx_vssrlrni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_w_d:.*vssrlrni\\.w\\.d.*lsx_vssrlrni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_d_q:.*vssrlrni\\.d\\.q.*lsx_vssrlrni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_bu_h:.*vssrlrni\\.bu\\.h.*lsx_vssrlrni_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_hu_w:.*vssrlrni\\.hu\\.w.*lsx_vssrlrni_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_wu_d:.*vssrlrni\\.wu\\.d.*lsx_vssrlrni_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrni_du_q:.*vssrlrni\\.du\\.q.*lsx_vssrlrni_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrani_b_h:.*vsrani\\.b\\.h.*lsx_vsrani_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrani_h_w:.*vsrani\\.h\\.w.*lsx_vsrani_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrani_w_d:.*vsrani\\.w\\.d.*lsx_vsrani_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrani_d_q:.*vsrani\\.d\\.q.*lsx_vsrani_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarni_b_h:.*vsrarni\\.b\\.h.*lsx_vsrarni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarni_h_w:.*vsrarni\\.h\\.w.*lsx_vsrarni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarni_w_d:.*vsrarni\\.w\\.d.*lsx_vsrarni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vsrarni_d_q:.*vsrarni\\.d\\.q.*lsx_vsrarni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_b_h:.*vssrani\\.b\\.h.*lsx_vssrani_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_h_w:.*vssrani\\.h\\.w.*lsx_vssrani_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_w_d:.*vssrani\\.w\\.d.*lsx_vssrani_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_d_q:.*vssrani\\.d\\.q.*lsx_vssrani_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_bu_h:.*vssrani\\.bu\\.h.*lsx_vssrani_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_hu_w:.*vssrani\\.hu\\.w.*lsx_vssrani_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_wu_d:.*vssrani\\.wu\\.d.*lsx_vssrani_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrani_du_q:.*vssrani\\.du\\.q.*lsx_vssrani_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lsx_vssrarni_b_h:.*vssrarni\\.b\\.h.*lsx_vssrarni_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_h_w:.*vssrarni\\.h\\.w.*lsx_vssrarni_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_w_d:.*vssrarni\\.w\\.d.*lsx_vssrarni_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_d_q:.*vssrarni\\.d\\.q.*lsx_vssrarni_d_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_bu_h:.*vssrarni\\.bu\\.h.*lsx_vssrarni_bu_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_hu_w:.*vssrarni\\.hu\\.w.*lsx_vssrarni_hu_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_wu_d:.*vssrarni\\.wu\\.d.*lsx_vssrarni_wu_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrarni_du_q:.*vssrarni\\.du\\.q.*lsx_vssrarni_du_q" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vpermi_w:.*vpermi\\.w.*lsx_vpermi_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vld:.*vld.*lsx_vld" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vst:.*vst.*lsx_vst" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrn_b_h:.*vssrlrn\\.b\\.h.*lsx_vssrlrn_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrn_h_w:.*vssrlrn\\.h\\.w.*lsx_vssrlrn_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrlrn_w_d:.*vssrlrn\\.w\\.d.*lsx_vssrlrn_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrln_b_h:.*vssrln\\.b\\.h.*lsx_vssrln_b_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrln_h_w:.*vssrln\\.h\\.w.*lsx_vssrln_h_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vssrln_w_d:.*vssrln\\.w\\.d.*lsx_vssrln_w_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vorn_v:.*vorn\\.v.*lsx_vorn_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vldi:.*vldi.*lsx_vldi" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vshuf_b:.*vshuf\\.b.*lsx_vshuf_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vldx:.*vldx.*lsx_vldx" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vstx:.*vstx.*lsx_vstx" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vextl_qu_du:.*vextl\\.qu\\.du.*lsx_vextl_qu_du" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bnz_b:.*vsetanyeqz\\.b.*lsx_bnz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bnz_d:.*vsetanyeqz\\.d.*lsx_bnz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bnz_h:.*vsetanyeqz\\.h.*lsx_bnz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bnz_v:.*vseteqz\\.v.*lsx_bnz_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bnz_w:.*vsetanyeqz\\.w.*lsx_bnz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bz_b:.*vsetallnez\\.b.*lsx_bz_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bz_d:.*vsetallnez\\.d.*lsx_bz_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bz_h:.*vsetallnez\\.h.*lsx_bz_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bz_v:.*vsetnez\\.v.*lsx_bz_v" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_bz_w:.*vsetallnez\\.w.*lsx_bz_w" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_d:.*vfcmp\\.caf\\.d.*lsx_vfcmp_caf_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_s:.*vfcmp\\.caf\\.s.*lsx_vfcmp_caf_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_d:.*vfcmp\\.ceq\\.d.*lsx_vfcmp_ceq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_s:.*vfcmp\\.ceq\\.s.*lsx_vfcmp_ceq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cle_d:.*vfcmp\\.cle\\.d.*lsx_vfcmp_cle_d" 1 } } */ -+/* { dg-final { scan-assembler-times 
"lsx_vfcmp_cle_s:.*vfcmp\\.cle\\.s.*lsx_vfcmp_cle_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_d:.*vfcmp\\.clt\\.d.*lsx_vfcmp_clt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_s:.*vfcmp\\.clt\\.s.*lsx_vfcmp_clt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_d:.*vfcmp\\.cne\\.d.*lsx_vfcmp_cne_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_s:.*vfcmp\\.cne\\.s.*lsx_vfcmp_cne_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_d:.*vfcmp\\.cor\\.d.*lsx_vfcmp_cor_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_s:.*vfcmp\\.cor\\.s.*lsx_vfcmp_cor_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_d:.*vfcmp\\.cueq\\.d.*lsx_vfcmp_cueq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_s:.*vfcmp\\.cueq\\.s.*lsx_vfcmp_cueq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_d:.*vfcmp\\.cule\\.d.*lsx_vfcmp_cule_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_s:.*vfcmp\\.cule\\.s.*lsx_vfcmp_cule_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_d:.*vfcmp\\.cult\\.d.*lsx_vfcmp_cult_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_s:.*vfcmp\\.cult\\.s.*lsx_vfcmp_cult_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_d:.*vfcmp\\.cun\\.d.*lsx_vfcmp_cun_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_d:.*vfcmp\\.cune\\.d.*lsx_vfcmp_cune_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_s:.*vfcmp\\.cune\\.s.*lsx_vfcmp_cune_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_s:.*vfcmp\\.cun\\.s.*lsx_vfcmp_cun_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_d:.*vfcmp\\.saf\\.d.*lsx_vfcmp_saf_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_s:.*vfcmp\\.saf\\.s.*lsx_vfcmp_saf_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_d:.*vfcmp\\.seq\\.d.*lsx_vfcmp_seq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_s:.*vfcmp\\.seq\\.s.*lsx_vfcmp_seq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_d:.*vfcmp\\.sle\\.d.*lsx_vfcmp_sle_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_s:.*vfcmp\\.sle\\.s.*lsx_vfcmp_sle_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_d:.*vfcmp\\.slt\\.d.*lsx_vfcmp_slt_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_s:.*vfcmp\\.slt\\.s.*lsx_vfcmp_slt_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_d:.*vfcmp\\.sne\\.d.*lsx_vfcmp_sne_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_s:.*vfcmp\\.sne\\.s.*lsx_vfcmp_sne_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_d:.*vfcmp\\.sor\\.d.*lsx_vfcmp_sor_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_s:.*vfcmp\\.sor\\.s.*lsx_vfcmp_sor_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_d:.*vfcmp\\.sueq\\.d.*lsx_vfcmp_sueq_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_s:.*vfcmp\\.sueq\\.s.*lsx_vfcmp_sueq_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_d:.*vfcmp\\.sule\\.d.*lsx_vfcmp_sule_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_s:.*vfcmp\\.sule\\.s.*lsx_vfcmp_sule_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_d:.*vfcmp\\.sult\\.d.*lsx_vfcmp_sult_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_s:.*vfcmp\\.sult\\.s.*lsx_vfcmp_sult_s" 1 } } */ -+/* { dg-final 
{ scan-assembler-times "lsx_vfcmp_sun_d:.*vfcmp\\.sun\\.d.*lsx_vfcmp_sun_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_d:.*vfcmp\\.sune\\.d.*lsx_vfcmp_sune_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_s:.*vfcmp\\.sune\\.s.*lsx_vfcmp_sune_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vfcmp_sun_s:.*vfcmp\\.sun\\.s.*lsx_vfcmp_sun_s" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrepli_b:.*vrepli\\.b.*lsx_vrepli_b" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrepli_d:.*vrepli\\.d.*lsx_vrepli_d" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrepli_h:.*vrepli\\.h.*lsx_vrepli_h" 1 } } */ -+/* { dg-final { scan-assembler-times "lsx_vrepli_w:.*vrepli\\.w.*lsx_vrepli_w" 1 } } */ -+ -+typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); -+typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); -+typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); -+typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); -+typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); -+typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); -+typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); -+typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); -+typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); -+typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); -+typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8))); -+typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); -+typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); -+typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); -+typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); -+typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); -+typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); -+ -+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); -+typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); -+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); -+ -+v16i8 __lsx_vsll_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsll_b(_1, _2);} -+v8i16 __lsx_vsll_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsll_h(_1, _2);} -+v4i32 __lsx_vsll_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsll_w(_1, _2);} -+v2i64 __lsx_vsll_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsll_d(_1, _2);} -+v16i8 __lsx_vslli_b(v16i8 _1){return __builtin_lsx_vslli_b(_1, 1);} -+v8i16 __lsx_vslli_h(v8i16 _1){return __builtin_lsx_vslli_h(_1, 1);} -+v4i32 __lsx_vslli_w(v4i32 _1){return __builtin_lsx_vslli_w(_1, 1);} -+v2i64 __lsx_vslli_d(v2i64 _1){return __builtin_lsx_vslli_d(_1, 1);} -+v16i8 __lsx_vsra_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsra_b(_1, _2);} -+v8i16 __lsx_vsra_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsra_h(_1, _2);} -+v4i32 __lsx_vsra_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsra_w(_1, _2);} -+v2i64 __lsx_vsra_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsra_d(_1, _2);} -+v16i8 __lsx_vsrai_b(v16i8 _1){return __builtin_lsx_vsrai_b(_1, 1);} -+v8i16 __lsx_vsrai_h(v8i16 _1){return __builtin_lsx_vsrai_h(_1, 
1);} -+v4i32 __lsx_vsrai_w(v4i32 _1){return __builtin_lsx_vsrai_w(_1, 1);} -+v2i64 __lsx_vsrai_d(v2i64 _1){return __builtin_lsx_vsrai_d(_1, 1);} -+v16i8 __lsx_vsrar_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrar_b(_1, _2);} -+v8i16 __lsx_vsrar_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrar_h(_1, _2);} -+v4i32 __lsx_vsrar_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrar_w(_1, _2);} -+v2i64 __lsx_vsrar_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrar_d(_1, _2);} -+v16i8 __lsx_vsrari_b(v16i8 _1){return __builtin_lsx_vsrari_b(_1, 1);} -+v8i16 __lsx_vsrari_h(v8i16 _1){return __builtin_lsx_vsrari_h(_1, 1);} -+v4i32 __lsx_vsrari_w(v4i32 _1){return __builtin_lsx_vsrari_w(_1, 1);} -+v2i64 __lsx_vsrari_d(v2i64 _1){return __builtin_lsx_vsrari_d(_1, 1);} -+v16i8 __lsx_vsrl_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrl_b(_1, _2);} -+v8i16 __lsx_vsrl_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrl_h(_1, _2);} -+v4i32 __lsx_vsrl_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrl_w(_1, _2);} -+v2i64 __lsx_vsrl_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrl_d(_1, _2);} -+v16i8 __lsx_vsrli_b(v16i8 _1){return __builtin_lsx_vsrli_b(_1, 1);} -+v8i16 __lsx_vsrli_h(v8i16 _1){return __builtin_lsx_vsrli_h(_1, 1);} -+v4i32 __lsx_vsrli_w(v4i32 _1){return __builtin_lsx_vsrli_w(_1, 1);} -+v2i64 __lsx_vsrli_d(v2i64 _1){return __builtin_lsx_vsrli_d(_1, 1);} -+v16i8 __lsx_vsrlr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlr_b(_1, _2);} -+v8i16 __lsx_vsrlr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlr_h(_1, _2);} -+v4i32 __lsx_vsrlr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlr_w(_1, _2);} -+v2i64 __lsx_vsrlr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlr_d(_1, _2);} -+v16i8 __lsx_vsrlri_b(v16i8 _1){return __builtin_lsx_vsrlri_b(_1, 1);} -+v8i16 __lsx_vsrlri_h(v8i16 _1){return __builtin_lsx_vsrlri_h(_1, 1);} -+v4i32 __lsx_vsrlri_w(v4i32 _1){return __builtin_lsx_vsrlri_w(_1, 1);} -+v2i64 __lsx_vsrlri_d(v2i64 _1){return __builtin_lsx_vsrlri_d(_1, 1);} -+v16u8 __lsx_vbitclr_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitclr_b(_1, _2);} -+v8u16 __lsx_vbitclr_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitclr_h(_1, _2);} -+v4u32 __lsx_vbitclr_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitclr_w(_1, _2);} -+v2u64 __lsx_vbitclr_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitclr_d(_1, _2);} -+v16u8 __lsx_vbitclri_b(v16u8 _1){return __builtin_lsx_vbitclri_b(_1, 1);} -+v8u16 __lsx_vbitclri_h(v8u16 _1){return __builtin_lsx_vbitclri_h(_1, 1);} -+v4u32 __lsx_vbitclri_w(v4u32 _1){return __builtin_lsx_vbitclri_w(_1, 1);} -+v2u64 __lsx_vbitclri_d(v2u64 _1){return __builtin_lsx_vbitclri_d(_1, 1);} -+v16u8 __lsx_vbitset_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitset_b(_1, _2);} -+v8u16 __lsx_vbitset_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitset_h(_1, _2);} -+v4u32 __lsx_vbitset_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitset_w(_1, _2);} -+v2u64 __lsx_vbitset_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitset_d(_1, _2);} -+v16u8 __lsx_vbitseti_b(v16u8 _1){return __builtin_lsx_vbitseti_b(_1, 1);} -+v8u16 __lsx_vbitseti_h(v8u16 _1){return __builtin_lsx_vbitseti_h(_1, 1);} -+v4u32 __lsx_vbitseti_w(v4u32 _1){return __builtin_lsx_vbitseti_w(_1, 1);} -+v2u64 __lsx_vbitseti_d(v2u64 _1){return __builtin_lsx_vbitseti_d(_1, 1);} -+v16u8 __lsx_vbitrev_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitrev_b(_1, _2);} -+v8u16 __lsx_vbitrev_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitrev_h(_1, _2);} -+v4u32 __lsx_vbitrev_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitrev_w(_1, _2);} -+v2u64 __lsx_vbitrev_d(v2u64 _1, v2u64 
_2){return __builtin_lsx_vbitrev_d(_1, _2);} -+v16u8 __lsx_vbitrevi_b(v16u8 _1){return __builtin_lsx_vbitrevi_b(_1, 1);} -+v8u16 __lsx_vbitrevi_h(v8u16 _1){return __builtin_lsx_vbitrevi_h(_1, 1);} -+v4u32 __lsx_vbitrevi_w(v4u32 _1){return __builtin_lsx_vbitrevi_w(_1, 1);} -+v2u64 __lsx_vbitrevi_d(v2u64 _1){return __builtin_lsx_vbitrevi_d(_1, 1);} -+v16i8 __lsx_vadd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vadd_b(_1, _2);} -+v8i16 __lsx_vadd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vadd_h(_1, _2);} -+v4i32 __lsx_vadd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vadd_w(_1, _2);} -+v2i64 __lsx_vadd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vadd_d(_1, _2);} -+v16i8 __lsx_vaddi_bu(v16i8 _1){return __builtin_lsx_vaddi_bu(_1, 1);} -+v8i16 __lsx_vaddi_hu(v8i16 _1){return __builtin_lsx_vaddi_hu(_1, 1);} -+v4i32 __lsx_vaddi_wu(v4i32 _1){return __builtin_lsx_vaddi_wu(_1, 1);} -+v2i64 __lsx_vaddi_du(v2i64 _1){return __builtin_lsx_vaddi_du(_1, 1);} -+v16i8 __lsx_vsub_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsub_b(_1, _2);} -+v8i16 __lsx_vsub_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsub_h(_1, _2);} -+v4i32 __lsx_vsub_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsub_w(_1, _2);} -+v2i64 __lsx_vsub_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsub_d(_1, _2);} -+v16i8 __lsx_vsubi_bu(v16i8 _1){return __builtin_lsx_vsubi_bu(_1, 1);} -+v8i16 __lsx_vsubi_hu(v8i16 _1){return __builtin_lsx_vsubi_hu(_1, 1);} -+v4i32 __lsx_vsubi_wu(v4i32 _1){return __builtin_lsx_vsubi_wu(_1, 1);} -+v2i64 __lsx_vsubi_du(v2i64 _1){return __builtin_lsx_vsubi_du(_1, 1);} -+v16i8 __lsx_vmax_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmax_b(_1, _2);} -+v8i16 __lsx_vmax_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmax_h(_1, _2);} -+v4i32 __lsx_vmax_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmax_w(_1, _2);} -+v2i64 __lsx_vmax_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmax_d(_1, _2);} -+v16i8 __lsx_vmaxi_b(v16i8 _1){return __builtin_lsx_vmaxi_b(_1, 1);} -+v8i16 __lsx_vmaxi_h(v8i16 _1){return __builtin_lsx_vmaxi_h(_1, 1);} -+v4i32 __lsx_vmaxi_w(v4i32 _1){return __builtin_lsx_vmaxi_w(_1, 1);} -+v2i64 __lsx_vmaxi_d(v2i64 _1){return __builtin_lsx_vmaxi_d(_1, 1);} -+v16u8 __lsx_vmax_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmax_bu(_1, _2);} -+v8u16 __lsx_vmax_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmax_hu(_1, _2);} -+v4u32 __lsx_vmax_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmax_wu(_1, _2);} -+v2u64 __lsx_vmax_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmax_du(_1, _2);} -+v16u8 __lsx_vmaxi_bu(v16u8 _1){return __builtin_lsx_vmaxi_bu(_1, 1);} -+v8u16 __lsx_vmaxi_hu(v8u16 _1){return __builtin_lsx_vmaxi_hu(_1, 1);} -+v4u32 __lsx_vmaxi_wu(v4u32 _1){return __builtin_lsx_vmaxi_wu(_1, 1);} -+v2u64 __lsx_vmaxi_du(v2u64 _1){return __builtin_lsx_vmaxi_du(_1, 1);} -+v16i8 __lsx_vmin_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmin_b(_1, _2);} -+v8i16 __lsx_vmin_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmin_h(_1, _2);} -+v4i32 __lsx_vmin_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmin_w(_1, _2);} -+v2i64 __lsx_vmin_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmin_d(_1, _2);} -+v16i8 __lsx_vmini_b(v16i8 _1){return __builtin_lsx_vmini_b(_1, 1);} -+v8i16 __lsx_vmini_h(v8i16 _1){return __builtin_lsx_vmini_h(_1, 1);} -+v4i32 __lsx_vmini_w(v4i32 _1){return __builtin_lsx_vmini_w(_1, 1);} -+v2i64 __lsx_vmini_d(v2i64 _1){return __builtin_lsx_vmini_d(_1, 1);} -+v16u8 __lsx_vmin_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmin_bu(_1, _2);} -+v8u16 __lsx_vmin_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmin_hu(_1, _2);} -+v4u32 __lsx_vmin_wu(v4u32 _1, 
v4u32 _2){return __builtin_lsx_vmin_wu(_1, _2);} -+v2u64 __lsx_vmin_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmin_du(_1, _2);} -+v16u8 __lsx_vmini_bu(v16u8 _1){return __builtin_lsx_vmini_bu(_1, 1);} -+v8u16 __lsx_vmini_hu(v8u16 _1){return __builtin_lsx_vmini_hu(_1, 1);} -+v4u32 __lsx_vmini_wu(v4u32 _1){return __builtin_lsx_vmini_wu(_1, 1);} -+v2u64 __lsx_vmini_du(v2u64 _1){return __builtin_lsx_vmini_du(_1, 1);} -+v16i8 __lsx_vseq_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vseq_b(_1, _2);} -+v8i16 __lsx_vseq_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vseq_h(_1, _2);} -+v4i32 __lsx_vseq_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vseq_w(_1, _2);} -+v2i64 __lsx_vseq_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vseq_d(_1, _2);} -+v16i8 __lsx_vseqi_b(v16i8 _1){return __builtin_lsx_vseqi_b(_1, 1);} -+v8i16 __lsx_vseqi_h(v8i16 _1){return __builtin_lsx_vseqi_h(_1, 1);} -+v4i32 __lsx_vseqi_w(v4i32 _1){return __builtin_lsx_vseqi_w(_1, 1);} -+v2i64 __lsx_vseqi_d(v2i64 _1){return __builtin_lsx_vseqi_d(_1, 1);} -+v16i8 __lsx_vslti_b(v16i8 _1){return __builtin_lsx_vslti_b(_1, 1);} -+v16i8 __lsx_vslt_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vslt_b(_1, _2);} -+v8i16 __lsx_vslt_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vslt_h(_1, _2);} -+v4i32 __lsx_vslt_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vslt_w(_1, _2);} -+v2i64 __lsx_vslt_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vslt_d(_1, _2);} -+v8i16 __lsx_vslti_h(v8i16 _1){return __builtin_lsx_vslti_h(_1, 1);} -+v4i32 __lsx_vslti_w(v4i32 _1){return __builtin_lsx_vslti_w(_1, 1);} -+v2i64 __lsx_vslti_d(v2i64 _1){return __builtin_lsx_vslti_d(_1, 1);} -+v16i8 __lsx_vslt_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vslt_bu(_1, _2);} -+v8i16 __lsx_vslt_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vslt_hu(_1, _2);} -+v4i32 __lsx_vslt_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vslt_wu(_1, _2);} -+v2i64 __lsx_vslt_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vslt_du(_1, _2);} -+v16i8 __lsx_vslti_bu(v16u8 _1){return __builtin_lsx_vslti_bu(_1, 1);} -+v8i16 __lsx_vslti_hu(v8u16 _1){return __builtin_lsx_vslti_hu(_1, 1);} -+v4i32 __lsx_vslti_wu(v4u32 _1){return __builtin_lsx_vslti_wu(_1, 1);} -+v2i64 __lsx_vslti_du(v2u64 _1){return __builtin_lsx_vslti_du(_1, 1);} -+v16i8 __lsx_vsle_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsle_b(_1, _2);} -+v8i16 __lsx_vsle_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsle_h(_1, _2);} -+v4i32 __lsx_vsle_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsle_w(_1, _2);} -+v2i64 __lsx_vsle_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsle_d(_1, _2);} -+v16i8 __lsx_vslei_b(v16i8 _1){return __builtin_lsx_vslei_b(_1, 1);} -+v8i16 __lsx_vslei_h(v8i16 _1){return __builtin_lsx_vslei_h(_1, 1);} -+v4i32 __lsx_vslei_w(v4i32 _1){return __builtin_lsx_vslei_w(_1, 1);} -+v2i64 __lsx_vslei_d(v2i64 _1){return __builtin_lsx_vslei_d(_1, 1);} -+v16i8 __lsx_vsle_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsle_bu(_1, _2);} -+v8i16 __lsx_vsle_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsle_hu(_1, _2);} -+v4i32 __lsx_vsle_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsle_wu(_1, _2);} -+v2i64 __lsx_vsle_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsle_du(_1, _2);} -+v16i8 __lsx_vslei_bu(v16u8 _1){return __builtin_lsx_vslei_bu(_1, 1);} -+v8i16 __lsx_vslei_hu(v8u16 _1){return __builtin_lsx_vslei_hu(_1, 1);} -+v4i32 __lsx_vslei_wu(v4u32 _1){return __builtin_lsx_vslei_wu(_1, 1);} -+v2i64 __lsx_vslei_du(v2u64 _1){return __builtin_lsx_vslei_du(_1, 1);} -+v16i8 __lsx_vsat_b(v16i8 _1){return __builtin_lsx_vsat_b(_1, 1);} -+v8i16 __lsx_vsat_h(v8i16 _1){return 
__builtin_lsx_vsat_h(_1, 1);} -+v4i32 __lsx_vsat_w(v4i32 _1){return __builtin_lsx_vsat_w(_1, 1);} -+v2i64 __lsx_vsat_d(v2i64 _1){return __builtin_lsx_vsat_d(_1, 1);} -+v16u8 __lsx_vsat_bu(v16u8 _1){return __builtin_lsx_vsat_bu(_1, 1);} -+v8u16 __lsx_vsat_hu(v8u16 _1){return __builtin_lsx_vsat_hu(_1, 1);} -+v4u32 __lsx_vsat_wu(v4u32 _1){return __builtin_lsx_vsat_wu(_1, 1);} -+v2u64 __lsx_vsat_du(v2u64 _1){return __builtin_lsx_vsat_du(_1, 1);} -+v16i8 __lsx_vadda_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vadda_b(_1, _2);} -+v8i16 __lsx_vadda_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vadda_h(_1, _2);} -+v4i32 __lsx_vadda_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vadda_w(_1, _2);} -+v2i64 __lsx_vadda_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vadda_d(_1, _2);} -+v16i8 __lsx_vsadd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsadd_b(_1, _2);} -+v8i16 __lsx_vsadd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsadd_h(_1, _2);} -+v4i32 __lsx_vsadd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsadd_w(_1, _2);} -+v2i64 __lsx_vsadd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsadd_d(_1, _2);} -+v16u8 __lsx_vsadd_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsadd_bu(_1, _2);} -+v8u16 __lsx_vsadd_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsadd_hu(_1, _2);} -+v4u32 __lsx_vsadd_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsadd_wu(_1, _2);} -+v2u64 __lsx_vsadd_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsadd_du(_1, _2);} -+v16i8 __lsx_vavg_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vavg_b(_1, _2);} -+v8i16 __lsx_vavg_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vavg_h(_1, _2);} -+v4i32 __lsx_vavg_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vavg_w(_1, _2);} -+v2i64 __lsx_vavg_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vavg_d(_1, _2);} -+v16u8 __lsx_vavg_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vavg_bu(_1, _2);} -+v8u16 __lsx_vavg_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vavg_hu(_1, _2);} -+v4u32 __lsx_vavg_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vavg_wu(_1, _2);} -+v2u64 __lsx_vavg_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vavg_du(_1, _2);} -+v16i8 __lsx_vavgr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vavgr_b(_1, _2);} -+v8i16 __lsx_vavgr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vavgr_h(_1, _2);} -+v4i32 __lsx_vavgr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vavgr_w(_1, _2);} -+v2i64 __lsx_vavgr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vavgr_d(_1, _2);} -+v16u8 __lsx_vavgr_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vavgr_bu(_1, _2);} -+v8u16 __lsx_vavgr_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vavgr_hu(_1, _2);} -+v4u32 __lsx_vavgr_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vavgr_wu(_1, _2);} -+v2u64 __lsx_vavgr_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vavgr_du(_1, _2);} -+v16i8 __lsx_vssub_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vssub_b(_1, _2);} -+v8i16 __lsx_vssub_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssub_h(_1, _2);} -+v4i32 __lsx_vssub_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssub_w(_1, _2);} -+v2i64 __lsx_vssub_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssub_d(_1, _2);} -+v16u8 __lsx_vssub_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vssub_bu(_1, _2);} -+v8u16 __lsx_vssub_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vssub_hu(_1, _2);} -+v4u32 __lsx_vssub_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vssub_wu(_1, _2);} -+v2u64 __lsx_vssub_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vssub_du(_1, _2);} -+v16i8 __lsx_vabsd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vabsd_b(_1, _2);} -+v8i16 __lsx_vabsd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vabsd_h(_1, _2);} -+v4i32 
__lsx_vabsd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vabsd_w(_1, _2);} -+v2i64 __lsx_vabsd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vabsd_d(_1, _2);} -+v16u8 __lsx_vabsd_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vabsd_bu(_1, _2);} -+v8u16 __lsx_vabsd_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vabsd_hu(_1, _2);} -+v4u32 __lsx_vabsd_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vabsd_wu(_1, _2);} -+v2u64 __lsx_vabsd_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vabsd_du(_1, _2);} -+v16i8 __lsx_vmul_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmul_b(_1, _2);} -+v8i16 __lsx_vmul_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmul_h(_1, _2);} -+v4i32 __lsx_vmul_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmul_w(_1, _2);} -+v2i64 __lsx_vmul_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmul_d(_1, _2);} -+v16i8 __lsx_vmadd_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmadd_b(_1, _2, _3);} -+v8i16 __lsx_vmadd_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmadd_h(_1, _2, _3);} -+v4i32 __lsx_vmadd_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmadd_w(_1, _2, _3);} -+v2i64 __lsx_vmadd_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmadd_d(_1, _2, _3);} -+v16i8 __lsx_vmsub_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmsub_b(_1, _2, _3);} -+v8i16 __lsx_vmsub_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmsub_h(_1, _2, _3);} -+v4i32 __lsx_vmsub_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmsub_w(_1, _2, _3);} -+v2i64 __lsx_vmsub_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmsub_d(_1, _2, _3);} -+v16i8 __lsx_vdiv_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vdiv_b(_1, _2);} -+v8i16 __lsx_vdiv_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vdiv_h(_1, _2);} -+v4i32 __lsx_vdiv_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vdiv_w(_1, _2);} -+v2i64 __lsx_vdiv_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vdiv_d(_1, _2);} -+v16u8 __lsx_vdiv_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vdiv_bu(_1, _2);} -+v8u16 __lsx_vdiv_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vdiv_hu(_1, _2);} -+v4u32 __lsx_vdiv_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vdiv_wu(_1, _2);} -+v2u64 __lsx_vdiv_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vdiv_du(_1, _2);} -+v8i16 __lsx_vhaddw_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vhaddw_h_b(_1, _2);} -+v4i32 __lsx_vhaddw_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vhaddw_w_h(_1, _2);} -+v2i64 __lsx_vhaddw_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vhaddw_d_w(_1, _2);} -+v8u16 __lsx_vhaddw_hu_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vhaddw_hu_bu(_1, _2);} -+v4u32 __lsx_vhaddw_wu_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vhaddw_wu_hu(_1, _2);} -+v2u64 __lsx_vhaddw_du_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vhaddw_du_wu(_1, _2);} -+v8i16 __lsx_vhsubw_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vhsubw_h_b(_1, _2);} -+v4i32 __lsx_vhsubw_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vhsubw_w_h(_1, _2);} -+v2i64 __lsx_vhsubw_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vhsubw_d_w(_1, _2);} -+v8i16 __lsx_vhsubw_hu_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vhsubw_hu_bu(_1, _2);} -+v4i32 __lsx_vhsubw_wu_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vhsubw_wu_hu(_1, _2);} -+v2i64 __lsx_vhsubw_du_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vhsubw_du_wu(_1, _2);} -+v16i8 __lsx_vmod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmod_b(_1, _2);} -+v8i16 __lsx_vmod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmod_h(_1, _2);} -+v4i32 __lsx_vmod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmod_w(_1, _2);} -+v2i64 __lsx_vmod_d(v2i64 _1, v2i64 _2){return 
__builtin_lsx_vmod_d(_1, _2);} -+v16u8 __lsx_vmod_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmod_bu(_1, _2);} -+v8u16 __lsx_vmod_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmod_hu(_1, _2);} -+v4u32 __lsx_vmod_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmod_wu(_1, _2);} -+v2u64 __lsx_vmod_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmod_du(_1, _2);} -+v16i8 __lsx_vreplve_b(v16i8 _1, int _2){return __builtin_lsx_vreplve_b(_1, _2);} -+v8i16 __lsx_vreplve_h(v8i16 _1, int _2){return __builtin_lsx_vreplve_h(_1, _2);} -+v4i32 __lsx_vreplve_w(v4i32 _1, int _2){return __builtin_lsx_vreplve_w(_1, _2);} -+v2i64 __lsx_vreplve_d(v2i64 _1, int _2){return __builtin_lsx_vreplve_d(_1, _2);} -+v16i8 __lsx_vreplvei_b(v16i8 _1){return __builtin_lsx_vreplvei_b(_1, 1);} -+v8i16 __lsx_vreplvei_h(v8i16 _1){return __builtin_lsx_vreplvei_h(_1, 1);} -+v4i32 __lsx_vreplvei_w(v4i32 _1){return __builtin_lsx_vreplvei_w(_1, 1);} -+v2i64 __lsx_vreplvei_d(v2i64 _1){return __builtin_lsx_vreplvei_d(_1, 1);} -+v16i8 __lsx_vpickev_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpickev_b(_1, _2);} -+v8i16 __lsx_vpickev_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpickev_h(_1, _2);} -+v4i32 __lsx_vpickev_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpickev_w(_1, _2);} -+v2i64 __lsx_vpickev_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpickev_d(_1, _2);} -+v16i8 __lsx_vpickod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpickod_b(_1, _2);} -+v8i16 __lsx_vpickod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpickod_h(_1, _2);} -+v4i32 __lsx_vpickod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpickod_w(_1, _2);} -+v2i64 __lsx_vpickod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpickod_d(_1, _2);} -+v16i8 __lsx_vilvh_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vilvh_b(_1, _2);} -+v8i16 __lsx_vilvh_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vilvh_h(_1, _2);} -+v4i32 __lsx_vilvh_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vilvh_w(_1, _2);} -+v2i64 __lsx_vilvh_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vilvh_d(_1, _2);} -+v16i8 __lsx_vilvl_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vilvl_b(_1, _2);} -+v8i16 __lsx_vilvl_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vilvl_h(_1, _2);} -+v4i32 __lsx_vilvl_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vilvl_w(_1, _2);} -+v2i64 __lsx_vilvl_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vilvl_d(_1, _2);} -+v16i8 __lsx_vpackev_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpackev_b(_1, _2);} -+v8i16 __lsx_vpackev_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpackev_h(_1, _2);} -+v4i32 __lsx_vpackev_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpackev_w(_1, _2);} -+v2i64 __lsx_vpackev_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpackev_d(_1, _2);} -+v16i8 __lsx_vpackod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpackod_b(_1, _2);} -+v8i16 __lsx_vpackod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpackod_h(_1, _2);} -+v4i32 __lsx_vpackod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpackod_w(_1, _2);} -+v2i64 __lsx_vpackod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpackod_d(_1, _2);} -+v8i16 __lsx_vshuf_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vshuf_h(_1, _2, _3);} -+v4i32 __lsx_vshuf_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vshuf_w(_1, _2, _3);} -+v2i64 __lsx_vshuf_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vshuf_d(_1, _2, _3);} -+v16u8 __lsx_vand_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vand_v(_1, _2);} -+v16u8 __lsx_vandi_b(v16u8 _1){return __builtin_lsx_vandi_b(_1, 1);} -+v16u8 __lsx_vor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vor_v(_1, _2);} -+v16u8 __lsx_vori_b(v16u8 _1){return 
__builtin_lsx_vori_b(_1, 1);} -+v16u8 __lsx_vnor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vnor_v(_1, _2);} -+v16u8 __lsx_vnori_b(v16u8 _1){return __builtin_lsx_vnori_b(_1, 1);} -+v16u8 __lsx_vxor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vxor_v(_1, _2);} -+v16u8 __lsx_vxori_b(v16u8 _1){return __builtin_lsx_vxori_b(_1, 1);} -+v16u8 __lsx_vbitsel_v(v16u8 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vbitsel_v(_1, _2, _3);} -+v16u8 __lsx_vbitseli_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitseli_b(_1, _2, 1);} -+v16i8 __lsx_vshuf4i_b(v16i8 _1){return __builtin_lsx_vshuf4i_b(_1, 1);} -+v8i16 __lsx_vshuf4i_h(v8i16 _1){return __builtin_lsx_vshuf4i_h(_1, 1);} -+v4i32 __lsx_vshuf4i_w(v4i32 _1){return __builtin_lsx_vshuf4i_w(_1, 1);} -+v16i8 __lsx_vreplgr2vr_b(int _1){return __builtin_lsx_vreplgr2vr_b(_1);} -+v8i16 __lsx_vreplgr2vr_h(int _1){return __builtin_lsx_vreplgr2vr_h(_1);} -+v4i32 __lsx_vreplgr2vr_w(int _1){return __builtin_lsx_vreplgr2vr_w(_1);} -+v2i64 __lsx_vreplgr2vr_d(long _1){return __builtin_lsx_vreplgr2vr_d(_1);} -+v16i8 __lsx_vpcnt_b(v16i8 _1){return __builtin_lsx_vpcnt_b(_1);} -+v8i16 __lsx_vpcnt_h(v8i16 _1){return __builtin_lsx_vpcnt_h(_1);} -+v4i32 __lsx_vpcnt_w(v4i32 _1){return __builtin_lsx_vpcnt_w(_1);} -+v2i64 __lsx_vpcnt_d(v2i64 _1){return __builtin_lsx_vpcnt_d(_1);} -+v16i8 __lsx_vclo_b(v16i8 _1){return __builtin_lsx_vclo_b(_1);} -+v8i16 __lsx_vclo_h(v8i16 _1){return __builtin_lsx_vclo_h(_1);} -+v4i32 __lsx_vclo_w(v4i32 _1){return __builtin_lsx_vclo_w(_1);} -+v2i64 __lsx_vclo_d(v2i64 _1){return __builtin_lsx_vclo_d(_1);} -+v16i8 __lsx_vclz_b(v16i8 _1){return __builtin_lsx_vclz_b(_1);} -+v8i16 __lsx_vclz_h(v8i16 _1){return __builtin_lsx_vclz_h(_1);} -+v4i32 __lsx_vclz_w(v4i32 _1){return __builtin_lsx_vclz_w(_1);} -+v2i64 __lsx_vclz_d(v2i64 _1){return __builtin_lsx_vclz_d(_1);} -+int __lsx_vpickve2gr_b(v16i8 _1){return __builtin_lsx_vpickve2gr_b(_1, 1);} -+int __lsx_vpickve2gr_h(v8i16 _1){return __builtin_lsx_vpickve2gr_h(_1, 1);} -+int __lsx_vpickve2gr_w(v4i32 _1){return __builtin_lsx_vpickve2gr_w(_1, 1);} -+long __lsx_vpickve2gr_d(v2i64 _1){return __builtin_lsx_vpickve2gr_d(_1, 1);} -+unsigned int __lsx_vpickve2gr_bu(v16i8 _1){return __builtin_lsx_vpickve2gr_bu(_1, 1);} -+unsigned int __lsx_vpickve2gr_hu(v8i16 _1){return __builtin_lsx_vpickve2gr_hu(_1, 1);} -+unsigned int __lsx_vpickve2gr_wu(v4i32 _1){return __builtin_lsx_vpickve2gr_wu(_1, 1);} -+unsigned long int __lsx_vpickve2gr_du(v2i64 _1){return __builtin_lsx_vpickve2gr_du(_1, 1);} -+v16i8 __lsx_vinsgr2vr_b(v16i8 _1){return __builtin_lsx_vinsgr2vr_b(_1, 1, 1);} -+v8i16 __lsx_vinsgr2vr_h(v8i16 _1){return __builtin_lsx_vinsgr2vr_h(_1, 1, 1);} -+v4i32 __lsx_vinsgr2vr_w(v4i32 _1){return __builtin_lsx_vinsgr2vr_w(_1, 1, 1);} -+v2i64 __lsx_vinsgr2vr_d(v2i64 _1){return __builtin_lsx_vinsgr2vr_d(_1, 1, 1);} -+v4f32 __lsx_vfadd_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfadd_s(_1, _2);} -+v2f64 __lsx_vfadd_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfadd_d(_1, _2);} -+v4f32 __lsx_vfsub_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfsub_s(_1, _2);} -+v2f64 __lsx_vfsub_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfsub_d(_1, _2);} -+v4f32 __lsx_vfmul_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmul_s(_1, _2);} -+v2f64 __lsx_vfmul_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmul_d(_1, _2);} -+v4f32 __lsx_vfdiv_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfdiv_s(_1, _2);} -+v2f64 __lsx_vfdiv_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfdiv_d(_1, _2);} -+v8i16 __lsx_vfcvt_h_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcvt_h_s(_1, 
_2);} -+v4f32 __lsx_vfcvt_s_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcvt_s_d(_1, _2);} -+v4f32 __lsx_vfmin_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmin_s(_1, _2);} -+v2f64 __lsx_vfmin_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmin_d(_1, _2);} -+v4f32 __lsx_vfmina_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmina_s(_1, _2);} -+v2f64 __lsx_vfmina_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmina_d(_1, _2);} -+v4f32 __lsx_vfmax_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmax_s(_1, _2);} -+v2f64 __lsx_vfmax_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmax_d(_1, _2);} -+v4f32 __lsx_vfmaxa_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmaxa_s(_1, _2);} -+v2f64 __lsx_vfmaxa_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmaxa_d(_1, _2);} -+v4i32 __lsx_vfclass_s(v4f32 _1){return __builtin_lsx_vfclass_s(_1);} -+v2i64 __lsx_vfclass_d(v2f64 _1){return __builtin_lsx_vfclass_d(_1);} -+v4f32 __lsx_vfsqrt_s(v4f32 _1){return __builtin_lsx_vfsqrt_s(_1);} -+v2f64 __lsx_vfsqrt_d(v2f64 _1){return __builtin_lsx_vfsqrt_d(_1);} -+v4f32 __lsx_vfrecip_s(v4f32 _1){return __builtin_lsx_vfrecip_s(_1);} -+v2f64 __lsx_vfrecip_d(v2f64 _1){return __builtin_lsx_vfrecip_d(_1);} -+v4f32 __lsx_vfrint_s(v4f32 _1){return __builtin_lsx_vfrint_s(_1);} -+v2f64 __lsx_vfrint_d(v2f64 _1){return __builtin_lsx_vfrint_d(_1);} -+v4f32 __lsx_vfrsqrt_s(v4f32 _1){return __builtin_lsx_vfrsqrt_s(_1);} -+v2f64 __lsx_vfrsqrt_d(v2f64 _1){return __builtin_lsx_vfrsqrt_d(_1);} -+v4f32 __lsx_vflogb_s(v4f32 _1){return __builtin_lsx_vflogb_s(_1);} -+v2f64 __lsx_vflogb_d(v2f64 _1){return __builtin_lsx_vflogb_d(_1);} -+v4f32 __lsx_vfcvth_s_h(v8i16 _1){return __builtin_lsx_vfcvth_s_h(_1);} -+v2f64 __lsx_vfcvth_d_s(v4f32 _1){return __builtin_lsx_vfcvth_d_s(_1);} -+v4f32 __lsx_vfcvtl_s_h(v8i16 _1){return __builtin_lsx_vfcvtl_s_h(_1);} -+v2f64 __lsx_vfcvtl_d_s(v4f32 _1){return __builtin_lsx_vfcvtl_d_s(_1);} -+v4i32 __lsx_vftint_w_s(v4f32 _1){return __builtin_lsx_vftint_w_s(_1);} -+v2i64 __lsx_vftint_l_d(v2f64 _1){return __builtin_lsx_vftint_l_d(_1);} -+v4u32 __lsx_vftint_wu_s(v4f32 _1){return __builtin_lsx_vftint_wu_s(_1);} -+v2u64 __lsx_vftint_lu_d(v2f64 _1){return __builtin_lsx_vftint_lu_d(_1);} -+v4i32 __lsx_vftintrz_w_s(v4f32 _1){return __builtin_lsx_vftintrz_w_s(_1);} -+v2i64 __lsx_vftintrz_l_d(v2f64 _1){return __builtin_lsx_vftintrz_l_d(_1);} -+v4u32 __lsx_vftintrz_wu_s(v4f32 _1){return __builtin_lsx_vftintrz_wu_s(_1);} -+v2u64 __lsx_vftintrz_lu_d(v2f64 _1){return __builtin_lsx_vftintrz_lu_d(_1);} -+v4f32 __lsx_vffint_s_w(v4i32 _1){return __builtin_lsx_vffint_s_w(_1);} -+v2f64 __lsx_vffint_d_l(v2i64 _1){return __builtin_lsx_vffint_d_l(_1);} -+v4f32 __lsx_vffint_s_wu(v4u32 _1){return __builtin_lsx_vffint_s_wu(_1);} -+v2f64 __lsx_vffint_d_lu(v2u64 _1){return __builtin_lsx_vffint_d_lu(_1);} -+v16u8 __lsx_vandn_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vandn_v(_1, _2);} -+v16i8 __lsx_vneg_b(v16i8 _1){return __builtin_lsx_vneg_b(_1);} -+v8i16 __lsx_vneg_h(v8i16 _1){return __builtin_lsx_vneg_h(_1);} -+v4i32 __lsx_vneg_w(v4i32 _1){return __builtin_lsx_vneg_w(_1);} -+v2i64 __lsx_vneg_d(v2i64 _1){return __builtin_lsx_vneg_d(_1);} -+v16i8 __lsx_vmuh_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmuh_b(_1, _2);} -+v8i16 __lsx_vmuh_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmuh_h(_1, _2);} -+v4i32 __lsx_vmuh_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmuh_w(_1, _2);} -+v2i64 __lsx_vmuh_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmuh_d(_1, _2);} -+v16u8 __lsx_vmuh_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmuh_bu(_1, _2);} -+v8u16 __lsx_vmuh_hu(v8u16 _1, 
v8u16 _2){return __builtin_lsx_vmuh_hu(_1, _2);} -+v4u32 __lsx_vmuh_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmuh_wu(_1, _2);} -+v2u64 __lsx_vmuh_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmuh_du(_1, _2);} -+v8i16 __lsx_vsllwil_h_b(v16i8 _1){return __builtin_lsx_vsllwil_h_b(_1, 1);} -+v4i32 __lsx_vsllwil_w_h(v8i16 _1){return __builtin_lsx_vsllwil_w_h(_1, 1);} -+v2i64 __lsx_vsllwil_d_w(v4i32 _1){return __builtin_lsx_vsllwil_d_w(_1, 1);} -+v8u16 __lsx_vsllwil_hu_bu(v16u8 _1){return __builtin_lsx_vsllwil_hu_bu(_1, 1);} -+v4u32 __lsx_vsllwil_wu_hu(v8u16 _1){return __builtin_lsx_vsllwil_wu_hu(_1, 1);} -+v2u64 __lsx_vsllwil_du_wu(v4u32 _1){return __builtin_lsx_vsllwil_du_wu(_1, 1);} -+v16i8 __lsx_vsran_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsran_b_h(_1, _2);} -+v8i16 __lsx_vsran_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsran_h_w(_1, _2);} -+v4i32 __lsx_vsran_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsran_w_d(_1, _2);} -+v16i8 __lsx_vssran_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssran_b_h(_1, _2);} -+v8i16 __lsx_vssran_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssran_h_w(_1, _2);} -+v4i32 __lsx_vssran_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssran_w_d(_1, _2);} -+v16u8 __lsx_vssran_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssran_bu_h(_1, _2);} -+v8u16 __lsx_vssran_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssran_hu_w(_1, _2);} -+v4u32 __lsx_vssran_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssran_wu_d(_1, _2);} -+v16i8 __lsx_vsrarn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrarn_b_h(_1, _2);} -+v8i16 __lsx_vsrarn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrarn_h_w(_1, _2);} -+v4i32 __lsx_vsrarn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrarn_w_d(_1, _2);} -+v16i8 __lsx_vssrarn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrarn_b_h(_1, _2);} -+v8i16 __lsx_vssrarn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrarn_h_w(_1, _2);} -+v4i32 __lsx_vssrarn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrarn_w_d(_1, _2);} -+v16u8 __lsx_vssrarn_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrarn_bu_h(_1, _2);} -+v8u16 __lsx_vssrarn_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrarn_hu_w(_1, _2);} -+v4u32 __lsx_vssrarn_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrarn_wu_d(_1, _2);} -+v16i8 __lsx_vsrln_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrln_b_h(_1, _2);} -+v8i16 __lsx_vsrln_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrln_h_w(_1, _2);} -+v4i32 __lsx_vsrln_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrln_w_d(_1, _2);} -+v16u8 __lsx_vssrln_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrln_bu_h(_1, _2);} -+v8u16 __lsx_vssrln_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrln_hu_w(_1, _2);} -+v4u32 __lsx_vssrln_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrln_wu_d(_1, _2);} -+v16i8 __lsx_vsrlrn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlrn_b_h(_1, _2);} -+v8i16 __lsx_vsrlrn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlrn_h_w(_1, _2);} -+v4i32 __lsx_vsrlrn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlrn_w_d(_1, _2);} -+v16u8 __lsx_vssrlrn_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrlrn_bu_h(_1, _2);} -+v8u16 __lsx_vssrlrn_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrlrn_hu_w(_1, _2);} -+v4u32 __lsx_vssrlrn_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrlrn_wu_d(_1, _2);} -+v16i8 __lsx_vfrstpi_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vfrstpi_b(_1, _2, 1);} -+v8i16 __lsx_vfrstpi_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vfrstpi_h(_1, _2, 1);} -+v16i8 __lsx_vfrstp_b(v16i8 _1, v16i8 _2, 
v16i8 _3){return __builtin_lsx_vfrstp_b(_1, _2, _3);} -+v8i16 __lsx_vfrstp_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vfrstp_h(_1, _2, _3);} -+v2i64 __lsx_vshuf4i_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vshuf4i_d(_1, _2, 1);} -+v16i8 __lsx_vbsrl_v(v16i8 _1){return __builtin_lsx_vbsrl_v(_1, 1);} -+v16i8 __lsx_vbsll_v(v16i8 _1){return __builtin_lsx_vbsll_v(_1, 1);} -+v16i8 __lsx_vextrins_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vextrins_b(_1, _2, 1);} -+v8i16 __lsx_vextrins_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vextrins_h(_1, _2, 1);} -+v4i32 __lsx_vextrins_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vextrins_w(_1, _2, 1);} -+v2i64 __lsx_vextrins_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vextrins_d(_1, _2, 1);} -+v16i8 __lsx_vmskltz_b(v16i8 _1){return __builtin_lsx_vmskltz_b(_1);} -+v8i16 __lsx_vmskltz_h(v8i16 _1){return __builtin_lsx_vmskltz_h(_1);} -+v4i32 __lsx_vmskltz_w(v4i32 _1){return __builtin_lsx_vmskltz_w(_1);} -+v2i64 __lsx_vmskltz_d(v2i64 _1){return __builtin_lsx_vmskltz_d(_1);} -+v16i8 __lsx_vsigncov_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsigncov_b(_1, _2);} -+v8i16 __lsx_vsigncov_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsigncov_h(_1, _2);} -+v4i32 __lsx_vsigncov_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsigncov_w(_1, _2);} -+v2i64 __lsx_vsigncov_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsigncov_d(_1, _2);} -+v4f32 __lsx_vfmadd_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfmadd_s(_1, _2, _3);} -+v2f64 __lsx_vfmadd_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfmadd_d(_1, _2, _3);} -+v4f32 __lsx_vfmsub_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfmsub_s(_1, _2, _3);} -+v2f64 __lsx_vfmsub_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfmsub_d(_1, _2, _3);} -+v4f32 __lsx_vfnmadd_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfnmadd_s(_1, _2, _3);} -+v2f64 __lsx_vfnmadd_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfnmadd_d(_1, _2, _3);} -+v4f32 __lsx_vfnmsub_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfnmsub_s(_1, _2, _3);} -+v2f64 __lsx_vfnmsub_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfnmsub_d(_1, _2, _3);} -+v4i32 __lsx_vftintrne_w_s(v4f32 _1){return __builtin_lsx_vftintrne_w_s(_1);} -+v2i64 __lsx_vftintrne_l_d(v2f64 _1){return __builtin_lsx_vftintrne_l_d(_1);} -+v4i32 __lsx_vftintrp_w_s(v4f32 _1){return __builtin_lsx_vftintrp_w_s(_1);} -+v2i64 __lsx_vftintrp_l_d(v2f64 _1){return __builtin_lsx_vftintrp_l_d(_1);} -+v4i32 __lsx_vftintrm_w_s(v4f32 _1){return __builtin_lsx_vftintrm_w_s(_1);} -+v2i64 __lsx_vftintrm_l_d(v2f64 _1){return __builtin_lsx_vftintrm_l_d(_1);} -+v4i32 __lsx_vftint_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftint_w_d(_1, _2);} -+v4f32 __lsx_vffint_s_l(v2i64 _1, v2i64 _2){return __builtin_lsx_vffint_s_l(_1, _2);} -+v4i32 __lsx_vftintrz_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrz_w_d(_1, _2);} -+v4i32 __lsx_vftintrp_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrp_w_d(_1, _2);} -+v4i32 __lsx_vftintrm_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrm_w_d(_1, _2);} -+v4i32 __lsx_vftintrne_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrne_w_d(_1, _2);} -+v2i64 __lsx_vftintl_l_s(v4f32 _1){return __builtin_lsx_vftintl_l_s(_1);} -+v2i64 __lsx_vftinth_l_s(v4f32 _1){return __builtin_lsx_vftinth_l_s(_1);} -+v2f64 __lsx_vffinth_d_w(v4i32 _1){return __builtin_lsx_vffinth_d_w(_1);} -+v2f64 __lsx_vffintl_d_w(v4i32 _1){return __builtin_lsx_vffintl_d_w(_1);} -+v2i64 __lsx_vftintrzl_l_s(v4f32 _1){return __builtin_lsx_vftintrzl_l_s(_1);} -+v2i64 
__lsx_vftintrzh_l_s(v4f32 _1){return __builtin_lsx_vftintrzh_l_s(_1);} -+v2i64 __lsx_vftintrpl_l_s(v4f32 _1){return __builtin_lsx_vftintrpl_l_s(_1);} -+v2i64 __lsx_vftintrph_l_s(v4f32 _1){return __builtin_lsx_vftintrph_l_s(_1);} -+v2i64 __lsx_vftintrml_l_s(v4f32 _1){return __builtin_lsx_vftintrml_l_s(_1);} -+v2i64 __lsx_vftintrmh_l_s(v4f32 _1){return __builtin_lsx_vftintrmh_l_s(_1);} -+v2i64 __lsx_vftintrnel_l_s(v4f32 _1){return __builtin_lsx_vftintrnel_l_s(_1);} -+v2i64 __lsx_vftintrneh_l_s(v4f32 _1){return __builtin_lsx_vftintrneh_l_s(_1);} -+v4i32 __lsx_vfrintrne_s(v4f32 _1){return __builtin_lsx_vfrintrne_s(_1);} -+v2i64 __lsx_vfrintrne_d(v2f64 _1){return __builtin_lsx_vfrintrne_d(_1);} -+v4i32 __lsx_vfrintrz_s(v4f32 _1){return __builtin_lsx_vfrintrz_s(_1);} -+v2i64 __lsx_vfrintrz_d(v2f64 _1){return __builtin_lsx_vfrintrz_d(_1);} -+v4i32 __lsx_vfrintrp_s(v4f32 _1){return __builtin_lsx_vfrintrp_s(_1);} -+v2i64 __lsx_vfrintrp_d(v2f64 _1){return __builtin_lsx_vfrintrp_d(_1);} -+v4i32 __lsx_vfrintrm_s(v4f32 _1){return __builtin_lsx_vfrintrm_s(_1);} -+v2i64 __lsx_vfrintrm_d(v2f64 _1){return __builtin_lsx_vfrintrm_d(_1);} -+void __lsx_vstelm_b(v16i8 _1, void * _2){return __builtin_lsx_vstelm_b(_1, _2, 1, 1);} -+void __lsx_vstelm_h(v8i16 _1, void * _2){return __builtin_lsx_vstelm_h(_1, _2, 2, 1);} -+void __lsx_vstelm_w(v4i32 _1, void * _2){return __builtin_lsx_vstelm_w(_1, _2, 4, 1);} -+void __lsx_vstelm_d(v2i64 _1, void * _2){return __builtin_lsx_vstelm_d(_1, _2, 8, 1);} -+v2i64 __lsx_vaddwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vaddwev_d_w(_1, _2);} -+v4i32 __lsx_vaddwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vaddwev_w_h(_1, _2);} -+v8i16 __lsx_vaddwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vaddwev_h_b(_1, _2);} -+v2i64 __lsx_vaddwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vaddwod_d_w(_1, _2);} -+v4i32 __lsx_vaddwod_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vaddwod_w_h(_1, _2);} -+v8i16 __lsx_vaddwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vaddwod_h_b(_1, _2);} -+v2i64 __lsx_vaddwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vaddwev_d_wu(_1, _2);} -+v4i32 __lsx_vaddwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vaddwev_w_hu(_1, _2);} -+v8i16 __lsx_vaddwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vaddwev_h_bu(_1, _2);} -+v2i64 __lsx_vaddwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vaddwod_d_wu(_1, _2);} -+v4i32 __lsx_vaddwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vaddwod_w_hu(_1, _2);} -+v8i16 __lsx_vaddwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vaddwod_h_bu(_1, _2);} -+v2i64 __lsx_vaddwev_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vaddwev_d_wu_w(_1, _2);} -+v4i32 __lsx_vaddwev_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vaddwev_w_hu_h(_1, _2);} -+v8i16 __lsx_vaddwev_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vaddwev_h_bu_b(_1, _2);} -+v2i64 __lsx_vaddwod_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vaddwod_d_wu_w(_1, _2);} -+v4i32 __lsx_vaddwod_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vaddwod_w_hu_h(_1, _2);} -+v8i16 __lsx_vaddwod_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vaddwod_h_bu_b(_1, _2);} -+v2i64 __lsx_vsubwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsubwev_d_w(_1, _2);} -+v4i32 __lsx_vsubwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsubwev_w_h(_1, _2);} -+v8i16 __lsx_vsubwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsubwev_h_b(_1, _2);} -+v2i64 __lsx_vsubwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsubwod_d_w(_1, _2);} -+v4i32 __lsx_vsubwod_w_h(v8i16 _1, v8i16 _2){return 
__builtin_lsx_vsubwod_w_h(_1, _2);} -+v8i16 __lsx_vsubwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsubwod_h_b(_1, _2);} -+v2i64 __lsx_vsubwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsubwev_d_wu(_1, _2);} -+v4i32 __lsx_vsubwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsubwev_w_hu(_1, _2);} -+v8i16 __lsx_vsubwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsubwev_h_bu(_1, _2);} -+v2i64 __lsx_vsubwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsubwod_d_wu(_1, _2);} -+v4i32 __lsx_vsubwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsubwod_w_hu(_1, _2);} -+v8i16 __lsx_vsubwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsubwod_h_bu(_1, _2);} -+v2i64 __lsx_vaddwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vaddwev_q_d(_1, _2);} -+v2i64 __lsx_vaddwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vaddwod_q_d(_1, _2);} -+v2i64 __lsx_vaddwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vaddwev_q_du(_1, _2);} -+v2i64 __lsx_vaddwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vaddwod_q_du(_1, _2);} -+v2i64 __lsx_vsubwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsubwev_q_d(_1, _2);} -+v2i64 __lsx_vsubwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsubwod_q_d(_1, _2);} -+v2i64 __lsx_vsubwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsubwev_q_du(_1, _2);} -+v2i64 __lsx_vsubwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsubwod_q_du(_1, _2);} -+v2i64 __lsx_vaddwev_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vaddwev_q_du_d(_1, _2);} -+v2i64 __lsx_vaddwod_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vaddwod_q_du_d(_1, _2);} -+v2i64 __lsx_vmulwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmulwev_d_w(_1, _2);} -+v4i32 __lsx_vmulwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmulwev_w_h(_1, _2);} -+v8i16 __lsx_vmulwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmulwev_h_b(_1, _2);} -+v2i64 __lsx_vmulwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmulwod_d_w(_1, _2);} -+v4i32 __lsx_vmulwod_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmulwod_w_h(_1, _2);} -+v8i16 __lsx_vmulwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmulwod_h_b(_1, _2);} -+v2i64 __lsx_vmulwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmulwev_d_wu(_1, _2);} -+v4i32 __lsx_vmulwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmulwev_w_hu(_1, _2);} -+v8i16 __lsx_vmulwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmulwev_h_bu(_1, _2);} -+v2i64 __lsx_vmulwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmulwod_d_wu(_1, _2);} -+v4i32 __lsx_vmulwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmulwod_w_hu(_1, _2);} -+v8i16 __lsx_vmulwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmulwod_h_bu(_1, _2);} -+v2i64 __lsx_vmulwev_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vmulwev_d_wu_w(_1, _2);} -+v4i32 __lsx_vmulwev_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vmulwev_w_hu_h(_1, _2);} -+v8i16 __lsx_vmulwev_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vmulwev_h_bu_b(_1, _2);} -+v2i64 __lsx_vmulwod_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vmulwod_d_wu_w(_1, _2);} -+v4i32 __lsx_vmulwod_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vmulwod_w_hu_h(_1, _2);} -+v8i16 __lsx_vmulwod_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vmulwod_h_bu_b(_1, _2);} -+v2i64 __lsx_vmulwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmulwev_q_d(_1, _2);} -+v2i64 __lsx_vmulwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmulwod_q_d(_1, _2);} -+v2i64 __lsx_vmulwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmulwev_q_du(_1, _2);} -+v2i64 __lsx_vmulwod_q_du(v2u64 _1, v2u64 _2){return 
__builtin_lsx_vmulwod_q_du(_1, _2);} -+v2i64 __lsx_vmulwev_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vmulwev_q_du_d(_1, _2);} -+v2i64 __lsx_vmulwod_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vmulwod_q_du_d(_1, _2);} -+v2i64 __lsx_vhaddw_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vhaddw_q_d(_1, _2);} -+v2u64 __lsx_vhaddw_qu_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vhaddw_qu_du(_1, _2);} -+v2i64 __lsx_vhsubw_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vhsubw_q_d(_1, _2);} -+v2u64 __lsx_vhsubw_qu_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vhsubw_qu_du(_1, _2);} -+v2i64 __lsx_vmaddwev_d_w(v2i64 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmaddwev_d_w(_1, _2, _3);} -+v4i32 __lsx_vmaddwev_w_h(v4i32 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmaddwev_w_h(_1, _2, _3);} -+v8i16 __lsx_vmaddwev_h_b(v8i16 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmaddwev_h_b(_1, _2, _3);} -+v2u64 __lsx_vmaddwev_d_wu(v2u64 _1, v4u32 _2, v4u32 _3){return __builtin_lsx_vmaddwev_d_wu(_1, _2, _3);} -+v4u32 __lsx_vmaddwev_w_hu(v4u32 _1, v8u16 _2, v8u16 _3){return __builtin_lsx_vmaddwev_w_hu(_1, _2, _3);} -+v8u16 __lsx_vmaddwev_h_bu(v8u16 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vmaddwev_h_bu(_1, _2, _3);} -+v2i64 __lsx_vmaddwod_d_w(v2i64 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmaddwod_d_w(_1, _2, _3);} -+v4i32 __lsx_vmaddwod_w_h(v4i32 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmaddwod_w_h(_1, _2, _3);} -+v8i16 __lsx_vmaddwod_h_b(v8i16 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmaddwod_h_b(_1, _2, _3);} -+v2u64 __lsx_vmaddwod_d_wu(v2u64 _1, v4u32 _2, v4u32 _3){return __builtin_lsx_vmaddwod_d_wu(_1, _2, _3);} -+v4u32 __lsx_vmaddwod_w_hu(v4u32 _1, v8u16 _2, v8u16 _3){return __builtin_lsx_vmaddwod_w_hu(_1, _2, _3);} -+v8u16 __lsx_vmaddwod_h_bu(v8u16 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vmaddwod_h_bu(_1, _2, _3);} -+v2i64 __lsx_vmaddwev_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3){return __builtin_lsx_vmaddwev_d_wu_w(_1, _2, _3);} -+v4i32 __lsx_vmaddwev_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3){return __builtin_lsx_vmaddwev_w_hu_h(_1, _2, _3);} -+v8i16 __lsx_vmaddwev_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3){return __builtin_lsx_vmaddwev_h_bu_b(_1, _2, _3);} -+v2i64 __lsx_vmaddwod_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3){return __builtin_lsx_vmaddwod_d_wu_w(_1, _2, _3);} -+v4i32 __lsx_vmaddwod_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3){return __builtin_lsx_vmaddwod_w_hu_h(_1, _2, _3);} -+v8i16 __lsx_vmaddwod_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3){return __builtin_lsx_vmaddwod_h_bu_b(_1, _2, _3);} -+v2i64 __lsx_vmaddwev_q_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmaddwev_q_d(_1, _2, _3);} -+v2i64 __lsx_vmaddwod_q_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmaddwod_q_d(_1, _2, _3);} -+v2u64 __lsx_vmaddwev_q_du(v2u64 _1, v2u64 _2, v2u64 _3){return __builtin_lsx_vmaddwev_q_du(_1, _2, _3);} -+v2u64 __lsx_vmaddwod_q_du(v2u64 _1, v2u64 _2, v2u64 _3){return __builtin_lsx_vmaddwod_q_du(_1, _2, _3);} -+v2i64 __lsx_vmaddwev_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3){return __builtin_lsx_vmaddwev_q_du_d(_1, _2, _3);} -+v2i64 __lsx_vmaddwod_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3){return __builtin_lsx_vmaddwod_q_du_d(_1, _2, _3);} -+v16i8 __lsx_vrotr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vrotr_b(_1, _2);} -+v8i16 __lsx_vrotr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vrotr_h(_1, _2);} -+v4i32 __lsx_vrotr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vrotr_w(_1, _2);} -+v2i64 __lsx_vrotr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vrotr_d(_1, _2);} -+v2i64 __lsx_vadd_q(v2i64 _1, v2i64 _2){return 
__builtin_lsx_vadd_q(_1, _2);} -+v2i64 __lsx_vsub_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsub_q(_1, _2);} -+v16i8 __lsx_vldrepl_b(void * _1){return __builtin_lsx_vldrepl_b(_1, 1);} -+v8i16 __lsx_vldrepl_h(void * _1){return __builtin_lsx_vldrepl_h(_1, 2);} -+v4i32 __lsx_vldrepl_w(void * _1){return __builtin_lsx_vldrepl_w(_1, 4);} -+v2i64 __lsx_vldrepl_d(void * _1){return __builtin_lsx_vldrepl_d(_1, 8);} -+v16i8 __lsx_vmskgez_b(v16i8 _1){return __builtin_lsx_vmskgez_b(_1);} -+v16i8 __lsx_vmsknz_b(v16i8 _1){return __builtin_lsx_vmsknz_b(_1);} -+v8i16 __lsx_vexth_h_b(v16i8 _1){return __builtin_lsx_vexth_h_b(_1);} -+v4i32 __lsx_vexth_w_h(v8i16 _1){return __builtin_lsx_vexth_w_h(_1);} -+v2i64 __lsx_vexth_d_w(v4i32 _1){return __builtin_lsx_vexth_d_w(_1);} -+v2i64 __lsx_vexth_q_d(v2i64 _1){return __builtin_lsx_vexth_q_d(_1);} -+v8u16 __lsx_vexth_hu_bu(v16u8 _1){return __builtin_lsx_vexth_hu_bu(_1);} -+v4u32 __lsx_vexth_wu_hu(v8u16 _1){return __builtin_lsx_vexth_wu_hu(_1);} -+v2u64 __lsx_vexth_du_wu(v4u32 _1){return __builtin_lsx_vexth_du_wu(_1);} -+v2u64 __lsx_vexth_qu_du(v2u64 _1){return __builtin_lsx_vexth_qu_du(_1);} -+v16i8 __lsx_vrotri_b(v16i8 _1){return __builtin_lsx_vrotri_b(_1, 1);} -+v8i16 __lsx_vrotri_h(v8i16 _1){return __builtin_lsx_vrotri_h(_1, 1);} -+v4i32 __lsx_vrotri_w(v4i32 _1){return __builtin_lsx_vrotri_w(_1, 1);} -+v2i64 __lsx_vrotri_d(v2i64 _1){return __builtin_lsx_vrotri_d(_1, 1);} -+v2i64 __lsx_vextl_q_d(v2i64 _1){return __builtin_lsx_vextl_q_d(_1);} -+v16i8 __lsx_vsrlni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlni_b_h(_1, _2, 1);} -+v8i16 __lsx_vsrlni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlni_h_w(_1, _2, 1);} -+v4i32 __lsx_vsrlni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlni_w_d(_1, _2, 1);} -+v2i64 __lsx_vsrlni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlni_d_q(_1, _2, 1);} -+v16i8 __lsx_vsrlrni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlrni_b_h(_1, _2, 1);} -+v8i16 __lsx_vsrlrni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlrni_h_w(_1, _2, 1);} -+v4i32 __lsx_vsrlrni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlrni_w_d(_1, _2, 1);} -+v2i64 __lsx_vsrlrni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlrni_d_q(_1, _2, 1);} -+v16i8 __lsx_vssrlni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrlni_b_h(_1, _2, 1);} -+v8i16 __lsx_vssrlni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlni_h_w(_1, _2, 1);} -+v4i32 __lsx_vssrlni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlni_w_d(_1, _2, 1);} -+v2i64 __lsx_vssrlni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlni_d_q(_1, _2, 1);} -+v16u8 __lsx_vssrlni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrlni_bu_h(_1, _2, 1);} -+v8u16 __lsx_vssrlni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrlni_hu_w(_1, _2, 1);} -+v4u32 __lsx_vssrlni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrlni_wu_d(_1, _2, 1);} -+v2u64 __lsx_vssrlni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrlni_du_q(_1, _2, 1);} -+v16i8 __lsx_vssrlrni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrlrni_b_h(_1, _2, 1);} -+v8i16 __lsx_vssrlrni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlrni_h_w(_1, _2, 1);} -+v4i32 __lsx_vssrlrni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlrni_w_d(_1, _2, 1);} -+v2i64 __lsx_vssrlrni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlrni_d_q(_1, _2, 1);} -+v16u8 __lsx_vssrlrni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrlrni_bu_h(_1, _2, 1);} -+v8u16 __lsx_vssrlrni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrlrni_hu_w(_1, _2, 1);} -+v4u32 
__lsx_vssrlrni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrlrni_wu_d(_1, _2, 1);} -+v2u64 __lsx_vssrlrni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrlrni_du_q(_1, _2, 1);} -+v16i8 __lsx_vsrani_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrani_b_h(_1, _2, 1);} -+v8i16 __lsx_vsrani_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrani_h_w(_1, _2, 1);} -+v4i32 __lsx_vsrani_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrani_w_d(_1, _2, 1);} -+v2i64 __lsx_vsrani_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrani_d_q(_1, _2, 1);} -+v16i8 __lsx_vsrarni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrarni_b_h(_1, _2, 1);} -+v8i16 __lsx_vsrarni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrarni_h_w(_1, _2, 1);} -+v4i32 __lsx_vsrarni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrarni_w_d(_1, _2, 1);} -+v2i64 __lsx_vsrarni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrarni_d_q(_1, _2, 1);} -+v16i8 __lsx_vssrani_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrani_b_h(_1, _2, 1);} -+v8i16 __lsx_vssrani_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrani_h_w(_1, _2, 1);} -+v4i32 __lsx_vssrani_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrani_w_d(_1, _2, 1);} -+v2i64 __lsx_vssrani_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrani_d_q(_1, _2, 1);} -+v16u8 __lsx_vssrani_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrani_bu_h(_1, _2, 1);} -+v8u16 __lsx_vssrani_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrani_hu_w(_1, _2, 1);} -+v4u32 __lsx_vssrani_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrani_wu_d(_1, _2, 1);} -+v2u64 __lsx_vssrani_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrani_du_q(_1, _2, 1);} -+v16i8 __lsx_vssrarni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrarni_b_h(_1, _2, 1);} -+v8i16 __lsx_vssrarni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrarni_h_w(_1, _2, 1);} -+v4i32 __lsx_vssrarni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrarni_w_d(_1, _2, 1);} -+v2i64 __lsx_vssrarni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrarni_d_q(_1, _2, 1);} -+v16u8 __lsx_vssrarni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrarni_bu_h(_1, _2, 1);} -+v8u16 __lsx_vssrarni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrarni_hu_w(_1, _2, 1);} -+v4u32 __lsx_vssrarni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrarni_wu_d(_1, _2, 1);} -+v2u64 __lsx_vssrarni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrarni_du_q(_1, _2, 1);} -+v4i32 __lsx_vpermi_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpermi_w(_1, _2, 1);} -+v16i8 __lsx_vld(void * _1){return __builtin_lsx_vld(_1, 1);} -+void __lsx_vst(v16i8 _1, void * _2){return __builtin_lsx_vst(_1, _2, 1);} -+v16i8 __lsx_vssrlrn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlrn_b_h(_1, _2);} -+v8i16 __lsx_vssrlrn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlrn_h_w(_1, _2);} -+v4i32 __lsx_vssrlrn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlrn_w_d(_1, _2);} -+v16i8 __lsx_vssrln_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrln_b_h(_1, _2);} -+v8i16 __lsx_vssrln_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrln_h_w(_1, _2);} -+v4i32 __lsx_vssrln_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrln_w_d(_1, _2);} -+v16i8 __lsx_vorn_v(v16i8 _1, v16i8 _2){return __builtin_lsx_vorn_v(_1, _2);} -+v2i64 __lsx_vldi(){return __builtin_lsx_vldi(1);} -+v16i8 __lsx_vshuf_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vshuf_b(_1, _2, _3);} -+v16i8 __lsx_vldx(void * _1){return __builtin_lsx_vldx(_1, 1);} -+void __lsx_vstx(v16i8 _1, void * _2){return __builtin_lsx_vstx(_1, _2, 1);} -+v2u64 
__lsx_vextl_qu_du(v2u64 _1){return __builtin_lsx_vextl_qu_du(_1);} -+int __lsx_bnz_b(v16u8 _1){return __builtin_lsx_bnz_b(_1);} -+int __lsx_bnz_d(v2u64 _1){return __builtin_lsx_bnz_d(_1);} -+int __lsx_bnz_h(v8u16 _1){return __builtin_lsx_bnz_h(_1);} -+int __lsx_bnz_v(v16u8 _1){return __builtin_lsx_bnz_v(_1);} -+int __lsx_bnz_w(v4u32 _1){return __builtin_lsx_bnz_w(_1);} -+int __lsx_bz_b(v16u8 _1){return __builtin_lsx_bz_b(_1);} -+int __lsx_bz_d(v2u64 _1){return __builtin_lsx_bz_d(_1);} -+int __lsx_bz_h(v8u16 _1){return __builtin_lsx_bz_h(_1);} -+int __lsx_bz_v(v16u8 _1){return __builtin_lsx_bz_v(_1);} -+int __lsx_bz_w(v4u32 _1){return __builtin_lsx_bz_w(_1);} -+v2i64 __lsx_vfcmp_caf_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_caf_d(_1, _2);} -+v4i32 __lsx_vfcmp_caf_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_caf_s(_1, _2);} -+v2i64 __lsx_vfcmp_ceq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_ceq_d(_1, _2);} -+v4i32 __lsx_vfcmp_ceq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_ceq_s(_1, _2);} -+v2i64 __lsx_vfcmp_cle_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cle_d(_1, _2);} -+v4i32 __lsx_vfcmp_cle_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cle_s(_1, _2);} -+v2i64 __lsx_vfcmp_clt_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_clt_d(_1, _2);} -+v4i32 __lsx_vfcmp_clt_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_clt_s(_1, _2);} -+v2i64 __lsx_vfcmp_cne_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cne_d(_1, _2);} -+v4i32 __lsx_vfcmp_cne_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cne_s(_1, _2);} -+v2i64 __lsx_vfcmp_cor_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cor_d(_1, _2);} -+v4i32 __lsx_vfcmp_cor_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cor_s(_1, _2);} -+v2i64 __lsx_vfcmp_cueq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cueq_d(_1, _2);} -+v4i32 __lsx_vfcmp_cueq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cueq_s(_1, _2);} -+v2i64 __lsx_vfcmp_cule_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cule_d(_1, _2);} -+v4i32 __lsx_vfcmp_cule_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cule_s(_1, _2);} -+v2i64 __lsx_vfcmp_cult_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cult_d(_1, _2);} -+v4i32 __lsx_vfcmp_cult_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cult_s(_1, _2);} -+v2i64 __lsx_vfcmp_cun_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cun_d(_1, _2);} -+v2i64 __lsx_vfcmp_cune_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cune_d(_1, _2);} -+v4i32 __lsx_vfcmp_cune_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cune_s(_1, _2);} -+v4i32 __lsx_vfcmp_cun_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cun_s(_1, _2);} -+v2i64 __lsx_vfcmp_saf_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_saf_d(_1, _2);} -+v4i32 __lsx_vfcmp_saf_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_saf_s(_1, _2);} -+v2i64 __lsx_vfcmp_seq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_seq_d(_1, _2);} -+v4i32 __lsx_vfcmp_seq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_seq_s(_1, _2);} -+v2i64 __lsx_vfcmp_sle_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sle_d(_1, _2);} -+v4i32 __lsx_vfcmp_sle_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sle_s(_1, _2);} -+v2i64 __lsx_vfcmp_slt_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_slt_d(_1, _2);} -+v4i32 __lsx_vfcmp_slt_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_slt_s(_1, _2);} -+v2i64 __lsx_vfcmp_sne_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sne_d(_1, _2);} -+v4i32 __lsx_vfcmp_sne_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sne_s(_1, _2);} -+v2i64 
__lsx_vfcmp_sor_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sor_d(_1, _2);}
-+v4i32 __lsx_vfcmp_sor_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sor_s(_1, _2);}
-+v2i64 __lsx_vfcmp_sueq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sueq_d(_1, _2);}
-+v4i32 __lsx_vfcmp_sueq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sueq_s(_1, _2);}
-+v2i64 __lsx_vfcmp_sule_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sule_d(_1, _2);}
-+v4i32 __lsx_vfcmp_sule_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sule_s(_1, _2);}
-+v2i64 __lsx_vfcmp_sult_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sult_d(_1, _2);}
-+v4i32 __lsx_vfcmp_sult_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sult_s(_1, _2);}
-+v2i64 __lsx_vfcmp_sun_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sun_d(_1, _2);}
-+v2i64 __lsx_vfcmp_sune_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sune_d(_1, _2);}
-+v4i32 __lsx_vfcmp_sune_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sune_s(_1, _2);}
-+v4i32 __lsx_vfcmp_sun_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sun_s(_1, _2);}
-+v16i8 __lsx_vrepli_b(){return __builtin_lsx_vrepli_b(1);}
-+v2i64 __lsx_vrepli_d(){return __builtin_lsx_vrepli_d(1);}
-+v8i16 __lsx_vrepli_h(){return __builtin_lsx_vrepli_h(1);}
-+v4i32 __lsx_vrepli_w(){return __builtin_lsx_vrepli_w(1);}
-diff --git a/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c b/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c
-new file mode 100644
-index 000000000..2e04b99e3
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c
-@@ -0,0 +1,16 @@
-+/* Test asm const. */
-+/* { dg-do compile } */
-+/* { dg-final { scan-assembler-times "foo:.*\\.long 1061109567.*\\.long 52" 1 } } */
-+int foo ()
-+{
-+  __asm__ volatile (
-+     "foo:"
-+     "\n\t"
-+     ".long %a0\n\t"
-+     ".long %a1\n\t"
-+     :
-+     :"i"(0x3f3f3f3f), "i"(52)
-+     :
-+  );
-+}
-+
-diff --git a/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c b/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c
-index baed48cf5..6e22c0e11 100644
---- a/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c
-+++ b/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c
-@@ -4,11 +4,11 @@
- /* loongson.h does not handle or check for MIPS16ness.  There doesn't
-    seem any good reason for it to, given that the Loongson processors
-    do not support MIPS16.  */
--/* { dg-options "isa=loongson -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */
-+/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */
- /* See PR 52155.  */
--/* { dg-options "isa=loongson -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */
-+/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */
-
--#include "loongson.h"
-+#include "loongson-mmiintrin.h"
- #include <assert.h>
-
- typedef union { int32x2_t v; int32_t a[2]; } int32x2_encap_t;
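The tst-asm-const.c test above pins down one property of inline-asm operand printing: with an "i" (immediate) constraint, the "%a" operand modifier makes GCC emit the constant as a bare value, so the two .long directives assemble to the literal words 1061109567 (0x3f3f3f3f) and 52, which is exactly what the dg-final scan-assembler-times pattern matches. A rough user-level sketch of the same idiom outside the harness (the function name is illustrative, not from the patch):

/* Embed two compile-time constants directly in the instruction stream.
   "i" demands an immediate operand; "%a0"/"%a1" print it without any
   immediate-syntax decoration, so it can follow a .long directive.  */
void emit_words (void)
{
  __asm__ volatile (".long %a0\n\t"
                    ".long %a1\n\t"
                    : /* no outputs */
                    : "i" (0x3f3f3f3f), "i" (52));
}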
-diff --git a/gcc/testsuite/gcc.target/mips/loongson-simd.c b/gcc/testsuite/gcc.target/mips/loongson-simd.c
-index f263b4393..34fdcecc6 100644
---- a/gcc/testsuite/gcc.target/mips/loongson-simd.c
-+++ b/gcc/testsuite/gcc.target/mips/loongson-simd.c
-@@ -26,9 +26,9 @@ along with GCC; see the file COPYING3.  If not see
-    because inclusion of some system headers e.g. stdint.h will fail due to not
-    finding stubs-o32_hard.h.  */
- /* { dg-require-effective-target mips_nanlegacy } */
--/* { dg-options "isa=loongson -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */
-+/* { dg-options "-mloongson-mmi -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */
-
--#include "loongson.h"
-+#include "loongson-mmiintrin.h"
- #include <stdio.h>
- #include <stdint.h>
- #include <assert.h>
-diff --git a/gcc/testsuite/gcc.target/mips/mips.exp b/gcc/testsuite/gcc.target/mips/mips.exp
-index 9db4fbe29..975c51f82 100644
---- a/gcc/testsuite/gcc.target/mips/mips.exp
-+++ b/gcc/testsuite/gcc.target/mips/mips.exp
-@@ -281,6 +281,7 @@ foreach option {
-     fix-r4000
-     fix-r10000
-     fix-vr4130
-+    fix-loongson3-llsc
-     gpopt
-     local-sdata
-     long-calls
-@@ -296,6 +297,9 @@
-     mcount-ra-address
-     odd-spreg
-     msa
-+    loongson-mmi
-+    loongson-ext
-+    loongson-ext2
- } {
-     lappend mips_option_groups $option "-m(no-|)$option"
- }
-@@ -883,6 +887,12 @@ proc mips-dg-init {} {
- 	    "-mno-msa"
- 	#endif
-
-+	#ifdef __mips_loongson_mmi
-+	    "-mloongson-mmi"
-+	#else
-+	    "-mno-loongson-mmi"
-+	#endif
-+
- 	0
-     };
- } 0]
-@@ -1046,6 +1056,19 @@ proc mips-dg-options { args } {
-     mips_option_dependency options "-mabicalls" "-G0"
-     mips_option_dependency options "-mno-gpopt" "-mexplicit-relocs"
-
-+    if { [check_configured_with "with-arch=loongson3a"]
-+	 || [check_configured_with "with-arch=gs464"]
-+	 || [check_configured_with "with-arch=gs464e"]
-+	 || [check_configured_with "with-arch=gs264e"] } {
-+	mips_option_dependency options "-msoft-float" "-mno-loongson-mmi"
-+	mips_option_dependency options "-mips16" "-mno-loongson-mmi"
-+	mips_option_dependency options "-mips16" "-mno-loongson-ext"
-+	mips_option_dependency options "-mips16" "-mno-loongson-ext2"
-+	mips_option_dependency options "-mmicromips" "-mno-loongson-mmi"
-+	mips_option_dependency options "-mmicromips" "-mno-loongson-ext"
-+	mips_option_dependency options "-mmicromips" "-mno-loongson-ext2"
-+    }
-+
-     # Work out information about the current ABI.
-     set abi_test_option_p [mips_test_option_p options abi]
-     set abi [mips_option options abi]
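The mips.exp hunks above do two things: the foreach loops register -mloongson-mmi, -mloongson-ext and -mloongson-ext2 (plus -mfix-loongson3-llsc) as DejaGnu option groups, and mips-dg-init records whether the current multilib already enables MMI by reading back the compiler's predefined macro, so that mips-dg-options can add or suppress the flags consistently. The same predefined macro is what user code keys on; a minimal compile-time dispatch sketch, assuming only the macro and header named in the patch (the function itself is illustrative):

#ifdef __mips_loongson_mmi
#include <loongson-mmiintrin.h>   /* MMI intrinsics, per the renamed header.  */
#endif

int have_mmi_support (void)
{
#ifdef __mips_loongson_mmi
  return 1;   /* Compiled with -mloongson-mmi (or a default that enables it).  */
#else
  return 0;
#endif
}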
-diff --git a/gcc/testsuite/go.test/go-test.exp b/gcc/testsuite/go.test/go-test.exp
-index cab0d0e2f..72d7434be 100644
---- a/gcc/testsuite/go.test/go-test.exp
-+++ b/gcc/testsuite/go.test/go-test.exp
-@@ -249,6 +249,9 @@ proc go-set-goarch { } {
- 	"riscv64-*-*" {
- 	    set goarch "riscv64"
- 	}
-+	"loongarch64-*-*" {
-+	    set goarch "loongarch64"
-+	}
- 	"s390*-*-*" {
- 	    if [check_effective_target_ilp32] {
- 		set goarch "s390"
-diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
-index 3387804fa..74256842f 100644
---- a/gcc/testsuite/lib/target-supports.exp
-+++ b/gcc/testsuite/lib/target-supports.exp
-@@ -282,6 +282,10 @@ proc check_weak_available { } {
- 	return 1
-     }
-
-+    if { [ string first "loongarch" $target_cpu ] >= 0 } {
-+	return 1
-+    }
-+
-     # All AIX targets should support it
-
-     if { [istarget *-*-aix*] } {
-@@ -676,6 +680,7 @@ proc check_profiling_available { test_what } {
- 	     || [istarget m68k-*-elf]
- 	     || [istarget m68k-*-uclinux*]
- 	     || [istarget mips*-*-elf*]
-+	     || [istarget loongarch*-*-elf*]
- 	     || [istarget mmix-*-*]
- 	     || [istarget mn10300-*-elf*]
- 	     || [istarget moxie-*-elf*]
-@@ -1216,6 +1221,14 @@ proc check_effective_target_hard_float { } {
- 	}]
-     }
-
-+    if { [istarget loongarch*-*-*] } {
-+	return [check_no_compiler_messages hard_float assembly {
-+		#if (defined __loongarch_soft_float)
-+		#error __loongarch_soft_float
-+		#endif
-+	}]
-+    }
-+
-     # This proc is actually checking the availabilty of FPU
-     # support for doubles, so on the RX we must fail if the
-     # 64-bit double multilib has been selected.
-@@ -1902,20 +1915,20 @@ proc check_mpaired_single_hw_available { } {
- # Return 1 if the target supports executing Loongson vector instructions,
- # 0 otherwise.  Cache the result.
-
---proc check_mips_loongson_hw_available { } {
---  return [check_cached_effective_target mips_loongson_hw_available {
--+proc check_mips_loongson_mmi_hw_available { } {
--+  return [check_cached_effective_target mips_loongson_mmi_hw_available {
-     # If this is not the right target then we can skip the test.
-     if { !([istarget mips*-*-*]) } {
- 	expr 0
-     } else {
--	check_runtime_nocache mips_loongson_hw_available {
--	  #include <loongson.h>
-+	check_runtime_nocache mips_loongson_mmi_hw_available {
-+	  #include <loongson-mmiintrin.h>
- 	  int main()
- 	  {
- 	    asm volatile ("paddw $f2,$f4,$f6");
- 	    return 0;
- 	  }
--	} ""
-+	} "-mloongson-mmi"
-     }
- }]
-}
-@@ -1969,9 +1982,9 @@ proc check_effective_target_mpaired_single_runtime { } {
-
- # Return 1 if the target supports running Loongson executables, 0 otherwise.
-
---proc check_effective_target_mips_loongson_runtime { } {
---  if { [check_effective_target_mips_loongson]
---       && [check_mips_loongson_hw_available] } {
--+proc check_effective_target_mips_loongson_mmi_runtime { } {
--+  if { [check_effective_target_mips_loongson_mmi]
--+       && [check_mips_loongson_mmi_hw_available] } {
- 	return 1
-     }
-     return 0
-@@ -3086,7 +3099,7 @@ proc check_effective_target_vect_int { } {
- 	 || [istarget aarch64*-*-*]
- 	 || [is-effective-target arm_neon]
- 	 || ([istarget mips*-*-*]
--	     && ([et-is-effective-target mips_loongson]
-+	     && ([et-is-effective-target mips_loongson_mmi]
- 		 || [et-is-effective-target mips_msa]))
- 	 || ([istarget s390*-*-*]
- 	     && [check_effective_target_s390_vx]) } {
-@@ -4845,11 +4858,24 @@ proc add_options_for_mips_msa { flags } {
-     return "$flags -mmsa"
- }
-
-+# Add the options needed for MIPS Loongson MMI Architecture.
-+
-+proc add_options_for_mips_loongson_mmi { flags } {
-+    if { ! [check_effective_target_mips_loongson_mmi] } {
-+	return "$flags"
-+    }
-+    return "$flags -mloongson-mmi"
-+}
-+
-+
- # Return 1 if this a Loongson-2E or -2F target using an ABI that supports
- # the Loongson vector modes.
-
---proc check_effective_target_mips_loongson { } {
--+proc check_effective_target_mips_loongson_mmi { } {
-   return [check_no_compiler_messages loongson assembly {
-+    #if !defined(__mips_loongson_mmi)
-+    #error !__mips_loongson_mmi
-+    #endif
-     #if !defined(__mips_loongson_vector_rev)
-     #error !__mips_loongson_vector_rev
-     #endif
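check_mips_loongson_mmi_hw_available, just above, decides whether MMI tests can actually run by compiling the small paddw program with -mloongson-mmi and executing it on the target board. A standalone probe in the same spirit has to survive the failure case too; one common pattern (this SIGILL-catching harness is an illustration, not something the DejaGnu probe does) is:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf jb;

static void on_sigill (int sig)
{
  (void) sig;
  siglongjmp (jb, 1);   /* Unwind out of the faulting instruction.  */
}

int main (void)
{
  signal (SIGILL, on_sigill);
  if (sigsetjmp (jb, 1) == 0)
    {
      /* The same MMI instruction the harness executes; it needs
         -mloongson-mmi (or equivalent) for the assembler to accept it.  */
      asm volatile ("paddw $f2,$f4,$f6");
      puts ("Loongson MMI: available");
    }
  else
    puts ("Loongson MMI: not available");
  return 0;
}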
[check_effective_target_mips_loongson_mmi] } { -+ return "$flags" -+ } -+ return "$flags -mloongson-mmi" -+} -+ -+ - # Return 1 if this a Loongson-2E or -2F target using an ABI that supports - # the Loongson vector modes. - --proc check_effective_target_mips_loongson { } { -+proc check_effective_target_mips_loongson_mmi { } { - return [check_no_compiler_messages loongson assembly { -+ #if !defined(__mips_loongson_mmi) -+ #error !__mips_loongson_mmi -+ #endif - #if !defined(__mips_loongson_vector_rev) - #error !__mips_loongson_vector_rev - #endif -@@ -5437,7 +5463,7 @@ proc check_effective_target_vect_shift { } { - || [is-effective-target arm_neon] - || ([istarget mips*-*-*] - && ([et-is-effective-target mips_msa] -- || [et-is-effective-target mips_loongson])) -+ || [et-is-effective-target mips_loongson_mmi])) - || ([istarget s390*-*-*] - && [check_effective_target_s390_vx]) } { - set et_vect_shift_saved($et_index) 1 -@@ -5457,7 +5483,7 @@ proc check_effective_target_whole_vector_shift { } { - || ([is-effective-target arm_neon] - && [check_effective_target_arm_little_endian]) - || ([istarget mips*-*-*] -- && [et-is-effective-target mips_loongson]) -+ && [et-is-effective-target mips_loongson_mmi]) - || ([istarget s390*-*-*] - && [check_effective_target_s390_vx]) } { - set answer 1 -@@ -5559,6 +5585,7 @@ proc check_effective_target_vect_float { } { - || [istarget powerpc*-*-*] - || [istarget spu-*-*] - || [istarget mips-sde-elf] -+ || [istarget loongarch-sde-elf] - || [istarget mipsisa64*-*-*] - || [istarget ia64-*-*] - || [istarget aarch64*-*-*] -@@ -5663,7 +5690,7 @@ proc check_effective_target_vect_no_int_min_max { } { - || [istarget spu-*-*] - || [istarget alpha*-*-*] - || ([istarget mips*-*-*] -- && [et-is-effective-target mips_loongson]) } { -+ && [et-is-effective-target mips_loongson_mmi]) } { - set et_vect_no_int_min_max_saved($et_index) 1 - } - } -@@ -6434,7 +6461,7 @@ proc check_effective_target_vect_no_align { } { - || [check_effective_target_arm_vect_no_misalign] - || ([istarget powerpc*-*-*] && [check_p8vector_hw_available]) - || ([istarget mips*-*-*] -- && [et-is-effective-target mips_loongson]) } { -+ && [et-is-effective-target mips_loongson_mmi]) } { - set et_vect_no_align_saved($et_index) 1 - } - } -@@ -6764,7 +6791,7 @@ proc check_effective_target_vect_short_mult { } { - || [check_effective_target_arm32] - || ([istarget mips*-*-*] - && ([et-is-effective-target mips_msa] -- || [et-is-effective-target mips_loongson])) -+ || [et-is-effective-target mips_loongson_mmi])) - || ([istarget s390*-*-*] - && [check_effective_target_s390_vx]) } { - set et_vect_short_mult_saved($et_index) 1 -@@ -7502,6 +7529,7 @@ proc check_effective_target_sync_char_short { } { - || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9]) - || [istarget spu-*-*] - || ([istarget arc*-*-*] && [check_effective_target_arc_atomic]) -+ || [istarget loongarch*-*-*] - || [check_effective_target_mips_llsc] } { - set et_sync_char_short_saved 1 - } -@@ -8579,8 +8607,8 @@ proc check_vect_support_and_set_flags { } { - if { [check_effective_target_mpaired_single] } { - lappend EFFECTIVE_TARGETS mpaired_single - } -- if { [check_effective_target_mips_loongson] } { -- lappend EFFECTIVE_TARGETS mips_loongson -+ if { [check_effective_target_mips_loongson_mmi] } { -+ lappend EFFECTIVE_TARGETS mips_loongson_mmi - } - if { [check_effective_target_mips_msa] } { - lappend EFFECTIVE_TARGETS mips_msa -@@ -8813,6 +8841,7 @@ proc check_effective_target_tiny {} { - - proc check_effective_target_logical_op_short_circuit {} { - 
if { [istarget mips*-*-*]
-+	 || [istarget loongarch*-*-*]
-	 || [istarget arc*-*-*]
-	 || [istarget avr*-*-*]
-	 || [istarget crisv32-*-*] || [istarget cris-*-*]
-@@ -8837,6 +8866,7 @@ proc check_effective_target_branch_cost {} {
-	 || [istarget frv*-*-*]
-	 || [istarget i?86-*-*] || [istarget x86_64-*-*]
-	 || [istarget mips*-*-*]
-+	 || [istarget loongarch*-*-*]
-	 || [istarget s390*-*-*]
-	 || [istarget riscv*-*-*]
-	 || [istarget sh*-*-*]
-diff --git a/libgcc/config.host b/libgcc/config.host
-index 317d735d5..83ca131aa 100644
---- a/libgcc/config.host
-+++ b/libgcc/config.host
-@@ -156,6 +156,23 @@ mips*-*-*)
- 	fi
- 	tmake_file="${tmake_file} t-softfp"
- 	;;
-+loongarch*-*-*)
-+	# All LoongArch targets provide a full set of FP routines.
-+	cpu_type=loongarch
-+	tmake_file="loongarch/t-loongarch"
-+	if test "${libgcc_cv_loongarch_hard_float}" = yes; then
-+		tmake_file="${tmake_file} t-hardfp-sfdf t-hardfp"
-+	else
-+		tmake_file="${tmake_file} t-softfp-sfdf"
-+	fi
-+	if test "${ac_cv_sizeof_long_double}" = 16; then
-+		tmake_file="${tmake_file} loongarch/t-softfp-tf"
-+	fi
-+	if test "${host_address}" = 64; then
-+		tmake_file="${tmake_file} loongarch/t-loongarch64"
-+	fi
-+	tmake_file="${tmake_file} t-softfp"
-+	;;
- nds32*-*)
- 	cpu_type=nds32
- 	;;
-@@ -906,6 +923,16 @@ mips*-*-linux*)	# Linux MIPS, either endian.
- 	esac
- 	md_unwind_header=mips/linux-unwind.h
- 	;;
-+loongarch*-*-linux*)	# Linux LoongArch, little endian.
-+	extra_parts="$extra_parts crtfastmath.o"
-+	tmake_file="${tmake_file} t-crtfm loongarch/t-crtstuff"
-+	case ${host} in
-+	*)
-+		tmake_file="${tmake_file} t-slibgcc-libgcc"
-+		;;
-+	esac
-+	md_unwind_header=loongarch/linux-unwind.h
-+	;;
- mips*-sde-elf*)
- 	tmake_file="$tmake_file mips/t-crtstuff mips/t-mips16"
- 	case "${with_newlib}" in
-@@ -919,6 +946,19 @@
- 	esac
- 	extra_parts="$extra_parts crti.o crtn.o"
- 	;;
-+loongarch*-sde-elf*)
-+	tmake_file="$tmake_file loongarch/t-crtstuff"
-+	case "${with_newlib}" in
-+	yes)
-+		# newlib / libgloss.
-+		;;
-+	*)
-+		# LoongArch toolkit libraries.
-+		tmake_file="$tmake_file loongarch/t-sdemtk"
-+		;;
-+	esac
-+	extra_parts="$extra_parts crti.o crtn.o"
-+	;;
- mipsisa32-*-elf* | mipsisa32el-*-elf* | \
- mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \
- mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \
-diff --git a/libgcc/config/loongarch/crtfastmath.c b/libgcc/config/loongarch/crtfastmath.c
-new file mode 100644
-index 000000000..d7371de6d
---- /dev/null
-+++ b/libgcc/config/loongarch/crtfastmath.c
-@@ -0,0 +1,50 @@
-+/* Copyright (C) 2010-2018 Free Software Foundation, Inc.
-+
-+   This file is part of GCC.
-+
-+   GCC is free software; you can redistribute it and/or modify it
-+   under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 3, or (at your option)
-+   any later version.
-+
-+   GCC is distributed in the hope that it will be useful, but WITHOUT
-+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
-+   License for more details.
-+
-+   Under Section 7 of GPL version 3, you are granted additional
-+   permissions described in the GCC Runtime Library Exception, version
-+   3.1, as published by the Free Software Foundation.
-+
-+   You should have received a copy of the GNU General Public License
-+   and a copy of the GCC Runtime Library Exception along with this
-+   program; see the files COPYING3 and COPYING.RUNTIME respectively.
-+   If not, see <http://www.gnu.org/licenses/>.  */
-+
-+#ifdef __loongarch_hard_float
-+
-+/* Rounding control.
*/ -+#define _FPU_RC_NEAREST 0x000 /* RECOMMENDED */ -+#define _FPU_RC_ZERO 0x100 -+#define _FPU_RC_UP 0x200 -+#define _FPU_RC_DOWN 0x300 -+ -+/* Enable interrupts for IEEE exceptions. */ -+#define _FPU_IEEE 0x0000001F -+ -+/* Macros for accessing the hardware control word. */ -+#define _FPU_GETCW(cw) __asm__ ("movgr2fcsr %0,$r1" : "=r" (cw)) -+#define _FPU_SETCW(cw) __asm__ ("movfcsr2gr %0,$r1" : : "r" (cw)) -+ -+static void __attribute__((constructor)) -+set_fast_math (void) -+{ -+ unsigned int fcr; -+ -+ /* round to nearest, IEEE exceptions disabled. */ -+ fcr = _FPU_RC_NEAREST; -+ -+ _FPU_SETCW(fcr); -+} -+ -+#endif /* __loongarch_hard_float */ -diff --git a/libgcc/config/loongarch/crti.S b/libgcc/config/loongarch/crti.S -new file mode 100644 -index 000000000..dcd05afea ---- /dev/null -+++ b/libgcc/config/loongarch/crti.S -@@ -0,0 +1,43 @@ -+/* Copyright (C) 2001-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify it under -+the terms of the GNU General Public License as published by the Free -+Software Foundation; either version 3, or (at your option) any later -+version. -+ -+GCC is distributed in the hope that it will be useful, but WITHOUT ANY -+WARRANTY; without even the implied warranty of MERCHANTABILITY or -+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+for more details. -+ -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. -+ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. */ -+ -+/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack. -+ Return spill offset of 40 and 20. Aligned to 16 bytes for n32. */ -+ -+ .section .init,"ax",@progbits -+ .globl _init -+ .type _init,@function -+_init: -+ addi.d $r3,$r3,-48 -+ st.d $r1,$r3,40 -+ addi.d $r3,$r3,48 -+ jirl $r0,$r1,0 -+ -+ .section .fini,"ax",@progbits -+ .globl _fini -+ .type _fini,@function -+_fini: -+ addi.d $r3,$r3,-48 -+ st.d $r1,$r3,40 -+ addi.d $r3,$r3,48 -+ jirl $r0,$r1,0 -diff --git a/libgcc/config/loongarch/crtn.S b/libgcc/config/loongarch/crtn.S -new file mode 100644 -index 000000000..91d9d5e7f ---- /dev/null -+++ b/libgcc/config/loongarch/crtn.S -@@ -0,0 +1,39 @@ -+/* Copyright (C) 2001-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify it under -+the terms of the GNU General Public License as published by the Free -+Software Foundation; either version 3, or (at your option) any later -+version. -+ -+GCC is distributed in the hope that it will be useful, but WITHOUT ANY -+WARRANTY; without even the implied warranty of MERCHANTABILITY or -+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+for more details. -+ -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. -+ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. */ -+ -+/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack. 
-+ Return spill offset of 40 and 20. Aligned to 16 bytes for n32. */ -+ -+ -+ .section .init,"ax",@progbits -+init: -+ ld.d $r1,$r3,40 -+ addi.d $r3,$r3,48 -+ jirl $r0,$r1,0 -+ -+ .section .fini,"ax",@progbits -+fini: -+ ld.d $r1,$r3,40 -+ addi.d $r3,$r3,48 -+ jirl $r0,$r1,0 -+ -diff --git a/libgcc/config/loongarch/gthr-loongnixsde.h b/libgcc/config/loongarch/gthr-loongnixsde.h -new file mode 100644 -index 000000000..f62b57318 ---- /dev/null -+++ b/libgcc/config/loongarch/gthr-loongnixsde.h -@@ -0,0 +1,237 @@ -+/* LARCH SDE threads compatibility routines for libgcc2 and libobjc. */ -+/* Compile this one with gcc. */ -+/* Copyright (C) 2006-2018 Free Software Foundation, Inc. -+ Contributed by Nigel Stephens -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify it under -+the terms of the GNU General Public License as published by the Free -+Software Foundation; either version 3, or (at your option) any later -+version. -+ -+GCC is distributed in the hope that it will be useful, but WITHOUT ANY -+WARRANTY; without even the implied warranty of MERCHANTABILITY or -+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+for more details. -+ -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. -+ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. */ -+ -+#ifndef GCC_GTHR_LARCHSDE_H -+#define GCC_GTHR_LARCHSDE_H -+ -+/* LARCH SDE threading API specific definitions. -+ Easy, since the interface is pretty much one-to-one. 
*/ -+ -+#define __GTHREADS 1 -+ -+#include -+#include -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+typedef __sdethread_key_t __gthread_key_t; -+typedef __sdethread_once_t __gthread_once_t; -+typedef __sdethread_mutex_t __gthread_mutex_t; -+ -+typedef struct { -+ long depth; -+ __sdethread_t owner; -+ __sdethread_mutex_t actual; -+} __gthread_recursive_mutex_t; -+ -+#define __GTHREAD_MUTEX_INIT __SDETHREAD_MUTEX_INITIALIZER("gthr") -+#define __GTHREAD_ONCE_INIT __SDETHREAD_ONCE_INIT -+static inline int -+__gthread_recursive_mutex_init_function(__gthread_recursive_mutex_t *__mutex); -+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function -+ -+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK -+# define __gthrw(name) \ -+ static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name))); -+# define __gthrw_(name) __gthrw_ ## name -+#else -+# define __gthrw(name) -+# define __gthrw_(name) name -+#endif -+ -+__gthrw(__sdethread_once) -+__gthrw(__sdethread_key_create) -+__gthrw(__sdethread_key_delete) -+__gthrw(__sdethread_getspecific) -+__gthrw(__sdethread_setspecific) -+ -+__gthrw(__sdethread_self) -+ -+__gthrw(__sdethread_mutex_lock) -+__gthrw(__sdethread_mutex_trylock) -+__gthrw(__sdethread_mutex_unlock) -+ -+__gthrw(__sdethread_mutex_init) -+ -+__gthrw(__sdethread_threading) -+ -+#if SUPPORTS_WEAK && GTHREAD_USE_WEAK -+ -+static inline int -+__gthread_active_p (void) -+{ -+ return !!(void *)&__sdethread_threading; -+} -+ -+#else /* not SUPPORTS_WEAK */ -+ -+static inline int -+__gthread_active_p (void) -+{ -+ return 1; -+} -+ -+#endif /* SUPPORTS_WEAK */ -+ -+static inline int -+__gthread_once (__gthread_once_t *__once, void (*__func) (void)) -+{ -+ if (__gthread_active_p ()) -+ return __gthrw_(__sdethread_once) (__once, __func); -+ else -+ return -1; -+} -+ -+static inline int -+__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *)) -+{ -+ return __gthrw_(__sdethread_key_create) (__key, __dtor); -+} -+ -+static inline int -+__gthread_key_delete (__gthread_key_t __key) -+{ -+ return __gthrw_(__sdethread_key_delete) (__key); -+} -+ -+static inline void * -+__gthread_getspecific (__gthread_key_t __key) -+{ -+ return __gthrw_(__sdethread_getspecific) (__key); -+} -+ -+static inline int -+__gthread_setspecific (__gthread_key_t __key, const void *__ptr) -+{ -+ return __gthrw_(__sdethread_setspecific) (__key, __ptr); -+} -+ -+static inline int -+__gthread_mutex_destroy (__gthread_mutex_t * UNUSED(__mutex)) -+{ -+ return 0; -+} -+ -+static inline int -+__gthread_mutex_lock (__gthread_mutex_t *__mutex) -+{ -+ if (__gthread_active_p ()) -+ return __gthrw_(__sdethread_mutex_lock) (__mutex); -+ else -+ return 0; -+} -+ -+static inline int -+__gthread_mutex_trylock (__gthread_mutex_t *__mutex) -+{ -+ if (__gthread_active_p ()) -+ return __gthrw_(__sdethread_mutex_trylock) (__mutex); -+ else -+ return 0; -+} -+ -+static inline int -+__gthread_mutex_unlock (__gthread_mutex_t *__mutex) -+{ -+ if (__gthread_active_p ()) -+ return __gthrw_(__sdethread_mutex_unlock) (__mutex); -+ else -+ return 0; -+} -+ -+static inline int -+__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex) -+{ -+ __mutex->depth = 0; -+ __mutex->owner = __gthrw_(__sdethread_self) (); -+ return __gthrw_(__sdethread_mutex_init) (&__mutex->actual, NULL); -+} -+ -+static inline int -+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex) -+{ -+ if (__gthread_active_p ()) -+ { -+ __sdethread_t __me = __gthrw_(__sdethread_self) (); -+ -+ if 
(__mutex->owner != __me) -+ { -+ __gthrw_(__sdethread_mutex_lock) (&__mutex->actual); -+ __mutex->owner = __me; -+ } -+ -+ __mutex->depth++; -+ } -+ return 0; -+} -+ -+static inline int -+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex) -+{ -+ if (__gthread_active_p ()) -+ { -+ __sdethread_t __me = __gthrw_(__sdethread_self) (); -+ -+ if (__mutex->owner != __me) -+ { -+ if (__gthrw_(__sdethread_mutex_trylock) (&__mutex->actual)) -+ return 1; -+ __mutex->owner = __me; -+ } -+ -+ __mutex->depth++; -+ } -+ return 0; -+} -+ -+static inline int -+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex) -+{ -+ if (__gthread_active_p ()) -+ { -+ if (--__mutex->depth == 0) -+ { -+ __mutex->owner = (__sdethread_t) 0; -+ __gthrw_(__sdethread_mutex_unlock) (&__mutex->actual); -+ } -+ } -+ return 0; -+} -+ -+static inline int -+__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t -+ * UNUSED(__mutex)) -+{ -+ return 0; -+} -+ -+#ifdef __cplusplus -+} -+#endif -+ -+#endif /* ! GCC_GTHR_LARCHSDE_H */ -diff --git a/libgcc/config/loongarch/lib2funcs.c b/libgcc/config/loongarch/lib2funcs.c -new file mode 100644 -index 000000000..c7d3541e9 ---- /dev/null -+++ b/libgcc/config/loongarch/lib2funcs.c -@@ -0,0 +1,23 @@ -+/* libgcc routines for LoongArch -+ Copyright (C) 2021 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify it under -+the terms of the GNU General Public License as published by the Free -+Software Foundation; either version 3, or (at your option) any later -+version. -+ -+GCC is distributed in the hope that it will be useful, but WITHOUT ANY -+WARRANTY; without even the implied warranty of MERCHANTABILITY or -+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+for more details. -+ -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. -+ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. */ -diff --git a/libgcc/config/loongarch/linux-unwind.h b/libgcc/config/loongarch/linux-unwind.h -new file mode 100644 -index 000000000..d77dfb058 ---- /dev/null -+++ b/libgcc/config/loongarch/linux-unwind.h -@@ -0,0 +1,81 @@ -+/* DWARF2 EH unwinding support for LARCH Linux. -+ Copyright (C) 2004-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. -+ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. 
*/ -+ -+#ifndef inhibit_libc -+/* Do code reading to identify a signal frame, and set the frame -+ state data appropriately. See unwind-dw2.c for the structs. */ -+ -+#include -+#include -+#include -+ -+#define MD_FALLBACK_FRAME_STATE_FOR loongarch_fallback_frame_state -+ -+static _Unwind_Reason_Code -+loongarch_fallback_frame_state (struct _Unwind_Context *context, -+ _Unwind_FrameState *fs) -+{ -+ u_int32_t *pc = (u_int32_t *) context->ra; -+ struct sigcontext *sc; -+ _Unwind_Ptr new_cfa; -+ int i; -+ -+ /* 03822c0b dli a7, 0x8b (sigreturn) */ -+ /* 002b0000 syscall 0 */ -+ if (pc[1] != 0x002b0000) -+ return _URC_END_OF_STACK; -+ if (pc[0] == 0x03822c0b) -+ { -+ struct rt_sigframe { -+ u_int32_t ass[4]; /* Argument save space for o32. */ -+ u_int32_t trampoline[2]; -+ siginfo_t info; -+ ucontext_t uc; -+ } *rt_ = context->cfa; -+ sc = &rt_->uc.uc_mcontext; -+ } -+ else -+ return _URC_END_OF_STACK; -+ -+ new_cfa = (_Unwind_Ptr) sc; -+ fs->regs.cfa_how = CFA_REG_OFFSET; -+ fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__; -+ fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; -+ -+ for (i = 0; i < 32; i++) { -+ fs->regs.reg[i].how = REG_SAVED_OFFSET; -+ fs->regs.reg[i].loc.offset -+ = (_Unwind_Ptr)&(sc->sc_regs[i]) - new_cfa; -+ } -+ -+ fs->signal_frame = 1; -+ fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how -+ = REG_SAVED_VAL_OFFSET; -+ fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset -+ = (_Unwind_Ptr)(sc->sc_pc) - new_cfa; -+ fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__; -+ -+ return _URC_NO_REASON; -+} -+#endif -diff --git a/libgcc/config/loongarch/sfp-machine.h b/libgcc/config/loongarch/sfp-machine.h -new file mode 100644 -index 000000000..f7800a003 ---- /dev/null -+++ b/libgcc/config/loongarch/sfp-machine.h -@@ -0,0 +1,148 @@ -+/* softfp machine description for LARCH. -+ Copyright (C) 2009-2018 Free Software Foundation, Inc. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify it under -+the terms of the GNU General Public License as published by the Free -+Software Foundation; either version 3, or (at your option) any later -+version. -+ -+GCC is distributed in the hope that it will be useful, but WITHOUT ANY -+WARRANTY; without even the implied warranty of MERCHANTABILITY or -+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+for more details. -+ -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. -+ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. 
*/ -+ -+#ifdef __loongarch64 -+#define _FP_W_TYPE_SIZE 64 -+#define _FP_W_TYPE unsigned long long -+#define _FP_WS_TYPE signed long long -+#define _FP_I_TYPE long long -+ -+typedef int TItype __attribute__ ((mode (TI))); -+typedef unsigned int UTItype __attribute__ ((mode (TI))); -+#define TI_BITS (__CHAR_BIT__ * (int) sizeof (TItype)) -+ -+#define _FP_MUL_MEAT_S(R,X,Y) \ -+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) -+#define _FP_MUL_MEAT_D(R,X,Y) \ -+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) -+#define _FP_MUL_MEAT_Q(R,X,Y) \ -+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) -+ -+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) -+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) -+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) -+ -+# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) -+# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1) -+# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1 -+#else -+#define _FP_W_TYPE_SIZE 32 -+#define _FP_W_TYPE unsigned int -+#define _FP_WS_TYPE signed int -+#define _FP_I_TYPE int -+ -+#define _FP_MUL_MEAT_S(R,X,Y) \ -+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) -+#define _FP_MUL_MEAT_D(R,X,Y) \ -+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) -+#define _FP_MUL_MEAT_Q(R,X,Y) \ -+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) -+ -+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) -+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) -+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y) -+ -+# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) -+# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 -+# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 -+#endif -+ -+/* The type of the result of a floating point comparison. This must -+ match __libgcc_cmp_return__ in GCC for the target. */ -+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__))); -+#define CMPtype __gcc_CMPtype -+ -+#define _FP_NANSIGN_S 0 -+#define _FP_NANSIGN_D 0 -+#define _FP_NANSIGN_Q 0 -+ -+#define _FP_KEEPNANFRACP 1 -+# define _FP_QNANNEGATEDP 0 -+ -+/* NaN payloads should be preserved for NAN2008. 
*/ -+# define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ -+ do \ -+ { \ -+ R##_s = X##_s; \ -+ _FP_FRAC_COPY_##wc (R, X); \ -+ R##_c = FP_CLS_NAN; \ -+ } \ -+ while (0) -+ -+#ifdef __loongarch_hard_float -+#define FP_EX_INVALID 0x100000 -+#define FP_EX_DIVZERO 0x080000 -+#define FP_EX_OVERFLOW 0x040000 -+#define FP_EX_UNDERFLOW 0x020000 -+#define FP_EX_INEXACT 0x010000 -+#define FP_EX_ALL \ -+ (FP_EX_INVALID | FP_EX_DIVZERO | FP_EX_OVERFLOW | FP_EX_UNDERFLOW \ -+ | FP_EX_INEXACT) -+ -+#define FP_EX_ENABLE_SHIFT 16 -+#define FP_EX_CAUSE_SHIFT 8 -+ -+#define FP_RND_NEAREST 0x000 -+#define FP_RND_ZERO 0x100 -+#define FP_RND_PINF 0x200 -+#define FP_RND_MINF 0x300 -+#define FP_RND_MASK 0x300 -+ -+#define _FP_DECL_EX \ -+ unsigned long int _fcsr __attribute__ ((unused)) = FP_RND_NEAREST -+ -+#define FP_INIT_ROUNDMODE \ -+ do { \ -+ _fcsr = __builtin_loongarch_movfcsr2gr (0); \ -+ } while (0) -+ -+#define FP_ROUNDMODE (_fcsr & FP_RND_MASK) -+ -+#define FP_TRAPPING_EXCEPTIONS ((_fcsr << FP_EX_ENABLE_SHIFT) & FP_EX_ALL) -+ -+#define FP_HANDLE_EXCEPTIONS \ -+ do { \ -+ _fcsr &= ~(FP_EX_ALL << FP_EX_CAUSE_SHIFT); \ -+ _fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \ -+ __builtin_loongarch_movgr2fcsr (0, _fcsr); \ -+ } while (0) -+ -+#else -+#define FP_EX_INVALID (1 << 4) -+#define FP_EX_DIVZERO (1 << 3) -+#define FP_EX_OVERFLOW (1 << 2) -+#define FP_EX_UNDERFLOW (1 << 1) -+#define FP_EX_INEXACT (1 << 0) -+#endif -+ -+#define _FP_TININESS_AFTER_ROUNDING 1 -+ -+#define __LITTLE_ENDIAN 1234 -+ -+# define __BYTE_ORDER __LITTLE_ENDIAN -+ -+/* Define ALIASNAME as a strong alias for NAME. */ -+# define strong_alias(name, aliasname) _strong_alias(name, aliasname) -+# define _strong_alias(name, aliasname) \ -+ extern __typeof (name) aliasname __attribute__ ((alias (#name))); -diff --git a/libgcc/config/loongarch/t-crtstuff b/libgcc/config/loongarch/t-crtstuff -new file mode 100644 -index 000000000..b8c36eb66 ---- /dev/null -+++ b/libgcc/config/loongarch/t-crtstuff -@@ -0,0 +1,5 @@ -+# -fasynchronous-unwind-tables is on by default for LoongArch. -+# We turn it off for crt*.o because it would make __EH_FRAME_BEGIN__ point -+# to .eh_frame data from crtbeginT.o instead of the user-defined object -+# during static linking. -+CRTSTUFF_T_CFLAGS += -fno-omit-frame-pointer -fno-asynchronous-unwind-tables -diff --git a/libgcc/config/loongarch/t-elf b/libgcc/config/loongarch/t-elf -new file mode 100644 -index 000000000..651f10a53 ---- /dev/null -+++ b/libgcc/config/loongarch/t-elf -@@ -0,0 +1,3 @@ -+# We must build libgcc2.a with -G 0, in case the user wants to link -+# without the $gp register. 
-+HOST_LIBGCC2_CFLAGS += -G 0 -diff --git a/libgcc/config/loongarch/t-loongarch b/libgcc/config/loongarch/t-loongarch -new file mode 100644 -index 000000000..9508cb2fc ---- /dev/null -+++ b/libgcc/config/loongarch/t-loongarch -@@ -0,0 +1,9 @@ -+LIB2_SIDITI_CONV_FUNCS = yes -+ -+softfp_float_modes := -+softfp_int_modes := si di -+softfp_extensions := -+softfp_truncations := -+softfp_exclude_libgcc2 := n -+ -+LIB2ADD_ST += $(srcdir)/config/loongarch/lib2funcs.c -diff --git a/libgcc/config/loongarch/t-loongarch64 b/libgcc/config/loongarch/t-loongarch64 -new file mode 100644 -index 000000000..a1e3513e2 ---- /dev/null -+++ b/libgcc/config/loongarch/t-loongarch64 -@@ -0,0 +1 @@ -+softfp_int_modes += ti -diff --git a/libgcc/config/loongarch/t-sdemtk b/libgcc/config/loongarch/t-sdemtk -new file mode 100644 -index 000000000..a06e828b5 ---- /dev/null -+++ b/libgcc/config/loongarch/t-sdemtk -@@ -0,0 +1,3 @@ -+# Don't build FPBIT and DPBIT; we'll be using the SDE soft-float library. -+FPBIT = -+DPBIT = -diff --git a/libgcc/config/loongarch/t-softfp-tf b/libgcc/config/loongarch/t-softfp-tf -new file mode 100644 -index 000000000..306677b12 ---- /dev/null -+++ b/libgcc/config/loongarch/t-softfp-tf -@@ -0,0 +1,3 @@ -+softfp_float_modes += tf -+softfp_extensions += sftf dftf -+softfp_truncations += tfsf tfdf -diff --git a/libgcc/config/loongarch/t-vr b/libgcc/config/loongarch/t-vr -new file mode 100644 -index 000000000..e69de29bb -diff --git a/libgcc/configure.ac b/libgcc/configure.ac -index 5f0a63ce2..9b78de063 100644 ---- a/libgcc/configure.ac -+++ b/libgcc/configure.ac -@@ -277,7 +277,7 @@ AC_CACHE_CHECK([whether assembler supports CFI directives], [libgcc_cv_cfi], - # word size rather than the address size. - cat > conftest.c < - #include - -+static bool debian_date_format(); -+ - void test01() - { - using namespace std; -@@ -46,7 +48,7 @@ void test01() - 0x5e74, L'1', L'2', 0x6708, L'1', L'7', - 0x65e5 , 0x0 }; - -- iss.str(wstr); -+ iss.str(debian_date_format() ? 
wstr+2 : wstr); - iterator_type is_it01(iss); - tm time01; - tim_get.get_date(is_it01, end, iss, errorstate, &time01); -@@ -56,6 +58,26 @@ void test01() - VERIFY( time01.tm_year == 103 ); - } - -+#include -+#if __has_include() -+# include -+#endif -+ -+static bool debian_date_format() -+{ -+#ifdef D_FMT -+ if (setlocale(LC_TIME, "zh_TW.UTF-8") != NULL) -+ { -+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31413 -+ // and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71641#c2 -+ if (*nl_langinfo(D_FMT) == '%') -+ return true; -+ setlocale(LC_TIME, "C"); -+ } -+#endif -+ return false; -+} -+ - int main() - { - test01(); --- -2.20.1 - diff --git a/0001-LoongArch-Fixup-configure-file-error.patch b/0001-LoongArch-Fixup-configure-file-error.patch deleted file mode 100644 index eb8a21a..0000000 --- a/0001-LoongArch-Fixup-configure-file-error.patch +++ /dev/null @@ -1,153 +0,0 @@ -From 77dd77cc1400180914b26c19704dbe990cb36878 Mon Sep 17 00:00:00 2001 -From: Xing Li -Date: Mon, 31 Oct 2022 20:12:09 +0800 -Subject: [PATCH] LoongArch: Fixup configure file error - -configure error lead to the default macros not correct during compile -initialize, such as dwarf2out_as_loc_support and dwarf2out_as_locview_support - -Signed-off-by: Xing Li ---- - gcc/configure | 67 +++++++++++++++++++++++++++++++++++++++++++++--- - libgcc/configure | 5 +++- - 2 files changed, 67 insertions(+), 5 deletions(-) - -diff --git a/gcc/configure b/gcc/configure -index 56566e3f1..f0b5dbc00 100755 ---- a/gcc/configure -+++ b/gcc/configure -@@ -5075,6 +5075,9 @@ case "${target}" in - # sets the default TLS model and affects inlining. - PICFLAG_FOR_TARGET=-fPIC - ;; -+ loongarch*-*-*) -+ PICFLAG_FOR_TARGET=-fpic -+ ;; - mips-sgi-irix6*) - # PIC is the default. - ;; -@@ -7525,6 +7528,9 @@ else - enable_fixed_point=yes - ;; - -+ loongarch*-*-*) -+ enable_fixed_point=yes -+ ;; - mips*-*-*) - enable_fixed_point=yes - ;; -@@ -24085,6 +24091,18 @@ x: - tls_first_minor=16 - tls_as_opt='-32 --fatal-warnings' - ;; -+ loongarch*-*-*) -+ conftest_s=' -+ .section .tdata,"awT",@progbits -+x: .word 2 -+ .text -+ la.tls.gd $a0,x -+ bl __tls_get_addr' -+ tls_first_major=0 -+ tls_first_minor=0 -+ tls_as_opt='--fatal-warnings' -+ ;; -+ - m68k-*-*) - conftest_s=' - .section .tdata,"awT",@progbits -@@ -27644,6 +27662,47 @@ fi - as_fn_error "Requesting --with-nan= requires assembler support for -mnan=" "$LINENO" 5 - fi - ;; -+ loongarch*-*-*) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .dtprelword support" >&5 -+$as_echo_n "checking assembler for .dtprelword support... " >&6; } -+if test "${gcc_cv_as_loongarch_dtprelword+set}" = set; then : -+ $as_echo_n "(cached) " >&6 -+else -+ gcc_cv_as_loongarch_dtprelword=no -+ if test $in_tree_gas = yes; then -+ if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 18 \) \* 1000 + 0` -+ then gcc_cv_as_loongarch_dtprelword=yes -+fi -+ elif test x$gcc_cv_as != x; then -+ $as_echo '.section .tdata,"awT",@progbits -+x: -+ .word 2 -+ .text -+ .dtprelword x+0x8000' > conftest.s -+ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 -+ (eval $ac_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 -+ test $ac_status = 0; }; } -+ then -+ gcc_cv_as_loongarch_dtprelword=yes -+ else -+ echo "configure: failed program was" >&5 -+ cat conftest.s >&5 -+ fi -+ rm -f conftest.o conftest.s -+ fi -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_dtprelword" >&5 -+$as_echo "$gcc_cv_as_loongarch_dtprelword" >&6; } -+if test $gcc_cv_as_loongarch_dtprelword = yes; then -+ -+$as_echo "#define HAVE_AS_DTPRELWORD 1" >>confdefs.h -+ -+fi -+ -+ ;; - s390*-*-*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .gnu_attribute support" >&5 - $as_echo_n "checking assembler for .gnu_attribute support... " >&6; } -@@ -27827,7 +27886,7 @@ esac - # Linux on IA64 might be able to use the Intel assembler. - - case "$target" in -- mips*-*-* | *-*-hpux* ) -+ mips*-*-* | loongarch*-*-* | *-*-hpux* ) - if test x$gas_flag = xyes \ - || test x"$host" != x"$build" \ - || test ! -x "$gcc_cv_as" \ -@@ -27847,9 +27906,9 @@ esac - # ??? Once 2.11 is released, probably need to add first known working - # version to the per-target configury. - case "$cpu_type" in -- aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | m32c | m68k \ -- | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu \ -- | tilegx | tilepro | visium | xstormy16 | xtensa) -+ aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | loongarch | m32c \ -+ | m68k | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc \ -+ | spu | tilegx | tilepro | visium | xstormy16 | xtensa) - insn="nop" - ;; - ia64 | s390) -diff --git a/libgcc/configure b/libgcc/configure -index aac5e5fb6..170b470dd 100755 ---- a/libgcc/configure -+++ b/libgcc/configure -@@ -2362,6 +2362,9 @@ case "${host}" in - # sets the default TLS model and affects inlining. - PICFLAG=-fPIC - ;; -+ loongarch*-*-*) -+ PICFLAG=-fpic -+ ;; - mips-sgi-irix6*) - # PIC is the default. - ;; -@@ -4938,7 +4941,7 @@ $as_echo "$libgcc_cv_cfi" >&6; } - # word size rather than the address size. - cat > conftest.c < -Date: Thu, 21 Jan 2021 21:09:15 +0800 -Subject: [PATCH] gcc anolis Rebrand for OpenAnolis - ---- - zlib/contrib/minizip/configure.ac | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/zlib/contrib/minizip/configure.ac b/zlib/contrib/minizip/configure.ac -index 5b1197097..857ced9b5 100644 ---- a/zlib/contrib/minizip/configure.ac -+++ b/zlib/contrib/minizip/configure.ac -@@ -1,7 +1,7 @@ - # -*- Autoconf -*- - # Process this file with autoconf to produce a configure script. 
-
--AC_INIT([minizip], [1.2.11], [bugzilla.redhat.com])
-+AC_INIT([minizip], [1.2.11], [https://bugzilla.openanolis.cn])
- AC_CONFIG_SRCDIR([minizip.c])
- AM_INIT_AUTOMAKE([foreign])
- LT_INIT
---
-2.18.4
-
diff --git a/0002-LoongArch-Rename-config-file-for-loongarch.patch b/0002-LoongArch-Rename-config-file-for-loongarch.patch
deleted file mode 100644
index 5a9a86b..0000000
--- a/0002-LoongArch-Rename-config-file-for-loongarch.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-From 104cc4a48954cac2aeab9fa4a32bbd3afc784e7e Mon Sep 17 00:00:00 2001
-From: Xing Li
-Date: Tue, 1 Nov 2022 09:41:17 +0800
-Subject: [PATCH 2/2] LoongArch: Rename config file for loongarch
-
-Signed-off-by: Xing Li
----
- config/{mt-loongnix-gnu => mt-loongarch-gnu} | 0
- 1 file changed, 0 insertions(+), 0 deletions(-)
- rename config/{mt-loongnix-gnu => mt-loongarch-gnu} (100%)
-
-diff --git a/config/mt-loongnix-gnu b/config/mt-loongarch-gnu
-similarity index 100%
-rename from config/mt-loongnix-gnu
-rename to config/mt-loongarch-gnu
---
-2.27.0
-
diff --git a/0002-loongarch-fix-multilib-osdirnames-to-lib64.patch b/0002-loongarch-fix-multilib-osdirnames-to-lib64.patch
deleted file mode 100644
index 9cd6348..0000000
--- a/0002-loongarch-fix-multilib-osdirnames-to-lib64.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From ea03a4f48828e1ca550f8cd5d3916872de6098d8 Mon Sep 17 00:00:00 2001
-From: Li Xing
-Date: Fri, 24 Jun 2022 11:00:23 +0800
-Subject: [PATCH 2/2] loongarch fix multilib osdirnames to lib64
-
-Signed-off-by: Li Xing
-Signed-off-by: Zhang Wenlong
----
- gcc/config/loongarch/t-linux | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux
-index 479f4293e..58f27f89d 100644
---- a/gcc/config/loongarch/t-linux
-+++ b/gcc/config/loongarch/t-linux
-@@ -16,7 +16,7 @@
- # along with GCC; see the file COPYING3.  If not see
- # <http://www.gnu.org/licenses/>.
- --MULTILIB_OSDIRNAMES := ../lib$(call if_multiarch,:loongarch64-linux-gnu) -+MULTILIB_OSDIRNAMES := ../lib64$(call if_multiarch,:loongarch64-linux-gnu) - MULTIARCH_DIRNAME := $(call if_multiarch,loongarch64-linux-gnu) - - # haven't supported lp32 yet --- -2.20.1 - diff --git a/Fix-dwarf2cfi-error.patch b/Fix-dwarf2cfi-error.patch deleted file mode 100644 index 4d7e134..0000000 --- a/Fix-dwarf2cfi-error.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 685e5318771d90711e331e6192b2d2002076d99e Mon Sep 17 00:00:00 2001 -From: Lixing -Date: Thu, 31 Aug 2023 17:07:11 +0800 -Subject: [PATCH] Fix dwarf2cfi error - ---- - gcc/dwarf2cfi.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c -index c3e69e8b8..938222156 100644 ---- a/gcc/dwarf2cfi.c -+++ b/gcc/dwarf2cfi.c -@@ -1948,6 +1948,16 @@ dwarf2out_frame_debug_expr (rtx expr) - { - unsigned int regno = dwf_regno (XEXP (dest, 0)); - -+ if (fde -+ && fde->stack_realign -+ && REG_P (src) -+ && REGNO (src) == HARD_FRAME_POINTER_REGNUM) -+ { -+ gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum); -+ cur_trace->cfa_store.offset = 0; -+ fde->rule18 = 1; -+ } -+ - if (cur_cfa->reg == regno) - offset = -cur_cfa->offset; - else if (cur_trace->cfa_store.reg == regno) --- -2.27.0 - diff --git a/Improve-specs-processing-to-allow-in-function-argume.patch b/Improve-specs-processing-to-allow-in-function-argume.patch deleted file mode 100644 index b7504b4..0000000 --- a/Improve-specs-processing-to-allow-in-function-argume.patch +++ /dev/null @@ -1,220 +0,0 @@ -From 6e6de5b31ac9b5a523fbf60099d00124d99aa0d0 Mon Sep 17 00:00:00 2001 -From: Lixing -Date: Mon, 31 Jul 2023 10:08:23 +0800 -Subject: [PATCH 2/2] Improve specs processing to allow %* in function - arguments - -2018-07-31 Olivier Hainque - - * gcc.c (handle_spec_function): Accept a soft_matched_part - argument, as do_spec_1. Pass it down to ... - (eval_spec_function): Accept a soft_matched_part argument, - and pass it down to ... - (do_spec_2): Accept a soft_matched_part argument, and pass - it down to do_spec_1. - (do_spec_1): Pass soft_matched_part to handle_spec_function. - (handle_braces): Update call to handle_spec_function. - (driver::set_up_specs): Update calls to do_spec_2. - (compare_debug_dump_opt_spec_function): Likewise. - (compare_debug_self_opt_spec_function): Likewise. 
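In effect: when a construct such as %{fdump-final-insns=*:%:func(%*)} matches,
the text captured by the "*" is now threaded down through do_spec_2/do_spec_1
as the soft-matched part instead of being dropped, so a %* inside a spec
function's argument expands to the matched text. A minimal standalone sketch
of just that substitution step (illustrative only; this is not code from
gcc.c, and expand_percent_star is an invented name):

#include <stdio.h>
#include <string.h>

/* Copy ARG into OUT, expanding each "%*" to SOFT_MATCHED_PART, the text
   that a "*" spec pattern matched.  This mirrors what forwarding the
   matched part into spec-function evaluation enables.  */
static void
expand_percent_star (const char *arg, const char *soft_matched_part,
                     char *out, size_t out_size)
{
  size_t used = 0;
  for (const char *p = arg; *p != '\0' && used + 1 < out_size; p++)
    {
      if (p[0] == '%' && p[1] == '*')
        {
          size_t n = strlen (soft_matched_part);
          if (used + n + 1 > out_size)
            break;              /* Out of room; stop cleanly.  */
          memcpy (out + used, soft_matched_part, n);
          used += n;
          p++;                  /* Skip the '*' as well.  */
        }
      else
        out[used++] = *p;
    }
  out[used] = '\0';
}

int
main (void)
{
  char buf[128];
  /* As if -fdump-final-insns=foo.gkd had matched "fdump-final-insns=*".  */
  expand_percent_star ("%*", "foo.gkd", buf, sizeof buf);
  printf ("%s\n", buf);         /* Prints: foo.gkd */
  return 0;
}

Before this change, %* was only meaningful at the top level of a spec body,
which is why the calls below gain an explicit NULL or soft-matched argument.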
- -[Upstream] 1102fd64dbb767 (Deleted ChangeLog) -Link: https://gcc.gnu.org/git/?p=gcc.git;a=patch;f=gcc/gcc.cc;h=1102fd64dbb76784ed46ff81bf905f6c52d296fc ---- - gcc/gcc.c | 51 +++++++++++++++++++++++++++++---------------------- - 1 file changed, 29 insertions(+), 22 deletions(-) - -diff --git a/gcc/gcc.c b/gcc/gcc.c -index 3b87e91b6..3c46e0769 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -354,12 +354,12 @@ static inline void mark_matching_switches (const char *, const char *, int); - static inline void process_marked_switches (void); - static const char *process_brace_body (const char *, const char *, const char *, int, int); - static const struct spec_function *lookup_spec_function (const char *); --static const char *eval_spec_function (const char *, const char *); --static const char *handle_spec_function (const char *, bool *); -+static const char *eval_spec_function (const char *, const char *, const char *); -+static const char *handle_spec_function (const char *, bool *, const char *); - static char *save_string (const char *, int); - static void set_collect_gcc_options (void); - static int do_spec_1 (const char *, int, const char *); --static int do_spec_2 (const char *); -+static int do_spec_2 (const char *, const char *); - static void do_option_spec (const char *, const char *); - static void do_self_spec (const char *); - static const char *find_file (const char *); -@@ -4865,7 +4865,7 @@ do_spec (const char *spec) - { - int value; - -- value = do_spec_2 (spec); -+ value = do_spec_2 (spec, NULL); - - /* Force out any unfinished command. - If -pipe, this forces out the last command if it ended in `|'. */ -@@ -4884,8 +4884,11 @@ do_spec (const char *spec) - return value; - } - -+/* Process the spec SPEC, with SOFT_MATCHED_PART designating the current value -+ of a matched * pattern which may be re-injected by way of %*. */ -+ - static int --do_spec_2 (const char *spec) -+do_spec_2 (const char *spec, const char *soft_matched_part) - { - int result; - -@@ -4898,14 +4901,13 @@ do_spec_2 (const char *spec) - input_from_pipe = 0; - suffix_subst = NULL; - -- result = do_spec_1 (spec, 0, NULL); -+ result = do_spec_1 (spec, 0, soft_matched_part); - - end_going_arg (); - - return result; - } - -- - /* Process the given spec string and add any new options to the end - of the switches/n_switches array. */ - -@@ -4963,7 +4965,7 @@ do_self_spec (const char *spec) - { - int i; - -- do_spec_2 (spec); -+ do_spec_2 (spec, NULL); - do_spec_1 (" ", 0, NULL); - - /* Mark % 1) - error ("spec failure: more than one arg to SYSROOT_SUFFIX_SPEC"); -@@ -7577,7 +7584,7 @@ driver::set_up_specs () const - /* Process sysroot_hdrs_suffix_spec. */ - if (*sysroot_hdrs_suffix_spec != 0 - && !no_sysroot_suffix -- && do_spec_2 (sysroot_hdrs_suffix_spec) == 0) -+ && do_spec_2 (sysroot_hdrs_suffix_spec, NULL) == 0) - { - if (argbuf.length () > 1) - error ("spec failure: more than one arg to SYSROOT_HEADERS_SUFFIX_SPEC"); -@@ -7587,7 +7594,7 @@ driver::set_up_specs () const - - /* Look for startfiles in the standard places. 
*/ - if (*startfile_prefix_spec != 0 -- && do_spec_2 (startfile_prefix_spec) == 0 -+ && do_spec_2 (startfile_prefix_spec, NULL) == 0 - && do_spec_1 (" ", 0, NULL) == 0) - { - const char *arg; -@@ -9717,7 +9724,7 @@ compare_debug_dump_opt_spec_function (int arg, - fatal_error (input_location, - "too many arguments to %%:compare-debug-dump-opt"); - -- do_spec_2 ("%{fdump-final-insns=*:%*}"); -+ do_spec_2 ("%{fdump-final-insns=*:%*}", NULL); - do_spec_1 (" ", 0, NULL); - - if (argbuf.length () > 0 -@@ -9735,13 +9742,13 @@ compare_debug_dump_opt_spec_function (int arg, - - if (argbuf.length () > 0) - { -- do_spec_2 ("%{o*:%*}%{!o:%{!S:%b%O}%{S:%b.s}}"); -+ do_spec_2 ("%{o*:%*}%{!o:%{!S:%b%O}%{S:%b.s}}", NULL); - ext = ".gkd"; - } - else if (!compare_debug) - return NULL; - else -- do_spec_2 ("%g.gkd"); -+ do_spec_2 ("%g.gkd", NULL); - - do_spec_1 (" ", 0, NULL); - -@@ -9793,7 +9800,7 @@ compare_debug_self_opt_spec_function (int arg, - if (compare_debug >= 0) - return NULL; - -- do_spec_2 ("%{c|S:%{o*:%*}}"); -+ do_spec_2 ("%{c|S:%{o*:%*}}", NULL); - do_spec_1 (" ", 0, NULL); - - if (argbuf.length () > 0) --- -2.39.3 - diff --git a/LoongArch-Add-missing-headers.patch b/LoongArch-Add-missing-headers.patch deleted file mode 100644 index 2b06f75..0000000 --- a/LoongArch-Add-missing-headers.patch +++ /dev/null @@ -1,65 +0,0 @@ -From abae9df56090904daf1295744322950cd8380f9a Mon Sep 17 00:00:00 2001 -From: Lixing -Date: Thu, 3 Aug 2023 11:17:50 +0800 -Subject: [PATCH] LoongArch: Add missing headers - ---- - gcc/config/loongarch/genopts/loongarch.opt.in | 9 +++++++++ - gcc/config/loongarch/loongarch.opt | 9 +++++++++ - 2 files changed, 18 insertions(+) - -diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in -index 463dfec77..b8aab4ca8 100644 ---- a/gcc/config/loongarch/genopts/loongarch.opt.in -+++ b/gcc/config/loongarch/genopts/loongarch.opt.in -@@ -20,12 +20,21 @@ - ; . - ; - -+HeaderInclude -+config/loongarch/loongarch-tune.h -+ -+HeaderInclude -+config/loongarch/loongarch-def.h -+ - HeaderInclude - config/loongarch/loongarch-opts.h - - HeaderInclude - config/loongarch/loongarch-str.h - -+HeaderInclude -+config/loongarch/loongarch-driver.h -+ - TargetVariable - unsigned int recip_mask = 0 - -diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt -index 075a2d6c7..3dfe5f3cb 100644 ---- a/gcc/config/loongarch/loongarch.opt -+++ b/gcc/config/loongarch/loongarch.opt -@@ -27,12 +27,21 @@ - ; . 
- ;
-
-+HeaderInclude
-+config/loongarch/loongarch-tune.h
-+
-+HeaderInclude
-+config/loongarch/loongarch-def.h
-+
- HeaderInclude
- config/loongarch/loongarch-opts.h
-
- HeaderInclude
- config/loongarch/loongarch-str.h
-
-+HeaderInclude
-+config/loongarch/loongarch-driver.h
-+
- TargetVariable
- unsigned int recip_mask = 0
-
---
-2.39.3
-
diff --git a/LoongArch-Add-sanitizer-support.patch b/LoongArch-Add-sanitizer-support.patch
deleted file mode 100644
index 935d3c3..0000000
--- a/LoongArch-Add-sanitizer-support.patch
+++ /dev/null
@@ -1,1100 +0,0 @@
-From c985960fa4baae43ed4a1bfcaab9214b78a15020 Mon Sep 17 00:00:00 2001
-From: Xing Li
-Date: Fri, 6 Jan 2023 10:39:21 +0800
-Subject: [PATCH 2/2] LoongArch: Add sanitizer support
-
-Signed-off-by: Xing Li
-Signed-off-by: Peng Fan
----
- gcc/config/loongarch/loongarch.c              | 14 +-
- libsanitizer/asan/asan_mapping.h              |  6 +
- libsanitizer/configure.tgt                    | 10 ++
- libsanitizer/lsan/lsan_allocator.cc           |  2 +-
- libsanitizer/lsan/lsan_allocator.h            |  2 +-
- libsanitizer/lsan/lsan_common.cc              |  2 +
- libsanitizer/sanitizer_common/Makefile.am     |  2 +-
- libsanitizer/sanitizer_common/Makefile.in     |  3 +-
- .../sanitizer_common_syscalls.inc             |  6 +-
- .../sanitizer_common/sanitizer_linux.cc       | 94 ++++++++++-
- .../sanitizer_common/sanitizer_linux.h        |  4 +-
- .../sanitizer_linux_libcdep.cc                | 15 +-
- .../sanitizer_linux_loongarch64.S             | 22 +++
- .../sanitizer_common/sanitizer_platform.h     | 21 ++-
- .../sanitizer_platform_interceptors.h         |  4 +-
- .../sanitizer_platform_limits_linux.cc        |  2 +-
- .../sanitizer_platform_limits_posix.cc        | 22 ++-
- .../sanitizer_platform_limits_posix.h         |  7 +-
- .../sanitizer_common/sanitizer_stacktrace.cc  |  2 +-
- .../sanitizer_common/sanitizer_stacktrace.h   |  5 +-
- .../sanitizer_stoptheworld_linux_libcdep.cc   | 14 +-
- .../sanitizer_tls_get_addr.cc                 |  2 +
- libsanitizer/tsan/Makefile.am                 |  2 +-
- libsanitizer/tsan/Makefile.in                 |  3 +-
- libsanitizer/tsan/tsan_interceptors.cc        |  5 +-
- libsanitizer/tsan/tsan_platform.h             | 38 +++++
- libsanitizer/tsan/tsan_platform_posix.cc      |  3 +
- libsanitizer/tsan/tsan_rtl.cc                 | 16 +-
- libsanitizer/tsan/tsan_rtl.h                  |  3 +-
- libsanitizer/tsan/tsan_rtl_loongarch64.S      | 156 ++++++++++++++++++
- 30 files changed, 447 insertions(+), 40 deletions(-)
- create mode 100644 libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S
- create mode 100644 libsanitizer/tsan/tsan_rtl_loongarch64.S
-
-diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c
-index a1dde5a0f..82be582ff 100644
---- a/gcc/config/loongarch/loongarch.c
-+++ b/gcc/config/loongarch/loongarch.c
-@@ -10724,7 +10724,16 @@ loongarch_prefetch_cookie (rtx write, rtx locality)
-       gcc_unreachable ();
-     }
-
--
-+/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */
-+
-+static unsigned HOST_WIDE_INT
-+loongarch_asan_shadow_offset (void)
-+{
-+  /* We only have libsanitizer support for LOONGARCH64 at present.
-+     This value is taken from the file libsanitizer/asan/asan_mapping.h.  */
-+  return TARGET_64BIT ? (HOST_WIDE_INT_1 << 37) : (0x0aaa0000);
-+}
-+
- /* Initialize the GCC target structure.
*/ - #undef TARGET_ASM_ALIGNED_HI_OP - #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" -@@ -10952,6 +10961,9 @@ loongarch_prefetch_cookie (rtx write, rtx locality) - #undef TARGET_SECONDARY_RELOAD - #define TARGET_SECONDARY_RELOAD loongarch_secondary_reload - -+#undef TARGET_ASAN_SHADOW_OFFSET -+#define TARGET_ASAN_SHADOW_OFFSET loongarch_asan_shadow_offset -+ - struct gcc_target targetm = TARGET_INITIALIZER; - - #include "gt-loongarch.h" -diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h -index 5496df66d..77c8061ad 100644 ---- a/libsanitizer/asan/asan_mapping.h -+++ b/libsanitizer/asan/asan_mapping.h -@@ -141,6 +141,8 @@ static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; - static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; - static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; - static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; -+static const u64 kLoongArch32_ShadowOffset32 = 0x0aaa0000; -+static const u64 kLoongArch64_ShadowOffset64 = 1ULL << 37; - static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; - static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; - static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 -@@ -157,6 +159,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 - # define SHADOW_OFFSET (0) - # elif defined(__mips__) - # define SHADOW_OFFSET kMIPS32_ShadowOffset32 -+# elif defined(__loongarch__) -+# define SHADOW_OFFSET kLoongArch32_ShadowOffset32 - # elif SANITIZER_FREEBSD - # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 - # elif SANITIZER_WINDOWS -@@ -191,6 +195,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 - # define SHADOW_OFFSET kDefaultShadowOffset64 - # elif defined(__mips64) - # define SHADOW_OFFSET kMIPS64_ShadowOffset64 -+# elif defined(__loongarch64) -+# define SHADOW_OFFSET kLoongArch64_ShadowOffset64 - # elif SANITIZER_WINDOWS64 - # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address - # else -diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt -index 573e3b482..8da064c1e 100644 ---- a/libsanitizer/configure.tgt -+++ b/libsanitizer/configure.tgt -@@ -55,6 +55,16 @@ case "${target}" in - x86_64-*-darwin[1]* | i?86-*-darwin[1]*) - TSAN_SUPPORTED=no - ;; -+ loongarch*-*-linux*) -+ if echo "int x = __loongarch64;" | $CC -c -x c -o /dev/null - > /dev/null 2>&1; then -+ SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=sanitizer_linux_loongarch64.lo -+ fi -+ if test x$ac_cv_sizeof_void_p = x8; then -+ TSAN_SUPPORTED=yes -+ LSAN_SUPPORTED=yes -+ TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_loongarch64.lo -+ fi -+ ;; - *) - UNSUPPORTED=1 - ;; -diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc -index 9e1668077..b3ef2400e 100644 ---- a/libsanitizer/lsan/lsan_allocator.cc -+++ b/libsanitizer/lsan/lsan_allocator.cc -@@ -26,7 +26,7 @@ extern "C" void *memset(void *ptr, int value, uptr num); - namespace __lsan { - #if defined(__i386__) || defined(__arm__) - static const uptr kMaxAllowedMallocSize = 1UL << 30; --#elif defined(__mips64) || defined(__aarch64__) -+#elif defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) - static const uptr kMaxAllowedMallocSize = 4UL << 30; - #else - static const uptr kMaxAllowedMallocSize = 8UL << 30; -diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h -index b0c0ec241..5793dd372 100644 ---- a/libsanitizer/lsan/lsan_allocator.h -+++ b/libsanitizer/lsan/lsan_allocator.h -@@ -49,7 +49,7 @@ struct ChunkMetadata { - }; - - #if 
defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \ -- defined(__arm__) -+ defined(__arm__) || defined(__loongarch64) - static const uptr kRegionSizeLog = 20; - static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; - typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; -diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc -index 4afce9df0..e1dce25c7 100644 ---- a/libsanitizer/lsan/lsan_common.cc -+++ b/libsanitizer/lsan/lsan_common.cc -@@ -136,6 +136,8 @@ static inline bool CanBeAHeapPointer(uptr p) { - return ((p >> 47) == 0); - #elif defined(__mips64) - return ((p >> 40) == 0); -+#elif defined(__loongarch64) -+ return ((p >> 40) == 0); - #elif defined(__aarch64__) - unsigned runtimeVMA = - (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); -diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am -index 246985b99..3b39f5bb0 100644 ---- a/libsanitizer/sanitizer_common/Makefile.am -+++ b/libsanitizer/sanitizer_common/Makefile.am -@@ -71,7 +71,7 @@ sanitizer_common_files = \ - - - libsanitizer_common_la_SOURCES = $(sanitizer_common_files) --EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S -+EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S sanitizer_linux_loongarch64.S - libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) - libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) - -diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in -index b0f5ac25a..023f633f7 100644 ---- a/libsanitizer/sanitizer_common/Makefile.in -+++ b/libsanitizer/sanitizer_common/Makefile.in -@@ -355,7 +355,7 @@ sanitizer_common_files = \ - sanitizer_win.cc - - libsanitizer_common_la_SOURCES = $(sanitizer_common_files) --EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S -+EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S sanitizer_linux_loongarch64.S - libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) - libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) - -@@ -467,6 +467,7 @@ distclean-compile: - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_mips64.Plo@am__quote@ -+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_loongarch64.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_x86_64.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@ -diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc -index 6fd5ef742..f55759106 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc -+++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc -@@ -2295,7 +2295,8 @@ POST_SYSCALL(ni_syscall)(long res) {} - PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { - #if !SANITIZER_ANDROID && \ - (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ -- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__)) -+ 
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ -+ defined(__loongarch64)) - if (data) { - if (request == ptrace_setregs) { - PRE_READ((void *)data, struct_user_regs_struct_sz); -@@ -2316,7 +2317,8 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { - POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { - #if !SANITIZER_ANDROID && \ - (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ -- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__)) -+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ -+ defined(__loongarch64)) - if (res >= 0 && data) { - // Note that this is different from the interceptor in - // sanitizer_common_interceptors.inc. -diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc -index 2826cc89e..003c38b4f 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_linux.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc -@@ -12,6 +12,10 @@ - - #include "sanitizer_platform.h" - -+#if defined(__loongarch__) -+#define __ARCH_WANT_RENAMEAT 1 -+#endif -+ - #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD - - #include "sanitizer_common.h" -@@ -127,7 +131,7 @@ const int FUTEX_WAKE = 1; - # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0 - #endif - --#if defined(__x86_64__) || SANITIZER_MIPS64 -+#if defined(__x86_64__) || SANITIZER_MIPS64 || SANITIZER_LOONGARCH64 - extern "C" { - extern void internal_sigreturn(); - } -@@ -802,7 +806,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { - // Invokes sigaction via a raw syscall with a restorer, but does not support - // all platforms yet. - // We disable for Go simply because we have not yet added to buildgo.sh. 
--#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO -+#if (defined(__x86_64__) || SANITIZER_MIPS64 || SANITIZER_LOONGARCH64) && !SANITIZER_GO - int internal_sigaction_syscall(int signum, const void *act, void *oldact) { - if (act == nullptr) - return internal_sigaction_norestorer(signum, act, oldact); -@@ -980,6 +984,8 @@ uptr GetMaxVirtualAddress() { - return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; - # elif defined(__mips64) - return (1ULL << 40) - 1; // 0x000000ffffffffffUL; -+# elif defined(__loongarch64) -+ return (1ULL << 40) - 1; // 0x000000ffffffffffUL; - # elif defined(__s390x__) - return (1ULL << 53) - 1; // 0x001fffffffffffffUL; - # else -@@ -1247,6 +1253,61 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, - : "memory", "$29" ); - return res; - } -+#elif defined(__loongarch__) && SANITIZER_LINUX -+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, -+ int *parent_tidptr, void *newtls, int *child_tidptr) { -+ long long res; -+ if (!fn || !child_stack) -+ return -EINVAL; -+ CHECK_EQ(0, (uptr)child_stack % 16); -+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); -+ ((unsigned long long *)child_stack)[0] = (uptr)fn; -+ ((unsigned long long *)child_stack)[1] = (uptr)arg; -+ -+ register int __flags __asm__("r4") = flags; -+ register void *__child_stack __asm__("r5") = child_stack; -+ register int *__parent_tidptr __asm__("r6") = parent_tidptr; -+ register void *__newtls __asm__("r7") = newtls; -+ register int *__child_tidptr __asm__("r8") = child_tidptr; -+ -+ __asm__ __volatile__( -+ /* $a0 = syscall($a7 = SYSCALL(clone), -+ * $a0 = flags, -+ * $a1 = child_stack, -+ * $a2 = parent_tidptr, -+ * $a3 = new_tls, -+ * $a4 = child_tyidptr) -+ */ -+ -+ /* Do the system call */ -+ "addi.d $a7, $r0, %1\n" -+ "syscall 0\n" -+ -+ "move %0, $a0" -+ : "=r"(res) -+ : "i"(__NR_clone), -+ "r"(__flags), "r"(__child_stack), "r"(__parent_tidptr), "r"(__newtls), "r"(__child_tidptr) -+ :"memory" ); -+ if (res != 0) { -+ return res; -+ } -+ __asm__ __volatile__ ( -+ /* In the child, now. Call "fn(arg)". */ -+ "ld.d $a6, $sp, 0\n" -+ "ld.d $a0, $sp, 8\n" -+ -+ "jirl $r1, $a6, 0\n" -+ -+ /* Call _exit($v0) */ -+ "addi.d $a7, $r0, %1\n" -+ "syscall 0\n" -+ -+ "move %0, $a0" -+ : "=r"(res) -+ : "i"(__NR_exit) -+ :"r1", "memory"); -+ return res; -+} - #elif defined(__aarch64__) - uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, - int *parent_tidptr, void *newtls, int *child_tidptr) { -@@ -1676,6 +1737,30 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { - u64 esr; - if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN; - return esr & ESR_ELx_WNR ? WRITE : READ; -+#elif defined(__loongarch__) -+ uint32_t *exception_source; -+ uint32_t faulty_instruction; -+ uint32_t op_code; -+ -+ exception_source = (uint32_t *)ucontext->uc_mcontext.__pc; -+ faulty_instruction = (uint32_t)(*exception_source); -+ -+ op_code = (faulty_instruction >> 22) & 0x3ff; -+ switch (op_code) { -+ case 0xa0: //ld.b -+ case 0xa1: //ld.h -+ case 0xa2: //ld.w -+ case 0xa3: //ld.d -+ return SignalContext::READ; -+ case 0xa4: -+ case 0xa5: -+ case 0xa6: -+ return SignalContext::WRITE; -+ case 0xa8: -+ case 0xa9: -+ return SignalContext::READ; -+ } -+ return SignalContext::UNKNOWN; - #else - (void)ucontext; - return UNKNOWN; // FIXME: Implement. 
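For context on the opcode check above: LoongArch instructions are a fixed
32 bits wide, and for this load/store family the 10-bit major opcode sits in
bits 31:22, which is what (faulty_instruction >> 22) & 0x3ff extracts;
0xa0-0xa3 are ld.b/ld.h/ld.w/ld.d, 0xa4-0xa7 are st.b/st.h/st.w/st.d, and
0xa8-0xa9 are ld.bu/ld.hu. A standalone sketch of the extraction with a
worked value (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* 0x28c02064 encodes "ld.d $a0,$sp,8": opcode 0xa3 in bits 31:22,
     si12 = 8, rj = $sp (r3), rd = $a0 (r4).  */
  uint32_t insn = 0x28c02064;
  uint32_t op_code = (insn >> 22) & 0x3ff;   /* Same mask as above.  */
  printf ("op_code = 0x%x\n", op_code);      /* 0xa3 -> ld.d, a READ.  */
  return 0;
}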
-@@ -1763,6 +1848,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { - *pc = ucontext->uc_mcontext.pc; - *bp = ucontext->uc_mcontext.gregs[30]; - *sp = ucontext->uc_mcontext.gregs[29]; -+#elif defined(__loongarch__) -+ ucontext_t *ucontext = (ucontext_t*)context; -+ *pc = ucontext->uc_mcontext.__pc; -+ *bp = ucontext->uc_mcontext.__gregs[22]; -+ *sp = ucontext->uc_mcontext.__gregs[3]; - #elif defined(__s390__) - ucontext_t *ucontext = (ucontext_t*)context; - # if defined(__s390x__) -diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h -index 910703d8b..600d2b382 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_linux.h -+++ b/libsanitizer/sanitizer_common/sanitizer_linux.h -@@ -52,14 +52,14 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); - // (like the process-wide error reporting SEGV handler) must use - // internal_sigaction instead. - int internal_sigaction_norestorer(int signum, const void *act, void *oldact); --#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO -+#if (defined(__x86_64__) || SANITIZER_MIPS64 || SANITIZER_LOONGARCH64) && !SANITIZER_GO - // Uses a raw system call to avoid interceptors. - int internal_sigaction_syscall(int signum, const void *act, void *oldact); - #endif - void internal_sigdelset(__sanitizer_sigset_t *set, int signum); - #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \ - || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \ -- || defined(__arm__) -+ || defined(__arm__) || defined(__loongarch__) - uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, - int *parent_tidptr, void *newtls, int *child_tidptr); - #endif -diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc -index 3b1a2174c..43551c0d1 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc -@@ -196,7 +196,7 @@ void InitTlsSize() { } - - #if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \ - || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \ -- || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID -+ || defined(__arm__) || defined(__loongarch__)) && SANITIZER_LINUX && !SANITIZER_ANDROID - // sizeof(struct pthread) from glibc. - static atomic_uintptr_t kThreadDescriptorSize; - -@@ -251,6 +251,9 @@ uptr ThreadDescriptorSize() { - if (val) - atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); - return val; -+#elif defined(__loongarch64) -+ val = 1776; -+ return val; - #elif defined(__aarch64__) - // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. - val = 1776; -@@ -274,12 +277,14 @@ uptr ThreadSelfOffset() { - return kThreadSelfOffset; - } - --#if defined(__mips__) || defined(__powerpc64__) -+#if defined(__mips__) || defined(__powerpc64__) || defined(__loongarch__) - // TlsPreTcbSize includes size of struct pthread_descr and size of tcb - // head structure. It lies before the static tls blocks. 
- static uptr TlsPreTcbSize() { - # if defined(__mips__) - const uptr kTcbHead = 16; // sizeof (tcbhead_t) -+# elif defined(__loongarch__) -+ const uptr kTcbHead = 16; // sizeof (tcbhead_t) - # elif defined(__powerpc64__) - const uptr kTcbHead = 88; // sizeof (tcbhead_t) - # endif -@@ -308,6 +313,10 @@ uptr ThreadSelf() { - rdhwr %0,$29;\ - .set pop" : "=r" (thread_pointer)); - descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize(); -+# elif defined(__loongarch__) -+ uptr thread_pointer; -+ asm("or %0,$r2,$r0" : "=r" (thread_pointer)); -+ descr_addr = thread_pointer - TlsPreTcbSize(); - # elif defined(__aarch64__) || defined(__arm__) - descr_addr = reinterpret_cast(__builtin_thread_pointer()) - - ThreadDescriptorSize(); -@@ -360,7 +369,7 @@ static void GetTls(uptr *addr, uptr *size) { - *addr -= *size; - *addr += ThreadDescriptorSize(); - # elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \ -- || defined(__arm__) -+ || defined(__arm__) || defined(__loongarch__) - *addr = ThreadSelf(); - *size = GetTlsSize(); - # else -diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S b/libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S -new file mode 100644 -index 000000000..245816e60 ---- /dev/null -+++ b/libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S -@@ -0,0 +1,22 @@ -+// This file is dual licensed under the MIT and the University of Illinois Open -+// Source Licenses. See LICENSE.TXT for details. -+ -+// Avoid being marked as needing an executable stack: -+#if defined(__linux__) && defined(__ELF__) -+.section .note.GNU-stack,"",%progbits -+#endif -+ -+// Further contents are loongarch64 only: -+#if defined(__linux__) && defined(__loongarch64) -+ -+.section .text -+.globl internal_sigreturn -+.type internal_sigreturn, @function -+internal_sigreturn: -+ -+ li.d $r11,139 // #139 is for SYS_rt_sigreturn -+ syscall 0 -+ -+.size internal_sigreturn, .-internal_sigreturn -+ -+#endif // defined(__linux__) && defined(__loongarch64) -diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h -index 1eb4d0c61..6d91863a5 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_platform.h -+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h -@@ -187,7 +187,7 @@ - #ifndef SANITIZER_CAN_USE_ALLOCATOR64 - # if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA - # define SANITIZER_CAN_USE_ALLOCATOR64 1 --# elif defined(__mips64) || defined(__aarch64__) -+# elif defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) - # define SANITIZER_CAN_USE_ALLOCATOR64 0 - # else - # define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) -@@ -197,7 +197,7 @@ - // The range of addresses which can be returned my mmap. - // FIXME: this value should be different on different platforms. Larger values - // will still work but will consume more memory for TwoLevelByteMap. --#if defined(__mips__) -+#if defined(__mips__) || defined(__loongarch__) - # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) - #elif defined(__aarch64__) - # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48) -@@ -209,7 +209,7 @@ - // the upstream linux community for all new ports. Other ports may still - // use legacy syscalls. 
- #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS --# if defined(__aarch64__) && SANITIZER_LINUX -+# if (defined(__aarch64__) || defined(__loongarch64)) && SANITIZER_LINUX - # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1 - # else - # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0 -@@ -241,6 +241,21 @@ - # define HAVE_TIRPC_RPC_XDR_H 0 - #endif - -+#if defined(__loongarch__) -+# define SANITIZER_LOONGARCH 1 -+# if defined(__loongarch64) -+# define SANITIZER_LOONGARCH32 0 -+# define SANITIZER_LOONGARCH64 1 -+# else -+# define SANITIZER_LOONGARCH32 1 -+# define SANITIZER_LOONGARCH64 0 -+# endif -+#else -+# define SANITIZER_LOONGARCH 0 -+# define SANITIZER_LOONGARCH32 0 -+# define SANITIZER_LOONGARCH64 0 -+#endif -+ - /// \macro MSC_PREREQ - /// \brief Is the compiler MSVC of at least the specified version? - /// The common \param version values to check for are: -diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h -index b9eb09ad3..e8f8cfedb 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h -+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h -@@ -205,7 +205,7 @@ - #if SI_LINUX_NOT_ANDROID && \ - (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ - defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ -- defined(__s390__)) -+ defined(__s390__) || defined(__loongarch__)) - #define SANITIZER_INTERCEPT_PTRACE 1 - #else - #define SANITIZER_INTERCEPT_PTRACE 0 -@@ -382,7 +382,7 @@ - #define SANITIZER_INTERCEPT_PVALLOC \ - (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) - #define SANITIZER_INTERCEPT_CFREE \ -- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) -+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && !SANITIZER_LOONGARCH) - #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC) - #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC) - #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID -diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc -index 23a014823..cf71e922e 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc -@@ -64,7 +64,7 @@ namespace __sanitizer { - - #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\ - && !defined(__mips__) && !defined(__s390__)\ -- && !defined(__sparc__) -+ && !defined(__sparc__) && !defined(__loongarch__) - COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat)); - #endif - -diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc -index 5c720b2e7..e0225c4a9 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc -@@ -115,7 +115,8 @@ - #if SANITIZER_LINUX || SANITIZER_FREEBSD - # include - # include --# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) -+# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \ -+ defined(__loongarch64) - # include - # ifdef __arm__ - typedef struct user_fpregs elf_fpregset_t; -@@ -153,7 +154,7 @@ typedef struct user_fpregs elf_fpregset_t; - #include - #include - #include --#if defined(__mips64) -+#if defined(__mips64) || defined(__loongarch64) - # include - #endif - 
#include -@@ -253,10 +254,11 @@ namespace __sanitizer { - // has been removed from glibc 2.28. - #if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \ - || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \ -- || defined(__x86_64__) -+ || defined(__x86_64__) || defined(__loongarch64) - #define SIZEOF_STRUCT_USTAT 32 - #elif defined(__arm__) || defined(__i386__) || defined(__mips__) \ -- || defined(__powerpc__) || defined(__s390__) || defined(__sparc__) -+ || defined(__powerpc__) || defined(__s390__) || defined(__sparc__) \ -+ || defined(__loongarch__) - #define SIZEOF_STRUCT_USTAT 20 - #else - #error Unknown size of struct ustat -@@ -326,27 +328,31 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); - #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ - (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ - defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ -- defined(__s390__)) -+ defined(__s390__) || defined(__loongarch64)) - #if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) - unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); - unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); - #elif defined(__aarch64__) - unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); - unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); -+#elif defined(__loongarch64) -+ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); -+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state); - #elif defined(__s390__) - unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct); - unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct); - #else - unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); - unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); --#endif // __mips64 || __powerpc64__ || __aarch64__ -+#endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch64 - #if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ -- defined(__aarch64__) || defined(__arm__) || defined(__s390__) -+ defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ -+ defined(__loongarch64) - unsigned struct_user_fpxregs_struct_sz = 0; - #else - unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); - #endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__ --// || __s390__ -+// || __s390__ || __loongarch64 - #ifdef __arm__ - unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE; - #else -diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h -index 9c1429623..0020448cc 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h -+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h -@@ -77,6 +77,9 @@ namespace __sanitizer { - #elif defined(__aarch64__) - const unsigned struct_kernel_stat_sz = 128; - const unsigned struct_kernel_stat64_sz = 104; -+#elif defined(__loongarch__) -+ const unsigned struct_kernel_stat_sz = 128; -+ const unsigned struct_kernel_stat64_sz = 128; - #elif defined(__powerpc__) && !defined(__powerpc64__) - const unsigned struct_kernel_stat_sz = 72; - const unsigned struct_kernel_stat64_sz = 104; -@@ -659,7 +662,7 @@ namespace __sanitizer { - - #if SANITIZER_FREEBSD - typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t; --#elif defined(__mips__) -+#elif defined(__mips__) || 
defined(__loongarch__) - struct __sanitizer_kernel_sigset_t { - uptr sig[2]; - }; -@@ -827,7 +830,7 @@ namespace __sanitizer { - #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ - (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ - defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ -- defined(__s390__)) -+ defined(__s390__) || defined(__loongarch64)) - extern unsigned struct_user_regs_struct_sz; - extern unsigned struct_user_fpregs_struct_sz; - extern unsigned struct_user_fpxregs_struct_sz; -diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc -index 2de585c32..ca79b289d 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc -@@ -18,7 +18,7 @@ namespace __sanitizer { - uptr StackTrace::GetNextInstructionPc(uptr pc) { - #if defined(__mips__) - return pc + 8; --#elif defined(__powerpc__) -+#elif defined(__powerpc__) || defined(__loongarch__) - return pc + 4; - #else - return pc + 1; -diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h -index 31e99f6b9..3affe4eb7 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h -+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h -@@ -17,7 +17,8 @@ namespace __sanitizer { - - static const u32 kStackTraceMax = 256; - --#if SANITIZER_LINUX && (defined(__sparc__) || defined(__mips__)) -+#if SANITIZER_LINUX && (defined(__sparc__) || defined(__mips__)) || \ -+ (SANITIZER_LINUX && defined(__loongarch__)) - # define SANITIZER_CAN_FAST_UNWIND 0 - #elif SANITIZER_WINDOWS - # define SANITIZER_CAN_FAST_UNWIND 0 -@@ -74,7 +75,7 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) { - // Cancel Thumb bit. - pc = pc & (~1); - #endif --#if defined(__powerpc__) || defined(__powerpc64__) -+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__loongarch__) - // PCs are always 4 byte aligned. 
- return pc - 4; - #elif defined(__sparc__) || defined(__mips__) -diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc -index d746fa540..4c183efc4 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc -@@ -15,13 +15,17 @@ - #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \ - defined(__aarch64__) || defined(__powerpc64__) || \ - defined(__s390__) || defined(__i386__) || \ -- defined(__arm__)) -+ defined(__arm__) || defined(__loongarch__)) - - #include "sanitizer_stoptheworld.h" - - #include "sanitizer_platform_limits_posix.h" - #include "sanitizer_atomic.h" - -+#if defined(__loongarch__) -+#include -+#endif -+ - #include - #include // for CLONE_* definitions - #include -@@ -35,7 +39,7 @@ - # include - #endif - #include // for user_regs_struct --#if SANITIZER_ANDROID && SANITIZER_MIPS -+#if SANITIZER_ANDROID && SANITIZER_MIPS || SANITIZER_LOONGARCH - # include // for mips SP register in sys/user.h - #endif - #include // for signal-related stuff -@@ -483,8 +487,14 @@ typedef pt_regs regs_struct; - - #elif defined(__mips__) - typedef struct user regs_struct; -+#elif defined(__loongarch__) -+typedef struct user_regs_struct regs_struct; -+#define ARCH_IOVEC_FOR_GETREGSET -+ - # if SANITIZER_ANDROID - # define REG_SP regs[EF_R29] -+# elif SANITIZER_LOONGARCH -+# define REG_SP gpr[3] - # else - # define REG_SP regs[EF_REG29] - # endif -diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc -index ebf5ec094..c7cdf37df 100644 ---- a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc -+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc -@@ -81,6 +81,8 @@ void DTLS_Destroy() { - // "Dynamic thread vector pointers point 0x8000 past the start of each - // TLS block." 
- static const uptr kDtvOffset = 0x8000; -+#elif defined(__loongarch__) -+static const uptr kDtvOffset = 0x800; - #else - static const uptr kDtvOffset = 0; - #endif -diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am -index 753cb8f4f..ac5ae4117 100644 ---- a/libsanitizer/tsan/Makefile.am -+++ b/libsanitizer/tsan/Makefile.am -@@ -50,7 +50,7 @@ tsan_files = \ - tsan_sync.cc - - libtsan_la_SOURCES = $(tsan_files) --EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S -+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S rtl_loongarch64.S - libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS) - libtsan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS) - if LIBBACKTRACE_SUPPORTED -diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in -index 629056bf1..6a3477b99 100644 ---- a/libsanitizer/tsan/Makefile.in -+++ b/libsanitizer/tsan/Makefile.in -@@ -358,7 +358,7 @@ tsan_files = \ - tsan_sync.cc - - libtsan_la_SOURCES = $(tsan_files) --EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S -+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S rtl_loongarch64.S - libtsan_la_LIBADD = \ - $(top_builddir)/sanitizer_common/libsanitizer_common.la \ - $(top_builddir)/interception/libinterception.la \ -@@ -512,6 +512,7 @@ distclean-compile: - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_aarch64.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mips64.Plo@am__quote@ -+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_loongarch64.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_ppc64.Plo@am__quote@ - @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_proc.Plo@am__quote@ -diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc -index 15f20d4b6..c6959862b 100644 ---- a/libsanitizer/tsan/tsan_interceptors.cc -+++ b/libsanitizer/tsan/tsan_interceptors.cc -@@ -71,7 +71,8 @@ struct ucontext_t { - }; - #endif - --#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 -+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 \ -+ || defined(__loongarch__) - #define PTHREAD_ABI_BASE "GLIBC_2.3.2" - #elif defined(__aarch64__) || SANITIZER_PPC64V2 - #define PTHREAD_ABI_BASE "GLIBC_2.17" -@@ -500,6 +501,8 @@ static void LongJmp(ThreadState *thr, uptr *env) { - uptr mangled_sp = env[13]; - # elif defined(__mips64) - uptr mangled_sp = env[1]; -+#elif defined(__loongarch64) -+ uptr mangled_sp = env[1]; - # else - uptr mangled_sp = env[6]; - # endif -diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h -index 44a3ea991..a50dc6dbe 100644 ---- a/libsanitizer/tsan/tsan_platform.h -+++ b/libsanitizer/tsan/tsan_platform.h -@@ -129,6 +129,44 @@ struct Mapping { - static const uptr kVdsoBeg = 0x7000000000000000ull; - }; - -+#elif defined(__loongarch64) -+/* -+ * TODO same as mips64 and need to change in the future -+C/C++ on linux/loongarch64 (40-bit VMA) -+0000 0000 00 - 0100 0000 
00: - (4 GB) -+0100 0000 00 - 0200 0000 00: main binary (4 GB) -+0200 0000 00 - 2000 0000 00: - (120 GB) -+2000 0000 00 - 4000 0000 00: shadow (128 GB) -+4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB) -+5000 0000 00 - aa00 0000 00: - (360 GB) -+aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB) -+ab00 0000 00 - b000 0000 00: - (20 GB) -+b000 0000 00 - b200 0000 00: traces (8 GB) -+b200 0000 00 - fe00 0000 00: - (304 GB) -+fe00 0000 00 - ff00 0000 00: heap (4 GB) -+ff00 0000 00 - ff80 0000 00: - (2 GB) -+ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB) -+*/ -+struct Mapping { -+ static const uptr kMetaShadowBeg = 0x4000000000ull; -+ static const uptr kMetaShadowEnd = 0x5000000000ull; -+ static const uptr kTraceMemBeg = 0xb000000000ull; -+ static const uptr kTraceMemEnd = 0xb200000000ull; -+ static const uptr kShadowBeg = 0x2000000000ull; -+ static const uptr kShadowEnd = 0x4000000000ull; -+ static const uptr kHeapMemBeg = 0xfe00000000ull; -+ static const uptr kHeapMemEnd = 0xff00000000ull; -+ static const uptr kLoAppMemBeg = 0x0100000000ull; -+ static const uptr kLoAppMemEnd = 0x0200000000ull; -+ static const uptr kMidAppMemBeg = 0xaa00000000ull; -+ static const uptr kMidAppMemEnd = 0xab00000000ull; -+ static const uptr kHiAppMemBeg = 0xff80000000ull; -+ static const uptr kHiAppMemEnd = 0xffffffffffull; -+ static const uptr kAppMemMsk = 0xf800000000ull; -+ static const uptr kAppMemXor = 0x0800000000ull; -+ static const uptr kVdsoBeg = 0xfffff00000ull; -+}; -+ - #elif defined(__aarch64__) - // AArch64 supports multiple VMA which leads to multiple address transformation - // functions. To support these multiple VMAS transformations and mappings TSAN -diff --git a/libsanitizer/tsan/tsan_platform_posix.cc b/libsanitizer/tsan/tsan_platform_posix.cc -index 6e62575f1..e146d04fb 100644 ---- a/libsanitizer/tsan/tsan_platform_posix.cc -+++ b/libsanitizer/tsan/tsan_platform_posix.cc -@@ -59,6 +59,9 @@ void InitializeShadowMemory() { - } else { - DCHECK(0); - } -+#elif defined(__loongarch64) -+ const uptr kMadviseRangeBeg = 0xff00000000ull; -+ const uptr kMadviseRangeSize = 0x0100000000ull; - #elif defined(__powerpc64__) - uptr kMadviseRangeBeg = 0; - uptr kMadviseRangeSize = 0; -diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc -index 4a1f50061..8f9c48867 100644 ---- a/libsanitizer/tsan/tsan_rtl.cc -+++ b/libsanitizer/tsan/tsan_rtl.cc -@@ -224,7 +224,7 @@ static void StartBackgroundThread() { - ctx->background_thread = internal_start_thread(&BackgroundThread, 0); - } - --#ifndef __mips__ -+#ifndef __mips__ || defined(__loongarch__) - static void StopBackgroundThread() { - atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); - internal_join_thread(ctx->background_thread); -@@ -401,6 +401,20 @@ void Initialize(ThreadState *thr) { - OnInitialize(); - } - -+void MaybeSpawnBackgroundThread() { -+ // On MIPS, TSan initialization is run before -+ // __pthread_initialize_minimal_internal() is finished, so we can not spawn -+ // new threads. 
-+#if !SANITIZER_GO && !(defined(__mips__) || defined(__loongarch__)) -+ static atomic_uint32_t bg_thread = {}; -+ if (atomic_load(&bg_thread, memory_order_relaxed) == 0 && -+ atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) { -+ StartBackgroundThread(); -+ SetSandboxingCallback(StopBackgroundThread); -+ } -+#endif -+} -+ - int Finalize(ThreadState *thr) { - bool failed = false; - -diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h -index 7dd9779e4..6f8800003 100644 ---- a/libsanitizer/tsan/tsan_rtl.h -+++ b/libsanitizer/tsan/tsan_rtl.h -@@ -52,7 +52,8 @@ namespace __tsan { - - #if !SANITIZER_GO - struct MapUnmapCallback; --#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) -+#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) \ -+ || defined(__loongarch64) - static const uptr kAllocatorRegionSizeLog = 20; - static const uptr kAllocatorNumRegions = - SANITIZER_MMAP_RANGE_SIZE >> kAllocatorRegionSizeLog; -diff --git a/libsanitizer/tsan/tsan_rtl_loongarch64.S b/libsanitizer/tsan/tsan_rtl_loongarch64.S -new file mode 100644 -index 000000000..9331e8afa ---- /dev/null -+++ b/libsanitizer/tsan/tsan_rtl_loongarch64.S -@@ -0,0 +1,156 @@ -+.section .text -+ -+.hidden __tsan_setjmp -+.comm _ZN14__interception11real_setjmpE,8,8 -+.globl setjmp -+.type setjmp, @function -+setjmp: -+ -+ # save env parameters -+ addi.d $r3,$r3,-24 -+ st.d $r1,$r3,16 -+ st.d $r22,$r3,8 -+ -+ # save jmp_buf -+ st.d $r4,$r3,0 -+ -+ # obtain $sp -+ add.d $r4,$r0,$r3 -+ -+ # call tsan interceptor -+ addi.d $r5,$r4,24 -+ bl __tsan_setjmp -+ -+ # restore jmp_buf -+ ld.d $r4,$r3,0 -+ -+ # load libc setjmp to r20 -+ la $r20,_ZN14__interception11real_setjmpE -+ # restore env parameters -+ ld.d $r22,$r3,8 -+ ld.d $r1,$r3,16 -+ addi.d $r3,$r3,24 -+ -+ # tail jump to libc setjmp -+ ld.d $r20,$r20,0 -+ jr $r20 -+ -+.size setjmp, .-setjmp -+ -+.hidden __tsan_setjmp -+.globl _setjmp -+.comm _ZN14__interception12real__setjmpE,8,8 -+.type _setjmp, @function -+_setjmp: -+ -+ # Save env parameters -+ addi.d $r3,$r3,-24 -+ st.d $r1,$r3,16 -+ st.d $r22,$r3,8 -+ -+ # save jmp_buf -+ st.d $r4,$r3,0 -+ -+ # obtain $sp -+ add.d $r4,$r0,$r3 -+ -+ # call tsan interceptor -+ addi.d $r5,$r4,24 -+ bl __tsan_setjmp -+ -+ # restore jmp_buf -+ ld.d $r4,$r3,0 -+ -+ # load libc _setjmp to r20 -+ la $r20,_ZN14__interception12real__setjmpE -+ -+ # restore env parameters -+ ld.d $r22,$r3,8 -+ ld.d $r1,$r3,16 -+ addi.d $r3,$r3,24 -+ -+ # tail jump to libc _setjmp -+ ld.d $r20,$r20,0 -+ jr $r20 -+ -+.size _setjmp, .-_setjmp -+ -+.hidden __tsan_setjmp -+.globl sigsetjmp -+.comm _ZN14__interception14real_sigsetjmpE,8,8 -+.type sigsetjmp, @function -+sigsetjmp: -+ -+ # Save env parameters -+ addi.d $r3,$r3,-32 -+ st.d $r1,$r3,24 -+ st.d $r22,$r3,16 -+ -+ # save jmp_buf and savesig -+ st.d $r4,$r3,0 -+ st.d $r5,$r3,8 -+ -+ # obtain $sp -+ add.d $r4,$r0,$r3 -+ -+ # call tsan interceptor -+ addi.d $r5,$r4,32 -+ bl __tsan_setjmp -+ -+ # restore jmp_buf and savesig -+ ld.d $r4,$r3,0 -+ ld.d $r5,$r3,8 -+ -+ # load libc sigsetjmp to r20 -+ la $r20,_ZN14__interception14real_sigsetjmpE -+ -+ # restore env parameters -+ ld.d $r22,$r3,16 -+ ld.d $r1,$r3,24 -+ addi.d $r3,$r3,32 -+ -+ # tail jump to libc sigsetjmp -+ ld.d $r20,$r20,0 -+ jr $r20 -+ -+.size sigsetjmp, .-sigsetjmp -+ -+.hidden __tsan_setjmp -+.comm _ZN14__interception16real___sigsetjmpE,8,8 -+.globl __sigsetjmp -+.type __sigsetjmp, @function -+__sigsetjmp: -+ -+ # Save env parameters -+ addi.d $sp,$sp,-32 -+ st.d $r1,$r3,24 -+ st.d $r22,$r3,16 
-+ -+ # save jmp_buf and savesig -+ st.d $r4,$r3,0 -+ st.d $r5,$r3,8 -+ -+ # obtain $sp -+ add.d $r4,$r0,$r3 -+ -+ # call tsan interceptor -+ addi.d $r5,$r4,32 -+ bl __tsan_setjmp -+ -+ # restore jmp_buf and savesig -+ ld.d $r4,$r3,0 -+ ld.d $r5,$r3,8 -+ -+ # load libc __sigsetjmp in r20 -+ la $r20,_ZN14__interception16real___sigsetjmpE -+ -+ # restore env parameters -+ ld.d $r22,$r3,16 -+ ld.d $r1,$r3,24 -+ addi.d $r3,$r3,32 -+ -+ # tail jump to libc __sigsetjmp -+ ld.d $r20,$r20,0 -+ jr $r20 -+ -+.size __sigsetjmp, .-__sigsetjmp --- -2.39.3 - diff --git a/LoongArch-Fix-atomic_exchange-expanding-PR107713.patch b/LoongArch-Fix-atomic_exchange-expanding-PR107713.patch deleted file mode 100644 index 1660289..0000000 --- a/LoongArch-Fix-atomic_exchange-expanding-PR107713.patch +++ /dev/null @@ -1,164 +0,0 @@ -From 438fe2208b9a219e3a3d729f39a55c6831082181 Mon Sep 17 00:00:00 2001 -From: Xing Li -Date: Fri, 2 Dec 2022 10:35:54 +0800 -Subject: [PATCH] LoongArch: Fix atomic_exchange expanding [PR107713] - -We used to expand atomic_exchange_n(ptr, new, mem_order) for subword types -into something like: - - { - __typeof__(*ptr) t = atomic_load_n(ptr, mem_order); - atomic_compare_exchange_n(ptr, &t, new, true, mem_order, mem_order); - return t; - } - -It's incorrect because another thread may store a different value into *ptr -after atomic_load_n. Then atomic_compare_exchange_n will not store into -*ptr, but atomic_exchange_n should always perform the store. - -gcc/ChangeLog: - - PR target/107713 - * config/loongarch/sync.md - (atomic_cas_value_exchange_7_): New define_insn. - (atomic_exchange): Use atomic_cas_value_exchange_7_si instead of - atomic_cas_value_cmp_and_7_si. - -gcc/testsuite/ChangeLog: - - PR target/107713 - * gcc.target/loongarch/pr107713-1.c: New test. - * gcc.target/loongarch/pr107713-2.c: New test. 
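Put differently, a correct expansion must keep retrying until its own store lands. In the pseudo-notation of the snippet above (a sketch of the intended semantics, not the literal expansion; the patch realizes it as the atomic_cas_value_exchange_7_<mode> LL/SC loop):

    __typeof__(*ptr) t = atomic_load_n (ptr, mem_order);
    /* On failure the compare-and-swap refreshes t with the current
       value of *ptr, so the loop can only exit by storing new.  */
    while (!atomic_compare_exchange_n (ptr, &t, new, true,
                                       mem_order, mem_order))
      ;
    return t;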
- -Signed-off-by: Xing Li -Signed-off-by: Jinyang He ---- - gcc/config/loongarch/sync.md | 27 +++++++++- - .../gcc.target/loongarch/pr107713-1.c | 50 +++++++++++++++++++ - .../gcc.target/loongarch/pr107713-2.c | 9 ++++ - 3 files changed, 84 insertions(+), 2 deletions(-) - create mode 100644 gcc/testsuite/gcc.target/loongarch/pr107713-1.c - create mode 100644 gcc/testsuite/gcc.target/loongarch/pr107713-2.c - -diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md -index e3eb43e16..5a16c4fa3 100644 ---- a/gcc/config/loongarch/sync.md -+++ b/gcc/config/loongarch/sync.md -@@ -461,6 +461,29 @@ - } - [(set (attr "length") (const_int 32))]) - -+(define_insn "atomic_cas_value_exchange_7_" -+ [(set (match_operand:GPR 0 "register_operand" "=&r") -+ (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (set (match_dup 1) -+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 3 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 4 "reg_or_0_operand" "rJ") -+ (match_operand:GPR 5 "reg_or_0_operand" "rJ") -+ (match_operand:SI 6 "const_int_operand")] ;; model -+ UNSPEC_SYNC_EXCHANGE)) -+ (clobber (match_scratch:GPR 7 "=&r"))] -+ "" -+{ -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%z3\\n\\t" -+ "or%i5\\t%7,%7,%5\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beqz\\t%7,1b\\n\\t"; -+} -+ [(set (attr "length") (const_int 20))]) -+ - (define_expand "atomic_exchange" - [(set (match_operand:SHORT 0 "register_operand") - (unspec_volatile:SHORT -@@ -472,11 +495,11 @@ - "" - { - union loongarch_gen_fn_ptrs generator; -- generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; -+ generator.fn_7 = gen_atomic_cas_value_exchange_7_si; - loongarch_expand_atomic_qihi (generator, - operands[0], - operands[1], -- operands[1], -+ const0_rtx, - operands[2], - operands[3]); - DONE; -diff --git a/gcc/testsuite/gcc.target/loongarch/pr107713-1.c b/gcc/testsuite/gcc.target/loongarch/pr107713-1.c -new file mode 100644 -index 000000000..c307bf87b ---- /dev/null -+++ b/gcc/testsuite/gcc.target/loongarch/pr107713-1.c -@@ -0,0 +1,50 @@ -+/* { dg-do run } */ -+/* { dg-require-effective-target pthread } */ -+/* { dg-options "-pthread" } */ -+ -+#include -+ -+char x, x1, x2; -+ -+void * -+work1 (void *none) -+{ -+ for (int i = 0; i < 100; i++) -+ x1 = __atomic_exchange_n (&x, x1, __ATOMIC_SEQ_CST); -+ return NULL; -+} -+ -+void * -+work2 (void *none) -+{ -+ for (int i = 0; i < 100; i++) -+ x2 = __atomic_exchange_n (&x, x2, __ATOMIC_SEQ_CST); -+ return NULL; -+} -+ -+void -+test (void) -+{ -+ x = 0; -+ x1 = 1; -+ x2 = 2; -+ pthread_t w1, w2; -+ if (pthread_create (&w1, NULL, work1, NULL) != 0) -+ __builtin_abort (); -+ if (pthread_create (&w2, NULL, work2, NULL) != 0) -+ __builtin_abort (); -+ if (pthread_join (w1, NULL) != 0) -+ __builtin_abort (); -+ if (pthread_join (w2, NULL) != 0) -+ __builtin_abort (); -+ if ((x ^ x1 ^ x2) != 3) -+ __builtin_abort (); -+} -+ -+int -+main () -+{ -+ int i; -+ for (i = 0; i < 10000; i++) -+ test (); -+} -diff --git a/gcc/testsuite/gcc.target/loongarch/pr107713-2.c b/gcc/testsuite/gcc.target/loongarch/pr107713-2.c -new file mode 100644 -index 000000000..82d44db3d ---- /dev/null -+++ b/gcc/testsuite/gcc.target/loongarch/pr107713-2.c -@@ -0,0 +1,9 @@ -+/* { dg-do compile } */ -+/* { dg-options "-O2" } */ -+/* { dg-final { scan-assembler-times "beq|bne" 1 } } */ -+ -+char -+t (char *p, char x) -+{ -+ return __atomic_exchange_n (p, x, __ATOMIC_RELAXED); -+} --- -2.27.0 - diff --git a/LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch 
b/LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch
deleted file mode 100644
index f8de504..0000000
--- a/LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch
+++ /dev/null
@@ -1,101 +0,0 @@
-From 08d337cc5186e47949b60e4b3eeebd1f763337e0 Mon Sep 17 00:00:00 2001
-From: Lixing
-Date: Mon, 31 Jul 2023 09:46:12 +0800
-Subject: [PATCH 1/2] LoongArch: Remove NOOP_TRUNCATION and fix extendsidi2
-
-We can safely convert a value from inprec to outprec because we keep
-the extension if needed.
----
- gcc/config/loongarch/loongarch.c  | 11 --------
- gcc/config/loongarch/loongarch.md | 44 +++++++------------------------
- 2 files changed, 9 insertions(+), 46 deletions(-)
-
-diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c
-index a1dde5a0f..f8f96329c 100644
---- a/gcc/config/loongarch/loongarch.c
-+++ b/gcc/config/loongarch/loongarch.c
-@@ -10313,14 +10313,6 @@ loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
-   return mode;
- }
-
---/* Implement TARGET_TRULY_NOOP_TRUNCATION.  */
---
---static bool
---loongarch_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec)
---{
---  return !TARGET_64BIT || inprec <= 32 || outprec > 32;
---}
---
- /* Implement TARGET_STARTING_FRAME_OFFSET.  See loongarch_compute_frame_info
-    for details about the frame layout.  */
-
-@@ -10940,9 +10932,6 @@ loongarch_prefetch_cookie (rtx write, rtx locality)
- #undef TARGET_CAN_CHANGE_MODE_CLASS
- #define TARGET_CAN_CHANGE_MODE_CLASS loongarch_can_change_mode_class
-
---#undef TARGET_TRULY_NOOP_TRUNCATION
---#define TARGET_TRULY_NOOP_TRUNCATION loongarch_truly_noop_truncation
---
- #undef TARGET_CONSTANT_ALIGNMENT
- #define TARGET_CONSTANT_ALIGNMENT loongarch_constant_alignment
-
-diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
-index 097c9f4db..a08c4a62c 100644
---- a/gcc/config/loongarch/loongarch.md
-+++ b/gcc/config/loongarch/loongarch.md
-@@ -1433,43 +1433,17 @@
- ;;
- ;;  ....................
- --(define_insn_and_split "extendsidi2" -+(define_insn "extendsidi2" - [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") -- (sign_extend:DI -- (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m,k")))] -+ (sign_extend:DI -+ (match_operand:SI 1 "nonimmediate_operand" "r,ZC,m,k")))] - "TARGET_64BIT" --{ -- switch (which_alternative) -- { -- case 0: -- return "#"; -- case 1: -- { -- rtx offset = XEXP (operands[1], 0); -- if (GET_CODE (offset) == PLUS) -- offset = XEXP (offset, 1); -- else -- offset = const0_rtx; -- if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) -- return "ld.w\t%0,%1"; -- else -- return "ldptr.w\t%0,%1"; -- } -- case 2: -- return "ld.w\t%0,%1"; -- case 3: -- return "ldx.w\t%0,%1"; -- default: -- gcc_unreachable (); -- } --} -- "&& reload_completed && register_operand (operands[1], VOIDmode)" -- [(const_int 0)] --{ -- emit_note (NOTE_INSN_DELETED); -- DONE; --} -- [(set_attr "move_type" "move,load,load,load") -+ "@ -+ slli.w\t%0,%1,0 -+ ldptr.w\t%0,%1 -+ ld.w\t%0,%1 -+ ldx.w\t%0,%1" -+ [(set_attr "move_type" "sll0,load,load,load") - (set_attr "mode" "DI")]) - - (define_insn "extend2" --- -2.39.3 - diff --git a/Sync-to-gcc-8-vec-36.patch b/Sync-to-gcc-8-vec-36.patch deleted file mode 100644 index e41b234..0000000 --- a/Sync-to-gcc-8-vec-36.patch +++ /dev/null @@ -1,30492 +0,0 @@ -From 474c84c016b0c36c9aace9a41d6d9df8107cf3e8 Mon Sep 17 00:00:00 2001 -From: Lixing -Date: Wed, 19 Jul 2023 10:47:27 +0800 -Subject: [PATCH] Sync to gcc-8-vec-36 - ---- - .../config/loongarch/loongarch-common.c | 41 +- - gcc/config.gcc | 589 +- - gcc/config.host | 12 - - gcc/config/loongarch/constraints.md | 371 +- - gcc/config/loongarch/driver-native.c | 82 - - gcc/config/loongarch/elf.h | 56 +- - gcc/config/loongarch/frame-header-opt.c | 292 - - gcc/config/loongarch/generic.md | 21 +- - gcc/config/loongarch/genopt.sh | 110 - - gcc/config/loongarch/genopts/genstr.sh | 104 + - .../loongarch/genopts/loongarch-strings | 68 + - gcc/config/loongarch/genopts/loongarch.opt.in | 242 + - gcc/config/loongarch/gnu-user.h | 135 +- - gcc/config/loongarch/la464.md | 132 + - gcc/config/loongarch/larchintrin.h | 495 +- - gcc/config/loongarch/lasx.md | 684 +- - gcc/config/loongarch/lasxintrin.h | 46 +- - gcc/config/loongarch/linux-common.h | 68 - - gcc/config/loongarch/linux.h | 37 +- - gcc/config/loongarch/loongarch-builtins.c | 549 +- - gcc/config/loongarch/loongarch-c.c | 158 +- - gcc/config/loongarch/loongarch-cpu.c | 291 + - .../{loongarch-d.c => loongarch-cpu.h} | 30 +- - gcc/config/loongarch/loongarch-cpus.def | 38 - - gcc/config/loongarch/loongarch-def.c | 232 + - gcc/config/loongarch/loongarch-def.h | 161 + - gcc/config/loongarch/loongarch-driver.c | 206 + - gcc/config/loongarch/loongarch-driver.h | 72 + - gcc/config/loongarch/loongarch-ftypes.def | 173 +- - gcc/config/loongarch/loongarch-modes.def | 6 +- - gcc/config/loongarch/loongarch-opts.c | 725 ++ - gcc/config/loongarch/loongarch-opts.h | 86 +- - gcc/config/loongarch/loongarch-protos.h | 155 +- - gcc/config/loongarch/loongarch-str.h | 68 + - gcc/config/loongarch/loongarch-tables.opt | 34 - - gcc/config/loongarch/loongarch-tune.h | 51 + - gcc/config/loongarch/loongarch.c | 8440 +++++++++-------- - gcc/config/loongarch/loongarch.h | 1523 +-- - gcc/config/loongarch/loongarch.md | 3658 +++---- - gcc/config/loongarch/loongarch.opt | 252 +- - gcc/config/loongarch/lsx.md | 358 +- - gcc/config/loongarch/lsxintrin.h | 46 +- - gcc/config/loongarch/predicates.md | 250 +- - gcc/config/loongarch/rtems.h | 39 - - 
gcc/config/loongarch/sde.opt | 28 - - gcc/config/loongarch/sync.md | 746 +- - gcc/config/loongarch/t-linux | 65 +- - gcc/config/loongarch/t-loongarch | 59 +- - gcc/config/loongarch/x-native | 3 - - libgcc/config/loongarch/crtfastmath.c | 48 +- - libgcc/config/loongarch/crti.S | 43 - - libgcc/config/loongarch/crtn.S | 39 - - libgcc/config/loongarch/gthr-loongnixsde.h | 237 - - libgcc/config/loongarch/linux-unwind.h | 27 +- - libgcc/config/loongarch/sfp-machine.h | 166 +- - libgcc/config/loongarch/t-elf | 3 - - libgcc/config/loongarch/t-loongarch | 2 - - libgcc/config/loongarch/t-sdemtk | 3 - - libgcc/config/loongarch/t-vr | 0 - 59 files changed, 12128 insertions(+), 10527 deletions(-) - delete mode 100644 gcc/config/loongarch/driver-native.c - delete mode 100644 gcc/config/loongarch/frame-header-opt.c - delete mode 100644 gcc/config/loongarch/genopt.sh - create mode 100755 gcc/config/loongarch/genopts/genstr.sh - create mode 100644 gcc/config/loongarch/genopts/loongarch-strings - create mode 100644 gcc/config/loongarch/genopts/loongarch.opt.in - create mode 100644 gcc/config/loongarch/la464.md - delete mode 100644 gcc/config/loongarch/linux-common.h - create mode 100644 gcc/config/loongarch/loongarch-cpu.c - rename gcc/config/loongarch/{loongarch-d.c => loongarch-cpu.h} (59%) - delete mode 100644 gcc/config/loongarch/loongarch-cpus.def - create mode 100644 gcc/config/loongarch/loongarch-def.c - create mode 100644 gcc/config/loongarch/loongarch-def.h - create mode 100644 gcc/config/loongarch/loongarch-driver.c - create mode 100644 gcc/config/loongarch/loongarch-driver.h - create mode 100644 gcc/config/loongarch/loongarch-opts.c - create mode 100644 gcc/config/loongarch/loongarch-str.h - delete mode 100644 gcc/config/loongarch/loongarch-tables.opt - create mode 100644 gcc/config/loongarch/loongarch-tune.h - delete mode 100644 gcc/config/loongarch/rtems.h - delete mode 100644 gcc/config/loongarch/sde.opt - delete mode 100644 gcc/config/loongarch/x-native - delete mode 100644 libgcc/config/loongarch/crti.S - delete mode 100644 libgcc/config/loongarch/crtn.S - delete mode 100644 libgcc/config/loongarch/gthr-loongnixsde.h - delete mode 100644 libgcc/config/loongarch/t-elf - delete mode 100644 libgcc/config/loongarch/t-sdemtk - delete mode 100644 libgcc/config/loongarch/t-vr - -diff --git a/gcc/common/config/loongarch/loongarch-common.c b/gcc/common/config/loongarch/loongarch-common.c -index afbbc3ad0..ccdc8f498 100644 ---- a/gcc/common/config/loongarch/loongarch-common.c -+++ b/gcc/common/config/loongarch/loongarch-common.c -@@ -1,5 +1,5 @@ --/* Common hooks for LARCH. -- Copyright (C) 1989-2018 Free Software Foundation, Inc. -+/* Common hooks for LoongArch. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. - - This file is part of GCC. - -@@ -25,44 +25,21 @@ along with GCC; see the file COPYING3. If not see - #include "common/common-target-def.h" - #include "opts.h" - #include "flags.h" -+#include "diagnostic-core.h" - --#undef TARGET_OPTION_OPTIMIZATION_TABLE -+#undef TARGET_OPTION_OPTIMIZATION_TABLE - #define TARGET_OPTION_OPTIMIZATION_TABLE loongarch_option_optimization_table - - /* Set default optimization options. */ - static const struct default_options loongarch_option_optimization_table[] = - { -- { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, -- { OPT_LEVELS_NONE, 0, NULL, 0 } -+ { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, -+ /* Enable -fsched-pressure by default when optimizing. 
*/ -+ { OPT_LEVELS_1_PLUS, OPT_fsched_pressure, NULL, 1 }, -+ { OPT_LEVELS_NONE, 0, NULL, 0 } - }; - --/* Implement TARGET_HANDLE_OPTION. */ -- --static bool --loongarch_handle_option (struct gcc_options *opts, -- struct gcc_options *opts_set ATTRIBUTE_UNUSED, -- const struct cl_decoded_option *decoded, -- location_t loc ATTRIBUTE_UNUSED) --{ -- size_t code = decoded->opt_index; -- -- switch (code) -- { -- case OPT_mno_flush_func: -- opts->x_loongarch_cache_flush_func = NULL; -- return true; -- -- default: -- return true; -- } --} -- - #undef TARGET_DEFAULT_TARGET_FLAGS --#define TARGET_DEFAULT_TARGET_FLAGS \ -- (TARGET_DEFAULT \ -- | TARGET_CPU_DEFAULT \ -- | MASK_CHECK_ZERO_DIV) --#undef TARGET_HANDLE_OPTION --#define TARGET_HANDLE_OPTION loongarch_handle_option -+#define TARGET_DEFAULT_TARGET_FLAGS MASK_CHECK_ZERO_DIV - - struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER; -diff --git a/gcc/config.gcc b/gcc/config.gcc -index ba061efa4..cca2e6e43 100644 ---- a/gcc/config.gcc -+++ b/gcc/config.gcc -@@ -427,10 +427,10 @@ lm32*) - ;; - loongarch*-*-*) - cpu_type=loongarch -- d_target_objs="loongarch-d.o" - extra_headers="lasxintrin.h lsxintrin.h larchintrin.h" -- extra_objs="frame-header-opt.o loongarch-c.o loongarch-builtins.o" -- extra_options="${extra_options} g.opt fused-madd.opt loongarch/loongarch-tables.opt" -+ extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" -+ extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" -+ extra_options="${extra_options} g.opt fused-madd.opt" - ;; - m32r*-*-*) - cpu_type=m32r -@@ -2193,54 +2193,30 @@ mips*-*-linux*) # Linux MIPS, either endian. - fi - ;; - loongarch*-*-linux*) -- case ${with_abi} in -- "") -- echo "not specify ABI, default is lp64 for loongarch64" -- with_abi=lp64 # for default -- ;; -- lpx32) -- ;; -- lp32) -- ;; -- lp64) -- ;; -- *) -- echo "Unknown ABI used in --with-abi=$with_abi" -- exit 1 -- esac -- -- enable_multilib="yes" -- loongarch_multilibs="${with_multilib_list}" -- if test "$loongarch_multilibs" = "default"; then -- loongarch_multilibs="${with_abi}" -- fi -- loongarch_multilibs=`echo $loongarch_multilibs | sed -e 's/,/ /g'` -- for loongarch_multilib in ${loongarch_multilibs}; do -- case ${loongarch_multilib} in -- lp64 | lpx32 | lp32 ) -- TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${loongarch_multilib}" -- ;; -- *) -- echo "--with-multilib-list=${loongarch_multilib} not supported." -- exit 1 -- esac -- done -- TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'` -+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}" -+ tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h" -+ extra_options="${extra_options} linux-android.opt" -+ tmake_file="${tmake_file} loongarch/t-linux" -+ gnu_ld=yes -+ gas=yes - -- if test `for one_abi in ${loongarch_multilibs}; do if [ x\$one_abi = x$with_abi ]; then echo 1; exit 0; fi; done; echo 0;` = "0"; then -- echo "--with-abi=${with_abi} must be one of --with-multilib-list=${with_multilib_list}" -- exit 1 -- fi -+ # Force .init_array support. The configure script cannot always -+ # automatically detect that GAS supports it, yet we require it. 
-+ gcc_cv_initfini_array=yes -+ ;; - -- tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/linux-common.h" -- extra_options="${extra_options} linux-android.opt" -+loongarch*-*-elf*) -+ tm_file="elfos.h newlib-stdint.h ${tm_file}" -+ tm_file="${tm_file} loongarch/elf.h loongarch/linux.h" - tmake_file="${tmake_file} loongarch/t-linux" - gnu_ld=yes - gas=yes -+ - # Force .init_array support. The configure script cannot always - # automatically detect that GAS supports it, yet we require it. - gcc_cv_initfini_array=yes - ;; -+ - mips*-mti-elf*) - tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h mips/n32-elf.h mips/sde.h mips/mti-elf.h" - tmake_file="mips/t-mti-elf" -@@ -2295,31 +2271,6 @@ mips*-sde-elf*) - ;; - esac - ;; --loongarch*-sde-elf*) -- tm_file="elfos.h newlib-stdint.h ${tm_file} loongarch/elf.h loongarch/sde.h" --# tmake_file="loongarch/t-sde" -- extra_options="${extra_options} loongarch/sde.opt" -- case "${with_newlib}" in -- yes) -- # newlib / libgloss. -- ;; -- *) -- # MIPS toolkit libraries. -- tm_file="$tm_file loongarch/sdemtk.h" -- tmake_file="$tmake_file loongarch/t-sdemtk" -- case ${enable_threads} in -- "" | yes | loongarchsde) -- thread_file='loongarchsde' -- ;; -- esac -- ;; -- esac -- case ${target} in -- loongarch*) -- tm_defines="LARCH_ISA_DEFAULT=0 LARCH_ABI_DEFAULT=ABILP64" -- ;; -- esac -- ;; - mipsisa32-*-elf* | mipsisa32el-*-elf* | \ - mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \ - mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \ -@@ -3259,7 +3210,7 @@ case ${target} in - ;; - *-*-linux* | *-*-gnu*) - case ${target} in -- aarch64*-* | arm*-* | i[34567]86-* | powerpc*-* | s390*-* | sparc*-* | x86_64-*) -+ aarch64*-* | arm*-* | i[34567]86-* | powerpc*-* | s390*-* | sparc*-* | x86_64-* | loongarch*-*) - default_gnu_indirect_function=yes - ;; - esac -@@ -4450,57 +4401,466 @@ case "${target}" in - ;; - - loongarch*-*-*) -- supported_defaults="abi arch float fpu tune" -+ supported_defaults="abi arch tune fpu simd multilib-default" -+ -+ # Local variables -+ unset \ -+ abi_pattern abi_default \ -+ abiext_pattern abiext_default \ -+ arch_pattern arch_default \ -+ fpu_pattern fpu_default \ -+ triplet_os triplet_abi -+ -+ # Infer ABI from the triplet. -+ case ${target} in -+ loongarch64-*-*-*f64) -+ abi_pattern="lp64d" -+ triplet_abi="" -+ ;; -+ loongarch64-*-*-*f32) -+ abi_pattern="lp64f" -+ triplet_abi="f32" -+ ;; -+ loongarch64-*-*-*sf) -+ abi_pattern="lp64s" -+ triplet_abi="sf" -+ ;; -+ loongarch64-*-*) -+ abi_pattern="lp64[dfs]" -+ abi_default="lp64d" -+ triplet_abi="" -+ ;; -+ *) -+ echo "Unsupported target ${target}." 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ abiext_pattern="*" -+ abiext_default="base" -+ -+ # Get the canonical triplet (multiarch specifier). -+ case ${target} in -+ *-linux-gnu*) triplet_os="linux-gnu";; -+ *-linux-musl*) triplet_os="linux-musl";; -+ *-elf*) triplet_os="elf";; -+ *) -+ echo "Unsupported target ${target}." 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}" - -+ -+ # Perform initial sanity checks on --with-* options. - case ${with_arch} in -- loongarch64 | loongarch32) -- # OK -- default_loongarch_arch=$with_arch -+ "" | abi-default | loongarch64 | la[2346]64) ;; # OK, append here. -+ native) -+ if test x${host} != x${target}; then -+ echo "--with-arch=native is illegal for cross-compiler." 
1>&2 -+ exit 1 -+ fi - ;; -- "") -- # fallback -- default_loongarch_arch=loongarch64 -+ *) -+ echo "Unknown arch in --with-arch=$with_arch" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ case ${with_abi} in -+ lp64) -+ # Legacy -+ with_abi=lp64d - ;; -+ -+ "" | lp64d | lp64f | lp64s) ;; # OK, append here. - *) -- echo "Unknown arch given in --with-arch=$with_arch, available choices are: loongarch64" 1>&2 -+ echo "Unsupported ABI given in --with-abi=$with_abi" 1>&2 - exit 1 - ;; - esac - -+ case ${with_abiext} in -+ "" | base) ;; # OK, append here. -+ *) -+ echo "Unsupported ABI extention type $with_abiext" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ case ${with_fpu} in -+ "" | none | 32 | 64) ;; # OK, append here. -+ 0) -+ # Convert "0" to "none" for upcoming checks. -+ with_fpu="none" -+ ;; -+ *) -+ echo "Unknown fpu type in --with-fpu=$with_fpu" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ case ${with_simd} in -+ "" | none) ;; -+ lsx | lasx) # OK, append here. -+ case ${with_fpu} in -+ 64) ;; -+ "") with_fpu=64 ;; -+ *) -+ echo "--with-simd=${with_simd} conflicts with --with-fpu=${with_fpu}" 1>&2 -+ exit 1 -+ ;; -+ esac -+ ;; -+ -+ *) -+ echo "Unknown SIMD extension in --with-simd=$with_simd" 1>&2 -+ exit 1 -+ ;; -+ esac -+ -+ -+ # Set default value for with_abi. - case ${with_abi} in -- lp64 | lp32) -- # OK -- default_loongarch_abi=$with_abi -+ "") -+ if test x${abi_default} != x; then -+ with_abi=${abi_default} -+ else -+ with_abi=${abi_pattern} -+ fi -+ ;; -+ -+ *) -+ if echo "${with_abi}" | grep -E "^${abi_pattern}$" > /dev/null; then -+ : # OK -+ else -+ echo "Incompatible options:" \ -+ "--with-abi=${with_abi} and --target=${target}." 1>&2 -+ exit 1 -+ fi - ;; -+ esac -+ -+ # Set default value for with_abiext (internal) -+ case ${with_abiext} in - "") -- # fallback -- default_loongarch_abi=lp64 -+ if test x${abiext_default} != x; then -+ with_abiext=${abiext_default} -+ else -+ with_abiext=${abiext_pattern} -+ fi -+ ;; -+ -+ *) -+ if echo "${with_abiext}" | grep -E "^${abiext_pattern}$" > /dev/null; then -+ : # OK -+ else -+ echo "The ABI extension type \"${with_abiext}\"" \ -+ "is incompatible with --target=${target}." 1>&2 -+ exit 1 -+ fi -+ -+ ;; -+ esac -+ -+ # Infer ISA-related default options from the ABI: pass 1 -+ case ${with_abi}/${with_abiext} in -+ lp64*/base) -+ # architectures that support lp64* ABI -+ arch_pattern="native|abi-default|loongarch64|la[2346]64" -+ # default architecture for lp64* ABI -+ arch_default="abi-default" - ;; - *) -- echo "Unknown ABI given in --with-abi=$with_abi, available choices are: lp32 lp64" 1>&2 -+ echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 - exit 1 - ;; - esac - -- case ${with_float} in -- "" | soft | hard) -- # OK -+ # Infer ISA-related default options from the ABI: pass 2 -+ case ${with_abi}/${with_abiext} in -+ lp64d/base) -+ fpu_pattern="64" -+ ;; -+ lp64f/base) -+ fpu_pattern="32|64" -+ fpu_default="32" -+ ;; -+ lp64s/base) -+ fpu_pattern="none|32|64" -+ fpu_default="none" - ;; - *) -- echo "Unknown floating point type used in --with-float=$with_float" 1>&2 -+ echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 - exit 1 - ;; - esac - -+ ## Set default value for with_arch. -+ case ${with_arch} in -+ "") -+ if test x${arch_default} != x; then -+ with_arch=${arch_default} -+ else -+ with_arch=${arch_pattern} -+ fi -+ ;; -+ -+ *) -+ if echo "${with_arch}" | grep -E "^${arch_pattern}$" > /dev/null; then -+ : # OK -+ else -+ echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ -+ "--with-arch=${with_arch}." 
1>&2 -+ exit 1 -+ fi -+ ;; -+ esac -+ -+ ## Set default value for with_fpu. - case ${with_fpu} in -- "" | single | double) -- # OK -+ "") -+ if test x${fpu_default} != x; then -+ with_fpu=${fpu_default} -+ else -+ with_fpu=${fpu_pattern} -+ fi - ;; -+ - *) -- echo "Unknown fpu type used in --with-fpu=$with_fpu" 1>&2 -- exit 1 -+ if echo "${with_fpu}" | grep -E "^${fpu_pattern}$" > /dev/null; then -+ : # OK -+ else -+ echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ -+ "--with-fpu=${with_fpu}." 1>&2 -+ exit 1 -+ fi -+ ;; -+ esac -+ -+ -+ # Check default with_tune configuration using with_arch. -+ case ${with_arch} in -+ loongarch64) -+ tune_pattern="native|abi-default|loongarch64|la[2346]64" -+ ;; -+ *) -+ # By default, $with_tune == $with_arch -+ tune_pattern="*" -+ ;; -+ esac -+ -+ case ${with_tune} in -+ "") ;; # OK -+ *) -+ if echo "${with_tune}" | grep -E "^${tune_pattern}$" > /dev/null; then -+ : # OK -+ else -+ echo "Incompatible options: --with-tune=${with_tune}" \ -+ "and --with-arch=${with_arch}." 1>&2 -+ exit 1 -+ fi - ;; - esac -+ -+ # Handle --with-multilib-default -+ if echo "${with_multilib_default}" \ -+ | grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then -+ echo "Invalid argument to --with-multilib-default." 1>&2 -+ exit 1 -+ fi -+ -+ if test x${with_multilib_default} = x; then -+ # Use -march=abi-default by default when building libraries. -+ with_multilib_default="/march=abi-default" -+ else -+ unset parse_state component -+ parse_state=arch -+ for component in $(echo "${with_multilib_default}" | tr '/' ' '); do -+ case ${parse_state},${component} in -+ arch,|arch,abi-default) -+ # ABI-default: use the ABI's default ARCH configuration for -+ # multilib library builds, unless otherwise specified -+ # in --with-multilib-list. -+ with_multilib_default="/march=abi-default" -+ parse_state=opts -+ ;; -+ arch,fixed) -+ # Fixed: use the default gcc configuration for all multilib -+ # builds by default. -+ with_multilib_default="" -+ parse_state=opts -+ ;; -+ arch,*) -+ with_multilib_default="/march=abi-default" -+ parse_state=opts -+ ;& -+ opts,*) -+ with_multilib_default="${with_multilib_default}/${component}" -+ ;; -+ esac -+ done -+ unset parse_state component -+ fi -+ -+ # Handle --with-multilib-list. -+ if test x"${with_multilib_list}" = x \ -+ || test x"${with_multilib_list}" = xno \ -+ || test x"${with_multilib_list}" = xdefault \ -+ || test x"${enable_multilib}" != xyes; then -+ -+ with_multilib_list="${with_abi}/${with_abiext}" -+ fi -+ -+ # Check if the configured default ABI combination is included in -+ # ${with_multilib_list}. -+ loongarch_multilib_list_sane=no -+ -+ # This one goes to TM_MULTILIB_CONFIG, for use in t-linux. -+ loongarch_multilib_list_make="" -+ -+ # This one goes to tm_defines, for use in loongarch-driver.c. -+ loongarch_multilib_list_c="" -+ -+ # ${with_multilib_list} should not contain whitespaces, -+ # consecutive commas or slashes. -+ if echo "${with_multilib_list}" \ -+ | grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null 2>&1; then -+ echo "Invalid argument to --with-multilib-list." 
1>&2 -+ exit 1 -+ fi -+ -+ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis -+ for elem in $(echo "${with_multilib_list}" | tr ',' ' '); do -+ unset elem_abi_base elem_abi_ext -+ parse_state="abi-base" -+ -+ for component in $(echo "${elem}" | tr '/' ' '); do -+ case ${parse_state} in -+ abi-base) -+ # Base ABI type -+ case ${component} in -+ lp64 | lp64d) elem_tmp="ABI_BASE_LP64D,";; -+ lp64f) elem_tmp="ABI_BASE_LP64F,";; -+ lp64s) elem_tmp="ABI_BASE_LP64S,";; -+ *) -+ echo "Unknown base ABI \"${component}\" in --with-multilib-list." 1>&2 -+ exit 1 -+ ;; -+ esac -+ loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}" -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}mabi=${component}" -+ elem_abi_base="${component}" -+ -+ parse_state="abi-ext" -+ ;; -+ -+ abi-ext) -+ # ABI extension type -+ case ${component} in -+ base) -+ elem_abi_ext="base" -+ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. -+ parse_state="arch" -+ continue; -+ ;; -+ esac -+ -+ # The default ABI extension is "base" if unspecified. -+ elem_abi_ext="base" -+ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. -+ parse_state="arch" -+ ;& -+ -+ arch) -+ # -march option -+ case ${component} in -+ abi-default | loongarch64 | la[2346]64) # OK, append here. -+ # Append -march spec for each multilib variant. -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}" -+ ;& -+ -+ default) -+ # "/default" is equivalent to --with-multilib-default=fixed -+ parse_state="opts" -+ continue; -+ ;; -+ esac -+ -+ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" -+ parse_state="opts" -+ ;& -+ -+ opts) -+ # Other compiler options for building libraries. -+ # (no static sanity check performed) -+ case ${component} in -+ *) -+ # Append other components as additional build options -+ # (without the prepending dash). -+ # Their validity should be examined by the compiler. -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}/${component}" -+ ;; -+ esac -+ ;; -+ -+ esac -+ done -+ -+ case ${parse_state} in -+ "abi-ext") -+ elem_abi_ext="base" -+ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. -+ ;& -+ "arch") -+ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" -+ ;& -+ "opts") -+ ;; -+ esac -+ -+ # Check for repeated configuration of the same multilib variant. -+ if echo "${elem_abi_base}/${elem_abi_ext}" \ -+ | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then -+ echo "Repeated multilib config of \"${elem_abi_base}/${elem_abi_ext}\" in --with-multilib-list." -+ exit 1 -+ fi -+ all_abis+="${elem_abi_base}/${elem_abi_ext}|" -+ -+ -+ # Check if the default ABI configuration of the GCC binary -+ # is included in the enabled multilib variants. 
-+ if test x${elem_abi_base} = x${with_abi} \ -+ && test x${elem_abi_ext} = x${with_abiext}; then -+ loongarch_multilib_list_sane=yes -+ fi -+ loongarch_multilib_list_make="${loongarch_multilib_list_make}," -+ done -+ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis -+ -+ -+ # Check if the default ABI combination is in the default list. -+ if test x${loongarch_multilib_list_sane} = xno; then -+ if test x${with_abiext} = xbase; then -+ with_abiext="" -+ else -+ with_abiext="/${with_abiext}" -+ fi -+ -+ echo "Default ABI combination (${with_abi}${with_abiext})" \ -+ "not found in --with-multilib-list." 1>&2 -+ exit 1 -+ fi -+ -+ # Remove the excessive appending comma. -+ loongarch_multilib_list_c=${loongarch_multilib_list_c%,} -+ loongarch_multilib_list_make=${loongarch_multilib_list_make%,} - ;; - - nds32*-*-*) -@@ -4935,17 +5295,54 @@ case ${target} in - ;; - - loongarch*-*-*) -- case ${default_loongarch_arch} in -- loongarch64) tm_defines="$tm_defines LARCH_ISA_DEFAULT=0" ;; -- loongarch32) tm_defines="$tm_defines LARCH_ISA_DEFAULT=1" ;; -+ # Export canonical triplet. -+ tm_defines="${tm_defines} LA_MULTIARCH_TRIPLET=${la_canonical_triplet}" -+ -+ # Define macro LA_DISABLE_MULTILIB if --disable-multilib -+ tm_defines="${tm_defines} TM_MULTILIB_LIST=${loongarch_multilib_list_c}" -+ if test x$enable_multilib = xyes; then -+ TM_MULTILIB_CONFIG="${loongarch_multilib_list_make}" -+ else -+ tm_defines="${tm_defines} LA_DISABLE_MULTILIB" -+ fi -+ -+ # Let --with- flags initialize the enum variables from loongarch.opt. -+ # See macro definitions from loongarch-opts.h and loongarch-cpu.h. -+ -+ # Architecture -+ tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_$(tr a-z- A-Z_ <<< ${with_arch})" -+ -+ # Base ABI type -+ tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_$(tr a-z- A-Z_ <<< ${with_abi})" -+ -+ # ABI Extension -+ case ${with_abiext} in -+ base) tm_defines="${tm_defines} DEFAULT_ABI_EXT=ABI_EXT_BASE" ;; - esac -- case ${default_loongarch_abi} in -- lp64) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP64" ;; -- lp32) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP32" ;; -+ -+ # Microarchitecture -+ if test x${with_tune} != x; then -+ tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_$(tr a-z- A-Z_ <<< ${with_tune})" -+ fi -+ -+ # FPU adjustment -+ case ${with_fpu} in -+ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NONE" ;; -+ 32) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU32" ;; -+ 64) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU64" ;; - esac -+ -+ # SIMD extensions -+ case ${with_simd} in -+ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_NONE" ;; -+ lsx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LSX" ;; -+ lasx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LASX" ;; -+ esac -+ - tmake_file="loongarch/t-loongarch $tmake_file" - ;; - -+ - powerpc*-*-* | rs6000-*-*) - # FIXME: The PowerPC port uses the value set at compile time, - # although it's only cosmetic. 
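The loongarch*-*-* configure logic above leans on two idioms: validating each --with-* value against an anchored "grep -E" pattern, and deriving the CPU_*/ABI_* macro spellings for tm_defines with tr. A minimal standalone sketch of both (not part of the patch; the sample value la464 is illustrative, "echo | tr" stands in for the bash-only "<<<" here-string used above, and the pattern is parenthesized here so that "^" and "$" anchor every alternative):

    # Validate a --with-tune value the way the tune_pattern check does.
    tune_pattern="native|abi-default|loongarch64|la[2346]64"
    with_tune=la464
    if echo "${with_tune}" | grep -E "^(${tune_pattern})$" > /dev/null; then
      : # OK
    else
      echo "Incompatible options: --with-tune=${with_tune}." 1>&2
      exit 1
    fi

    # Derive the enum-style macro name placed into tm_defines:
    # lower-case letters and "-" become upper-case letters and "_".
    echo "DEFAULT_CPU_TUNE=CPU_$(echo ${with_tune} | tr a-z- A-Z_)"
    # prints: DEFAULT_CPU_TUNE=CPU_LA464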
-diff --git a/gcc/config.host b/gcc/config.host -index d23dae4ac..c65569da2 100644 ---- a/gcc/config.host -+++ b/gcc/config.host -@@ -139,18 +139,6 @@ case ${host} in - host_extra_gcc_objs="driver-native.o" - host_xmake_file="${host_xmake_file} mips/x-native" - ;; -- loongarch*-*-linux*) -- host_extra_gcc_objs="driver-native.o" -- host_xmake_file="${host_xmake_file} loongarch/x-native" -- ;; -- esac -- ;; -- loongarch*-*-linux*) -- case ${target} in -- loongarch*-*-linux*) -- host_extra_gcc_objs="driver-native.o" -- host_xmake_file="${host_xmake_file} loongarch/x-native" -- ;; - esac - ;; - rs6000-*-* \ -diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md -index ae8596107..82c0ccf37 100644 ---- a/gcc/config/loongarch/constraints.md -+++ b/gcc/config/loongarch/constraints.md -@@ -1,5 +1,6 @@ --;; Constraint definitions for LARCH. --;; Copyright (C) 2006-2018 Free Software Foundation, Inc. -+;; Constraint definitions for LoongArch. -+;; Copyright (C) 2020-2022 Free Software Foundation, Inc. -+;; Contributed by Loongson Co. Ltd. - ;; - ;; This file is part of GCC. - ;; -@@ -20,160 +21,158 @@ - ;; Register constraints - - ;; "a" A constant call global and noplt address. --;; "b" ALL_REGS -+;; "b" <-----unused - ;; "c" A constant call local address. --;; "d" - --;; "e" JALR_REGS -+;; "d" <-----unused -+;; "e" JIRL_REGS - ;; "f" FP_REGS --;; "g" * -+;; "g" <-----unused - ;; "h" A constant call plt address. --;; "i" "Matches a general integer constant." -+;; "i" Matches a general integer constant. (Global non-architectural) - ;; "j" SIBCALL_REGS --;; "k" * --;; "l" "A signed 16-bit constant ." --;; "m" "A memory operand whose address is formed by a base register and offset --;; that is suitable for use in instructions with the same addressing mode --;; as @code{st.w} and @code{ld.w}." --;; "n" "Matches a non-symbolic integer constant." --;; "o" "Matches an offsettable memory reference." --;; "p" "Matches a general address." --;; "q" CSR_REGS --;; "r" GENERAL_REGS --;; "s" "Matches a symbolic integer constant." -+;; "k" A memory operand whose address is formed by a base register and -+;; (optionally scaled) index register. -+;; "l" A signed 16-bit constant. -+;; "m" A memory operand whose address is formed by a base register and offset -+;; that is suitable for use in instructions with the same addressing mode -+;; as @code{st.w} and @code{ld.w}. -+;; "n" Matches a non-symbolic integer constant. (Global non-architectural) -+;; "o" Matches an offsettable memory reference. (Global non-architectural) -+;; "p" Matches a general address. (Global non-architectural) -+;; "q" A general-purpose register except for $r0 and $r1 for lcsr. -+;; "r" GENERAL_REGS (Global non-architectural) -+;; "s" Matches a symbolic integer constant. (Global non-architectural) - ;; "t" A constant call weak address --;; "u" - --;; "v" - --;; "w" "Matches any valid memory." --;; "x" - --;; "y" GR_REGS --;; "z" ST_REGS --;; "A" - --;; "B" - --;; "C" - --;; "D" - --;; "E" "Matches a floating-point constant." --;; "F" "Matches a floating-point constant." --;; "G" "Floating-point zero." --;; "H" - --;; "I" "A signed 12-bit constant (for arithmetic instructions)." --;; "J" "Integer zero." --;; "K" "An unsigned 12-bit constant (for logic instructions)." --;; "L" "A signed 32-bit constant in which the lower 12 bits are zero. --;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu} or @code{ori}." --;; "N" "A constant in the range -65535 to -1 (inclusive)." 
--;; "O" "A signed 15-bit constant." --;; "P" "A constant in the range 1 to 65535 (inclusive)." --;; "Q" "A signed 12-bit constant" --;; "R" "An address that can be used in a non-macro load or store." --;; "S" "A constant call address." --;; "T" - --;; "U" - --;; "V" "Matches a non-offsettable memory reference." --;; "W" "A memory address based on a member of @code{BASE_REG_CLASS}. This is --;; true for all references (although it can sometimes be implicit --;; if @samp{!TARGET_EXPLICIT_RELOCS})." --;; "X" "Matches anything." -+;; "u" A signed 52bit constant and low 32-bit is zero (for logic instructions) -+;; "v" A signed 64-bit constant and low 44-bit is zero (for logic instructions) -+;; "w" Matches any valid memory. -+;; "x" <-----unused -+;; "y" <-----unused -+;; "z" FCC_REGS -+;; "A" <-----unused -+;; "B" <-----unused -+;; "C" <-----unused -+;; "D" <-----unused -+;; "E" Matches a floating-point constant. (Global non-architectural) -+;; "F" Matches a floating-point constant. (Global non-architectural) -+;; "G" Floating-point zero. -+;; "H" <-----unused -+;; "I" A signed 12-bit constant (for arithmetic instructions). -+;; "J" Integer zero. -+;; "K" An unsigned 12-bit constant (for logic instructions). -+;; "L" <-----unused -+;; "M" <-----unused -+;; "N" <-----unused -+;; "O" <-----unused -+;; "P" <-----unused -+;; "Q" <-----unused -+;; "R" <-----unused -+;; "S" <-----unused -+;; "T" <-----unused -+;; "U" <-----unused -+;; "V" Matches a non-offsettable memory reference. (Global non-architectural) -+;; "W" <-----unused -+;; "X" Matches anything. (Global non-architectural) - ;; "Y" - --;; "YG" --;; "A vector zero." --;; "YA" --;; "An unsigned 6-bit constant." --;; "YB" --;; "A signed 10-bit constant." --;; "Yb" - ;; "Yd" --;; "A constant @code{move_operand} that can be safely loaded into @code{$25} --;; using @code{la}." --;; "Yh" --;; "Yw" -+;; A constant @code{move_operand} that can be safely loaded using -+;; @code{la}. -+;; "YG" -+;; A vector zero. - ;; "Yx" --;; "YI" --;; "A replicated vector const in which the replicated value is in the range --;; [-512,511]." - ;; "YC" --;; "A replicated vector const in which the replicated value has a single --;; bit set." -+;; A replicated vector const in which the replicated value has a single -+;; bit set. - ;; "YZ" --;; "A replicated vector const in which the replicated value has a single --;; bit clear." -+;; A replicated vector const in which the replicated value has a single -+;; bit clear. - ;; "Z" - - ;; "ZC" --;; "A memory operand whose address is formed by a base register and offset -+;; A memory operand whose address is formed by a base register and offset - ;; that is suitable for use in instructions with the same addressing mode --;; as @code{ll.w} and @code{sc.w}." --;; "ZD" --;; "An address suitable for a @code{prefetch} instruction, or for any other --;; instruction with the same addressing mode as @code{prefetch}." --;; "ZR" --;; "An address valid for loading/storing register exclusive" -+;; as @code{ll.w} and @code{sc.w}. - ;; "ZB" --;; "An address that is held in a general-purpose register. --;; The offset is zero" -+;; An address that is held in a general-purpose register. -+;; The offset is zero. -+;; "<" Matches a pre-dec or post-dec operand. (Global non-architectural) -+;; ">" Matches a pre-inc or post-inc operand. (Global non-architectural) - -+(define_constraint "a" -+ "@internal -+ A constant call global and noplt address." 
-+ (match_operand 0 "is_const_call_global_noplt_symbol")) - - (define_constraint "c" - "@internal - A constant call local address." - (match_operand 0 "is_const_call_local_symbol")) - --(define_constraint "a" -- "@internal -- A constant call global and noplt address." -- (match_operand 0 "is_const_call_global_noplt_symbol")) -+(define_register_constraint "e" "JIRL_REGS" -+ "@internal") -+ -+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" -+ "A floating-point register (if available).") - - (define_constraint "h" - "@internal - A constant call plt address." - (match_operand 0 "is_const_call_plt_symbol")) - --(define_constraint "t" -- "@internal -- A constant call weak address." -- (match_operand 0 "is_const_call_weak_symbol")) -- --(define_register_constraint "e" "JALR_REGS" -+(define_register_constraint "j" "SIBCALL_REGS" - "@internal") - --(define_register_constraint "q" "CSR_REGS" -- "A general-purpose register except for $r0 and $r1 for csr.") -+(define_memory_constraint "k" -+ "A memory operand whose address is formed by a base register and (optionally scaled) -+ index register." -+ (and (match_code "mem") -+ (match_test "loongarch_base_index_address_p (XEXP (op, 0), mode)"))) - --(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" -- "A floating-point register (if available).") -+(define_constraint "l" -+"A signed 16-bit constant." -+(and (match_code "const_int") -+ (match_test "IMM16_OPERAND (ival)"))) - --(define_register_constraint "b" "ALL_REGS" -- "@internal") -+(define_memory_constraint "m" -+ "A memory operand whose address is formed by a base register and offset -+ that is suitable for use in instructions with the same addressing mode -+ as @code{st.w} and @code{ld.w}." -+ (and (match_code "mem") -+ (match_test "loongarch_12bit_offset_address_p (XEXP (op, 0), mode)"))) - --(define_register_constraint "j" "SIBCALL_REGS" -- "@internal") -+(define_register_constraint "q" "CSR_REGS" -+ "A general-purpose register except for $r0 and $r1 for lcsr.") - --(define_constraint "l" -- "A signed 16-bit constant ." -+(define_constraint "t" -+ "@internal -+ A constant call weak address." -+ (match_operand 0 "is_const_call_weak_symbol")) -+ -+(define_constraint "u" -+ "A signed 52bit constant and low 32-bit is zero (for logic instructions)." - (and (match_code "const_int") -- (match_test "IMM16_OPERAND (ival)"))) -+ (match_test "LU32I_OPERAND (ival)"))) - --(define_register_constraint "y" "GR_REGS" -- "Equivalent to @code{r}; retained for backwards compatibility.") -+(define_constraint "v" -+ "A signed 64-bit constant and low 52-bit is zero (for logic instructions)." -+ (and (match_code "const_int") -+ (match_test "LU52I_OPERAND (ival)"))) - --(define_register_constraint "z" "ST_REGS" -+(define_register_constraint "z" "FCC_REGS" - "A floating-point condition code register.") - --(define_constraint "kf" -- "@internal" -- (match_operand 0 "force_to_mem_operand")) -+;; Floating-point constraints - --;; This is a normal rather than a register constraint because we can --;; never use the stack pointer as a reload register. --(define_constraint "ks" -- "@internal" -- (and (match_code "reg") -- (match_test "REGNO (op) == STACK_POINTER_REGNUM"))) -+(define_constraint "G" -+ "Floating-point zero." -+ (and (match_code "const_double") -+ (match_test "op == CONST0_RTX (mode)"))) - - ;; Integer constraints - - (define_constraint "I" - "A signed 12-bit constant (for arithmetic instructions)." 
- (and (match_code "const_int") -- (match_test "SMALL_OPERAND (ival)"))) -+ (match_test "IMM12_OPERAND (ival)"))) - - (define_constraint "J" - "Integer zero." -@@ -183,53 +182,7 @@ - (define_constraint "K" - "An unsigned 12-bit constant (for logic instructions)." - (and (match_code "const_int") -- (match_test "SMALL_OPERAND_UNSIGNED (ival)"))) -- --(define_constraint "u" -- "An unsigned 12-bit constant (for logic instructions)." -- (and (match_code "const_int") -- (match_test "LU32I_OPERAND (ival)"))) -- --(define_constraint "v" -- "An unsigned 12-bit constant (for logic instructions)." -- (and (match_code "const_int") -- (match_test "LU52I_OPERAND (ival)"))) -- --(define_constraint "L" -- "A signed 32-bit constant in which the lower 12 bits are zero. -- Such constants can be loaded using @code{lui}." -- (and (match_code "const_int") -- (match_test "LUI_OPERAND (ival)"))) -- --(define_constraint "M" -- "A constant that cannot be loaded using @code{lui}, @code{addiu} -- or @code{ori}." -- (and (match_code "const_int") -- (not (match_test "SMALL_OPERAND (ival)")) -- (not (match_test "SMALL_OPERAND_UNSIGNED (ival)")) -- (not (match_test "LUI_OPERAND (ival)")))) -- --(define_constraint "N" -- "A constant in the range -65535 to -1 (inclusive)." -- (and (match_code "const_int") -- (match_test "ival >= -0xffff && ival < 0"))) -- --(define_constraint "O" -- "A signed 15-bit constant." -- (and (match_code "const_int") -- (match_test "ival >= -0x4000 && ival < 0x4000"))) -- --(define_constraint "P" -- "A constant in the range 1 to 65535 (inclusive)." -- (and (match_code "const_int") -- (match_test "ival > 0 && ival < 0x10000"))) -- --;; Floating-point constraints -- --(define_constraint "G" -- "Floating-point zero." -- (and (match_code "const_double") -- (match_test "op == CONST0_RTX (mode)"))) -+ (match_test "IMM12_OPERAND_UNSIGNED (ival)"))) - - ;; General constraints - -@@ -237,33 +190,35 @@ - "@internal" - (match_operand 0 "const_arith_operand")) - --(define_memory_constraint "R" -- "An address that can be used in a non-macro load or store." -- (and (match_code "mem") -- (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) -+(define_constraint "Yd" -+ "@internal -+ A constant @code{move_operand} that can be safely loaded using -+ @code{la}." -+ (and (match_operand 0 "move_operand") -+ (match_test "CONSTANT_P (op)"))) - --(define_memory_constraint "m" -+(define_constraint "Yx" -+ "@internal" -+ (match_operand 0 "low_bitmask_operand")) -+ -+(define_memory_constraint "ZC" - "A memory operand whose address is formed by a base register and offset - that is suitable for use in instructions with the same addressing mode -- as @code{st.w} and @code{ld.w}." -+ as @code{ll.w} and @code{sc.w}." - (and (match_code "mem") -- (match_test "loongarch_12bit_offset_address_p (XEXP (op, 0), mode)"))) -+ (match_test "loongarch_14bit_shifted_offset_address_p (XEXP (op, 0), mode)"))) - --(define_constraint "S" -+(define_memory_constraint "ZB" - "@internal -- A constant call address." -- (and (match_operand 0 "call_insn_operand") -- (match_test "CONSTANT_P (op)"))) -+ An address that is held in a general-purpose register. -+ The offset is zero" -+ (and (match_code "mem") -+ (match_test "REG_P (XEXP (op, 0))"))) - --(define_memory_constraint "W" -- "@internal -- A memory address based on a member of @code{BASE_REG_CLASS}. This is -- true for allreferences (although it can sometimes be implicit -- if @samp{!TARGET_EXPLICIT_RELOCS})." 
-+(define_memory_constraint "R" -+ "An address that can be used in a non-macro load or store." - (and (match_code "mem") -- (match_operand 0 "memory_operand") -- (and (not (match_operand 0 "stack_operand")) -- (not (match_test "CONSTANT_P (XEXP (op, 0))"))))) -+ (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) - - (define_constraint "YG" - "@internal -@@ -271,41 +226,6 @@ - (and (match_code "const_vector") - (match_test "op == CONST0_RTX (mode)"))) - --(define_constraint "YA" -- "@internal -- An unsigned 6-bit constant." -- (and (match_code "const_int") -- (match_test "UIMM6_OPERAND (ival)"))) -- --(define_constraint "YB" -- "@internal -- A signed 10-bit constant." -- (and (match_code "const_int") -- (match_test "IMM10_OPERAND (ival)"))) -- --(define_constraint "Yb" -- "@internal" -- (match_operand 0 "qi_mask_operand")) -- --(define_constraint "Yd" -- "@internal -- A constant @code{move_operand} that can be safely loaded into @code{$25} -- using @code{la}." -- (and (match_operand 0 "move_operand") -- (match_test "CONSTANT_P (op)"))) -- --(define_constraint "Yh" -- "@internal" -- (match_operand 0 "hi_mask_operand")) -- --(define_constraint "Yw" -- "@internal" -- (match_operand 0 "si_mask_operand")) -- --(define_constraint "Yx" -- "@internal" -- (match_operand 0 "low_bitmask_operand")) -- - (define_constraint "YI" - "@internal - A replicated vector const in which the replicated value is in the range -@@ -360,30 +280,3 @@ - A replicated vector const with replicated byte values as well as elements" - (and (match_code "const_vector") - (match_test "loongarch_const_vector_same_bytes_p (op, mode)"))) -- --(define_memory_constraint "ZC" -- "A memory operand whose address is formed by a base register and offset -- that is suitable for use in instructions with the same addressing mode -- as @code{ll.w} and @code{sc.w}." -- (and (match_code "mem") -- (match_test "loongarch_14bit_shifted_offset_address_p (XEXP (op, 0), mode)"))) -- --;;(define_address_constraint "ZD" --;; "An address suitable for a @code{prefetch} instruction, or for any other --;; instruction with the same addressing mode as @code{prefetch}." --;; (if_then_else (match_test "ISA_HAS_9BIT_DISPLACEMENT") --;; (match_test "loongarch_9bit_offset_address_p (op, mode)") --;; (match_test "loongarch_address_insns (op, mode, false)"))) -- --(define_memory_constraint "ZR" -- "@internal -- An address valid for loading/storing register exclusive" -- (match_operand 0 "mem_noofs_operand")) -- --(define_memory_constraint "ZB" -- "@internal -- An address that is held in a general-purpose register. -- The offset is zero" -- (and (match_code "mem") -- (match_test "GET_CODE(XEXP(op,0)) == REG"))) -- -diff --git a/gcc/config/loongarch/driver-native.c b/gcc/config/loongarch/driver-native.c -deleted file mode 100644 -index 5484ee502..000000000 ---- a/gcc/config/loongarch/driver-native.c -+++ /dev/null -@@ -1,82 +0,0 @@ --/* Subroutines for the gcc driver. -- Copyright (C) 2008-2018 Free Software Foundation, Inc. -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify --it under the terms of the GNU General Public License as published by --the Free Software Foundation; either version 3, or (at your option) --any later version. -- --GCC is distributed in the hope that it will be useful, --but WITHOUT ANY WARRANTY; without even the implied warranty of --MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --GNU General Public License for more details. 
-- --You should have received a copy of the GNU General Public License --along with GCC; see the file COPYING3. If not see --. */ -- --#define IN_TARGET_CODE 1 -- --#include "config.h" --#include "system.h" --#include "coretypes.h" --#include "tm.h" -- -- --/* This function must set to noinline. Otherwise the arg can not be passed. */ --int loongson_cpucfg (int arg) --{ -- int ret; -- __asm__ __volatile__ ("cpucfg %0,%1\n\t" /* cpucfg $2,$4. */ -- :"=r"(ret) -- :"r"(arg) -- :); -- return ret; --} -- --/* This will be called by the spec parser in gcc.c when it sees -- a %:local_cpu_detect(args) construct. Currently it will be called -- with either "arch" or "tune" as argument depending on if -march=native -- or -mtune=native is to be substituted. -- -- It returns a string containing new command line parameters to be -- put at the place of the above two options, depending on what CPU -- this is executed. E.g. "-march=loongson2f" on a Loongson 2F for -- -march=native. If the routine can't detect a known processor, -- the -march or -mtune option is discarded. -- -- ARGC and ARGV are set depending on the actual arguments given -- in the spec. */ --const char * --host_detect_local_cpu (int argc, const char **argv) --{ -- const char *cpu = NULL; -- bool arch; -- int cpucfg_arg; -- int cpucfg_ret; -- -- if (argc < 1) -- return NULL; -- -- arch = strcmp (argv[0], "arch") == 0; -- if (!arch && strcmp (argv[0], "tune")) -- return NULL; -- -- cpucfg_arg = 0; -- cpucfg_ret = loongson_cpucfg (cpucfg_arg); -- if (((cpucfg_ret >> 16) & 0xff) == 0x14) -- { -- if (((cpucfg_ret >> 8) & 0xff) == 0xc0) -- cpu = "la464"; -- else -- cpu = NULL; -- } -- -- -- if (cpu == NULL) -- return NULL; -- -- return concat ("-m", argv[0], "=", cpu, NULL); --} -diff --git a/gcc/config/loongarch/elf.h b/gcc/config/loongarch/elf.h -index b7f938e31..edb0e77d2 100644 ---- a/gcc/config/loongarch/elf.h -+++ b/gcc/config/loongarch/elf.h -@@ -1,5 +1,6 @@ --/* Target macros for loongarch*-elf targets. -- Copyright (C) 1994-2018 Free Software Foundation, Inc. -+/* Definitions for LoongArch systems using GNU (glibc-based) userspace, -+ or other userspace with libc derived from glibc. -+ Copyright (C) 1998-2018 Free Software Foundation, Inc. - - This file is part of GCC. - -@@ -17,34 +18,37 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - --/* LARCH assemblers don't have the usual .set foo,bar construct; -- .set is used for assembler options instead. */ --#undef SET_ASM_OP --#define ASM_OUTPUT_DEF(FILE, LABEL1, LABEL2) \ -- do \ -- { \ -- fputc ('\t', FILE); \ -- assemble_name (FILE, LABEL1); \ -- fputs (" = ", FILE); \ -- assemble_name (FILE, LABEL2); \ -- fputc ('\n', FILE); \ -- } \ -- while (0) -- --#undef ASM_DECLARE_OBJECT_NAME --#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name -- --#undef ASM_FINISH_DECLARE_OBJECT --#define ASM_FINISH_DECLARE_OBJECT loongarch_finish_declare_object -- --/* Leave the linker script to choose the appropriate libraries. */ -+/* Define the size of the wide character type. */ -+#undef WCHAR_TYPE -+#define WCHAR_TYPE "int" -+ -+#undef WCHAR_TYPE_SIZE -+#define WCHAR_TYPE_SIZE 32 -+ -+ -+/* GNU-specific SPEC definitions. */ -+#define GNU_USER_LINK_EMULATION "elf" ABI_GRLEN_SPEC "loongarch" -+ -+#undef GNU_USER_TARGET_LINK_SPEC -+#define GNU_USER_TARGET_LINK_SPEC \ -+ "%{shared} -m " GNU_USER_LINK_EMULATION -+ -+ -+/* Link against Newlib libraries, because the ELF backend assumes Newlib. 
-+ Handle the circular dependence between libc and libgloss. */ - #undef LIB_SPEC --#define LIB_SPEC "" -+#define LIB_SPEC "--start-group -lc %{!specs=nosys.specs:-lgloss} --end-group" -+ -+#undef LINK_SPEC -+#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC - - #undef STARTFILE_SPEC --#define STARTFILE_SPEC "crti%O%s crtbegin%O%s" -+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s" - - #undef ENDFILE_SPEC --#define ENDFILE_SPEC "crtend%O%s crtn%O%s" -+#define ENDFILE_SPEC "crtend%O%s" - - #define NO_IMPLICIT_EXTERN_C 1 -+#undef SUBTARGET_CC1_SPEC -+#define SUBTARGET_CC1_SPEC "%{profile:-p}" -+ -diff --git a/gcc/config/loongarch/frame-header-opt.c b/gcc/config/loongarch/frame-header-opt.c -deleted file mode 100644 -index 86e5d423d..000000000 ---- a/gcc/config/loongarch/frame-header-opt.c -+++ /dev/null -@@ -1,292 +0,0 @@ --/* Analyze functions to determine if callers need to allocate a frame header -- on the stack. The frame header is used by callees to save their arguments. -- This optimization is specific to TARGET_OLDABI targets. For TARGET_NEWABI -- targets, if a frame header is required, it is allocated by the callee. -- -- -- Copyright (C) 2015-2018 Free Software Foundation, Inc. -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify it --under the terms of the GNU General Public License as published by the --Free Software Foundation; either version 3, or (at your option) any --later version. -- --GCC is distributed in the hope that it will be useful, but WITHOUT --ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or --FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --for more details. -- --You should have received a copy of the GNU General Public License --along with GCC; see the file COPYING3. If not see --. */ -- -- --#define IN_TARGET_CODE 1 -- --#include "config.h" --#include "system.h" --#include "context.h" --#include "coretypes.h" --#include "tree.h" --#include "tree-core.h" --#include "tree-pass.h" --#include "target.h" --#include "target-globals.h" --#include "profile-count.h" --#include "cfg.h" --#include "cgraph.h" --#include "function.h" --#include "basic-block.h" --#include "gimple.h" --#include "gimple-iterator.h" --#include "gimple-walk.h" -- --static unsigned int frame_header_opt (void); -- --namespace { -- --const pass_data pass_data_ipa_frame_header_opt = --{ -- IPA_PASS, /* type */ -- "frame-header-opt", /* name */ -- OPTGROUP_NONE, /* optinfo_flags */ -- TV_CGRAPHOPT, /* tv_id */ -- 0, /* properties_required */ -- 0, /* properties_provided */ -- 0, /* properties_destroyed */ -- 0, /* todo_flags_start */ -- 0, /* todo_flags_finish */ --}; -- --class pass_ipa_frame_header_opt : public ipa_opt_pass_d --{ --public: -- pass_ipa_frame_header_opt (gcc::context *ctxt) -- : ipa_opt_pass_d (pass_data_ipa_frame_header_opt, ctxt, -- NULL, /* generate_summary */ -- NULL, /* write_summary */ -- NULL, /* read_summary */ -- NULL, /* write_optimization_summary */ -- NULL, /* read_optimization_summary */ -- NULL, /* stmt_fixup */ -- 0, /* function_transform_todo_flags_start */ -- NULL, /* function_transform */ -- NULL) /* variable_transform */ -- {} -- -- /* opt_pass methods: */ -- virtual bool gate (function *) -- { -- /* This optimization has no affect if TARGET_NEWABI. If optimize -- is not at least 1 then the data needed for the optimization is -- not available and nothing will be done anyway. 
*/ -- return TARGET_OLDABI && flag_frame_header_optimization && optimize > 0; -- } -- -- virtual unsigned int execute (function *) { return frame_header_opt (); } -- --}; // class pass_ipa_frame_header_opt -- --} // anon namespace -- --static ipa_opt_pass_d * --make_pass_ipa_frame_header_opt (gcc::context *ctxt) --{ -- return new pass_ipa_frame_header_opt (ctxt); --} -- --void --loongarch_register_frame_header_opt (void) --{ -- opt_pass *p = make_pass_ipa_frame_header_opt (g); -- struct register_pass_info f = { p, "comdats", 1, PASS_POS_INSERT_AFTER }; -- register_pass (&f); --} -- -- --/* Return true if it is certain that this is a leaf function. False if it is -- not a leaf function or if it is impossible to tell. */ -- --static bool --is_leaf_function (function *fn) --{ -- basic_block bb; -- gimple_stmt_iterator gsi; -- -- /* If we do not have a cfg for this function be conservative and assume -- it is not a leaf function. */ -- if (fn->cfg == NULL) -- return false; -- -- FOR_EACH_BB_FN (bb, fn) -- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -- if (is_gimple_call (gsi_stmt (gsi))) -- return false; -- return true; --} -- --/* Return true if this function has inline assembly code or if we cannot -- be certain that it does not. False if we know that there is no inline -- assembly. */ -- --static bool --has_inlined_assembly (function *fn) --{ -- basic_block bb; -- gimple_stmt_iterator gsi; -- -- /* If we do not have a cfg for this function be conservative and assume -- it is may have inline assembly. */ -- if (fn->cfg == NULL) -- return true; -- -- FOR_EACH_BB_FN (bb, fn) -- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -- if (gimple_code (gsi_stmt (gsi)) == GIMPLE_ASM) -- return true; -- -- return false; --} -- --/* Return true if this function will use the stack space allocated by its -- caller or if we cannot determine for certain that it does not. */ -- --static bool --needs_frame_header_p (function *fn) --{ -- tree t; -- -- if (fn->decl == NULL) -- return true; -- -- if (fn->stdarg) -- return true; -- -- for (t = DECL_ARGUMENTS (fn->decl); t; t = TREE_CHAIN (t)) -- { -- if (!use_register_for_decl (t)) -- return true; -- -- /* Some 64-bit types may get copied to general registers using the frame -- header, see loongarch_output_64bit_xfer. Checking for SImode only may be -- overly restrictive but it is guaranteed to be safe. */ -- if (DECL_MODE (t) != SImode) -- return true; -- } -- -- return false; --} -- --/* Return true if the argument stack space allocated by function FN is used. -- Return false if the space is needed or if the need for the space cannot -- be determined. 
*/ -- --static bool --callees_functions_use_frame_header (function *fn) --{ -- basic_block bb; -- gimple_stmt_iterator gsi; -- gimple *stmt; -- tree called_fn_tree; -- function *called_fn; -- -- if (fn->cfg == NULL) -- return true; -- -- FOR_EACH_BB_FN (bb, fn) -- { -- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -- { -- stmt = gsi_stmt (gsi); -- if (is_gimple_call (stmt)) -- { -- called_fn_tree = gimple_call_fndecl (stmt); -- if (called_fn_tree != NULL) -- { -- called_fn = DECL_STRUCT_FUNCTION (called_fn_tree); -- if (called_fn == NULL -- || DECL_WEAK (called_fn_tree) -- || has_inlined_assembly (called_fn) -- || !is_leaf_function (called_fn) -- || !called_fn->machine->does_not_use_frame_header) -- return true; -- } -- else -- return true; -- } -- } -- } -- return false; --} -- --/* Set the callers_may_not_allocate_frame flag for any function which -- function FN calls because FN may not allocate a frame header. */ -- --static void --set_callers_may_not_allocate_frame (function *fn) --{ -- basic_block bb; -- gimple_stmt_iterator gsi; -- gimple *stmt; -- tree called_fn_tree; -- function *called_fn; -- -- if (fn->cfg == NULL) -- return; -- -- FOR_EACH_BB_FN (bb, fn) -- { -- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) -- { -- stmt = gsi_stmt (gsi); -- if (is_gimple_call (stmt)) -- { -- called_fn_tree = gimple_call_fndecl (stmt); -- if (called_fn_tree != NULL) -- { -- called_fn = DECL_STRUCT_FUNCTION (called_fn_tree); -- if (called_fn != NULL) -- called_fn->machine->callers_may_not_allocate_frame = true; -- } -- } -- } -- } -- return; --} -- --/* Scan each function to determine those that need its frame headers. Perform -- a second scan to determine if the allocation can be skipped because none of -- their callees require the frame header. */ -- --static unsigned int --frame_header_opt () --{ -- struct cgraph_node *node; -- function *fn; -- -- FOR_EACH_DEFINED_FUNCTION (node) -- { -- fn = node->get_fun (); -- if (fn != NULL) -- fn->machine->does_not_use_frame_header = !needs_frame_header_p (fn); -- } -- -- FOR_EACH_DEFINED_FUNCTION (node) -- { -- fn = node->get_fun (); -- if (fn != NULL) -- fn->machine->optimize_call_stack -- = !callees_functions_use_frame_header (fn) && !is_leaf_function (fn); -- } -- -- FOR_EACH_DEFINED_FUNCTION (node) -- { -- fn = node->get_fun (); -- if (fn != NULL && fn->machine->optimize_call_stack) -- set_callers_may_not_allocate_frame (fn); -- } -- -- return 0; --} -diff --git a/gcc/config/loongarch/generic.md b/gcc/config/loongarch/generic.md -index 321b8e561..0f6eb3f42 100644 ---- a/gcc/config/loongarch/generic.md -+++ b/gcc/config/loongarch/generic.md -@@ -1,6 +1,8 @@ --;; Generic DFA-based pipeline description for LARCH targets --;; Copyright (C) 2004-2018 Free Software Foundation, Inc. --;; -+;; Generic DFA-based pipeline description for LoongArch targets -+;; Copyright (C) 2020-2022 Free Software Foundation, Inc. -+;; Contributed by Loongson Co. Ltd. -+;; Based on MIPS target for GNU compiler. -+ - ;; This file is part of GCC. - - ;; GCC is free software; you can redistribute it and/or modify it -@@ -17,9 +19,16 @@ - ;; along with GCC; see the file COPYING3. If not see - ;; . - -+(define_automaton "alu,imuldiv") -+ -+(define_cpu_unit "alu" "alu") -+(define_cpu_unit "imuldiv" "imuldiv") - --;; This file is derived from the old define_function_unit description. --;; Each reservation can be overridden on a processor-by-processor basis. -+;; Ghost instructions produce no real code. 
-+;; They exist purely to express an effect on dataflow. -+(define_insn_reservation "ghost" 0 -+ (eq_attr "type" "ghost") -+ "nothing") - - (define_insn_reservation "generic_alu" 1 - (eq_attr "type" "unknown,prefetch,prefetchx,condmove,const,arith, -@@ -43,7 +52,7 @@ - "alu") - - (define_insn_reservation "generic_imul" 17 -- (eq_attr "type" "imul,imul3") -+ (eq_attr "type" "imul") - "imuldiv*17") - - (define_insn_reservation "generic_fcvt" 1 -diff --git a/gcc/config/loongarch/genopt.sh b/gcc/config/loongarch/genopt.sh -deleted file mode 100644 -index 272aac51d..000000000 ---- a/gcc/config/loongarch/genopt.sh -+++ /dev/null -@@ -1,110 +0,0 @@ --#!/bin/sh --# Generate loongarch-tables.opt from the list of CPUs in loongarch-cpus.def. --# Copyright (C) 2011-2018 Free Software Foundation, Inc. --# --# This file is part of GCC. --# --# GCC is free software; you can redistribute it and/or modify --# it under the terms of the GNU General Public License as published by --# the Free Software Foundation; either version 3, or (at your option) --# any later version. --# --# GCC is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. --# --# You should have received a copy of the GNU General Public License --# along with GCC; see the file COPYING3. If not see --# . -- --cat <. -- --Enum --Name(loongarch_arch_opt_value) Type(int) --Known LARCH CPUs (for use with the -march= and -mtune= options): -- --EnumValue --Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly -- --EOF -- --awk -F'[(, ]+' ' --BEGIN { -- value = 0 --} -- --# Write an entry for a single string accepted as a -march= argument. -- --function write_one_arch_value(name, value, flags) --{ -- print "EnumValue" -- print "Enum(loongarch_arch_opt_value) String(" name ") Value(" value ")" flags -- print "" --} -- --# The logic for matching CPU name variants should be the same as in GAS. -- --# Write an entry for a single string accepted as a -march= argument, --# plus any variant with a final "000" replaced by "k". -- --function write_arch_value_maybe_k(name, value, flags) --{ -- write_one_arch_value(name, value, flags) -- if (name ~ "000$") { -- sub("000$", "k", name) -- write_one_arch_value(name, value, "") -- } --} -- --# Write all the entries for a -march= argument. In addition to --# replacement of a final "000" with "k", an argument starting with --# "vr", "rm" or "r" followed by a number, or just a plain number, --# matches a plain number or "r" followed by a plain number. 
-- --function write_all_arch_values(name, value) --{ -- write_arch_value_maybe_k(name, value, " Canonical") -- cname = name -- if (cname ~ "^vr") { -- sub("^vr", "", cname) -- } else if (cname ~ "^rm") { -- sub("^rm", "", cname) -- } else if (cname ~ "^r") { -- sub("^r", "", cname) -- } -- if (cname ~ "^[0-9]") { -- if (cname != name) -- write_arch_value_maybe_k(cname, value, "") -- rname = "r" cname -- if (rname != name) -- write_arch_value_maybe_k(rname, value, "") -- } --} -- --/^LARCH_CPU/ { -- name = $2 -- gsub("\"", "", name) -- write_all_arch_values(name, value) -- value++ --}' $1/loongarch-cpus.def -diff --git a/gcc/config/loongarch/genopts/genstr.sh b/gcc/config/loongarch/genopts/genstr.sh -new file mode 100755 -index 000000000..e895f7ec8 ---- /dev/null -+++ b/gcc/config/loongarch/genopts/genstr.sh -@@ -0,0 +1,104 @@ -+#!/bin/sh -+# A simple script that generates loongarch-str.h and loongarch.opt -+# from genopt/loongarch-optstr. -+# -+# Copyright (C) 2020-2022 Free Software Foundation, Inc. -+# -+# This file is part of GCC. -+# -+# GCC is free software; you can redistribute it and/or modify it under -+# the terms of the GNU General Public License as published by the Free -+# Software Foundation; either version 3, or (at your option) any later -+# version. -+# -+# GCC is distributed in the hope that it will be useful, but WITHOUT -+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+# License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GCC; see the file COPYING3. If not see -+# . -+ -+cd "$(dirname "$0")" -+ -+# Generate a header containing definitions from the string table. -+gen_defines() { -+ cat <. */ -+ -+#ifndef LOONGARCH_STR_H -+#define LOONGARCH_STR_H -+EOF -+ -+ sed -e '/^$/n' -e 's@#.*$@@' -e '/^$/d' \ -+ -e 's@^\([^ \t]\+\)[ \t]*\([^ \t]*\)@#define \1 "\2"@' \ -+ loongarch-strings -+ -+ echo -+ echo "#endif /* LOONGARCH_STR_H */" -+} -+ -+ -+# Substitute all "@@@@" to "" in loongarch.opt.in -+# according to the key-value pairs defined in loongarch-strings. -+ -+gen_options() { -+ -+ sed -e '/^$/n' -e 's@#.*$@@' -e '/^$/d' \ -+ -e 's@^\([^ \t]\+\)[ \t]*\([^ \t]*\)@\1="\2"@' \ -+ loongarch-strings | { \ -+ -+ # read the definitions -+ while read -r line; do -+ eval "$line" -+ done -+ -+ # print a header -+ cat << EOF -+; Generated by "genstr" from the template "loongarch.opt.in" -+; and definitions from "loongarch-strings". -+; -+; Please do not edit this file directly. -+; It will be automatically updated during a gcc build -+; if you change "loongarch.opt.in" or "loongarch-strings". -+; -+EOF -+ -+ # make the substitutions -+ sed -e 's@"@\\"@g' -e 's/@@\([^@]\+\)@@/${\1}/g' loongarch.opt.in | \ -+ while read -r line; do -+ eval "echo \"$line\"" -+ done -+ } -+} -+ -+main() { -+ case "$1" in -+ header) gen_defines;; -+ opt) gen_options;; -+ *) echo "Unknown Command: \"$1\". Available: header, opt"; exit 1;; -+ esac -+} -+ -+main "$@" -diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings -new file mode 100644 -index 000000000..d79e2e791 ---- /dev/null -+++ b/gcc/config/loongarch/genopts/loongarch-strings -@@ -0,0 +1,68 @@ -+# Defines the key strings for LoongArch compiler options. -+# -+# Copyright (C) 2020-2022 Free Software Foundation, Inc. -+# -+# This file is part of GCC. 
-+# -+# GCC is free software; you can redistribute it and/or modify it under -+# the terms of the GNU General Public License as published by the Free -+# Software Foundation; either version 3, or (at your option) any later -+# version. -+# -+# GCC is distributed in the hope that it will be useful, but WITHOUT -+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+# License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GCC; see the file COPYING3. If not see -+# . -+ -+# -march= / -mtune= -+OPTSTR_ARCH arch -+OPTSTR_TUNE tune -+ -+STR_CPU_NATIVE native -+STR_CPU_ABI_DEFAULT abi-default -+STR_CPU_LOONGARCH64 loongarch64 -+STR_CPU_LA464 la464 -+STR_CPU_LA364 la364 -+STR_CPU_LA264 la264 -+STR_CPU_LA664 la664 -+ -+# Base architecture -+STR_ISA_BASE_LA64V100 la64 -+ -+# -mfpu -+OPTSTR_ISA_EXT_FPU fpu -+STR_NONE none -+STR_ISA_EXT_FPU0 0 -+STR_ISA_EXT_FPU32 32 -+STR_ISA_EXT_FPU64 64 -+ -+OPTSTR_SOFT_FLOAT soft-float -+OPTSTR_SINGLE_FLOAT single-float -+OPTSTR_DOUBLE_FLOAT double-float -+ -+# SIMD extensions -+OPTSTR_ISA_EXT_SIMD simd -+STR_ISA_EXT_LSX lsx -+STR_ISA_EXT_LASX lasx -+ -+# -mabi= -+OPTSTR_ABI_BASE abi -+STR_ABI_BASE_LP64D lp64d -+STR_ABI_BASE_LP64F lp64f -+STR_ABI_BASE_LP64S lp64s -+STR_ABI_BASE_LP64 lp64 -+ -+# ABI extension types -+STR_ABI_EXT_BASE base -+ -+# -mcmodel= -+OPTSTR_CMODEL cmodel -+STR_CMODEL_NORMAL normal -+STR_CMODEL_TINY tiny -+STR_CMODEL_TS tiny-static -+STR_CMODEL_LARGE large -+STR_CMODEL_EXTREME extreme -diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in -new file mode 100644 -index 000000000..463dfec77 ---- /dev/null -+++ b/gcc/config/loongarch/genopts/loongarch.opt.in -@@ -0,0 +1,242 @@ -+; Generated by "genstr" from the template "loongarch.opt.in" -+; and definitions from "loongarch-strings". -+; -+; Copyright (C) 2020-2022 Free Software Foundation, Inc. -+; -+; This file is part of GCC. -+; -+; GCC is free software; you can redistribute it and/or modify it under -+; the terms of the GNU General Public License as published by the Free -+; Software Foundation; either version 3, or (at your option) any later -+; version. -+; -+; GCC is distributed in the hope that it will be useful, but WITHOUT -+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+; License for more details. -+; -+; You should have received a copy of the GNU General Public License -+; along with GCC; see the file COPYING3. If not see -+; . -+; -+ -+HeaderInclude -+config/loongarch/loongarch-opts.h -+ -+HeaderInclude -+config/loongarch/loongarch-str.h -+ -+TargetVariable -+unsigned int recip_mask = 0 -+ -+; ISA related options -+;; Base ISA -+Enum -+Name(isa_base) Type(int) -+Basic ISAs of LoongArch: -+ -+EnumValue -+Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100) -+ -+;; ISA extensions / adjustments -+Enum -+Name(isa_ext_fpu) Type(int) -+FPU types of LoongArch: -+ -+EnumValue -+Enum(isa_ext_fpu) String(@@STR_NONE@@) Value(ISA_EXT_NONE) -+ -+EnumValue -+Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU32@@) Value(ISA_EXT_FPU32) -+ -+EnumValue -+Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64) -+ -+m@@OPTSTR_ISA_EXT_FPU@@= -+Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) -+-m@@OPTSTR_ISA_EXT_FPU@@=FPU Generate code for the given FPU. 
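Each @@KEY@@ placeholder in this template is expanded by the gen_options function of genstr.sh shown earlier: every "KEY value" line of loongarch-strings is turned into a shell assignment, @@KEY@@ is rewritten to ${KEY} by sed, and the line is re-echoed through eval. A minimal sketch of one round trip (not part of the patch; the value "fpu" comes from the OPTSTR_ISA_EXT_FPU entry in loongarch-strings above):

    # One definition from loongarch-strings:
    OPTSTR_ISA_EXT_FPU=fpu
    # One template line from this file:
    line='m@@OPTSTR_ISA_EXT_FPU@@='
    # Rewrite @@KEY@@ to ${KEY}, then expand it, as gen_options does:
    line=$(echo "$line" | sed -e 's/@@\([^@]\+\)@@/${\1}/g')
    eval "echo \"$line\""
    # prints: mfpu=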
-+ -+m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@ -+Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_NONE@@) -+ -+m@@OPTSTR_SOFT_FLOAT@@ -+Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SINGLE_FLOAT@@) -+Prevent the use of all hardware floating-point instructions. -+ -+m@@OPTSTR_SINGLE_FLOAT@@ -+Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_DOUBLE_FLOAT@@) -+Restrict the use of hardware floating-point instructions to 32-bit operations. -+ -+m@@OPTSTR_DOUBLE_FLOAT@@ -+Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SOFT_FLOAT@@) -+Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations. -+ -+Enum -+Name(isa_ext_simd) Type(int) -+SIMD extension levels of LoongArch: -+ -+EnumValue -+Enum(isa_ext_simd) String(@@STR_NONE@@) Value(ISA_EXT_NONE) -+ -+EnumValue -+Enum(isa_ext_simd) String(@@STR_ISA_EXT_LSX@@) Value(ISA_EXT_SIMD_LSX) -+ -+EnumValue -+Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX) -+ -+m@@OPTSTR_ISA_EXT_SIMD@@= -+Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) -+-m@@OPTSTR_ISA_EXT_SIMD@@=SIMD Generate code for the given SIMD extension. -+ -+m@@STR_ISA_EXT_LSX@@ -+Target Driver Defer Var(la_deferred_options) -+Enable LoongArch SIMD Extension (LSX, 128-bit). -+ -+m@@STR_ISA_EXT_LASX@@ -+Target Driver Defer Var(la_deferred_options) -+Enable LoongArch Advanced SIMD Extension (LASX, 256-bit). -+ -+;; Base target models (implies ISA & tune parameters) -+Enum -+Name(cpu_type) Type(int) -+LoongArch CPU types: -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_NATIVE@@) Value(CPU_NATIVE) -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_ABI_DEFAULT@@) Value(CPU_ABI_DEFAULT) -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64) -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_LA664@@) Value(CPU_LA664) -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464) -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_LA264@@) Value(CPU_LA264) -+ -+EnumValue -+Enum(cpu_type) String(@@STR_CPU_LA364@@) Value(CPU_LA364) -+ -+m@@OPTSTR_ARCH@@= -+Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) -+-m@@OPTSTR_ARCH@@=PROCESSOR Generate code for the given PROCESSOR ISA. -+ -+m@@OPTSTR_TUNE@@= -+Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) -+-m@@OPTSTR_TUNE@@=PROCESSOR Generate optimized code for PROCESSOR. -+ -+ -+; ABI related options -+; (ISA constraints on ABI are handled dynamically) -+ -+;; Base ABI -+Enum -+Name(abi_base) Type(int) -+Base ABI types for LoongArch: -+ -+EnumValue -+Enum(abi_base) String(@@STR_ABI_BASE_LP64D@@) Value(ABI_BASE_LP64D) -+ -+EnumValue -+Enum(abi_base) String(@@STR_ABI_BASE_LP64F@@) Value(ABI_BASE_LP64F) -+ -+EnumValue -+Enum(abi_base) String(@@STR_ABI_BASE_LP64S@@) Value(ABI_BASE_LP64S) -+ -+m@@OPTSTR_ABI_BASE@@= -+Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET) -+-m@@OPTSTR_ABI_BASE@@=BASEABI Generate code that conforms to the given BASEABI. -+ -+;; Legacy option: -mabi=lp64 -+m@@OPTSTR_ABI_BASE@@=@@STR_ABI_BASE_LP64@@ -+Target RejectNegative Mask(LP64) -+-m@@OPTSTR_ABI_BASE@@=@@STR_ABI_BASE_LP64@@ Legacy option that enables the lp64 integer ABI. 
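After substitution, the records above become ordinary driver options: -mabi= accepting the Enum(abi_base) values lp64d, lp64f and lp64s, plus the legacy -mabi=lp64 alias backed by Mask(LP64). A usage sketch (the triplet-prefixed driver name and the input file are illustrative):

    # Explicit double-float 64-bit ABI with a 64-bit FPU:
    loongarch64-linux-gnu-gcc -mabi=lp64d -mfpu=64 -c hello.c
    # The legacy spelling is still accepted through the Mask(LP64) record:
    loongarch64-linux-gnu-gcc -mabi=lp64 -c hello.c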
-+ -+;; ABI Extension -+Variable -+int la_opt_abi_ext = M_OPT_UNSET -+ -+mbranch-cost= -+Target RejectNegative Joined UInteger Var(loongarch_branch_cost) -+-mbranch-cost=COST Set the cost of branches to roughly COST instructions. -+ -+mvecarg -+Target Report Var(TARGET_VECARG) Init(1) -+Target pass vect arg uses vector register. -+ -+mmemvec-cost= -+Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) -+mmemvec-cost=COST Set the cost of vector memory access instructions. -+ -+mveclibabi= -+Target RejectNegative Joined Var(loongarch_veclibabi_name) -+Vector library ABI to use. -+ -+mstackrealign -+Target Var(loongarch_stack_realign) Init(1) -+Realign stack in prologue. -+ -+mforce-drap -+Target Var(loongarch_force_drap) Init(0) -+Always use Dynamic Realigned Argument Pointer (DRAP) to realign stack. -+ -+mcheck-zero-division -+Target Mask(CHECK_ZERO_DIV) -+Trap on integer divide by zero. -+ -+mcond-move-int -+Target Var(TARGET_COND_MOVE_INT) Init(1) -+Conditional moves for integral are enabled. -+ -+mcond-move-float -+Target Var(TARGET_COND_MOVE_FLOAT) Init(1) -+Conditional moves for float are enabled. -+ -+mmemcpy -+Target Mask(MEMCPY) -+Prevent optimizing block moves, which is also the default behavior of -Os. -+ -+mstrict-align -+Target Var(TARGET_STRICT_ALIGN) Init(0) -+Do not generate unaligned memory accesses. -+ -+mmax-inline-memcpy-size= -+Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) -+-mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. -+ -+mrecip -+Target Report RejectNegative Var(loongarch_recip) -+Generate reciprocals instead of divss and sqrtss. -+ -+mrecip= -+Target Report RejectNegative Joined Var(loongarch_recip_name) -+Control generation of reciprocal estimates. -+ -+; The code model option names for -mcmodel. -+Enum -+Name(cmodel) Type(int) -+The code model option names for -mcmodel: -+ -+EnumValue -+Enum(cmodel) String(@@STR_CMODEL_NORMAL@@) Value(CMODEL_NORMAL) -+ -+EnumValue -+Enum(cmodel) String(@@STR_CMODEL_TINY@@) Value(CMODEL_TINY) -+ -+EnumValue -+Enum(cmodel) String(@@STR_CMODEL_TS@@) Value(CMODEL_TINY_STATIC) -+ -+EnumValue -+Enum(cmodel) String(@@STR_CMODEL_LARGE@@) Value(CMODEL_LARGE) -+ -+EnumValue -+Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME) -+ -+mcmodel= -+Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) -+Specify the code model. -diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h -index 1304e2e97..603aed5a2 100644 ---- a/gcc/config/loongarch/gnu-user.h -+++ b/gcc/config/loongarch/gnu-user.h -@@ -1,4 +1,5 @@ --/* Definitions for LARCH systems using GNU userspace. -+/* Definitions for LoongArch systems using GNU (glibc-based) userspace, -+ or other userspace with libc derived from glibc. - Copyright (C) 1998-2018 Free Software Foundation, Inc. - - This file is part of GCC. -@@ -17,116 +18,66 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - -+/* Define the size of the wide character type. */ - #undef WCHAR_TYPE - #define WCHAR_TYPE "int" - - #undef WCHAR_TYPE_SIZE - #define WCHAR_TYPE_SIZE 32 - --#undef ASM_DECLARE_OBJECT_NAME --#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name - --/* If we don't set MASK_ABICALLS, we can't default to PIC. */ --/* #undef TARGET_DEFAULT */ --/* #define TARGET_DEFAULT MASK_ABICALLS */ -+/* GNU-specific SPEC definitions. 
*/ -+#define GNU_USER_LINK_EMULATION "elf" ABI_GRLEN_SPEC "loongarch" - --#define TARGET_OS_CPP_BUILTINS() \ -- do { \ -- GNU_USER_TARGET_OS_CPP_BUILTINS(); \ -- /* The GNU C++ standard library requires this. */ \ -- if (c_dialect_cxx ()) \ -- builtin_define ("_GNU_SOURCE"); \ -- } while (0) -+#undef GLIBC_DYNAMIC_LINKER -+#define GLIBC_DYNAMIC_LINKER \ -+ "/lib" ABI_GRLEN_SPEC "/" \ -+ "%{mabi=lp64d:ld.so.1;" \ -+ "mabi=lp64s:ld-linux-loongarch-lp64s.so.1;" \ -+ "mabi=lp64f:ld-linux-loongarch-lp64f.so.1}" - --#undef SUBTARGET_CPP_SPEC --#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" -- --/* A standard GNU/Linux mapping. On most targets, it is included in -- CC1_SPEC itself by config/linux.h, but loongarch.h overrides CC1_SPEC -- and provides this hook instead. */ --#undef SUBTARGET_CC1_SPEC --#define SUBTARGET_CC1_SPEC GNU_USER_TARGET_CC1_SPEC -- --/* -G is incompatible with -KPIC which is the default, so only allow objects -- in the small data section if the user explicitly asks for it. */ --#undef LARCH_DEFAULT_GVALUE --#define LARCH_DEFAULT_GVALUE 0 -+#undef MUSL_DYNAMIC_LINKER -+#define MUSL_DYNAMIC_LINKER \ -+ "/lib" ABI_GRLEN_SPEC "/ld-musl-loongarch-" ABI_SPEC ".so.1" - - #undef GNU_USER_TARGET_LINK_SPEC --#define GNU_USER_TARGET_LINK_SPEC "\ -- %{G*} %{EB} %{EL} %{shared} \ -- %{!shared: \ -- %{!static: \ -- %{rdynamic:-export-dynamic} \ -- %{mabi=lp32: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP32 "} \ -- %{mabi=lp64: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP64 "}} \ -- %{static}} \ -- %{mabi=lp32:-m" GNU_USER_LINK_EMULATION32 "} \ -- %{mabi=lp64:-m" GNU_USER_LINK_EMULATION64 "}" -+#define GNU_USER_TARGET_LINK_SPEC \ -+ "%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \ -+ "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \ -+ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}" - --#undef LINK_SPEC --#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC - --/* The LARCH assembler has different syntax for .set. We set it to -- .dummy to trap any errors. */ --#undef SET_ASM_OP --#define SET_ASM_OP "\t.dummy\t" -- --#undef ASM_OUTPUT_DEF --#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ -- do { \ -- fputc ( '\t', FILE); \ -- assemble_name (FILE, LABEL1); \ -- fputs ( " = ", FILE); \ -- assemble_name (FILE, LABEL2); \ -- fputc ( '\n', FILE); \ -- } while (0) -- --/* The glibc _mcount stub will save $v0 for us. Don't mess with saving -- it, since ASM_OUTPUT_REG_PUSH/ASM_OUTPUT_REG_POP do not work in the -- presence of $gp-relative calls. */ --#undef ASM_OUTPUT_REG_PUSH --#undef ASM_OUTPUT_REG_POP -+/* Similar to standard Linux, but adding -ffast-math support. */ -+#undef GNU_USER_TARGET_MATHFILE_SPEC -+#define GNU_USER_TARGET_MATHFILE_SPEC \ -+ "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}" - - #undef LIB_SPEC - #define LIB_SPEC GNU_USER_TARGET_LIB_SPEC - --#define NO_SHARED_SPECS "" -- --/* -march=native handling only makes sense with compiler running on -- a LARCH chip. */ --#if defined(__loongarch__) --extern const char *host_detect_local_cpu (int argc, const char **argv); --# define EXTRA_SPEC_FUNCTIONS \ -- { "local_cpu_detect", host_detect_local_cpu }, -- --# define MARCH_MTUNE_NATIVE_SPECS \ -- " %{march=native:%. -+ -+;; Uncomment the following line to output automata for debugging. -+;; (automata_option "v") -+ -+;; Automaton for integer instructions. -+(define_automaton "la464_a_alu") -+ -+;; Automaton for floating-point instructions. -+(define_automaton "la464_a_falu") -+ -+;; Automaton for memory operations. 
-+(define_automaton "la464_a_mem") -+ -+;; Describe the resources. -+ -+(define_cpu_unit "la464_alu1" "la464_a_alu") -+(define_cpu_unit "la464_alu2" "la464_a_alu") -+(define_cpu_unit "la464_mem1" "la464_a_mem") -+(define_cpu_unit "la464_mem2" "la464_a_mem") -+(define_cpu_unit "la464_falu1" "la464_a_falu") -+(define_cpu_unit "la464_falu2" "la464_a_falu") -+ -+;; Describe instruction reservations. -+ -+(define_insn_reservation "la464_arith" 1 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "arith,clz,const,logical, -+ move,nop,shift,signext,slt")) -+ "la464_alu1 | la464_alu2") -+ -+(define_insn_reservation "la464_branch" 1 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "branch,jump,call,condmove,trap")) -+ "la464_alu1 | la464_alu2") -+ -+(define_insn_reservation "la464_imul" 7 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "imul")) -+ "la464_alu1 | la464_alu2") -+ -+(define_insn_reservation "la464_idiv_si" 12 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (and (eq_attr "type" "idiv") -+ (eq_attr "mode" "SI"))) -+ "la464_alu1 | la464_alu2") -+ -+(define_insn_reservation "la464_idiv_di" 25 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (and (eq_attr "type" "idiv") -+ (eq_attr "mode" "DI"))) -+ "la464_alu1 | la464_alu2") -+ -+(define_insn_reservation "la464_load" 4 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "load")) -+ "la464_mem1 | la464_mem2") -+ -+(define_insn_reservation "la464_gpr_fp" 16 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "mftg,mgtf")) -+ "la464_mem1") -+ -+(define_insn_reservation "la464_fpload" 4 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "fpload")) -+ "la464_mem1 | la464_mem2") -+ -+(define_insn_reservation "la464_prefetch" 0 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "prefetch,prefetchx")) -+ "la464_mem1 | la464_mem2") -+ -+(define_insn_reservation "la464_store" 0 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "store,fpstore,fpidxstore")) -+ "la464_mem1 | la464_mem2") -+ -+(define_insn_reservation "la464_fadd" 4 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "fadd,fmul,fmadd")) -+ "la464_falu1 | la464_falu2") -+ -+(define_insn_reservation "la464_fcmp" 2 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "fabs,fcmp,fmove,fneg")) -+ "la464_falu1 | la464_falu2") -+ -+(define_insn_reservation "la464_fcvt" 4 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "fcvt")) -+ "la464_falu1 | la464_falu2") -+ -+(define_insn_reservation "la464_fdiv_sf" 12 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") -+ (eq_attr "mode" "SF"))) -+ "la464_falu1 | la464_falu2") -+ -+(define_insn_reservation "la464_fdiv_df" 19 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") -+ (eq_attr "mode" "DF"))) -+ "la464_falu1 | la464_falu2") -+ -+;; Force single-dispatch for unknown or multi. 
-+(define_insn_reservation "la464_unknown" 1 -+ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") -+ (eq_attr "type" "unknown,multi,atomic,syncloop")) -+ "la464_alu1 + la464_alu2 + la464_falu1 -+ + la464_falu2 + la464_mem1 + la464_mem2") -+ -+;; End of DFA-based pipeline description for la464 -diff --git a/gcc/config/loongarch/larchintrin.h b/gcc/config/loongarch/larchintrin.h -index c649bf3f4..8e26ed6f0 100644 ---- a/gcc/config/loongarch/larchintrin.h -+++ b/gcc/config/loongarch/larchintrin.h -@@ -1,384 +1,353 @@ - /* Intrinsics for LoongArch BASE operations. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. - -- Copyright (C) 2019 Free Software Foundation, Inc. -- Contributed by xuchenghua@loongson.cn. -+This file is part of GCC. - -- This file is part of GCC. -+GCC is free software; you can redistribute it and/or modify it -+under the terms of the GNU General Public License as published -+by the Free Software Foundation; either version 3, or (at your -+option) any later version. - -- GCC is free software; you can redistribute it and/or modify it -- under the terms of the GNU General Public License as published -- by the Free Software Foundation; either version 3, or (at your -- option) any later version. -+GCC is distributed in the hope that it will be useful, but WITHOUT -+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+License for more details. - -- GCC is distributed in the hope that it will be useful, but WITHOUT -- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -- License for more details. -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. - -- Under Section 7 of GPL version 3, you are granted additional -- permissions described in the GCC Runtime Library Exception, version -- 3.1, as published by the Free Software Foundation. -- -- You should have received a copy of the GNU General Public License and -- a copy of the GCC Runtime Library Exception along with this program; -- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -- . */ -+You should have received a copy of the GNU General Public License and -+a copy of the GCC Runtime Library Exception along with this program; -+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+. 
*/ - - #ifndef _GCC_LOONGARCH_BASE_INTRIN_H - #define _GCC_LOONGARCH_BASE_INTRIN_H - - #ifdef __cplusplus --extern "C"{ -+extern "C" { - #endif - --typedef struct drdtime{ -- unsigned long dvalue; -- unsigned long dtimeid; -+typedef struct drdtime -+{ -+ unsigned long dvalue; -+ unsigned long dtimeid; - } __drdtime_t; - --typedef struct rdtime{ -- unsigned int value; -- unsigned int timeid; -+typedef struct rdtime -+{ -+ unsigned int value; -+ unsigned int timeid; - } __rdtime_t; - - #ifdef __loongarch64 --extern __inline __drdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_rdtime_d (void) -+extern __inline __drdtime_t -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__rdtime_d (void) - { -- __drdtime_t drdtime; -+ __drdtime_t __drdtime; - __asm__ volatile ( - "rdtime.d\t%[val],%[tid]\n\t" -- : [val]"=&r"(drdtime.dvalue),[tid]"=&r"(drdtime.dtimeid) -- : -- ); -- return drdtime; -+ : [val]"=&r"(__drdtime.dvalue),[tid]"=&r"(__drdtime.dtimeid) -+ :); -+ return __drdtime; - } --#define __rdtime_d __builtin_loongarch_rdtime_d - #endif - --extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_rdtimeh_w (void) -+extern __inline __rdtime_t -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__rdtimeh_w (void) - { -- __rdtime_t rdtime; -+ __rdtime_t __rdtime; - __asm__ volatile ( - "rdtimeh.w\t%[val],%[tid]\n\t" -- : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) -- : -- ); -- return rdtime; -+ : [val]"=&r"(__rdtime.value),[tid]"=&r"(__rdtime.timeid) -+ :); -+ return __rdtime; - } --#define __rdtimel_w __builtin_loongarch_rdtimel_w - --extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_rdtimel_w (void) -+extern __inline __rdtime_t -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__rdtimel_w (void) - { -- __rdtime_t rdtime; -+ __rdtime_t __rdtime; - __asm__ volatile ( - "rdtimel.w\t%[val],%[tid]\n\t" -- : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) -- : -- ); -- return rdtime; -+ : [val]"=&r"(__rdtime.value),[tid]"=&r"(__rdtime.timeid) -+ :); -+ return __rdtime; - } --#define __rdtimeh_w __builtin_loongarch_rdtimeh_w -- --/* Assembly instruction format: rj, fcsr */ --/* Data types in instruction templates: USI, UQI */ --#define __movfcsr2gr(/*ui5*/_1) __builtin_loongarch_movfcsr2gr((_1)); -- --/* Assembly instruction format: 0, fcsr, rj */ --/* Data types in instruction templates: VOID, UQI, USI */ --#define __movgr2fcsr(/*ui5*/ _1, _2) __builtin_loongarch_movgr2fcsr((unsigned short)_1, (unsigned int)_2); -- --#ifdef __loongarch32 --/* Assembly instruction format: ui5, rj, si12 */ --/* Data types in instruction templates: VOID, USI, USI, SI */ --#define __cacop(/*ui5*/ _1, /*unsigned int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_cacop((_1), (unsigned int)(_2), (_3))) --#elif defined __loongarch64 --/* Assembly instruction format: ui5, rj, si12 */ --/* Data types in instruction templates: VOID, USI, UDI, SI */ --#define __dcacop(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_dcacop((_1), (unsigned long int)(_2), (_3))) -+ -+/* Assembly instruction format: rj, fcsr. */ -+/* Data types in instruction templates: USI, UQI. */ -+#define __movfcsr2gr(/*ui5*/ _1) __builtin_loongarch_movfcsr2gr ((_1)); -+ -+/* Assembly instruction format: fcsr, rj. */ -+/* Data types in instruction templates: VOID, UQI, USI. 
*/ -+#define __movgr2fcsr(/*ui5*/ _1, _2) \ -+ __builtin_loongarch_movgr2fcsr ((_1), (unsigned int) _2); -+ -+#if defined __loongarch64 -+/* Assembly instruction format: ui5, rj, si12. */ -+/* Data types in instruction templates: VOID, USI, UDI, SI. */ -+#define __cacop_d(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) \ -+ ((void) __builtin_loongarch_cacop_d ((_1), (unsigned long int) (_2), (_3))) - #else --# error "Don't support this ABI." -+#error "Unsupported ABI." - #endif - --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: USI, USI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --unsigned int __cpucfg(unsigned int _1) -+/* Assembly instruction format: rd, rj. */ -+/* Data types in instruction templates: USI, USI. */ -+extern __inline unsigned int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__cpucfg (unsigned int _1) - { -- return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1); -+ return (unsigned int) __builtin_loongarch_cpucfg ((unsigned int) _1); - } - - #ifdef __loongarch64 --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: DI, DI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --void __asrtle_d(long int _1, long int _2) -+/* Assembly instruction format: rj, rk. */ -+/* Data types in instruction templates: DI, DI. */ -+extern __inline void -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__asrtle_d (long int _1, long int _2) - { -- __builtin_loongarch_asrtle_d((long int)_1, (long int)_2); -+ __builtin_loongarch_asrtle_d ((long int) _1, (long int) _2); - } - --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: DI, DI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --void __asrtgt_d(long int _1, long int _2) -+/* Assembly instruction format: rj, rk. */ -+/* Data types in instruction templates: DI, DI. */ -+extern __inline void -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__asrtgt_d (long int _1, long int _2) - { -- __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2); -+ __builtin_loongarch_asrtgt_d ((long int) _1, (long int) _2); - } - #endif - --#ifdef __loongarch32 --/* Assembly instruction format: rd, rj, ui5 */ --/* Data types in instruction templates: SI, SI, UQI */ --#define __lddir(/*int*/ _1, /*ui5*/ _2) ((int)__builtin_loongarch_lddir((int)(_1), (_2))) --#elif defined __loongarch64 --/* Assembly instruction format: rd, rj, ui5 */ --/* Data types in instruction templates: DI, DI, UQI */ --#define __dlddir(/*long int*/ _1, /*ui5*/ _2) ((long int)__builtin_loongarch_dlddir((long int)(_1), (_2))) -+#if defined __loongarch64 -+/* Assembly instruction format: rd, rj, ui5. */ -+/* Data types in instruction templates: DI, DI, UQI. */ -+#define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \ -+ ((long int) __builtin_loongarch_lddir_d ((long int) (_1), (_2))) - #else --# error "Don't support this ABI." -+#error "Unsupported ABI." 
- #endif - --#ifdef __loongarch32 --/* Assembly instruction format: rj, ui5 */ --/* Data types in instruction templates: VOID, SI, UQI */ --#define __ldpte(/*int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_ldpte((int)(_1), (_2))) --#elif defined __loongarch64 --/* Assembly instruction format: rj, ui5 */ --/* Data types in instruction templates: VOID, DI, UQI */ --#define __dldpte(/*long int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_dldpte((long int)(_1), (_2))) -+#if defined __loongarch64 -+/* Assembly instruction format: rj, ui5. */ -+/* Data types in instruction templates: VOID, DI, UQI. */ -+#define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \ -+ ((void) __builtin_loongarch_ldpte_d ((long int) (_1), (_2))) - #else --# error "Don't support this ABI." -+#error "Unsupported ABI." - #endif - --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, QI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crc_w_b_w(char _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, QI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crc_w_b_w (char _1, int _2) - { -- return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2); -+ return (int) __builtin_loongarch_crc_w_b_w ((char) _1, (int) _2); - } - --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, HI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crc_w_h_w(short _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, HI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crc_w_h_w (short _1, int _2) - { -- return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2); -+ return (int) __builtin_loongarch_crc_w_h_w ((short) _1, (int) _2); - } - --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, SI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crc_w_w_w(int _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, SI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crc_w_w_w (int _1, int _2) - { -- return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2); -+ return (int) __builtin_loongarch_crc_w_w_w ((int) _1, (int) _2); - } - - #ifdef __loongarch64 --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, DI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crc_w_d_w(long int _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, DI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crc_w_d_w (long int _1, int _2) - { -- return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2); -+ return (int) __builtin_loongarch_crc_w_d_w ((long int) _1, (int) _2); - } - #endif - --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, QI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crcc_w_b_w(char _1, int _2) -+/* Assembly instruction format: rd, rj, rk. 
*/ -+/* Data types in instruction templates: SI, QI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crcc_w_b_w (char _1, int _2) - { -- return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2); -+ return (int) __builtin_loongarch_crcc_w_b_w ((char) _1, (int) _2); - } - --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, HI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crcc_w_h_w(short _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, HI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crcc_w_h_w (short _1, int _2) - { -- return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2); -+ return (int) __builtin_loongarch_crcc_w_h_w ((short) _1, (int) _2); - } - --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, SI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crcc_w_w_w(int _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, SI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crcc_w_w_w (int _1, int _2) - { -- return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2); -+ return (int) __builtin_loongarch_crcc_w_w_w ((int) _1, (int) _2); - } - - #ifdef __loongarch64 --/* Assembly instruction format: rd, rj, rk */ --/* Data types in instruction templates: SI, DI, SI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --int __crcc_w_d_w(long int _1, int _2) -+/* Assembly instruction format: rd, rj, rk. */ -+/* Data types in instruction templates: SI, DI, SI. */ -+extern __inline int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__crcc_w_d_w (long int _1, int _2) - { -- return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2); -+ return (int) __builtin_loongarch_crcc_w_d_w ((long int) _1, (int) _2); - } - #endif - --/* Assembly instruction format: rd, ui14 */ --/* Data types in instruction templates: USI, USI */ --#define __csrrd(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd((_1))) -+/* Assembly instruction format: rd, ui14. */ -+/* Data types in instruction templates: USI, USI. */ -+#define __csrrd_w(/*ui14*/ _1) \ -+ ((unsigned int) __builtin_loongarch_csrrd_w ((_1))) - --/* Assembly instruction format: rd, ui14 */ --/* Data types in instruction templates: USI, USI, USI */ --#define __csrwr(/*unsigned int*/ _1, /*ui14*/ _2) ((unsigned int)__builtin_loongarch_csrwr((unsigned int)(_1), (_2))) -+/* Assembly instruction format: rd, ui14. */ -+/* Data types in instruction templates: USI, USI, USI. */ -+#define __csrwr_w(/*unsigned int*/ _1, /*ui14*/ _2) \ -+ ((unsigned int) __builtin_loongarch_csrwr_w ((unsigned int) (_1), (_2))) - --/* Assembly instruction format: rd, rj, ui14 */ --/* Data types in instruction templates: USI, USI, USI, USI */ --#define __csrxchg(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) ((unsigned int)__builtin_loongarch_csrxchg((unsigned int)(_1), (unsigned int)(_2), (_3))) -+/* Assembly instruction format: rd, rj, ui14. */ -+/* Data types in instruction templates: USI, USI, USI, USI. 
*/
-+#define __csrxchg_w(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) \
-+ ((unsigned int) __builtin_loongarch_csrxchg_w ((unsigned int) (_1), \
-+ (unsigned int) (_2), (_3)))
- 
- #ifdef __loongarch64
--/* Assembly instruction format: rd, ui14 */
--/* Data types in instruction templates: UDI, USI */
--#define __dcsrrd(/*ui14*/ _1) ((unsigned long int)__builtin_loongarch_dcsrrd((_1)))
--
--/* Assembly instruction format: rd, ui14 */
--/* Data types in instruction templates: UDI, UDI, USI */
--#define __dcsrwr(/*unsigned long int*/ _1, /*ui14*/ _2) ((unsigned long int)__builtin_loongarch_dcsrwr((unsigned long int)(_1), (_2)))
--
--/* Assembly instruction format: rd, rj, ui14 */
--/* Data types in instruction templates: UDI, UDI, UDI, USI */
--#define __dcsrxchg(/*unsigned long int*/ _1, /*unsigned long int*/ _2, /*ui14*/ _3) ((unsigned long int)__builtin_loongarch_dcsrxchg((unsigned long int)(_1), (unsigned long int)(_2), (_3)))
-+/* Assembly instruction format: rd, ui14. */
-+/* Data types in instruction templates: UDI, USI. */
-+#define __csrrd_d(/*ui14*/ _1) \
-+ ((unsigned long int) __builtin_loongarch_csrrd_d ((_1)))
-+
-+/* Assembly instruction format: rd, ui14. */
-+/* Data types in instruction templates: UDI, UDI, USI. */
-+#define __csrwr_d(/*unsigned long int*/ _1, /*ui14*/ _2) \
-+ ((unsigned long int) __builtin_loongarch_csrwr_d ((unsigned long int) (_1), \
-+ (_2)))
-+
-+/* Assembly instruction format: rd, rj, ui14. */
-+/* Data types in instruction templates: UDI, UDI, UDI, USI. */
-+#define __csrxchg_d(/*unsigned long int*/ _1, /*unsigned long int*/ _2, \
-+ /*ui14*/ _3) \
-+ ((unsigned long int) __builtin_loongarch_csrxchg_d ( \
-+ (unsigned long int) (_1), (unsigned long int) (_2), (_3)))
- #endif
- 
--/* Assembly instruction format: rd, rj */
--/* Data types in instruction templates: UQI, USI */
--extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
--unsigned char __iocsrrd_b(unsigned int _1)
-+/* Assembly instruction format: rd, rj. */
-+/* Data types in instruction templates: UQI, USI. */
-+extern __inline unsigned char
-+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-+__iocsrrd_b (unsigned int _1)
- {
-- return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1);
-+ return (unsigned char) __builtin_loongarch_iocsrrd_b ((unsigned int) _1);
- }
- 
--/* Assembly instruction format: rd, rj */
--/* Data types in instruction templates: UHI, USI */
--extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
--unsigned short __iocsrrd_h(unsigned int _1)
-+/* Assembly instruction format: rd, rj. */
-+/* Data types in instruction templates: UHI, USI. */
-+extern __inline unsigned short
-+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-+__iocsrrd_h (unsigned int _1)
- {
-- return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1);
-+ return (unsigned short) __builtin_loongarch_iocsrrd_h ((unsigned int) _1);
- }
- 
--/* Assembly instruction format: rd, rj */
--/* Data types in instruction templates: USI, USI */
--extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
--unsigned int __iocsrrd_w(unsigned int _1)
-+/* Assembly instruction format: rd, rj. */
-+/* Data types in instruction templates: USI, USI. 
*/ -+extern __inline unsigned int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__iocsrrd_w (unsigned int _1) - { -- return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1); -+ return (unsigned int) __builtin_loongarch_iocsrrd_w ((unsigned int) _1); - } - - #ifdef __loongarch64 --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: UDI, USI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --unsigned long int __iocsrrd_d(unsigned int _1) -+/* Assembly instruction format: rd, rj. */ -+/* Data types in instruction templates: UDI, USI. */ -+extern __inline unsigned long int -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__iocsrrd_d (unsigned int _1) - { -- return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1); -+ return (unsigned long int) __builtin_loongarch_iocsrrd_d ((unsigned int) _1); - } - #endif - --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: VOID, UQI, USI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --void __iocsrwr_b(unsigned char _1, unsigned int _2) -+/* Assembly instruction format: rd, rj. */ -+/* Data types in instruction templates: VOID, UQI, USI. */ -+extern __inline void -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__iocsrwr_b (unsigned char _1, unsigned int _2) - { -- return (void)__builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2); -+ __builtin_loongarch_iocsrwr_b ((unsigned char) _1, (unsigned int) _2); - } - --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: VOID, UHI, USI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --void __iocsrwr_h(unsigned short _1, unsigned int _2) -+/* Assembly instruction format: rd, rj. */ -+/* Data types in instruction templates: VOID, UHI, USI. */ -+extern __inline void -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__iocsrwr_h (unsigned short _1, unsigned int _2) - { -- return (void)__builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2); -+ __builtin_loongarch_iocsrwr_h ((unsigned short) _1, (unsigned int) _2); - } - --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: VOID, USI, USI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --void __iocsrwr_w(unsigned int _1, unsigned int _2) -+/* Assembly instruction format: rd, rj. */ -+/* Data types in instruction templates: VOID, USI, USI. */ -+extern __inline void -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__iocsrwr_w (unsigned int _1, unsigned int _2) - { -- return (void)__builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2); -+ __builtin_loongarch_iocsrwr_w ((unsigned int) _1, (unsigned int) _2); - } - - #ifdef __loongarch64 --/* Assembly instruction format: rd, rj */ --/* Data types in instruction templates: VOID, UDI, USI */ --extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --void __iocsrwr_d(unsigned long int _1, unsigned int _2) -+/* Assembly instruction format: rd, rj. */ -+/* Data types in instruction templates: VOID, UDI, USI. 
*/ -+extern __inline void -+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -+__iocsrwr_d (unsigned long int _1, unsigned int _2) - { -- return (void)__builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2); -+ __builtin_loongarch_iocsrwr_d ((unsigned long int) _1, (unsigned int) _2); - } - #endif - --/* Assembly instruction format: ui15 */ --/* Data types in instruction templates: UQI */ --#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1)) -- --/* Assembly instruction format: ui15 */ --/* Data types in instruction templates: UQI */ --#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1)) -- --#define __builtin_loongarch_syscall(a) \ --{ \ -- __asm__ volatile ("syscall %0\n\t" \ -- ::"I"(a)); \ --} --#define __syscall __builtin_loongarch_syscall -- --#define __builtin_loongarch_break(a) \ --{ \ -- __asm__ volatile ("break %0\n\t" \ -- ::"I"(a)); \ --} --#define __break __builtin_loongarch_break -- -- --extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_tlbsrch (void) --{ -- __asm__ volatile ("tlbsrch\n\t"); --} --#define __tlbsrch __builtin_loongarch_tlbsrch -- --extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_tlbrd (void) --{ -- __asm__ volatile ("tlbrd\n\t"); --} --#define __tlbrd __builtin_loongarch_tlbrd -+/* Assembly instruction format: ui15. */ -+/* Data types in instruction templates: USI. */ -+#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar ((_1)) - --extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_tlbwr (void) --{ -- __asm__ volatile ("tlbwr\n\t"); --} --#define __tlbwr __builtin_loongarch_tlbwr -+/* Assembly instruction format: ui15. */ -+/* Data types in instruction templates: USI. */ -+#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar ((_1)) - --extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_tlbfill (void) --{ -- __asm__ volatile ("tlbfill\n\t"); --} --#define __tlbfill __builtin_loongarch_tlbfill -- --extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_tlbclr (void) --{ -- __asm__ volatile ("tlbclr\n\t"); --} --#define __tlbclr __builtin_loongarch_tlbclr -- --extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__builtin_loongarch_tlbflush (void) --{ -- __asm__ volatile ("tlbflush\n\t"); --} --#define __tlbflush __builtin_loongarch_tlbflush -+/* Assembly instruction format: ui15. */ -+/* Data types in instruction templates: USI. */ -+#define __syscall(/*ui15*/ _1) __builtin_loongarch_syscall ((_1)) - -+/* Assembly instruction format: ui15. */ -+/* Data types in instruction templates: USI. */ -+#define __break(/*ui15*/ _1) __builtin_loongarch_break ((_1)) - - #ifdef __cplusplus - } -diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md -index 24757aaa1..515336e05 100644 ---- a/gcc/config/loongarch/lasx.md -+++ b/gcc/config/loongarch/lasx.md -@@ -212,6 +212,9 @@ - ;; As ILASX but excludes V32QI. - (define_mode_iterator ILASX_DWH [V4DI V8SI V16HI]) - -+;; As LASX but excludes V32QI. -+(define_mode_iterator LASX_DWH [V4DF V8SF V4DI V8SI V16HI]) -+ - ;; As ILASX but excludes V4DI. - (define_mode_iterator ILASX_WHB [V8SI V16HI V32QI]) - -@@ -227,7 +230,7 @@ - ;; Only used for immediate set shuffle elements instruction. 
- (define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF]) - --;; The atribute gives the integer vector mode with same size in Loongson ASX. -+;; The attribute gives the integer vector mode with same size in Loongson ASX. - (define_mode_attr VIMODE256 - [(V4DF "V4DI") - (V8SF "V8SI") -@@ -476,6 +479,37 @@ - (V16HI "w") - (V32QI "w")]) - -+(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S -+ UNSPEC_LASX_XVFRINTRZ_S -+ UNSPEC_LASX_XVFRINT -+ UNSPEC_LASX_XVFRINTRM_S]) -+ -+(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D -+ UNSPEC_LASX_XVFRINTRZ_D -+ UNSPEC_LASX_XVFRINT -+ UNSPEC_LASX_XVFRINTRM_D]) -+ -+(define_int_attr frint256_pattern_s -+ [(UNSPEC_LASX_XVFRINTRP_S "ceil") -+ (UNSPEC_LASX_XVFRINTRZ_S "btrunc") -+ (UNSPEC_LASX_XVFRINT "rint") -+ (UNSPEC_LASX_XVFRINTRM_S "floor")]) -+ -+(define_int_attr frint256_pattern_d -+ [(UNSPEC_LASX_XVFRINTRP_D "ceil") -+ (UNSPEC_LASX_XVFRINTRZ_D "btrunc") -+ (UNSPEC_LASX_XVFRINT "rint") -+ (UNSPEC_LASX_XVFRINTRM_D "floor")]) -+ -+(define_int_attr frint256_suffix -+ [(UNSPEC_LASX_XVFRINTRP_S "rp") -+ (UNSPEC_LASX_XVFRINTRP_D "rp") -+ (UNSPEC_LASX_XVFRINTRZ_S "rz") -+ (UNSPEC_LASX_XVFRINTRZ_D "rz") -+ (UNSPEC_LASX_XVFRINT "") -+ (UNSPEC_LASX_XVFRINTRM_S "rm") -+ (UNSPEC_LASX_XVFRINTRM_D "rm")]) -+ - (define_expand "vec_init" - [(match_operand:LASX 0 "register_operand") - (match_operand:LASX 1 "")] -@@ -497,7 +531,6 @@ - "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" - [(set_attr "type" "simd_permute") - (set_attr "mode" "") -- (set_attr "can_delay" "no") - (set_attr "length" "8")]) - - (define_expand "vec_unpacks_hi_v8sf" -@@ -522,7 +555,6 @@ - operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, false/*high_p*/); - }) - -- - (define_expand "vec_unpacks_hi_" - [(match_operand: 0 "register_operand") - (match_operand:ILASX_WHB 1 "register_operand")] -@@ -560,11 +592,11 @@ - }) - - (define_insn "lasx_xvinsgr2vr_" -- [(set (match_operand:LASX_WD 0 "register_operand" "=f") -- (vec_merge:LASX_WD -- (vec_duplicate:LASX_WD -+ [(set (match_operand:ILASX_DW 0 "register_operand" "=f") -+ (vec_merge:ILASX_DW -+ (vec_duplicate:ILASX_DW - (match_operand: 1 "reg_or_0_operand" "rJ")) -- (match_operand:LASX_WD 2 "register_operand" "0") -+ (match_operand:ILASX_DW 2 "register_operand" "0") - (match_operand 3 "const__operand" "")))] - "ISA_HAS_LASX" - { -@@ -651,28 +683,49 @@ - (set_attr "mode" "V4DI")]) - - ;; xshuf.w --(define_insn "lasx_xvperm_w" -- [(set (match_operand:V8SI 0 "register_operand" "=f") -- (unspec:V8SI -- [(match_operand:V8SI 1 "register_operand" "f") -- (match_operand:V8SI 2 "register_operand" "f")] -- UNSPEC_LASX_XVPERM_W))] -+(define_insn "lasx_xvperm_" -+ [(set (match_operand:LASX_W 0 "register_operand" "=f") -+ (unspec:LASX_W -+ [(match_operand:LASX_W 1 "nonimmediate_operand" "f") -+ (match_operand:V8SI 2 "register_operand" "f")] -+ UNSPEC_LASX_XVPERM_W))] - "ISA_HAS_LASX" - "xvperm.w\t%u0,%u1,%u2" - [(set_attr "type" "simd_splat") -- (set_attr "mode" "V8SI")]) -+ (set_attr "mode" "")]) - - ;; xvpermi.d --(define_insn "lasx_xvpermi_d" -- [(set (match_operand:V4DI 0 "register_operand" "=f") -- (unspec:V4DI -- [(match_operand:V4DI 1 "register_operand" "f") -- (match_operand 2 "const_uimm8_operand")] -- UNSPEC_LASX_XVPERMI_D))] -+(define_insn "lasx_xvpermi_d_" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (unspec:LASX -+ [(match_operand:LASX 1 "register_operand" "f") -+ (match_operand:SI 2 "const_uimm8_operand")] -+ UNSPEC_LASX_XVPERMI_D))] - "ISA_HAS_LASX" - "xvpermi.d\t%u0,%u1,%2" - [(set_attr "type" 
"simd_splat") -- (set_attr "mode" "V4DI")]) -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvpermi_d__1" -+ [(set (match_operand:LASX_D 0 "register_operand" "=f") -+ (vec_select:LASX_D -+ (match_operand:LASX_D 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const_0_to_3_operand") -+ (match_operand 3 "const_0_to_3_operand") -+ (match_operand 4 "const_0_to_3_operand") -+ (match_operand 5 "const_0_to_3_operand")])))] -+ "ISA_HAS_LASX" -+{ -+ int mask = 0; -+ mask |= INTVAL (operands[2]) << 0; -+ mask |= INTVAL (operands[3]) << 2; -+ mask |= INTVAL (operands[4]) << 4; -+ mask |= INTVAL (operands[5]) << 6; -+ operands[2] = GEN_INT (mask); -+ return "xvpermi.d\t%u0,%u1,%2"; -+} -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) - - ;; xvpermi.q - (define_insn "lasx_xvpermi_q_" -@@ -698,82 +751,51 @@ - [(set_attr "type" "simd_copy") - (set_attr "mode" "V4DI")]) - --(define_expand "vec_extract" -- [(match_operand: 0 "register_operand") -- (match_operand:ILASX 1 "register_operand") -+(define_expand "vec_set" -+ [(match_operand:ILASX_DW 0 "register_operand") -+ (match_operand: 1 "reg_or_0_operand") - (match_operand 2 "const__operand")] - "ISA_HAS_LASX" - { -- if (mode == SImode || mode == DImode) -- { -- emit_insn(gen_lasx_xvpickve2gr_ (operands[0], operands[1], operands[2])); -- } -- else -- { -- HOST_WIDE_INT size_0 = GET_MODE_SIZE (GET_MODE (operands[0])); -- HOST_WIDE_INT size_1 = GET_MODE_SIZE (GET_MODE (operands[1])); -- HOST_WIDE_INT val = INTVAL (operands[2]); -+ rtx index = GEN_INT (1 << INTVAL (operands[2])); -+ emit_insn (gen_lasx_xvinsgr2vr_ (operands[0], operands[1], -+ operands[0], index)); -+ DONE; -+}) - -- /* High part */ -- if (val >= size_1/size_0/2 ) -- { -- rtx dest1 = gen_reg_rtx (GET_MODE (operands[1])); -- rtx pos = GEN_INT( val - size_1/size_0/2); -- emit_insn (gen_lasx_xvpermi_q_ (dest1, dest1, operands[1], GEN_INT(1))); -- rtx dest2 = gen_reg_rtx (SImode); -- emit_insn (gen_lsx_vpickve2gr_ (dest2, -- gen_lowpart(mode, dest1), -- pos)); -- emit_move_insn (operands[0], -- gen_lowpart (mode, dest2)); -- } -- else -- { -- rtx dest1 = gen_reg_rtx (SImode); -- emit_insn (gen_lsx_vpickve2gr_ (dest1, -- gen_lowpart(mode, operands[1]), -- operands[2])); -- emit_move_insn (operands[0], -- gen_lowpart (mode, dest1)); -- } -- } -+(define_expand "vec_set" -+ [(match_operand:FLASX 0 "register_operand") -+ (match_operand: 1 "reg_or_0_operand") -+ (match_operand 2 "const__operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx index = GEN_INT (1 << INTVAL (operands[2])); -+ emit_insn (gen_lasx_xvinsve0__scalar (operands[0], operands[1], -+ operands[0], index)); - DONE; - }) - - (define_expand "vec_extract" - [(match_operand: 0 "register_operand") -- (match_operand:FLASX 1 "register_operand") -+ (match_operand:LASX 1 "register_operand") - (match_operand 2 "const__operand")] - "ISA_HAS_LASX" - { -- rtx temp; -- HOST_WIDE_INT val = INTVAL (operands[2]); -- -- if (val == 0) -- temp = operands[1]; -- else -- { -- temp = gen_reg_rtx (mode); -- emit_insn (gen_lasx_xvpickve_ (temp, operands[1], operands[2])); -- } -- emit_insn (gen_lasx_vec_extract_ (operands[0], temp)); -+ loongarch_expand_vector_extract (operands[0], operands[1], -+ INTVAL (operands[2])); - DONE; - }) - --(define_insn_and_split "lasx_vec_extract_" -- [(set (match_operand: 0 "register_operand" "=f") -- (vec_select: -- (match_operand:FLASX 1 "register_operand" "f") -- (parallel [(const_int 0)])))] -+(define_expand "vec_perm" -+ [(match_operand:LASX 0 "register_operand") -+ (match_operand:LASX 1 "register_operand") -+ 
(match_operand:LASX 2 "register_operand") -+ (match_operand: 3 "register_operand")] - "ISA_HAS_LASX" -- "#" -- "&& reload_completed" -- [(set (match_dup 0) (match_dup 1))] - { -- operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); --} -- [(set_attr "move_type" "fmove") -- (set_attr "mode" "")]) -+ loongarch_expand_vec_perm_1(operands); -+ DONE; -+}) - - ;; FIXME: 256?? - (define_expand "vcondu" -@@ -860,7 +882,6 @@ - { return loongarch_output_move (operands[0], operands[1]); } - [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") - (set_attr "mode" "") -- (set_attr "can_delay" "no,yes,yes,yes,yes") - (set_attr "length" "8,4,4,4,4")]) - - -@@ -868,7 +889,7 @@ - [(set (match_operand:LASX 0 "nonimmediate_operand") - (match_operand:LASX 1 "move_operand"))] - "reload_completed && ISA_HAS_LASX -- && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ && loongarch_split_move_insn_p (operands[0], operands[1])" - [(const_int 0)] - { - loongarch_split_move_insn (operands[0], operands[1], curr_insn); -@@ -1143,7 +1164,25 @@ - [(set_attr "type" "simd_fmul") - (set_attr "mode" "")]) - --(define_insn "div3" -+(define_expand "div3" -+ [(set (match_operand:FLASX 0 "register_operand") -+ (div:FLASX (match_operand:FLASX 1 "register_operand") -+ (match_operand:FLASX 2 "register_operand")))] -+ "ISA_HAS_LASX" -+{ -+ if (mode == V8SFmode -+ && TARGET_RECIP_VEC_DIV -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math -+ && flag_unsafe_math_optimizations) -+ { -+ loongarch_emit_swdivsf (operands[0], operands[1], -+ operands[2], V8SFmode); -+ DONE; -+ } -+}) -+ -+(define_insn "*div3" - [(set (match_operand:FLASX 0 "register_operand" "=f") - (div:FLASX (match_operand:FLASX 1 "register_operand" "f") - (match_operand:FLASX 2 "register_operand" "f")))] -@@ -1172,7 +1211,23 @@ - [(set_attr "type" "simd_fmadd") - (set_attr "mode" "")]) - --(define_insn "sqrt2" -+(define_expand "sqrt2" -+ [(set (match_operand:FLASX 0 "register_operand") -+ (sqrt:FLASX (match_operand:FLASX 1 "register_operand")))] -+ "ISA_HAS_LASX" -+{ -+ if (mode == V8SFmode -+ && TARGET_RECIP_VEC_SQRT -+ && flag_unsafe_math_optimizations -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math) -+ { -+ loongarch_emit_swrsqrtsf (operands[0], operands[1], V8SFmode, 0); -+ DONE; -+ } -+}) -+ -+(define_insn "*sqrt2" - [(set (match_operand:FLASX 0 "register_operand" "=f") - (sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))] - "ISA_HAS_LASX" -@@ -1307,13 +1362,13 @@ - [(set_attr "type" "simd_bit") - (set_attr "mode" "")]) - --(define_insn "lasx_xvbitsel_" -- [(set (match_operand:ILASX 0 "register_operand" "=f") -- (ior:ILASX (and:ILASX (not:ILASX -- (match_operand:ILASX 3 "register_operand" "f")) -- (match_operand:ILASX 1 "register_operand" "f")) -- (and:ILASX (match_dup 3) -- (match_operand:ILASX 2 "register_operand" "f"))))] -+(define_insn "lasx_xvbitsel_" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (ior:LASX (and:LASX (not:LASX -+ (match_operand:LASX 3 "register_operand" "0")) -+ (match_operand:LASX 1 "register_operand" "f")) -+ (and:LASX (match_dup 3) -+ (match_operand:LASX 2 "register_operand" "f"))))] - "ISA_HAS_LASX" - "xvbitsel.v\t%u0,%u1,%u2,%u3" - [(set_attr "type" "simd_bitmov") -@@ -1363,11 +1418,11 @@ - [(set_attr "type" "simd_int_arith") - (set_attr "mode" "")]) - --(define_expand "vec_cmp" -- [(set (match_operand:ILASX 0 "register_operand") -- (match_operator:ILASX 1 "" -- [(match_operand:ILASX 2 "register_operand") -- 
(match_operand:ILASX 3 "register_operand")]))] -+(define_expand "vec_cmp" -+ [(set (match_operand: 0 "register_operand") -+ (match_operator 1 "" -+ [(match_operand:LASX 2 "register_operand") -+ (match_operand:LASX 3 "register_operand")]))] - "ISA_HAS_LASX" - { - bool ok = loongarch_expand_int_vec_cmp (operands); -@@ -1375,11 +1430,11 @@ - DONE; - }) - --(define_expand "vec_cmp" -- [(set (match_operand:FLASX 0 "register_operand") -- (match_operator:FLASX 1 "" -- [(match_operand:FLASX 2 "register_operand") -- (match_operand:FLASX 3 "register_operand")]))] -+(define_expand "vec_cmpu" -+ [(set (match_operand: 0 "register_operand") -+ (match_operator 1 "" -+ [(match_operand:ILASX 2 "register_operand") -+ (match_operand:ILASX 3 "register_operand")]))] - "ISA_HAS_LASX" - { - bool ok = loongarch_expand_fp_vec_cmp (operands); -@@ -1493,8 +1548,8 @@ - (V2DF "V8SI")]) - - (define_insn "lasx_xvreplgr2vr_" -- [(set (match_operand:LASX 0 "register_operand" "=f,f") -- (vec_duplicate:LASX -+ [(set (match_operand:ILASX 0 "register_operand" "=f,f") -+ (vec_duplicate:ILASX - (match_operand: 1 "reg_or_0_operand" "r,J")))] - "ISA_HAS_LASX" - { -@@ -1508,10 +1563,9 @@ - } - [(set_attr "type" "simd_fill") - (set_attr "mode" "") -- (set_attr "can_delay" "no") - (set_attr "length" "8")]) - --(define_insn "lasx_xvflogb_" -+(define_insn "logb2" - [(set (match_operand:FLASX 0 "register_operand" "=f") - (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] - UNSPEC_LASX_XVFLOGB))] -@@ -1572,6 +1626,15 @@ - [(set_attr "type" "simd_fdiv") - (set_attr "mode" "")]) - -+(define_insn "lasx_xvfrecipe_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_RECIPE))] -+ "ISA_HAS_LASX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_DIV" -+ "xvfrecipe.\t%u0,%u1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ - (define_insn "lasx_xvfrint_" - [(set (match_operand:FLASX 0 "register_operand" "=f") - (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -@@ -1590,6 +1653,42 @@ - [(set_attr "type" "simd_fdiv") - (set_attr "mode" "")]) - -+ -+(define_insn "lasx_xvfrsqrte_" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_RSQRTE))] -+ "ISA_HAS_LASX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_RSQRT" -+ "xvfrsqrte.\t%u0,%u1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_expand "rsqrt2" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRSQRT))] -+ "ISA_HAS_LASX" -+{ -+ if (mode == V8SFmode -+ && TARGET_RECIP_VEC_RSQRT -+ && flag_unsafe_math_optimizations -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math) -+ { -+ loongarch_emit_swrsqrtsf (operands[0], operands[1], V8SFmode, 1); -+ DONE; -+ } -+}) -+ -+(define_insn "*rsqrt2" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRSQRT))] -+ "ISA_HAS_LASX" -+ "xvfrsqrt.\t%u0,%u1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ - (define_insn "lasx_xvftint_s__" - [(set (match_operand: 0 "register_operand" "=f") - (unspec: [(match_operand:FLASX 1 "register_operand" "f")] -@@ -2325,6 +2424,35 @@ - [(set_attr "type" "simd_shf") - (set_attr "mode" "")]) - -+(define_insn "lasx_xvshuf4i__1" -+ [(set (match_operand:LASX_W 0 
"register_operand" "=f") -+ (vec_select:LASX_W -+ (match_operand:LASX_W 1 "nonimmediate_operand" "f") -+ (parallel [(match_operand 2 "const_0_to_3_operand") -+ (match_operand 3 "const_0_to_3_operand") -+ (match_operand 4 "const_0_to_3_operand") -+ (match_operand 5 "const_0_to_3_operand") -+ (match_operand 6 "const_4_to_7_operand") -+ (match_operand 7 "const_4_to_7_operand") -+ (match_operand 8 "const_4_to_7_operand") -+ (match_operand 9 "const_4_to_7_operand")])))] -+ "ISA_HAS_LASX -+ && INTVAL (operands[2]) + 4 == INTVAL (operands[6]) -+ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) -+ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) -+ && INTVAL (operands[5]) + 4 == INTVAL (operands[9])" -+{ -+ int mask = 0; -+ mask |= INTVAL (operands[2]) << 0; -+ mask |= INTVAL (operands[3]) << 2; -+ mask |= INTVAL (operands[4]) << 4; -+ mask |= INTVAL (operands[5]) << 6; -+ operands[2] = GEN_INT (mask); -+ -+ return "xvshuf4i.w\t%u0,%u1,%2"; -+} -+ [(set_attr "type" "simd_shf") -+ (set_attr "mode" "")]) - - (define_insn "lasx_xvsrar_" - [(set (match_operand:ILASX 0 "register_operand" "=f") -@@ -2386,11 +2514,11 @@ - [(set_attr "type" "simd_int_arith") - (set_attr "mode" "")]) - --(define_insn "lasx_xvshuf_" -- [(set (match_operand:ILASX_DWH 0 "register_operand" "=f") -- (unspec:ILASX_DWH [(match_operand: 1 "register_operand" "0") -- (match_operand:ILASX_DWH 2 "register_operand" "f") -- (match_operand:ILASX_DWH 3 "register_operand" "f")] -+(define_insn "lasx_xvshuf_" -+ [(set (match_operand:LASX_DWH 0 "register_operand" "=f") -+ (unspec:LASX_DWH [(match_operand:LASX_DWH 1 "register_operand" "0") -+ (match_operand:LASX_DWH 2 "register_operand" "f") -+ (match_operand:LASX_DWH 3 "register_operand" "f")] - UNSPEC_LASX_XVSHUF))] - "ISA_HAS_LASX" - "xvshuf.\t%u0,%u2,%u3" -@@ -2497,14 +2625,14 @@ - [(set_attr "type" "simd_splat") - (set_attr "mode" "")]) - -- (define_insn "lasx_xvreplve0__scalar" -- [(set (match_operand:FLASX 0 "register_operand" "=f") -- (unspec:FLASX [(match_operand: 1 "register_operand" "f")] -- UNSPEC_LASX_XVREPLVE0))] -- "ISA_HAS_LASX" -- "xvreplve0.\t%u0,%u1" -- [(set_attr "type" "simd_splat") -- (set_attr "mode" "")]) -+(define_insn "lasx_xvreplve0__scalar" -+[(set (match_operand:FLASX 0 "register_operand" "=f") -+ (vec_duplicate:FLASX -+ (match_operand: 1 "register_operand" "f")))] -+ "ISA_HAS_LASX" -+ "xvreplve0.\t%u0,%u1" -+ [(set_attr "type" "simd_splat") -+ (set_attr "mode" "")]) - - (define_insn "lasx_xvreplve0_q" - [(set (match_operand:V32QI 0 "register_operand" "=f") -@@ -2544,7 +2672,6 @@ - "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" - [(set_attr "type" "simd_fcvt") - (set_attr "mode" "V8SF") -- (set_attr "can_delay" "no") - (set_attr "length" "8")]) - - ;; Define for builtin function. -@@ -2579,7 +2706,6 @@ - "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0" - [(set_attr "type" "simd_fcvt") - (set_attr "mode" "V4DF") -- (set_attr "can_delay" "no") - (set_attr "length" "12")]) - - ;; Define for builtin function. 
-@@ -2614,7 +2740,6 @@ - "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0" - [(set_attr "type" "simd_fcvt") - (set_attr "mode" "V4DF") -- (set_attr "can_delay" "no") - (set_attr "length" "8")]) - - (define_code_attr lasxbr -@@ -2653,8 +2778,7 @@ - "xvset.\t%z3%u1\n\tbcnez\t%Z3%0"); - } - [(set_attr "type" "simd_branch") -- (set_attr "mode" "") -- (set_attr "compact_form" "never")]) -+ (set_attr "mode" "")]) - - (define_insn "lasx__v_" - [(set (pc) (if_then_else -@@ -2672,12 +2796,8 @@ - "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0"); - } - [(set_attr "type" "simd_branch") -- (set_attr "mode" "") -- (set_attr "compact_form" "never")]) -- -- -+ (set_attr "mode" "")]) - -- - ;; loongson-asx. - (define_insn "lasx_vext2xv_h_b" - [(set (match_operand:V16HI 0 "register_operand" "=f") -@@ -3339,8 +3459,8 @@ - (set_attr "mode" "V8SF")]) - - (define_insn "lasx_xvfrintrne_s" -- [(set (match_operand:V8SI 0 "register_operand" "=f") -- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRNE_S))] - "ISA_HAS_LASX" - "xvfrintrne.s\t%u0,%u1" -@@ -3348,8 +3468,8 @@ - (set_attr "mode" "V8SF")]) - - (define_insn "lasx_xvfrintrne_d" -- [(set (match_operand:V4DI 0 "register_operand" "=f") -- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRNE_D))] - "ISA_HAS_LASX" - "xvfrintrne.d\t%u0,%u1" -@@ -3357,8 +3477,8 @@ - (set_attr "mode" "V4DF")]) - - (define_insn "lasx_xvfrintrz_s" -- [(set (match_operand:V8SI 0 "register_operand" "=f") -- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRZ_S))] - "ISA_HAS_LASX" - "xvfrintrz.s\t%u0,%u1" -@@ -3366,8 +3486,8 @@ - (set_attr "mode" "V8SF")]) - - (define_insn "lasx_xvfrintrz_d" -- [(set (match_operand:V4DI 0 "register_operand" "=f") -- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRZ_D))] - "ISA_HAS_LASX" - "xvfrintrz.d\t%u0,%u1" -@@ -3375,8 +3495,8 @@ - (set_attr "mode" "V4DF")]) - - (define_insn "lasx_xvfrintrp_s" -- [(set (match_operand:V8SI 0 "register_operand" "=f") -- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRP_S))] - "ISA_HAS_LASX" - "xvfrintrp.s\t%u0,%u1" -@@ -3384,8 +3504,8 @@ - (set_attr "mode" "V8SF")]) - - (define_insn "lasx_xvfrintrp_d" -- [(set (match_operand:V4DI 0 "register_operand" "=f") -- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRP_D))] - "ISA_HAS_LASX" - "xvfrintrp.d\t%u0,%u1" -@@ -3393,8 +3513,8 @@ - (set_attr "mode" "V4DF")]) - - (define_insn "lasx_xvfrintrm_s" -- [(set (match_operand:V8SI 0 "register_operand" "=f") -- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRM_S))] - "ISA_HAS_LASX" - 
"xvfrintrm.s\t%u0,%u1" -@@ -3402,14 +3522,44 @@ - (set_attr "mode" "V8SF")]) - - (define_insn "lasx_xvfrintrm_d" -- [(set (match_operand:V4DI 0 "register_operand" "=f") -- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] - UNSPEC_LASX_XVFRINTRM_D))] - "ISA_HAS_LASX" - "xvfrintrm.d\t%u0,%u1" - [(set_attr "type" "simd_shift") - (set_attr "mode" "V4DF")]) - -+;; Vector versions of the floating-point frint patterns. -+;; Expands to btrunc, ceil, floor, rint. -+(define_insn "v8sf2" -+ [(set (match_operand:V8SF 0 "register_operand" "=f") -+ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] -+ FRINT256_S))] -+ "ISA_HAS_LASX" -+ "xvfrint.s\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V8SF")]) -+ -+(define_insn "v4df2" -+ [(set (match_operand:V4DF 0 "register_operand" "=f") -+ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] -+ FRINT256_D))] -+ "ISA_HAS_LASX" -+ "xvfrint.d\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4DF")]) -+ -+;; Expands to round. -+(define_insn "round2" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] -+ UNSPEC_LASX_XVFRINT))] -+ "ISA_HAS_LASX" -+ "xvfrint.\t%u0,%u1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ - ;; Offset load and broadcast - (define_expand "lasx_xvldrepl_" - [(match_operand:LASX 0 "register_operand") -@@ -3435,6 +3585,19 @@ - (set_attr "mode" "") - (set_attr "length" "4")]) - -+;; Offset is "0" -+(define_insn "lasx_xvldrepl__insn_0" -+ [(set (match_operand:LASX 0 "register_operand" "=f") -+ (vec_duplicate:LASX -+ (mem: (match_operand:DI 1 "register_operand" "r"))))] -+ "ISA_HAS_LASX" -+{ -+ return "xvldrepl.\t%u0,%1,0"; -+} -+ [(set_attr "type" "simd_load") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ - ;;XVADDWEV.H.B XVSUBWEV.H.B XVMULWEV.H.B - ;;XVADDWEV.H.BU XVSUBWEV.H.BU XVMULWEV.H.BU - (define_insn "lasx_xvwev_h_b" -@@ -4666,16 +4829,52 @@ - [(set_attr "type" "simd_shift") - (set_attr "mode" "")]) - --(define_insn "lasx_xvpermi_w" -- [(set (match_operand:V8SI 0 "register_operand" "=f") -- (unspec:V8SI [(match_operand:V8SI 1 "register_operand" "0") -- (match_operand:V8SI 2 "register_operand" "f") -- (match_operand 3 "const_uimm8_operand" "")] -- UNSPEC_LASX_XVPERMI))] -+(define_mode_attr VDOUBLEMODEW256 -+ [(V8SI "V16SI") -+ (V8SF "V16SF")]) -+ -+(define_insn "lasx_xvpermi_" -+ [(set (match_operand:LASX_W 0 "register_operand" "=f") -+ (unspec:LASX_W [(match_operand:LASX_W 1 "register_operand" "0") -+ (match_operand:LASX_W 2 "register_operand" "f") -+ (match_operand 3 "const_uimm8_operand" "")] -+ UNSPEC_LASX_XVPERMI))] - "ISA_HAS_LASX" - "xvpermi.w\t%u0,%u2,%3" - [(set_attr "type" "simd_bit") -- (set_attr "mode" "V8SI")]) -+ (set_attr "mode" "")]) -+ -+(define_insn "lasx_xvpermi__1" -+ [(set (match_operand:LASX_W 0 "register_operand" "=f") -+ (vec_select:LASX_W -+ (vec_concat: -+ (match_operand:LASX_W 1 "register_operand" "f") -+ (match_operand:LASX_W 2 "register_operand" "0")) -+ (parallel [(match_operand 3 "const_0_to_3_operand") -+ (match_operand 4 "const_0_to_3_operand" ) -+ (match_operand 5 "const_8_to_11_operand" ) -+ (match_operand 6 "const_8_to_11_operand" ) -+ (match_operand 7 "const_4_to_7_operand" ) -+ (match_operand 8 "const_4_to_7_operand" ) -+ (match_operand 9 "const_12_to_15_operand") -+ (match_operand 10 "const_12_to_15_operand")])))] -+ 
"ISA_HAS_LASX -+ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) -+ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) -+ && INTVAL (operands[5]) + 4 == INTVAL (operands[9]) -+ && INTVAL (operands[6]) + 4 == INTVAL (operands[10])" -+{ -+ int mask = 0; -+ mask |= INTVAL (operands[3]) << 0; -+ mask |= INTVAL (operands[4]) << 2; -+ mask |= (INTVAL (operands[5]) - 8) << 4; -+ mask |= (INTVAL (operands[6]) - 8) << 6; -+ operands[3] = GEN_INT (mask); -+ -+ return "xvpermi.w\t%u0,%u1,%3"; -+} -+ [(set_attr "type" "simd_bit") -+ (set_attr "mode" "")]) - - (define_expand "lasx_xvld" - [(match_operand:V32QI 0 "register_operand") -@@ -4728,10 +4927,24 @@ - (set_attr "mode" "") - (set_attr "length" "4")]) - --(define_insn "lasx_xvinsve0_" -- [(set (match_operand:ILASX_DW 0 "register_operand" "=f") -- (unspec:ILASX_DW [(match_operand:ILASX_DW 1 "register_operand" "0") -- (match_operand:ILASX_DW 2 "register_operand" "f") -+;; Offset is "0" -+(define_insn "lasx_xvstelm__insn_0" -+ [(set (mem: (match_operand:DI 0 "register_operand" "r")) -+ (vec_select: -+ (match_operand:LASX_WD 1 "register_operand" "f") -+ (parallel [(match_operand:SI 2 "const__operand")])))] -+ "ISA_HAS_LASX" -+{ -+ return "xvstelm.\t%u1,%0,0,%2"; -+} -+ [(set_attr "type" "simd_store") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ -+(define_insn "lasx_xvinsve0_" -+ [(set (match_operand:LASX_WD 0 "register_operand" "=f") -+ (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "0") -+ (match_operand:LASX_WD 2 "register_operand" "f") - (match_operand 3 "const__operand" "")] - UNSPEC_LASX_XVINSVE0))] - "ISA_HAS_LASX" -@@ -4739,6 +4952,18 @@ - [(set_attr "type" "simd_shf") - (set_attr "mode" "")]) - -+(define_insn "lasx_xvinsve0__scalar" -+ [(set (match_operand:FLASX 0 "register_operand" "=f") -+ (vec_merge:FLASX -+ (vec_duplicate:FLASX -+ (match_operand: 1 "register_operand" "f")) -+ (match_operand:FLASX 2 "register_operand" "0") -+ (match_operand 3 "const__operand" "")))] -+ "ISA_HAS_LASX" -+ "xvinsve0.\t%u0,%u1,%y3" -+ [(set_attr "type" "simd_insert") -+ (set_attr "mode" "")]) -+ - (define_insn "lasx_xvpickve_" - [(set (match_operand:LASX_WD 0 "register_operand" "=f") - (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f") -@@ -4749,6 +4974,16 @@ - [(set_attr "type" "simd_shf") - (set_attr "mode" "")]) - -+(define_insn "lasx_xvpickve__scalar" -+ [(set (match_operand: 0 "register_operand" "=f") -+ (vec_select: -+ (match_operand:FLASX 1 "register_operand" "f") -+ (parallel [(match_operand 2 "const__operand" "")])))] -+ "ISA_HAS_LASX" -+ "xvpickve.\t%u0,%u1,%2" -+ [(set_attr "type" "simd_shf") -+ (set_attr "mode" "")]) -+ - (define_insn "lasx_xvssrlrn__" - [(set (match_operand: 0 "register_operand" "=f") - (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") -@@ -4823,3 +5058,142 @@ - [(set_attr "type" "simd_store") - (set_attr "mode" "DI")]) - -+(define_insn "vec_widen_mult_even_v8si" -+ [(set (match_operand:V4DI 0 "register_operand" "=f") -+ (mult:V4DI -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 1 "register_operand" "%f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)]))) -+ (any_extend:V4DI -+ (vec_select:V4SI -+ (match_operand:V8SI 2 "register_operand" "f") -+ (parallel [(const_int 0) (const_int 2) -+ (const_int 4) (const_int 6)])))))] -+ "ISA_HAS_LASX" -+ "xvmulwev.d.w\t%u0,%u1,%u2" -+ [(set_attr "type" "simd_int_arith") -+ (set_attr "mode" "V4DI")]) -+ -+;; Vector reduction operation -+(define_expand "reduc_plus_scal_v4di" -+ [(match_operand:DI 0 
"register_operand") -+ (match_operand:V4DI 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (V4DImode); -+ rtx tmp1 = gen_reg_rtx (V4DImode); -+ rtx vec_res = gen_reg_rtx (V4DImode); -+ emit_insn (gen_lasx_xvhaddw_q_d (tmp, operands[1], operands[1])); -+ emit_insn (gen_lasx_xvpermi_d_v4di (tmp1, tmp, GEN_INT (2))); -+ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); -+ emit_insn (gen_vec_extractv4didi (operands[0], vec_res, const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_plus_scal_v8si" -+ [(match_operand:SI 0 "register_operand") -+ (match_operand:V8SI 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (V4DImode); -+ rtx tmp1 = gen_reg_rtx (V4DImode); -+ rtx vec_res = gen_reg_rtx (V4DImode); -+ emit_insn (gen_lasx_xvhaddw_d_w (tmp, operands[1], operands[1])); -+ emit_insn (gen_lasx_xvhaddw_q_d (tmp1, tmp, tmp)); -+ emit_insn (gen_lasx_xvpermi_d_v4di (tmp, tmp1, GEN_INT (2))); -+ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); -+ emit_insn (gen_vec_extractv8sisi (operands[0], gen_lowpart(V8SImode,vec_res), const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_plus_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:FLASX 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc__scal_" -+ [(any_bitwise: -+ (match_operand: 0 "register_operand") -+ (match_operand:ILASX 1 "register_operand"))] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_smax_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:LASX 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_smin_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:LASX 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_umax_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_umin_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILASX 1 "register_operand")] -+ "ISA_HAS_LASX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+;; merge vec_unpacks_hi_v8sf/vec_unpacks_lo_v8sf -+(define_peephole -+ [(set (match_operand:V4DF 0 "register_operand") -+ (float_extend:V4DF (vec_select:V4SF -+ (match_operand:V8SF 1 "register_operand") -+ (parallel [(const_int 0) (const_int 1) -+ (const_int 2) (const_int 3)])))) -+ (set (match_operand:V4DF 2 "register_operand") -+ (float_extend:V4DF (vec_select:V4SF -+ (match_operand:V8SF 3 "register_operand") -+ (parallel [(const_int 4) (const_int 5) -+ (const_int 6) (const_int 
7)]))))] -+ "ISA_HAS_LASX && rtx_equal_p (operands[1], operands[3])" -+{ -+ return "xvpermi.d\t%u2,%u1,0xd8\n\txvfcvtl.d.s\t%u0,%u2\n\txvfcvth.d.s\t%u2,%u2"; -+}) -diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h -index 185eee869..58f3047ac 100644 ---- a/gcc/config/loongarch/lasxintrin.h -+++ b/gcc/config/loongarch/lasxintrin.h -@@ -3262,70 +3262,70 @@ __m256i __lasx_xvftintrnel_l_s(__m256 _1) - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V8SI, V8SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrne_s(__m256 _1) -+__m256 __lasx_xvfrintrne_s(__m256 _1) - { -- return (__m256i)__builtin_lasx_xvfrintrne_s((v8f32)_1); -+ return (__m256)__builtin_lasx_xvfrintrne_s((v8f32)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V4DI, V4DF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrne_d(__m256d _1) -+__m256d __lasx_xvfrintrne_d(__m256d _1) - { -- return (__m256i)__builtin_lasx_xvfrintrne_d((v4f64)_1); -+ return (__m256d)__builtin_lasx_xvfrintrne_d((v4f64)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V8SI, V8SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrz_s(__m256 _1) -+__m256 __lasx_xvfrintrz_s(__m256 _1) - { -- return (__m256i)__builtin_lasx_xvfrintrz_s((v8f32)_1); -+ return (__m256)__builtin_lasx_xvfrintrz_s((v8f32)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V4DI, V4DF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrz_d(__m256d _1) -+__m256d __lasx_xvfrintrz_d(__m256d _1) - { -- return (__m256i)__builtin_lasx_xvfrintrz_d((v4f64)_1); -+ return (__m256d)__builtin_lasx_xvfrintrz_d((v4f64)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V8SI, V8SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrp_s(__m256 _1) -+__m256 __lasx_xvfrintrp_s(__m256 _1) - { -- return (__m256i)__builtin_lasx_xvfrintrp_s((v8f32)_1); -+ return (__m256)__builtin_lasx_xvfrintrp_s((v8f32)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V4DI, V4DF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrp_d(__m256d _1) -+__m256d __lasx_xvfrintrp_d(__m256d _1) - { -- return (__m256i)__builtin_lasx_xvfrintrp_d((v4f64)_1); -+ return (__m256d)__builtin_lasx_xvfrintrp_d((v4f64)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V8SI, V8SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrm_s(__m256 _1) -+__m256 __lasx_xvfrintrm_s(__m256 _1) - { -- return (__m256i)__builtin_lasx_xvfrintrm_s((v8f32)_1); -+ return (__m256)__builtin_lasx_xvfrintrm_s((v8f32)_1); - } - - /* Assembly instruction format: xd, xj. */ - /* Data types in instruction templates: V4DI, V4DF. 
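For reference, the reduc_plus_scal_v4di expander earlier in this hunk builds the horizontal sum out of three machine operations: xvhaddw.q.d sums the two 64-bit elements inside each 128-bit half, xvpermi.d with immediate 2 moves the upper half's partial sum down to element 0, and a final vector add plus a lane-0 extract produces the scalar. A minimal scalar model of that dataflow (a plain C sketch of the semantics, not the generated code):

#include <stdint.h>

/* Model of reduc_plus_scal_v4di on v = {v0, v1, v2, v3}.  */
static int64_t
reduc_plus_v4di_model (const int64_t v[4])
{
  int64_t lo = v[0] + v[1];  /* xvhaddw.q.d, low 128-bit lane   */
  int64_t hi = v[2] + v[3];  /* xvhaddw.q.d, high 128-bit lane  */
  /* xvpermi.d ...,2 brings HI down to element 0; addv4di3 adds it
     to LO, and vec_extract of element 0 returns the sum.  */
  return lo + hi;
}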
*/ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvfrintrm_d(__m256d _1) -+__m256d __lasx_xvfrintrm_d(__m256d _1) - { -- return (__m256i)__builtin_lasx_xvfrintrm_d((v4f64)_1); -+ return (__m256d)__builtin_lasx_xvfrintrm_d((v4f64)_1); - } - - /* Assembly instruction format: xd, rj, si12. */ - /* Data types in instruction templates: V32QI, CVPOINTER, SI. */ --#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvld((void *)(_1), (_2))) -+#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvld((void const *)(_1), (_2))) - - /* Assembly instruction format: xd, rj, si12. */ - /* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ -@@ -3426,9 +3426,9 @@ __m256i __lasx_xvorn_v(__m256i _1, __m256i _2) - /* Assembly instruction format: xd, rj, rk. */ - /* Data types in instruction templates: V32QI, CVPOINTER, DI. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m256i __lasx_xvldx(void * _1, long int _2) -+__m256i __lasx_xvldx(void const * _1, long int _2) - { -- return (__m256i)__builtin_lasx_xvldx((void *)_1, (long int)_2); -+ return (__m256i)__builtin_lasx_xvldx((void const *)_1, (long int)_2); - } - - /* Assembly instruction format: xd, rj, rk. */ -@@ -3609,19 +3609,19 @@ __m256i __lasx_xvperm_w(__m256i _1, __m256i _2) - - /* Assembly instruction format: xd, rj, si12. */ - /* Data types in instruction templates: V32QI, CVPOINTER, SI. */ --#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvldrepl_b((void *)(_1), (_2))) -+#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvldrepl_b((void const *)(_1), (_2))) - - /* Assembly instruction format: xd, rj, si11. */ - /* Data types in instruction templates: V16HI, CVPOINTER, SI. */ --#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m256i)__builtin_lasx_xvldrepl_h((void *)(_1), (_2))) -+#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m256i)__builtin_lasx_xvldrepl_h((void const *)(_1), (_2))) - - /* Assembly instruction format: xd, rj, si10. */ - /* Data types in instruction templates: V8SI, CVPOINTER, SI. */ --#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m256i)__builtin_lasx_xvldrepl_w((void *)(_1), (_2))) -+#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m256i)__builtin_lasx_xvldrepl_w((void const *)(_1), (_2))) - - /* Assembly instruction format: xd, rj, si9. */ - /* Data types in instruction templates: V4DI, CVPOINTER, SI. */ --#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m256i)__builtin_lasx_xvldrepl_d((void *)(_1), (_2))) -+#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m256i)__builtin_lasx_xvldrepl_d((void const *)(_1), (_2))) - - /* Assembly instruction format: rd, xj, ui3. */ - /* Data types in instruction templates: SI, V8SI, UQI. */ -diff --git a/gcc/config/loongarch/linux-common.h b/gcc/config/loongarch/linux-common.h -deleted file mode 100644 -index 9e1a1b50f..000000000 ---- a/gcc/config/loongarch/linux-common.h -+++ /dev/null -@@ -1,68 +0,0 @@ --/* Definitions for LARCH running Linux-based GNU systems with ELF format. -- Copyright (C) 2012-2018 Free Software Foundation, Inc. -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify --it under the terms of the GNU General Public License as published by --the Free Software Foundation; either version 3, or (at your option) --any later version. 
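The lasxintrin.h hunk above corrects the xvfrint* wrappers: these instructions round each lane to an integral value but keep the floating-point format, so the results are __m256/__m256d rather than __m256i. Per lane, the rne variant behaves like rintf under the default round-to-nearest-even mode (a sketch of the lane semantics, not of the intrinsic itself):

#include <math.h>

/* One lane of the fixed xvfrintrne.s: float in, float out.  */
static float
frintrne_lane (float x)
{
  return rintf (x);  /* integral-valued float, e.g. 2.5f -> 2.0f */
}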
-- --GCC is distributed in the hope that it will be useful, --but WITHOUT ANY WARRANTY; without even the implied warranty of --MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --GNU General Public License for more details. -- --You should have received a copy of the GNU General Public License --along with GCC; see the file COPYING3. If not see --. */ -- --#undef TARGET_OS_CPP_BUILTINS --#define TARGET_OS_CPP_BUILTINS() \ -- do { \ -- GNU_USER_TARGET_OS_CPP_BUILTINS(); \ -- /* The GNU C++ standard library requires this. */ \ -- if (c_dialect_cxx ()) \ -- builtin_define ("_GNU_SOURCE"); \ -- ANDROID_TARGET_OS_CPP_BUILTINS(); \ -- } while (0) -- --#define EXTRA_TARGET_D_OS_VERSIONS() \ -- ANDROID_TARGET_D_OS_VERSIONS(); -- --#undef LINK_SPEC --#define LINK_SPEC \ -- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LINK_SPEC, \ -- GNU_USER_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) -- --#undef SUBTARGET_CC1_SPEC --#define SUBTARGET_CC1_SPEC \ -- LINUX_OR_ANDROID_CC (GNU_USER_TARGET_CC1_SPEC, \ -- GNU_USER_TARGET_CC1_SPEC " " ANDROID_CC1_SPEC) -- --#undef CC1PLUS_SPEC --#define CC1PLUS_SPEC \ -- LINUX_OR_ANDROID_CC ("", ANDROID_CC1PLUS_SPEC) -- --#undef LIB_SPEC --#define LIB_SPEC \ -- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LIB_SPEC, \ -- GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC " " ANDROID_LIB_SPEC) -- --#undef STARTFILE_SPEC --#define STARTFILE_SPEC \ -- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_STARTFILE_SPEC, ANDROID_STARTFILE_SPEC) -- --#undef ENDFILE_SPEC --#define ENDFILE_SPEC \ -- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_MATHFILE_SPEC " " \ -- GNU_USER_TARGET_ENDFILE_SPEC, \ -- GNU_USER_TARGET_MATHFILE_SPEC " " \ -- ANDROID_ENDFILE_SPEC) -- --/* Define this to be nonzero if static stack checking is supported. */ --#define STACK_CHECK_STATIC_BUILTIN 1 -- --/* FIXME*/ --/* The default value isn't sufficient in 64-bit mode. */ --#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) -diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h -index 520a8ef32..59854251f 100644 ---- a/gcc/config/loongarch/linux.h -+++ b/gcc/config/loongarch/linux.h -@@ -1,4 +1,4 @@ --/* Definitions for LARCH running Linux-based GNU systems with ELF format. -+/* Definitions for Linux-based systems with libraries in ELF format. - Copyright (C) 1998-2018 Free Software Foundation, Inc. - - This file is part of GCC. -@@ -17,17 +17,34 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - --#define GNU_USER_LINK_EMULATION32 "elf32loongarch" --#define GNU_USER_LINK_EMULATION64 "elf64loongarch" -+/* Default system library search paths. -+ * This ensures that a compiler configured with --disable-multilib -+ * can work in a multilib environment. 
*/ - --#define GLIBC_DYNAMIC_LINKERLP32 \ -- "/lib32/ld.so.1" --#define GLIBC_DYNAMIC_LINKERLP64 \ -- "/lib64/ld.so.1" -+#if defined(LA_DISABLE_MULTILIB) && defined(LA_DISABLE_MULTIARCH) - --#define GNU_USER_DYNAMIC_LINKERLP32 GLIBC_DYNAMIC_LINKERLP32 --#define GNU_USER_DYNAMIC_LINKERLP64 GLIBC_DYNAMIC_LINKERLP64 -+ #if DEFAULT_ABI_BASE == ABI_BASE_LP64D -+ #define ABI_LIBDIR "lib64" -+ #elif DEFAULT_ABI_BASE == ABI_BASE_LP64F -+ #define ABI_LIBDIR "lib64/f32" -+ #elif DEFAULT_ABI_BASE == ABI_BASE_LP64S -+ #define ABI_LIBDIR "lib64/sf" -+ #endif - -+#endif -+ -+#ifndef ABI_LIBDIR -+#define ABI_LIBDIR "lib" -+#endif -+ -+#define STANDARD_STARTFILE_PREFIX_1 "/" ABI_LIBDIR "/" -+#define STANDARD_STARTFILE_PREFIX_2 "/usr/" ABI_LIBDIR "/" -+ -+ -+/* Define this to be nonzero if static stack checking is supported. */ -+#define STACK_CHECK_STATIC_BUILTIN 1 -+ -+/* The default value isn't sufficient in 64-bit mode. */ -+#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) - --#undef TARGET_ASM_FILE_END - #define TARGET_ASM_FILE_END file_end_indicate_exec_stack -diff --git a/gcc/config/loongarch/loongarch-builtins.c b/gcc/config/loongarch/loongarch-builtins.c -index 9fa68b11f..b326ec46c 100644 ---- a/gcc/config/loongarch/loongarch-builtins.c -+++ b/gcc/config/loongarch/loongarch-builtins.c -@@ -1,7 +1,6 @@ -- --/* Subroutines used for expanding LOONGARCH builtins. -- Copyright (C) 2011-2018 Free Software Foundation, Inc. -- Contributed by Andrew Waterman (andrew@sifive.com). -+/* Subroutines used for expanding LoongArch builtins. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Co. Ltd. - - This file is part of GCC. - -@@ -30,50 +29,29 @@ along with GCC; see the file COPYING3. If not see - #include "tree.h" - #include "memmodel.h" - #include "gimple.h" --#include "cfghooks.h" --#include "df.h" - #include "tm_p.h" --#include "stringpool.h" --#include "attribs.h" - #include "optabs.h" --#include "regs.h" --#include "emit-rtl.h" - #include "recog.h" --#include "cgraph.h" - #include "diagnostic.h" --#include "insn-attr.h" --#include "output.h" --#include "alias.h" - #include "fold-const.h" --#include "varasm.h" --#include "stor-layout.h" --#include "calls.h" --#include "explow.h" - #include "expr.h" --#include "libfuncs.h" --#include "reload.h" --#include "common/common-target.h" - #include "langhooks.h" --#include "cfgrtl.h" --#include "cfganal.h" --#include "sched-int.h" --#include "gimplify.h" --#include "target-globals.h" --#include "tree-pass.h" --#include "context.h" -+#include "emit-rtl.h" -+#include "explow.h" - #include "builtins.h" --#include "rtl-iter.h" -+#include "stringpool.h" -+#include "case-cfn-macros.h" - --/* This file should be included last. */ --#include "target-def.h" - /* Macros to create an enumeration identifier for a function prototype. */ - #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B - #define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C - #define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D --#define LARCH_FTYPE_NAME4(A, B, C, D, E) LARCH_##A##_FTYPE_##B##_##C##_##D##_##E -+#define LARCH_FTYPE_NAME4(A, B, C, D, E) \ -+ LARCH_##A##_FTYPE_##B##_##C##_##D##_##E - - /* Classifies the prototype of a built-in function. 
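The linux.h hunk above derives the startfile search directories from the configured default ABI, so a compiler built with --disable-multilib still looks in the right place inside a multilib tree. With multilib and multiarch both disabled and lp64d as the default ABI, the block resolves to the equivalent of:

#define ABI_LIBDIR "lib64"
#define STANDARD_STARTFILE_PREFIX_1 "/" ABI_LIBDIR "/"     /* "/lib64/"     */
#define STANDARD_STARTFILE_PREFIX_2 "/usr/" ABI_LIBDIR "/" /* "/usr/lib64/" */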
*/ --enum loongarch_function_type { -+enum loongarch_function_type -+{ - #define DEF_LARCH_FTYPE(NARGS, LIST) LARCH_FTYPE_NAME##NARGS LIST, - #include "config/loongarch/loongarch-ftypes.def" - #undef DEF_LARCH_FTYPE -@@ -81,7 +59,8 @@ enum loongarch_function_type { - }; - - /* Specifies how a built-in function should be converted into rtl. */ --enum loongarch_builtin_type { -+enum loongarch_builtin_type -+{ - /* The function corresponds directly to an .md pattern. The return - value is mapped to operand 0 and the arguments are mapped to - operands 1 and above. */ -@@ -91,23 +70,23 @@ enum loongarch_builtin_type { - value and the arguments are mapped to operands 0 and above. */ - LARCH_BUILTIN_DIRECT_NO_TARGET, - -+ /* For generating LoongArch LSX. */ -+ LARCH_BUILTIN_LSX, -+ - /* The function corresponds to an LSX conditional branch instruction - combined with a compare instruction. */ - LARCH_BUILTIN_LSX_TEST_BRANCH, - -- /* For generating LoongArch LSX. */ -- LARCH_BUILTIN_LSX, -- - /* For generating LoongArch LASX. */ - LARCH_BUILTIN_LASX, - - /* The function corresponds to an LASX conditional branch instruction - combined with a compare instruction. */ -- LARCH_BUILTIN_LASX_TEST_BRANCH, -+ LARCH_BUILTIN_LASX_TEST_BRANCH - - }; - --/* Invoke MACRO (COND) for each C.cond.fmt condition. */ -+/* Invoke MACRO (COND) for each fcmp.cond.{s/d} condition. */ - #define LARCH_FP_CONDITIONS(MACRO) \ - MACRO (f), \ - MACRO (un), \ -@@ -127,26 +106,27 @@ enum loongarch_builtin_type { - MACRO (ngt) - - /* Enumerates the codes above as LARCH_FP_COND_. */ --#define DECLARE_LARCH_COND(X) LARCH_FP_COND_ ## X --enum loongarch_fp_condition { -+#define DECLARE_LARCH_COND(X) LARCH_FP_COND_##X -+enum loongarch_fp_condition -+{ - LARCH_FP_CONDITIONS (DECLARE_LARCH_COND) - }; - #undef DECLARE_LARCH_COND - - /* Index X provides the string representation of LARCH_FP_COND_. */ - #define STRINGIFY(X) #X --const char *const loongarch_fp_conditions[16] = { -- LARCH_FP_CONDITIONS (STRINGIFY) --}; -+const char *const -+loongarch_fp_conditions[16] = {LARCH_FP_CONDITIONS (STRINGIFY)}; - #undef STRINGIFY --/* Declare an availability predicate for built-in functions that require -+ -+/* Declare an availability predicate for built-in functions that require - * COND to be true. NAME is the main part of the predicate's name. */ --#define AVAIL_ALL(NAME, COND) \ -- static unsigned int \ -- loongarch_builtin_avail_##NAME (void) \ -- { \ -- return (COND) ? 1 : 0; \ -- } -+#define AVAIL_ALL(NAME, COND) \ -+ static unsigned int \ -+ loongarch_builtin_avail_##NAME (void) \ -+ { \ -+ return (COND) ? 1 : 0; \ -+ } - - static unsigned int - loongarch_builtin_avail_default (void) -@@ -154,14 +134,12 @@ loongarch_builtin_avail_default (void) - return 1; - } - /* This structure describes a single built-in function. */ --struct loongarch_builtin_description { -+struct loongarch_builtin_description -+{ - /* The code of the main .md file instruction. See loongarch_builtin_type - for more information. */ - enum insn_code icode; - -- /* The floating-point comparison code to use with ICODE, if any. */ -- enum loongarch_fp_condition cond; -- - /* The name of the built-in function. */ - const char *name; - -@@ -176,8 +154,8 @@ struct loongarch_builtin_description { - }; - - AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) --AVAIL_ALL (lsx, TARGET_LSX) --AVAIL_ALL (lasx, TARGET_LASX) -+AVAIL_ALL (lsx, ISA_HAS_LSX) -+AVAIL_ALL (lasx, ISA_HAS_LASX) - - /* Construct a loongarch_builtin_description from the given arguments. 
- -@@ -194,31 +172,32 @@ AVAIL_ALL (lasx, TARGET_LASX) - - AVAIL is the name of the availability predicate, without the leading - loongarch_builtin_avail_. */ --#define LARCH_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \ -- FUNCTION_TYPE, AVAIL) \ -- { CODE_FOR_loongarch_ ## INSN, LARCH_FP_COND_ ## COND, \ -- "__builtin_loongarch_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \ -- loongarch_builtin_avail_ ## AVAIL } -+#define LARCH_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \ -+ { \ -+ CODE_FOR_loongarch_##INSN, "__builtin_loongarch_" NAME, \ -+ BUILTIN_TYPE, FUNCTION_TYPE, \ -+ loongarch_builtin_avail_##AVAIL \ -+ } - - /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT function - mapped to instruction CODE_FOR_loongarch_, FUNCTION_TYPE and AVAIL - are as for LARCH_BUILTIN. */ --#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ -- LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) -+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ -+ LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) - - /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT_NO_TARGET - function mapped to instruction CODE_FOR_loongarch_, FUNCTION_TYPE - and AVAIL are as for LARCH_BUILTIN. */ --#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ -- LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ -- FUNCTION_TYPE, AVAIL) -+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ -+ LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ -+ FUNCTION_TYPE, AVAIL) - - /* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_ - for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description - field. */ - #define LSX_BUILTIN(INSN, FUNCTION_TYPE) \ -- { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ -- "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ -+ { CODE_FOR_lsx_ ## INSN, \ -+ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ - FUNCTION_TYPE, loongarch_builtin_avail_lsx } - - -@@ -226,7 +205,7 @@ AVAIL_ALL (lasx, TARGET_LASX) - for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description - field. */ - #define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ -- { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ -+ { CODE_FOR_lsx_ ## INSN, \ - "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH, \ - FUNCTION_TYPE, loongarch_builtin_avail_lsx } - -@@ -234,7 +213,7 @@ AVAIL_ALL (lasx, TARGET_LASX) - for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description - field. */ - #define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ -- { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ -+ { CODE_FOR_lsx_ ## INSN, \ - "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ - FUNCTION_TYPE, loongarch_builtin_avail_lsx } - -@@ -242,7 +221,7 @@ AVAIL_ALL (lasx, TARGET_LASX) - for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description - field. */ - #define LASX_BUILTIN(INSN, FUNCTION_TYPE) \ -- { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ -+ { CODE_FOR_lasx_ ## INSN, \ - "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX, \ - FUNCTION_TYPE, loongarch_builtin_avail_lasx } - -@@ -250,7 +229,7 @@ AVAIL_ALL (lasx, TARGET_LASX) - for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description - field. 
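Since the loongarch_fp_condition field is gone from the descriptor, each table entry now carries five members instead of six. As an illustration of the rewritten macros, one entry that appears later in this file expands to:

/* DIRECT_BUILTIN (crc_w_b_w, LARCH_SI_FTYPE_QI_SI, default) yields:  */
{ CODE_FOR_loongarch_crc_w_b_w, "__builtin_loongarch_crc_w_b_w",
  LARCH_BUILTIN_DIRECT, LARCH_SI_FTYPE_QI_SI,
  loongarch_builtin_avail_default }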
*/ - #define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ -- { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ -+ { CODE_FOR_lasx_ ## INSN, \ - "__builtin_lasx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ - FUNCTION_TYPE, loongarch_builtin_avail_lasx } - -@@ -258,65 +237,10 @@ AVAIL_ALL (lasx, TARGET_LASX) - for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description - field. */ - #define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ -- { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ -+ { CODE_FOR_lasx_ ## INSN, \ - "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH, \ - FUNCTION_TYPE, loongarch_builtin_avail_lasx } - --/* LoongArch BASE instructions define CODE_FOR_loongarch_xxx */ --#define CODE_FOR_loongarch_fmax_sf CODE_FOR_smaxsf3 --#define CODE_FOR_loongarch_fmax_df CODE_FOR_smaxdf3 --#define CODE_FOR_loongarch_fmin_sf CODE_FOR_sminsf3 --#define CODE_FOR_loongarch_fmin_df CODE_FOR_smindf3 --#define CODE_FOR_loongarch_fmaxa_sf CODE_FOR_smaxasf3 --#define CODE_FOR_loongarch_fmaxa_df CODE_FOR_smaxadf3 --#define CODE_FOR_loongarch_fmina_sf CODE_FOR_sminasf3 --#define CODE_FOR_loongarch_fmina_df CODE_FOR_sminadf3 --#define CODE_FOR_loongarch_fclass_s CODE_FOR_fclass_s --#define CODE_FOR_loongarch_fclass_d CODE_FOR_fclass_d --#define CODE_FOR_loongarch_frint_s CODE_FOR_frint_s --#define CODE_FOR_loongarch_frint_d CODE_FOR_frint_d --#define CODE_FOR_loongarch_bytepick_w CODE_FOR_bytepick_w --#define CODE_FOR_loongarch_bytepick_d CODE_FOR_bytepick_d --#define CODE_FOR_loongarch_bitrev_4b CODE_FOR_bitrev_4b --#define CODE_FOR_loongarch_bitrev_8b CODE_FOR_bitrev_8b -- --/* LoongArch support crc */ --#define CODE_FOR_loongarch_crc_w_b_w CODE_FOR_crc_w_b_w --#define CODE_FOR_loongarch_crc_w_h_w CODE_FOR_crc_w_h_w --#define CODE_FOR_loongarch_crc_w_w_w CODE_FOR_crc_w_w_w --#define CODE_FOR_loongarch_crc_w_d_w CODE_FOR_crc_w_d_w --#define CODE_FOR_loongarch_crcc_w_b_w CODE_FOR_crcc_w_b_w --#define CODE_FOR_loongarch_crcc_w_h_w CODE_FOR_crcc_w_h_w --#define CODE_FOR_loongarch_crcc_w_w_w CODE_FOR_crcc_w_w_w --#define CODE_FOR_loongarch_crcc_w_d_w CODE_FOR_crcc_w_d_w -- --/* Privileged state instruction */ --#define CODE_FOR_loongarch_cpucfg CODE_FOR_cpucfg --#define CODE_FOR_loongarch_asrtle_d CODE_FOR_asrtle_d --#define CODE_FOR_loongarch_asrtgt_d CODE_FOR_asrtgt_d --#define CODE_FOR_loongarch_csrrd CODE_FOR_csrrd --#define CODE_FOR_loongarch_dcsrrd CODE_FOR_dcsrrd --#define CODE_FOR_loongarch_csrwr CODE_FOR_csrwr --#define CODE_FOR_loongarch_dcsrwr CODE_FOR_dcsrwr --#define CODE_FOR_loongarch_csrxchg CODE_FOR_csrxchg --#define CODE_FOR_loongarch_dcsrxchg CODE_FOR_dcsrxchg --#define CODE_FOR_loongarch_iocsrrd_b CODE_FOR_iocsrrd_b --#define CODE_FOR_loongarch_iocsrrd_h CODE_FOR_iocsrrd_h --#define CODE_FOR_loongarch_iocsrrd_w CODE_FOR_iocsrrd_w --#define CODE_FOR_loongarch_iocsrrd_d CODE_FOR_iocsrrd_d --#define CODE_FOR_loongarch_iocsrwr_b CODE_FOR_iocsrwr_b --#define CODE_FOR_loongarch_iocsrwr_h CODE_FOR_iocsrwr_h --#define CODE_FOR_loongarch_iocsrwr_w CODE_FOR_iocsrwr_w --#define CODE_FOR_loongarch_iocsrwr_d CODE_FOR_iocsrwr_d --#define CODE_FOR_loongarch_lddir CODE_FOR_lddir --#define CODE_FOR_loongarch_dlddir CODE_FOR_dlddir --#define CODE_FOR_loongarch_ldpte CODE_FOR_ldpte --#define CODE_FOR_loongarch_dldpte CODE_FOR_dldpte --#define CODE_FOR_loongarch_cacop CODE_FOR_cacop --#define CODE_FOR_loongarch_dcacop CODE_FOR_dcacop --#define CODE_FOR_loongarch_dbar CODE_FOR_dbar --#define CODE_FOR_loongarch_ibar CODE_FOR_ibar -- - /* LoongArch SX define CODE_FOR_lsx_xxx */ - #define 
CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 - #define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 -@@ -389,6 +313,8 @@ AVAIL_ALL (lasx, TARGET_LASX) - #define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3 - #define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2 - #define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2 -+#define CODE_FOR_lsx_vflogb_s CODE_FOR_logbv4sf2 -+#define CODE_FOR_lsx_vflogb_d CODE_FOR_logbv2df2 - #define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3 - #define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3 - #define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3 -@@ -654,6 +580,8 @@ AVAIL_ALL (lasx, TARGET_LASX) - #define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3 - #define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2 - #define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2 -+#define CODE_FOR_lasx_xvflogb_s CODE_FOR_logbv8sf2 -+#define CODE_FOR_lasx_xvflogb_d CODE_FOR_logbv4df2 - #define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3 - #define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3 - #define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3 -@@ -771,6 +699,7 @@ AVAIL_ALL (lasx, TARGET_LASX) - #define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4 - - #define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi -+#define CODE_FOR_lasx_xvpermi_d CODE_FOR_lasx_xvpermi_d_v4di - #define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b - #define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b - -@@ -857,36 +786,17 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { - #define LARCH_MOVGR2FCSR 1 - DIRECT_NO_TARGET_BUILTIN (movgr2fcsr, LARCH_VOID_FTYPE_UQI_USI, hard_float), - -- DIRECT_NO_TARGET_BUILTIN (cacop, LARCH_VOID_FTYPE_USI_USI_SI, default), -- DIRECT_NO_TARGET_BUILTIN (dcacop, LARCH_VOID_FTYPE_USI_UDI_SI, default), -+ DIRECT_NO_TARGET_BUILTIN (cacop_w, LARCH_VOID_FTYPE_USI_USI_SI, default), -+ DIRECT_NO_TARGET_BUILTIN (cacop_d, LARCH_VOID_FTYPE_USI_UDI_SI, default), - DIRECT_NO_TARGET_BUILTIN (dbar, LARCH_VOID_FTYPE_USI, default), - DIRECT_NO_TARGET_BUILTIN (ibar, LARCH_VOID_FTYPE_USI, default), - -- DIRECT_BUILTIN (fmax_sf, LARCH_SF_FTYPE_SF_SF, hard_float), -- DIRECT_BUILTIN (fmax_df, LARCH_DF_FTYPE_DF_DF, hard_float), -- DIRECT_BUILTIN (fmin_sf, LARCH_SF_FTYPE_SF_SF, hard_float), -- DIRECT_BUILTIN (fmin_df, LARCH_DF_FTYPE_DF_DF, hard_float), -- DIRECT_BUILTIN (fmaxa_sf, LARCH_SF_FTYPE_SF_SF, hard_float), -- DIRECT_BUILTIN (fmaxa_df, LARCH_DF_FTYPE_DF_DF, hard_float), -- DIRECT_BUILTIN (fmina_sf, LARCH_SF_FTYPE_SF_SF, hard_float), -- DIRECT_BUILTIN (fmina_df, LARCH_DF_FTYPE_DF_DF, hard_float), -- DIRECT_BUILTIN (fclass_s, LARCH_SF_FTYPE_SF, hard_float), -- DIRECT_BUILTIN (fclass_d, LARCH_DF_FTYPE_DF, hard_float), -- DIRECT_BUILTIN (frint_s, LARCH_SF_FTYPE_SF, hard_float), -- DIRECT_BUILTIN (frint_d, LARCH_DF_FTYPE_DF, hard_float), -- DIRECT_BUILTIN (bytepick_w, LARCH_SI_FTYPE_SI_SI_QI, default), -- DIRECT_BUILTIN (bytepick_d, LARCH_DI_FTYPE_DI_DI_QI, default), -- DIRECT_BUILTIN (bitrev_4b, LARCH_SI_FTYPE_SI, default), -- DIRECT_BUILTIN (bitrev_8b, LARCH_DI_FTYPE_DI, default), -- DIRECT_BUILTIN (cpucfg, LARCH_USI_FTYPE_USI, default), -- DIRECT_BUILTIN (asrtle_d, LARCH_VOID_FTYPE_DI_DI, default), -- DIRECT_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default), -- DIRECT_BUILTIN (dlddir, LARCH_DI_FTYPE_DI_UQI, default), -- DIRECT_BUILTIN (lddir, LARCH_SI_FTYPE_SI_UQI, default), -- DIRECT_NO_TARGET_BUILTIN (dldpte, LARCH_VOID_FTYPE_DI_UQI, default), -- DIRECT_NO_TARGET_BUILTIN (ldpte, LARCH_VOID_FTYPE_SI_UQI, default), -+ DIRECT_BUILTIN (lddir_d, LARCH_DI_FTYPE_DI_UQI, default), -+ DIRECT_BUILTIN (lddir_w, 
LARCH_SI_FTYPE_SI_UQI, default), -+ DIRECT_NO_TARGET_BUILTIN (ldpte_d, LARCH_VOID_FTYPE_DI_UQI, default), -+ DIRECT_NO_TARGET_BUILTIN (ldpte_w, LARCH_VOID_FTYPE_SI_UQI, default), - -- /* CRC Instrinsic */ -+ /* CRC Instrinsic */ - - DIRECT_BUILTIN (crc_w_b_w, LARCH_SI_FTYPE_QI_SI, default), - DIRECT_BUILTIN (crc_w_h_w, LARCH_SI_FTYPE_HI_SI, default), -@@ -897,12 +807,12 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { - DIRECT_BUILTIN (crcc_w_w_w, LARCH_SI_FTYPE_SI_SI, default), - DIRECT_BUILTIN (crcc_w_d_w, LARCH_SI_FTYPE_DI_SI, default), - -- DIRECT_BUILTIN (csrrd, LARCH_USI_FTYPE_USI, default), -- DIRECT_BUILTIN (dcsrrd, LARCH_UDI_FTYPE_USI, default), -- DIRECT_BUILTIN (csrwr, LARCH_USI_FTYPE_USI_USI, default), -- DIRECT_BUILTIN (dcsrwr, LARCH_UDI_FTYPE_UDI_USI, default), -- DIRECT_BUILTIN (csrxchg, LARCH_USI_FTYPE_USI_USI_USI, default), -- DIRECT_BUILTIN (dcsrxchg, LARCH_UDI_FTYPE_UDI_UDI_USI, default), -+ DIRECT_BUILTIN (csrrd_w, LARCH_USI_FTYPE_USI, default), -+ DIRECT_BUILTIN (csrrd_d, LARCH_UDI_FTYPE_USI, default), -+ DIRECT_BUILTIN (csrwr_w, LARCH_USI_FTYPE_USI_USI, default), -+ DIRECT_BUILTIN (csrwr_d, LARCH_UDI_FTYPE_UDI_USI, default), -+ DIRECT_BUILTIN (csrxchg_w, LARCH_USI_FTYPE_USI_USI_USI, default), -+ DIRECT_BUILTIN (csrxchg_d, LARCH_UDI_FTYPE_UDI_UDI_USI, default), - DIRECT_BUILTIN (iocsrrd_b, LARCH_UQI_FTYPE_USI, default), - DIRECT_BUILTIN (iocsrrd_h, LARCH_UHI_FTYPE_USI, default), - DIRECT_BUILTIN (iocsrrd_w, LARCH_USI_FTYPE_USI, default), -@@ -912,6 +822,12 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { - DIRECT_NO_TARGET_BUILTIN (iocsrwr_w, LARCH_VOID_FTYPE_USI_USI, default), - DIRECT_NO_TARGET_BUILTIN (iocsrwr_d, LARCH_VOID_FTYPE_UDI_USI, default), - -+ DIRECT_BUILTIN (cpucfg, LARCH_USI_FTYPE_USI, default), -+ DIRECT_NO_TARGET_BUILTIN (asrtle_d, LARCH_VOID_FTYPE_DI_DI, default), -+ DIRECT_NO_TARGET_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default), -+ DIRECT_NO_TARGET_BUILTIN (syscall, LARCH_VOID_FTYPE_USI, default), -+ DIRECT_NO_TARGET_BUILTIN (break, LARCH_VOID_FTYPE_USI, default), -+ - /* Built-in functions for LSX. 
*/ - LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI), - LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI), -@@ -1439,14 +1355,14 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { - LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF), - LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF), - LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF), -- LSX_BUILTIN (vfrintrne_s, LARCH_V4SI_FTYPE_V4SF), -- LSX_BUILTIN (vfrintrne_d, LARCH_V2DI_FTYPE_V2DF), -- LSX_BUILTIN (vfrintrz_s, LARCH_V4SI_FTYPE_V4SF), -- LSX_BUILTIN (vfrintrz_d, LARCH_V2DI_FTYPE_V2DF), -- LSX_BUILTIN (vfrintrp_s, LARCH_V4SI_FTYPE_V4SF), -- LSX_BUILTIN (vfrintrp_d, LARCH_V2DI_FTYPE_V2DF), -- LSX_BUILTIN (vfrintrm_s, LARCH_V4SI_FTYPE_V4SF), -- LSX_BUILTIN (vfrintrm_d, LARCH_V2DI_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrne_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrne_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrz_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrz_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrp_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrp_d, LARCH_V2DF_FTYPE_V2DF), -+ LSX_BUILTIN (vfrintrm_s, LARCH_V4SF_FTYPE_V4SF), -+ LSX_BUILTIN (vfrintrm_d, LARCH_V2DF_FTYPE_V2DF), - LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI), - LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI), - LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI), -@@ -2152,14 +2068,14 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { - LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF), - LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF), - LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF), -- LASX_BUILTIN (xvfrintrne_s, LARCH_V8SI_FTYPE_V8SF), -- LASX_BUILTIN (xvfrintrne_d, LARCH_V4DI_FTYPE_V4DF), -- LASX_BUILTIN (xvfrintrz_s, LARCH_V8SI_FTYPE_V8SF), -- LASX_BUILTIN (xvfrintrz_d, LARCH_V4DI_FTYPE_V4DF), -- LASX_BUILTIN (xvfrintrp_s, LARCH_V8SI_FTYPE_V8SF), -- LASX_BUILTIN (xvfrintrp_d, LARCH_V4DI_FTYPE_V4DF), -- LASX_BUILTIN (xvfrintrm_s, LARCH_V8SI_FTYPE_V8SF), -- LASX_BUILTIN (xvfrintrm_d, LARCH_V4DI_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrne_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrne_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrz_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrz_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrp_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrp_d, LARCH_V4DF_FTYPE_V4DF), -+ LASX_BUILTIN (xvfrintrm_s, LARCH_V8SF_FTYPE_V8SF), -+ LASX_BUILTIN (xvfrintrm_d, LARCH_V4DF_FTYPE_V4DF), - LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI), - LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI), - LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI), -@@ -2391,6 +2307,27 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { - LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), - }; - -+/* Index I is the function declaration for loongarch_builtins[I], or null if -+ the function isn't defined on this target. */ -+static GTY (()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; -+/* Get the index I of the function declaration for loongarch_builtin_decls[I] -+ using the instruction code or return null if not defined for the target. */ -+static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; -+ -+/* Return a type for 'const volatile void*'. 
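Together with the (void const *) casts added to __lasx_xvld, __lasx_xvldx and the __lasx_xvldrepl_* wrappers earlier in this patch, the CVPOINTER argument type lets the load builtins take pointers to const data without discarding qualifiers. A minimal caller, assuming an LASX-enabled compilation:

#include <lasxintrin.h>

static const int table[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

__m256i
load_table (void)
{
  return __lasx_xvld (table, 0);  /* pointer-to-const, no cast needed */
}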
*/ -+ -+static tree -+loongarch_build_cvpointer_type (void) -+{ -+ static tree cache; -+ -+ if (cache == NULL_TREE) -+ cache = build_pointer_type (build_qualified_type (void_type_node, -+ TYPE_QUAL_CONST -+ | TYPE_QUAL_VOLATILE)); -+ return cache; -+} -+ - - /* MODE is a vector mode whose elements have type TYPE. Return the type - of the vector itself. */ -@@ -2411,26 +2348,12 @@ loongarch_builtin_vector_type (tree type, machine_mode mode) - return types[mode_index]; - } - --/* Return a type for 'const volatile void *'. */ -- --static tree --loongarch_build_cvpointer_type (void) --{ -- static tree cache; -- -- if (cache == NULL_TREE) -- cache = build_pointer_type (build_qualified_type -- (void_type_node, -- TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)); -- return cache; --} -- - /* Source-level argument types. */ - #define LARCH_ATYPE_VOID void_type_node - #define LARCH_ATYPE_INT integer_type_node - #define LARCH_ATYPE_POINTER ptr_type_node - #define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type () --#define LARCH_ATYPE_BOOLEAN boolean_type_node -+#define LARCH_ATYPE_BOOLEAN boolean_type_node - /* Standard mode-based argument types. */ - #define LARCH_ATYPE_QI intQI_type_node - #define LARCH_ATYPE_UQI unsigned_intQI_type_node -@@ -2495,8 +2418,7 @@ loongarch_build_cvpointer_type (void) - - /* LARCH_FTYPE_ATYPESN takes N LARCH_FTYPES-like type codes and lists - their associated LARCH_ATYPEs. */ --#define LARCH_FTYPE_ATYPES1(A, B) \ -- LARCH_ATYPE_##A, LARCH_ATYPE_##B -+#define LARCH_FTYPE_ATYPES1(A, B) LARCH_ATYPE_##A, LARCH_ATYPE_##B - - #define LARCH_FTYPE_ATYPES2(A, B, C) \ - LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C -@@ -2508,13 +2430,6 @@ loongarch_build_cvpointer_type (void) - LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C, LARCH_ATYPE_##D, \ - LARCH_ATYPE_##E - --/* Index I is the function declaration for loongarch_builtins[I], or null if the -- function isn't defined on this target. */ --static GTY(()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; --/* Get the index I of the function declaration for loongarch_builtin_decls[I] -- using the instruction code or return null if not defined for the target. */ --static GTY(()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; -- - /* Return the function type associated with function prototype TYPE. */ - - static tree -@@ -2525,11 +2440,10 @@ loongarch_build_function_type (enum loongarch_function_type type) - if (types[(int) type] == NULL_TREE) - switch (type) - { --#define DEF_LARCH_FTYPE(NUM, ARGS) \ -- case LARCH_FTYPE_NAME##NUM ARGS: \ -- types[(int) type] \ -- = build_function_type_list (LARCH_FTYPE_ATYPES##NUM ARGS, \ -- NULL_TREE); \ -+#define DEF_LARCH_FTYPE(NUM, ARGS) \ -+ case LARCH_FTYPE_NAME##NUM ARGS: \ -+ types[(int) type] \ -+ = build_function_type_list (LARCH_FTYPE_ATYPES##NUM ARGS, NULL_TREE); \ - break; - #include "config/loongarch/loongarch-ftypes.def" - #undef DEF_LARCH_FTYPE -@@ -2547,6 +2461,7 @@ loongarch_init_builtins (void) - { - const struct loongarch_builtin_description *d; - unsigned int i; -+ tree type; - - /* Iterate through all of the bdesc arrays, initializing all of the - builtin functions. 
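As a concrete instance of the DEF_LARCH_FTYPE expansion above, the prototype code LARCH_SI_FTYPE_QI_SI resolves through the LARCH_ATYPE_* table to a call equivalent to the following (a sketch, using the standard tree nodes those macros name):

tree fn_si_qi_si
  = build_function_type_list (intSI_type_node,  /* return: SI */
                              intQI_type_node,  /* arg 1: QI  */
                              intSI_type_node,  /* arg 2: SI  */
                              NULL_TREE);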
*/ -@@ -2555,10 +2470,10 @@ loongarch_init_builtins (void) - d = &loongarch_builtins[i]; - if (d->avail ()) - { -+ type = loongarch_build_function_type (d->function_type); - loongarch_builtin_decls[i] -- = add_builtin_function (d->name, -- loongarch_build_function_type (d->function_type), -- i, BUILT_IN_MD, NULL, NULL); -+ = add_builtin_function (d->name, type, i, BUILT_IN_MD, NULL, -+ NULL); - loongarch_get_builtin_decl_index[d->icode] = i; - } - } -@@ -2574,6 +2489,104 @@ loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED) - return loongarch_builtin_decls[code]; - } - -+/* Handler for an SLEEF-style interface to -+ a library with vectorized intrinsics. */ -+static tree -+loongarch_builtin_vectorized_libsleef (combined_fn fn, tree type_out, tree type_in) -+{ -+ char name[20]; -+ tree fntype, new_fndecl; -+ unsigned args = 1; -+ const char *bname; -+ machine_mode el_mode, in_mode; -+ int n, in_n; -+ -+ /* The SLEEF is suitable for unsafe math only. */ -+ if (!flag_unsafe_math_optimizations || !ISA_HAS_LSX) -+ return NULL_TREE; -+ -+ el_mode = TYPE_MODE (TREE_TYPE (type_out)); -+ n = TYPE_VECTOR_SUBPARTS (type_out); -+ in_mode = TYPE_MODE (TREE_TYPE (type_in)); -+ in_n = TYPE_VECTOR_SUBPARTS (type_in); -+ if (el_mode != in_mode -+ || n != in_n) -+ return NULL_TREE; -+ -+ switch (fn) -+ { -+ CASE_CFN_ATAN2: -+ CASE_CFN_POW: -+ args = 2; -+ gcc_fallthrough (); -+ -+ CASE_CFN_EXP: -+ CASE_CFN_LOG: -+ CASE_CFN_LOG1P: -+ CASE_CFN_LOG2: -+ CASE_CFN_LOG10: -+ CASE_CFN_TANH: -+ CASE_CFN_TAN: -+ CASE_CFN_ATAN: -+ CASE_CFN_ATANH: -+ CASE_CFN_CBRT: -+ CASE_CFN_SINH: -+ CASE_CFN_SIN: -+ CASE_CFN_ASINH: -+ CASE_CFN_ASIN: -+ CASE_CFN_COSH: -+ CASE_CFN_COS: -+ CASE_CFN_ACOSH: -+ CASE_CFN_ACOS: -+ break; -+ -+ default: -+ return NULL_TREE; -+ } -+ -+ tree fndecl = mathfn_built_in (TREE_TYPE (type_in), fn); -+ bname = IDENTIFIER_POINTER (DECL_NAME (fndecl)); -+ -+ if (args == 1) -+ { -+ if (n == 8 && el_mode == SFmode) -+ sprintf (name, "_ZGVdN8v_%s", bname+10); -+ else if (n == 4 && el_mode == DFmode) -+ sprintf (name, "_ZGVdN4v_%s", bname+10); -+ else if (n == 4 && el_mode == SFmode) -+ sprintf (name, "_ZGVbN4v_%s", bname+10); -+ else -+ sprintf (name, "_ZGVbN2v_%s", bname+10); -+ -+ fntype = build_function_type_list (type_out, type_in, NULL); -+ } -+ else if (args == 2) -+ { -+ if (n == 8 && el_mode == SFmode) -+ sprintf (name, "_ZGVdN8vv_%s", bname+10); -+ else if (n == 4 && el_mode == DFmode) -+ sprintf (name, "_ZGVdN4vv_%s", bname+10); -+ else if (n == 4 && el_mode == SFmode) -+ sprintf (name, "_ZGVbN4vv_%s", bname+10); -+ else -+ sprintf (name, "_ZGVbN2vv_%s", bname+10); -+ -+ fntype = build_function_type_list (type_out, type_in, type_in, NULL); -+ } -+ else -+ gcc_unreachable (); -+ -+ /* Build a function declaration for the vectorized function. */ -+ new_fndecl = build_decl (BUILTINS_LOCATION, -+ FUNCTION_DECL, get_identifier (name), fntype); -+ TREE_PUBLIC (new_fndecl) = 1; -+ DECL_EXTERNAL (new_fndecl) = 1; -+ DECL_IS_NOVOPS (new_fndecl) = 1; -+ TREE_READONLY (new_fndecl) = 1; -+ -+ return new_fndecl; -+} -+ - /* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION. 
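The symbol names built by loongarch_builtin_vectorized_libsleef above follow a vector-ABI style scheme: bname + 10 skips the ten-character "__builtin_" prefix, 'b'/'d' selects the 128-bit/256-bit variant, and N<n> encodes the lane count. A standalone sketch of the same construction:

#include <stdio.h>

int
main (void)
{
  char name[20];
  const char *bname = "__builtin_sin";        /* the scalar decl's identifier */
  sprintf (name, "_ZGVdN4v_%s", bname + 10);  /* 4 x double, 256-bit LASX */
  puts (name);                                /* prints "_ZGVdN4v_sin" */
  return 0;
}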
*/ - - tree -@@ -2599,20 +2612,82 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, tree type - - switch (fn) - { -- case BUILT_IN_SQRT: -- if (out_mode == DFmode && out_n == 2 -- && in_mode == DFmode && in_n == 2) -- return LARCH_GET_BUILTIN (lsx_vfsqrt_d); -+ CASE_CFN_CEIL: -+ if (out_mode == DFmode && in_mode == DFmode) -+ { -+ if (out_n == 2 && in_n == 2) -+ return LARCH_GET_BUILTIN (lsx_vfrintrp_d); -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lasx_xvfrintrp_d); -+ } -+ if (out_mode == SFmode && in_mode == SFmode) -+ { -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lsx_vfrintrp_s); -+ if (out_n == 8 && in_n == 8) -+ return LARCH_GET_BUILTIN (lasx_xvfrintrp_s); -+ } - break; -- case BUILT_IN_SQRTF: -- if (out_mode == SFmode && out_n == 4 -- && in_mode == SFmode && in_n == 4) -- return LARCH_GET_BUILTIN (lsx_vfsqrt_s); -+ -+ CASE_CFN_TRUNC: -+ if (out_mode == DFmode && in_mode == DFmode) -+ { -+ if (out_n == 2 && in_n == 2) -+ return LARCH_GET_BUILTIN (lsx_vfrintrz_d); -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lasx_xvfrintrz_d); -+ } -+ if (out_mode == SFmode && in_mode == SFmode) -+ { -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lsx_vfrintrz_s); -+ if (out_n == 8 && in_n == 8) -+ return LARCH_GET_BUILTIN (lasx_xvfrintrz_s); -+ } - break; -+ -+ CASE_CFN_RINT: -+ CASE_CFN_ROUND: -+ if (out_mode == DFmode && in_mode == DFmode) -+ { -+ if (out_n == 2 && in_n == 2) -+ return LARCH_GET_BUILTIN (lsx_vfrint_d); -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lasx_xvfrint_d); -+ } -+ if (out_mode == SFmode && in_mode == SFmode) -+ { -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lsx_vfrint_s); -+ if (out_n == 8 && in_n == 8) -+ return LARCH_GET_BUILTIN (lasx_xvfrint_s); -+ } -+ break; -+ -+ CASE_CFN_FLOOR: -+ if (out_mode == DFmode && in_mode == DFmode) -+ { -+ if (out_n == 2 && in_n == 2) -+ return LARCH_GET_BUILTIN (lsx_vfrintrm_d); -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lasx_xvfrintrm_d); -+ } -+ if (out_mode == SFmode && in_mode == SFmode) -+ { -+ if (out_n == 4 && in_n == 4) -+ return LARCH_GET_BUILTIN (lsx_vfrintrm_s); -+ if (out_n == 8 && in_n == 8) -+ return LARCH_GET_BUILTIN (lasx_xvfrintrm_s); -+ } -+ break; -+ - default: - break; - } - -+ /* Dispatch to a handler for a vectorization library. 
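With the CASE_CFN_* mappings above in place, loops over ceil, floor, trunc and rint/round can be vectorized straight to the vfrint/xvfrint family. Given matching element modes and lane counts, the vectorizer can turn a loop like the following into xvfrintrp.d (a sketch; the precise options needed to enable LASX vectorization are assumed):

void
vec_ceil (double *restrict dst, const double *restrict src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = __builtin_ceil (src[i]);  /* CASE_CFN_CEIL -> lasx_xvfrintrp_d */
}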
*/ -+ if (loongarch_veclibabi_name && strcmp (loongarch_veclibabi_name, "sleef") == 0) -+ return loongarch_builtin_vectorized_libsleef (combined_fn (fn), type_out, type_in); - return NULL_TREE; - } - -@@ -2621,7 +2696,7 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, tree type - - static void - loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp, -- unsigned int argno) -+ unsigned int argno) - { - tree arg; - rtx value; -@@ -2649,11 +2724,10 @@ loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val) - - static rtx - loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, -- struct expand_operand *ops, bool has_target_p) -+ struct expand_operand *ops, bool has_target_p) - { - machine_mode imode; - int rangelo = 0, rangehi = 0, error_opno = 0; -- rtx sireg; - - switch (icode) - { -@@ -3002,7 +3076,7 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, - - static rtx - loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp, -- bool has_target_p) -+ bool has_target_p) - { - struct expand_operand ops[MAX_RECOG_OPERANDS]; - int opno, argno; -@@ -3069,7 +3143,8 @@ loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp) - - rtx - loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, -- machine_mode mode, int ignore) -+ machine_mode mode ATTRIBUTE_UNUSED, -+ int ignore ATTRIBUTE_UNUSED) - { - tree fndecl; - unsigned int fcode, avail; -@@ -3097,6 +3172,7 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, - } - gcc_unreachable (); - } -+ - /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */ - - void -@@ -3112,32 +3188,32 @@ loongarch_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update) - tree set_fcsr = loongarch_builtin_decls[LARCH_MOVGR2FCSR]; - tree get_fcsr_hold_call = build_call_expr (get_fcsr, 1, const0); - tree hold_assign_orig = build4 (TARGET_EXPR, LARCH_ATYPE_USI, -- fcsr_orig_var, get_fcsr_hold_call, -- NULL, NULL); -+ fcsr_orig_var, get_fcsr_hold_call, -+ NULL, NULL); - tree hold_mod_val = build2 (BIT_AND_EXPR, LARCH_ATYPE_USI, fcsr_orig_var, - build_int_cst (LARCH_ATYPE_USI, 0xffe0ffe0)); - tree hold_assign_mod = build4 (TARGET_EXPR, LARCH_ATYPE_USI, -- fcsr_mod_var, hold_mod_val, NULL, NULL); -- tree set_fcsr_hold_call = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var); -- tree hold_all = build2 (COMPOUND_EXPR, LARCH_ATYPE_USI, -- hold_assign_orig, hold_assign_mod); -- *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all, -- set_fcsr_hold_call); -+ fcsr_mod_var, hold_mod_val, NULL, NULL); -+ tree set_fcsr_hold_call = build_call_expr (set_fcsr, 2, const0, -+ fcsr_mod_var); -+ tree hold_all = build2 (COMPOUND_EXPR, LARCH_ATYPE_USI, hold_assign_orig, -+ hold_assign_mod); -+ *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all, set_fcsr_hold_call); - - *clear = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var); - - tree get_fcsr_update_call = build_call_expr (get_fcsr, 1, const0); - *update = build4 (TARGET_EXPR, LARCH_ATYPE_USI, exceptions_var, -- get_fcsr_update_call, NULL, NULL); -- tree set_fcsr_update_call = build_call_expr (set_fcsr, 2, const0, fcsr_orig_var); -+ get_fcsr_update_call, NULL, NULL); -+ tree set_fcsr_update_call = build_call_expr (set_fcsr, 2, const0, -+ fcsr_orig_var); - *update = build2 (COMPOUND_EXPR, void_type_node, *update, - set_fcsr_update_call); - tree atomic_feraiseexcept - = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT); -- tree 
int_exceptions_var = fold_convert (integer_type_node, -- exceptions_var); -- tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept, -- 1, int_exceptions_var); -+ tree int_exceptions_var = fold_convert (integer_type_node, exceptions_var); -+ tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept, 1, -+ int_exceptions_var); - *update = build2 (COMPOUND_EXPR, void_type_node, *update, - atomic_feraiseexcept_call); - } -@@ -3149,4 +3225,3 @@ loongarch_build_builtin_va_list (void) - { - return ptr_type_node; - } -- -diff --git a/gcc/config/loongarch/loongarch-c.c b/gcc/config/loongarch/loongarch-c.c -index 6eac43bdf..f8583f7aa 100644 ---- a/gcc/config/loongarch/loongarch-c.c -+++ b/gcc/config/loongarch/loongarch-c.c -@@ -1,22 +1,22 @@ - /* LoongArch-specific code for C family languages. -- Copyright (C) 2020-2021 Free Software Foundation, Inc. -- Contributed by Andrew Waterman (zhouyingkun@mail.loongson.cn). -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. - -- This file is part of GCC. -+This file is part of GCC. - -- GCC is free software; you can redistribute it and/or modify -- it under the terms of the GNU General Public License as published by -- the Free Software Foundation; either version 3, or (at your option) -- any later version. -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. - -- GCC is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- GNU General Public License for more details. -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. - -- You should have received a copy of the GNU General Public License -- along with GCC; see the file COPYING3. If not see -- . */ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ - - #define IN_TARGET_CODE 1 - -@@ -31,7 +31,28 @@ - #define builtin_define(TXT) cpp_define (pfile, TXT) - #define builtin_assert(TXT) cpp_assert (pfile, TXT) - --/* TODO: what is the pfile technique ??? !!! */ -+/* Define preprocessor macros for the -march and -mtune options. -+ PREFIX is either _LOONGARCH_ARCH or _LOONGARCH_TUNE, INFO is -+ the selected processor. If INFO's canonical name is "foo", -+ define PREFIX to be "foo", and define an additional macro -+ PREFIX_FOO. 
*/ -+#define LARCH_CPP_SET_PROCESSOR(PREFIX, CPU_TYPE) \ -+ do \ -+ { \ -+ char *macro, *p; \ -+ int cpu_type = (CPU_TYPE); \ -+ \ -+ macro = concat ((PREFIX), "_", \ -+ loongarch_cpu_strings[cpu_type], NULL); \ -+ for (p = macro; *p != 0; p++) \ -+ *p = TOUPPER (*p); \ -+ \ -+ builtin_define (macro); \ -+ builtin_define_with_value ((PREFIX), \ -+ loongarch_cpu_strings[cpu_type], 1); \ -+ free (macro); \ -+ } \ -+ while (0) - - void - loongarch_cpu_cpp_builtins (cpp_reader *pfile) -@@ -40,10 +61,43 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) - builtin_assert ("cpu=loongarch"); - builtin_define ("__loongarch__"); - -- if (TARGET_FLOAT64) -- builtin_define ("__loongarch_fpr=64"); -+ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch); -+ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune); -+ -+ /* Base architecture / ABI. */ -+ if (TARGET_64BIT) -+ { -+ builtin_define ("__loongarch_grlen=64"); -+ builtin_define ("__loongarch64"); -+ } -+ -+ if (TARGET_ABI_LP64) -+ { -+ builtin_define ("_ABILP64=3"); -+ builtin_define ("_LOONGARCH_SIM=_ABILP64"); -+ builtin_define ("__loongarch_lp64"); -+ } -+ -+ /* These defines reflect the ABI in use, not whether the -+ FPU is directly accessible. */ -+ if (TARGET_DOUBLE_FLOAT_ABI) -+ builtin_define ("__loongarch_double_float=1"); -+ else if (TARGET_SINGLE_FLOAT_ABI) -+ builtin_define ("__loongarch_single_float=1"); -+ -+ if (TARGET_DOUBLE_FLOAT_ABI || TARGET_SINGLE_FLOAT_ABI) -+ builtin_define ("__loongarch_hard_float=1"); - else -- builtin_define ("__loongarch_fpr=32"); -+ builtin_define ("__loongarch_soft_float=1"); -+ -+ -+ /* ISA Extensions. */ -+ if (TARGET_DOUBLE_FLOAT) -+ builtin_define ("__loongarch_frlen=64"); -+ else if (TARGET_SINGLE_FLOAT) -+ builtin_define ("__loongarch_frlen=32"); -+ else -+ builtin_define ("__loongarch_frlen=0"); - - if (ISA_HAS_LSX) - { -@@ -62,74 +116,12 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) - builtin_define ("__loongarch_simd_width=256"); - } - -- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", loongarch_arch_info); -- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", loongarch_tune_info); -- -- -- switch (loongarch_abi) -- { -- case ABILP32: -- builtin_define ("_ABILP32=1"); -- builtin_define ("_LOONGARCH_SIM=_ABILP32"); -- builtin_define ("__loongarch32"); -- break; -- -- case ABILPX32: -- builtin_define ("_ABILPX32=2"); -- builtin_define ("_LOONGARCH_SIM=_ABILPX32"); -- break; -- -- case ABILP64: -- builtin_define ("_ABILP64=3"); -- builtin_define ("_LOONGARCH_SIM=_ABILP64"); -- builtin_define ("__loongarch64"); -- break; -- } - -+ /* Native Data Sizes. */ - builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); - builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); - builtin_define_with_int_value ("_LOONGARCH_SZPTR", POINTER_SIZE); -- builtin_define_with_int_value ("_LOONGARCH_FPSET", -- 32 / MAX_FPRS_PER_FMT); -- builtin_define_with_int_value ("_LOONGARCH_SPFPSET", -- 32); -- -- /* These defines reflect the ABI in use, not whether the -- FPU is directly accessible. */ -- if (TARGET_NO_FLOAT) -- builtin_define ("__loongarch_no_float"); -- else if (TARGET_HARD_FLOAT_ABI) -- builtin_define ("__loongarch_hard_float"); -- else -- builtin_define ("__loongarch_soft_float"); -+ builtin_define_with_int_value ("_LOONGARCH_FPSET", 32); -+ builtin_define_with_int_value ("_LOONGARCH_SPFPSET", 32); - -- if (TARGET_SINGLE_FLOAT) -- builtin_define ("__loongarch_single_float"); -- -- /* Macros dependent on the C dialect. 
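LARCH_CPP_SET_PROCESSOR therefore exposes the selected CPU both as a flag macro and as a string-valued macro. Assuming "la464" is the canonical entry in loongarch_cpu_strings, -march=la464 makes the invocation above behave like:

builtin_define ("_LOONGARCH_ARCH_LA464");                   /* upper-cased concat      */
builtin_define_with_value ("_LOONGARCH_ARCH", "la464", 1);  /* _LOONGARCH_ARCH="la464" */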
*/ -- if (preprocessing_asm_p ()) -- { -- builtin_define_std ("LANGUAGE_ASSEMBLY"); -- builtin_define ("_LANGUAGE_ASSEMBLY"); -- } -- else if (c_dialect_cxx ()) -- { -- builtin_define ("_LANGUAGE_C_PLUS_PLUS"); -- builtin_define ("__LANGUAGE_C_PLUS_PLUS"); -- builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); -- } -- else -- { -- builtin_define_std ("LANGUAGE_C"); -- builtin_define ("_LANGUAGE_C"); -- } -- -- if (c_dialect_objc ()) -- { -- builtin_define ("_LANGUAGE_OBJECTIVE_C"); -- builtin_define ("__LANGUAGE_OBJECTIVE_C"); -- /* Bizarre, but retained for backwards compatibility. */ -- builtin_define_std ("LANGUAGE_C"); -- builtin_define ("_LANGUAGE_C"); -- } - } -diff --git a/gcc/config/loongarch/loongarch-cpu.c b/gcc/config/loongarch/loongarch-cpu.c -new file mode 100644 -index 000000000..ce2e649c8 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-cpu.c -@@ -0,0 +1,291 @@ -+/* Definitions for LoongArch CPU properties. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tm.h" -+#include "diagnostic-core.h" -+ -+#include "loongarch-def.h" -+#include "loongarch-opts.h" -+#include "loongarch-cpu.h" -+#include "loongarch-str.h" -+ -+/* Native CPU detection with "cpucfg" */ -+#define N_CPUCFG_WORDS 0x15 -+static uint32_t cpucfg_cache[N_CPUCFG_WORDS] = { 0 }; -+static const int cpucfg_useful_idx[] = {0, 1, 2, 16, 17, 18, 19}; -+ -+static uint32_t -+read_cpucfg_word (int wordno) -+{ -+ /* To make cross-compiler shut up. */ -+ (void) wordno; -+ uint32_t ret = 0; -+ -+ #ifdef __loongarch__ -+ __asm__ ("cpucfg %0,%1\n\t" :"=r"(ret) :"r"(wordno)); -+ #endif -+ -+ return ret; -+} -+ -+void -+cache_cpucfg (void) -+{ -+ for (unsigned int i = 0; i < sizeof (cpucfg_useful_idx) / sizeof (int); i++) -+ { -+ cpucfg_cache[cpucfg_useful_idx[i]] -+ = read_cpucfg_word (cpucfg_useful_idx[i]); -+ } -+} -+ -+uint32_t -+get_native_prid (void) -+{ -+ /* Fill loongarch_cpu_default_config[CPU_NATIVE] with cpucfg data, -+ see "Loongson Architecture Reference Manual" -+ (Volume 1, Section 2.2.10.5) */ -+ return cpucfg_cache[0]; -+} -+ -+const char* -+get_native_prid_str (void) -+{ -+ static char prid_str[9]; -+ sprintf (prid_str, "%08x", cpucfg_cache[0]); -+ return (const char*) prid_str; -+} -+ -+ -+/* Fill property tables for CPU_NATIVE. */ -+void -+fill_native_cpu_config (struct loongarch_target *tgt) -+{ -+ int arch_native_p = tgt->cpu_arch == CPU_NATIVE; -+ int tune_native_p = tgt->cpu_tune == CPU_NATIVE; -+ int native_cpu_type = CPU_NATIVE; -+ -+ /* Nothing needs to be done unless "-march/tune=native" -+ is given or implied. */ -+ if (!arch_native_p && !tune_native_p) -+ return; -+ -+ /* Fill cpucfg_cache with the "cpucfg" instruction. 
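read_cpucfg_word above is the entire native-detection primitive: one cpucfg instruction per configuration word, guarded so that cross builds compile it away. A standalone equivalent (meaningful output only on a LoongArch host):

#include <stdint.h>

static uint32_t
cpucfg_word (int wordno)
{
  uint32_t ret = 0;
#ifdef __loongarch__
  __asm__ ("cpucfg %0,%1" : "=r" (ret) : "r" (wordno));
#endif
  return ret;  /* word 0 carries the PRID used for identification */
}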
*/ -+ cache_cpucfg (); -+ -+ /* Fill: tgt->cpu_arch | tgt->cpu_tune -+ With: processor ID (PRID) -+ At: cpucfg_words[0][31:0] */ -+ -+ switch (cpucfg_cache[0] & 0x00ffff00) -+ { -+ case 0x0014d000: /* LA664 */ -+ native_cpu_type = CPU_LA664; -+ break; -+ -+ case 0x0014c000: /* LA464 */ -+ native_cpu_type = CPU_LA464; -+ break; -+ -+ case 0x0014b000: /* LA364 */ -+ native_cpu_type = CPU_LA364; -+ break; -+ -+ case 0x0014a000: /* LA264 */ -+ native_cpu_type = CPU_LA264; -+ break; -+ -+ default: -+ /* Unknown PRID. */ -+ if (tune_native_p) -+ inform (UNKNOWN_LOCATION, "unknown processor ID %<0x%x%>, " -+ "some tuning parameters will fall back to default", -+ cpucfg_cache[0]); -+ break; -+ } -+ -+ /* if -march=native */ -+ if (arch_native_p) -+ { -+ int tmp; -+ tgt->cpu_arch = native_cpu_type; -+ -+ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].base -+ With: base architecture (ARCH) -+ At: cpucfg_words[1][1:0] */ -+ -+ #define PRESET_ARCH (loongarch_cpu_default_isa[tgt->cpu_arch].base) -+ switch (cpucfg_cache[1] & 0x3) -+ { -+ case 0x02: -+ tmp = ISA_BASE_LA64V100; -+ break; -+ -+ default: -+ fatal_error (UNKNOWN_LOCATION, -+ "unknown native base architecture %<0x%x%>, %qs failed", -+ (unsigned int) (cpucfg_cache[1] & 0x3), -+ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); -+ } -+ -+ /* Check consistency with PRID presets. */ -+ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_ARCH) -+ warning (0, "base architecture %qs differs from PRID preset %qs", -+ loongarch_isa_base_strings[tmp], -+ loongarch_isa_base_strings[PRESET_ARCH]); -+ -+ /* Use the native value anyways. */ -+ PRESET_ARCH = tmp; -+ -+ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].fpu -+ With: FPU type (FP, FP_SP, FP_DP) -+ At: cpucfg_words[2][2:0] */ -+ -+ #define PRESET_FPU (loongarch_cpu_default_isa[tgt->cpu_arch].fpu) -+ switch (cpucfg_cache[2] & 0x7) -+ { -+ case 0x07: -+ tmp = ISA_EXT_FPU64; -+ break; -+ -+ case 0x03: -+ tmp = ISA_EXT_FPU32; -+ break; -+ -+ case 0x00: -+ tmp = ISA_EXT_NONE; -+ break; -+ -+ default: -+ fatal_error (UNKNOWN_LOCATION, -+ "unknown native FPU type %<0x%x%>, %qs failed", -+ (unsigned int) (cpucfg_cache[2] & 0x7), -+ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); -+ } -+ -+ /* Check consistency with PRID presets. */ -+ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_FPU) -+ warning (0, "floating-point unit %qs differs from PRID preset %qs", -+ loongarch_isa_ext_strings[tmp], -+ loongarch_isa_ext_strings[PRESET_FPU]); -+ -+ /* Use the native value anyways. */ -+ PRESET_FPU = tmp; -+ -+ -+ /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].simd -+ With: SIMD extension type (LSX, LASX) -+ At: cpucfg_words[2][7:6] */ -+ -+ #define PRESET_SIMD (loongarch_cpu_default_isa[tgt->cpu_arch].simd) -+ switch (cpucfg_cache[2] & 0xc0) -+ { -+ case 0xc0: -+ tmp = ISA_EXT_SIMD_LASX; -+ break; -+ -+ case 0x40: -+ tmp = ISA_EXT_SIMD_LSX; -+ break; -+ -+ case 0x80: -+ warning (0, "unknown SIMD extension " -+ "(%qs disabled while %qs is enabled), disabling SIMD", -+ loongarch_isa_ext_strings[ISA_EXT_SIMD_LSX], -+ loongarch_isa_ext_strings[ISA_EXT_SIMD_LASX]); -+ -+ case 0x00: -+ tmp = 0; -+ break; -+ } -+ -+ /* Check consistency with PRID presets. */ -+ /* -+ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_SIMD) -+ warning (0, "SIMD extension %qs differs from PRID preset %qs", -+ loongarch_isa_ext_strings[tmp], -+ loongarch_isa_ext_strings[PRESET_SIMD]); -+ */ -+ -+ /* Use the native value anyways. 
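The two bit fields tested above fully determine the FPU and SIMD presets. On a part with a 64-bit FPU and both SIMD extensions the decode works out as follows (the word value is hypothetical):

uint32_t w2 = 0xc7;               /* hypothetical cpucfg word 2            */
int fpu64 = (w2 & 0x7) == 0x7;    /* FP, FP_SP, FP_DP -> ISA_EXT_FPU64     */
int lasx  = (w2 & 0xc0) == 0xc0;  /* LSX and LASX     -> ISA_EXT_SIMD_LASX */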
*/ -+ PRESET_SIMD = tmp; -+ } -+ -+ if (tune_native_p) -+ { -+ tgt->cpu_tune = native_cpu_type; -+ -+ /* Fill: loongarch_cpu_cache[tgt->cpu_tune] -+ With: cache size info -+ At: cpucfg_words[16:20][31:0] */ -+ -+ #define PRESET_CACHE (loongarch_cpu_cache[tgt->cpu_tune]) -+ struct loongarch_cache native_cache; -+ int l1d_present = 0, l1u_present = 0; -+ int l2d_present = 0; -+ uint32_t l1_szword, l2_szword; -+ -+ l1u_present |= cpucfg_cache[16] & 3; /* bit[1:0]: unified l1 */ -+ l1d_present |= cpucfg_cache[16] & 4; /* bit[2:2]: l1d */ -+ l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0); -+ l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0; -+ -+ l2d_present |= cpucfg_cache[16] & 24; /* bit[4:3]: unified l2 */ -+ l2d_present |= cpucfg_cache[16] & 128; /* bit[7:7]: l2d */ -+ l2_szword = l2d_present ? cpucfg_cache[19]: 0; -+ -+ native_cache.l1d_line_size -+ = 1 << ((l1_szword & 0x7f000000) >> 24); /* bit[30:24]: log2(line) */ -+ -+ native_cache.l1d_size -+ = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ -+ * ((l1_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ -+ * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(line) */ -+ >> 10; /* in kibibytes */ -+ -+ native_cache.l2d_size -+ = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ -+ * ((l2_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ -+ * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesz) */ -+ >> 10; /* in kibibytes */ -+ -+ /* -+ if (native_cpu_type != CPU_NATIVE && ( -+ native_cache.l1d_line_size != PRESET_CACHE.l1d_line_size || -+ native_cache.l1d_size != PRESET_CACHE.l1d_size || -+ native_cache.l2d_size != PRESET_CACHE.l2d_size)) -+ warning (0, "native cache info (%) " -+ "differs from PRID preset (%)", -+ native_cache.l1d_size, native_cache.l2d_size, -+ native_cache.l1d_line_size, -+ PRESET_CACHE.l1d_size, PRESET_CACHE.l2d_size, -+ PRESET_CACHE.l1d_line_size); -+ */ -+ -+ /* Use the native value anyways. */ -+ PRESET_CACHE.l1d_line_size = native_cache.l1d_line_size; -+ PRESET_CACHE.l1d_size = native_cache.l1d_size; -+ PRESET_CACHE.l2d_size = native_cache.l2d_size; -+ } -+} -diff --git a/gcc/config/loongarch/loongarch-d.c b/gcc/config/loongarch/loongarch-cpu.h -similarity index 59% -rename from gcc/config/loongarch/loongarch-d.c -rename to gcc/config/loongarch/loongarch-cpu.h -index 971e5d33e..08d018372 100644 ---- a/gcc/config/loongarch/loongarch-d.c -+++ b/gcc/config/loongarch/loongarch-cpu.h -@@ -1,5 +1,7 @@ --/* Subroutines for the D front end on the LARCH architecture. -- Copyright (C) 2017 Free Software Foundation, Inc. -+/* Definitions for loongarch native cpu property detection routines. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ -+This file is part of GCC. - - GCC is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by -@@ -15,17 +17,15 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - --#include "config.h" -+#ifndef LOONGARCH_CPU_H -+#define LOONGARCH_CPU_H -+ - #include "system.h" --#include "coretypes.h" --#include "tm.h" --#include "d/d-target.h" --#include "d/d-target-def.h" -- --/* Implement TARGET_D_CPU_VERSIONS for LARCH targets. */ -- --void --loongarch_d_target_versions (void) --{ -- // need to be improved !! 
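The probing scheme above can be exercised outside of GCC with a few lines of C. A minimal stand-alone sketch, assuming a LoongArch host and only the word/field layout documented in the comments above (PRID in word 0, FP/SIMD bits in word 2, L1D geometry in word 18 when word 16 reports a private L1D):

    /* Stand-alone sketch of the cpucfg probing above; LoongArch host only.
       Field layout as per fill_native_cpu_config.  */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t
    read_cpucfg_word (int wordno)
    {
      uint32_t ret;
      __asm__ ("cpucfg %0,%1" : "=r" (ret) : "r" (wordno));
      return ret;
    }

    int
    main (void)
    {
      uint32_t prid = read_cpucfg_word (0);
      uint32_t fp = read_cpucfg_word (2);
      uint32_t l1d = read_cpucfg_word (18);

      printf ("PRID 0x%08x (product 0x%06x)\n", prid, prid & 0x00ffff00);
      printf ("fpu: %s, lsx: %c, lasx: %c\n",
              (fp & 0x7) == 0x7 ? "fpu64"
              : (fp & 0x3) == 0x3 ? "fpu32" : "none",
              (fp & 0x40) ? 'y' : 'n', (fp & 0x80) ? 'y' : 'n');

      unsigned line = 1u << ((l1d & 0x7f000000) >> 24);   /* log2(line) */
      unsigned kib = ((1u << ((l1d & 0x00ff0000) >> 16))  /* log2(idx)  */
                      * ((l1d & 0x0000ffff) + 1) * line)  /* sets - 1   */
                     >> 10;
      printf ("l1d: %u-byte lines, %u KiB\n", line, kib);
      return 0;
    }

The arithmetic mirrors the l1d_size computation above: a word encoding log2(line) = 6, log2(idx) = 2 and sets - 1 = 255 decodes to 64-byte lines and (1 << 2) * 256 * 64 >> 10 = 64 KiB, which matches the LA464 preset in loongarch-def.c.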
--} -+#include "loongarch-def.h" -+ -+void cache_cpucfg (void); -+void fill_native_cpu_config (struct loongarch_target *tgt); -+uint32_t get_native_prid (void); -+const char* get_native_prid_str (void); -+ -+#endif /* LOONGARCH_CPU_H */ -diff --git a/gcc/config/loongarch/loongarch-cpus.def b/gcc/config/loongarch/loongarch-cpus.def -deleted file mode 100644 -index 7ce2508e3..000000000 ---- a/gcc/config/loongarch/loongarch-cpus.def -+++ /dev/null -@@ -1,38 +0,0 @@ --/* LARCH CPU names. -- Copyright (C) 1989-2018 Free Software Foundation, Inc. -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify --it under the terms of the GNU General Public License as published by --the Free Software Foundation; either version 3, or (at your option) --any later version. -- --GCC is distributed in the hope that it will be useful, --but WITHOUT ANY WARRANTY; without even the implied warranty of --MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --GNU General Public License for more details. -- --You should have received a copy of the GNU General Public License --along with GCC; see the file COPYING3. If not see --. */ -- --/* A table describing all the processors GCC knows about. The first -- mention of an ISA level is taken as the canonical name for that -- ISA. -- -- To ease comparison, please keep this table in the same order -- as GAS's loongarch_cpu_info_table. Please also make sure that -- LARCH_ISA_LEVEL_SPEC and LARCH_ARCH_FLOAT_SPEC handle all -march -- options correctly. -- -- Before including this file, define a macro: -- -- LARCH_CPU (NAME, CPU, ISA, FLAGS) -- -- where the arguments are the fields of struct loongarch_cpu_info. */ -- --/* Entries for generic ISAs. */ --LARCH_CPU ("loongarch64", PROCESSOR_LOONGARCH64, 0, 0) --LARCH_CPU ("la464", PROCESSOR_LA464, 0, 0) -- -diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c -new file mode 100644 -index 000000000..dde7a5dba ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-def.c -@@ -0,0 +1,232 @@ -+/* LoongArch static properties. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#include "loongarch-def.h" -+#include "loongarch-str.h" -+ -+/* CPU property tables. 
*/ -+const char* -+loongarch_cpu_strings[N_TUNE_TYPES] = { -+ [CPU_NATIVE] = STR_CPU_NATIVE, -+ [CPU_ABI_DEFAULT] = STR_CPU_ABI_DEFAULT, -+ [CPU_LOONGARCH64] = STR_CPU_LOONGARCH64, -+ [CPU_LA464] = STR_CPU_LA464, -+ [CPU_LA364] = STR_CPU_LA364, -+ [CPU_LA264] = STR_CPU_LA264, -+ [CPU_LA664] = STR_CPU_LA664, -+}; -+ -+struct loongarch_isa -+loongarch_cpu_default_isa[N_ARCH_TYPES] = { -+ [CPU_LOONGARCH64] = { -+ .base = ISA_BASE_LA64V100, -+ .fpu = ISA_EXT_FPU64, -+ .simd = 0, -+ }, -+ [CPU_LA464] = { -+ .base = ISA_BASE_LA64V100, -+ .fpu = ISA_EXT_FPU64, -+ .simd = ISA_EXT_SIMD_LASX, -+ }, -+ [CPU_LA364] = { -+ .base = ISA_BASE_LA64V100, -+ .fpu = ISA_EXT_FPU64, -+ .simd = ISA_EXT_SIMD_LSX, -+ }, -+ [CPU_LA264] = { -+ .base = ISA_BASE_LA64V100, -+ .fpu = ISA_EXT_FPU64, -+ .simd = ISA_EXT_SIMD_LSX, -+ }, -+ [CPU_LA664] = { -+ .base = ISA_BASE_LA64V100, -+ .fpu = ISA_EXT_FPU64, -+ .simd = ISA_EXT_SIMD_LASX, -+ }, -+}; -+ -+struct loongarch_cache -+loongarch_cpu_cache[N_TUNE_TYPES] = { -+ [CPU_LOONGARCH64] = { -+ .l1d_line_size = 64, -+ .l1d_size = 64, -+ .l2d_size = 256, -+ .simultaneous_prefetches = 4, -+ }, -+ [CPU_LA464] = { -+ .l1d_line_size = 64, -+ .l1d_size = 64, -+ .l2d_size = 256, -+ .simultaneous_prefetches = 4, -+ }, -+ [CPU_LA364] = { -+ .l1d_line_size = 64, -+ .l1d_size = 64, -+ .l2d_size = 0, -+ .simultaneous_prefetches = 4, -+ }, -+ [CPU_LA264] = { -+ .l1d_line_size = 64, -+ .l1d_size = 32, -+ .l2d_size = 0, -+ .simultaneous_prefetches = 4, -+ }, -+ [CPU_LA664] = { -+ .l1d_line_size = 64, -+ .l1d_size = 64, -+ .l2d_size = 256, -+ .simultaneous_prefetches = 4, -+ }, -+}; -+ -+/* RTX costs */ -+/* Default RTX cost initializer. */ -+#define COSTS_N_INSNS(N) ((N) * 4) -+#define DEFAULT_COSTS \ -+ .fp_add = COSTS_N_INSNS (1), \ -+ .fp_mult_sf = COSTS_N_INSNS (2), \ -+ .fp_mult_df = COSTS_N_INSNS (4), \ -+ .fp_div_sf = COSTS_N_INSNS (6), \ -+ .fp_div_df = COSTS_N_INSNS (8), \ -+ .int_mult_si = COSTS_N_INSNS (1), \ -+ .int_mult_di = COSTS_N_INSNS (1), \ -+ .int_div_si = COSTS_N_INSNS (4), \ -+ .int_div_di = COSTS_N_INSNS (6), \ -+ .branch_cost = 6, \ -+ .memory_latency = 4 -+ -+/* The following properties cannot be looked up directly using "cpucfg". -+ So it is necessary to provide a default value for "unknown native" -+ tune targets (i.e. -mtune=native while PRID does not correspond to -+ any known "-mtune" type). */ -+ -+struct loongarch_rtx_cost_data -+loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = { -+ [CPU_NATIVE] = { -+ DEFAULT_COSTS -+ }, -+ [CPU_LOONGARCH64] = { -+ DEFAULT_COSTS -+ }, -+ [CPU_LA464] = { -+ DEFAULT_COSTS -+ }, -+ [CPU_LA364] = { -+ DEFAULT_COSTS -+ }, -+ [CPU_LA264] = { -+ DEFAULT_COSTS -+ }, -+ [CPU_LA664] = { -+ DEFAULT_COSTS -+ }, -+}; -+ -+/* RTX costs to use when optimizing for size. 
*/ -+const struct loongarch_rtx_cost_data -+loongarch_rtx_cost_optimize_size = { -+ .fp_add = 4, -+ .fp_mult_sf = 4, -+ .fp_mult_df = 4, -+ .fp_div_sf = 4, -+ .fp_div_df = 4, -+ .int_mult_si = 4, -+ .int_mult_di = 4, -+ .int_div_si = 4, -+ .int_div_di = 4, -+ .branch_cost = 2, -+ .memory_latency = 4, -+}; -+ -+int -+loongarch_cpu_issue_rate[N_TUNE_TYPES] = { -+ [CPU_NATIVE] = 4, -+ [CPU_LOONGARCH64] = 4, -+ [CPU_LA464] = 4, -+ [CPU_LA364] = 3, -+ [CPU_LA264] = 2, -+ [CPU_LA664] = 6, -+}; -+ -+int -+loongarch_cpu_multipass_dfa_lookahead[N_TUNE_TYPES] = { -+ [CPU_NATIVE] = 4, -+ [CPU_LOONGARCH64] = 4, -+ [CPU_LA464] = 4, -+ [CPU_LA364] = 4, -+ [CPU_LA264] = 4, -+ [CPU_LA664] = 4, -+}; -+ -+/* Wiring string definitions from loongarch-str.h to global arrays -+ with standard index values from loongarch-opts.h, so we can -+ print config-related messages and do ABI self-spec filtering -+ from the driver in a self-consistent manner. */ -+ -+const char* -+loongarch_isa_base_strings[N_ISA_BASE_TYPES] = { -+ [ISA_BASE_LA64V100] = STR_ISA_BASE_LA64V100, -+}; -+ -+const char* -+loongarch_isa_ext_strings[N_ISA_EXT_TYPES] = { -+ [ISA_EXT_NONE] = STR_NONE, -+ [ISA_EXT_FPU32] = STR_ISA_EXT_FPU32, -+ [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64, -+ [ISA_EXT_SIMD_LSX] = STR_ISA_EXT_LSX, -+ [ISA_EXT_SIMD_LASX] = STR_ISA_EXT_LASX, -+}; -+ -+const char* -+loongarch_abi_base_strings[N_ABI_BASE_OPTS] = { -+ [ABI_BASE_LP64D] = STR_ABI_BASE_LP64D, -+ [ABI_BASE_LP64F] = STR_ABI_BASE_LP64F, -+ [ABI_BASE_LP64S] = STR_ABI_BASE_LP64S, -+ [ABI_BASE_LP64] = STR_ABI_BASE_LP64, -+}; -+ -+const char* -+loongarch_abi_ext_strings[N_ABI_EXT_TYPES] = { -+ [ABI_EXT_BASE] = STR_ABI_EXT_BASE, -+}; -+ -+const char* -+loongarch_cmodel_strings[] = { -+ [CMODEL_NORMAL] = STR_CMODEL_NORMAL, -+ [CMODEL_TINY] = STR_CMODEL_TINY, -+ [CMODEL_TINY_STATIC] = STR_CMODEL_TS, -+ [CMODEL_LARGE] = STR_CMODEL_LARGE, -+ [CMODEL_EXTREME] = STR_CMODEL_EXTREME, -+}; -+ -+ -+/* ABI-related definitions. */ -+const struct loongarch_isa -+abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { -+ [ABI_BASE_LP64D] = { -+ [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU64, .simd = 0}, -+ }, -+ [ABI_BASE_LP64F] = { -+ [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU32, .simd = 0}, -+ }, -+ [ABI_BASE_LP64S] = { -+ [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_NONE, .simd = 0}, -+ }, -+}; -diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h -new file mode 100644 -index 000000000..45d9ac16c ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-def.h -@@ -0,0 +1,161 @@ -+/* LoongArch definitions. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+/* Definition of standard codes for: -+ - base architecture types (isa_base), -+ - ISA extensions (isa_ext), -+ - base ABI types (abi_base), -+ - ABI extension types (abi_ext). 
-+ -+ - code models (cmodel) -+ - other command-line switches (switch) -+ -+ These values are primarily used for implementing option handling -+ logic in "loongarch.opt", "loongarch-driver.c" and "loongarch-opt.c". -+ -+ As for the result of this option handling process, the following -+ scheme is adopted to represent the final configuration: -+ -+ - The target ABI is encoded with a tuple (abi_base, abi_ext) -+ using the code defined below. -+ -+ - The target ISA is encoded with a "struct loongarch_isa" defined -+ in loongarch-cpu.h. -+ -+ - The target microarchitecture is represented with a cpu model -+ index defined in loongarch-cpu.h. -+*/ -+ -+#ifndef LOONGARCH_DEF_H -+#define LOONGARCH_DEF_H -+ -+#include "loongarch-tune.h" -+ -+#ifdef __cplusplus -+extern "C" { -+#endif -+ -+/* enum isa_base */ -+extern const char* loongarch_isa_base_strings[]; -+#define ISA_BASE_LA64V100 0 -+#define N_ISA_BASE_TYPES 1 -+ -+/* enum isa_ext_* */ -+extern const char* loongarch_isa_ext_strings[]; -+#define ISA_EXT_NONE 0 -+#define ISA_EXT_FPU32 1 -+#define ISA_EXT_FPU64 2 -+#define N_ISA_EXT_FPU_TYPES 3 -+#define ISA_EXT_SIMD_LSX 3 -+#define ISA_EXT_SIMD_LASX 4 -+#define N_ISA_EXT_TYPES 5 -+ -+/* enum abi_base */ -+extern const char* loongarch_abi_base_strings[]; -+#define ABI_BASE_LP64D 0 -+#define ABI_BASE_LP64F 1 -+#define ABI_BASE_LP64S 2 -+#define N_ABI_BASE_TYPES 3 -+#define ABI_BASE_LP64 3 -+#define N_ABI_BASE_OPTS 4 -+ -+#define IS_LP64_ABI_BASE(C) \ -+ (C == ABI_BASE_LP64D || C == ABI_BASE_LP64F || C == ABI_BASE_LP64S) -+ -+#define TO_LP64_ABI_BASE(C) (C) -+ -+#define ABI_FPU_64(abi_base) \ -+ (abi_base == ABI_BASE_LP64D) -+#define ABI_FPU_32(abi_base) \ -+ (abi_base == ABI_BASE_LP64F) -+#define ABI_FPU_NONE(abi_base) \ -+ (abi_base == ABI_BASE_LP64S) -+ -+ -+/* enum abi_ext */ -+extern const char* loongarch_abi_ext_strings[]; -+#define ABI_EXT_BASE 0 -+#define N_ABI_EXT_TYPES 1 -+ -+/* enum cmodel */ -+extern const char* loongarch_cmodel_strings[]; -+#define CMODEL_NORMAL 0 -+#define CMODEL_TINY 1 -+#define CMODEL_TINY_STATIC 2 -+#define CMODEL_LARGE 3 -+#define CMODEL_EXTREME 4 -+#define N_CMODEL_TYPES 5 -+ -+/* The common default value for variables whose assignments -+ are triggered by command-line options. */ -+ -+#define M_OPT_UNSET -1 -+#define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPT_UNSET) -+ -+ -+/* Internal representation of the target. */ -+struct loongarch_isa -+{ -+ int base; /* ISA_BASE_ */ -+ int fpu; /* ISA_EXT_FPU_ */ -+ int simd; /* ISA_EXT_SIMD_ */ -+}; -+ -+struct loongarch_abi -+{ -+ int base; /* ABI_BASE_ */ -+ int ext; /* ABI_EXT_ */ -+}; -+ -+struct loongarch_target -+{ -+ struct loongarch_isa isa; -+ struct loongarch_abi abi; -+ int cpu_arch; /* CPU_ */ -+ int cpu_tune; /* same */ -+ int cmodel; /* CMODEL_ */ -+}; -+ -+/* CPU properties. 
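Every user-visible knob starts out as M_OPT_UNSET and is only narrowed by explicit options, so the "constrained" tests seen later in loongarch-opts.c reduce to a sentinel comparison. A self-contained sketch of the idiom, with "struct isa" as a local stand-in for the real struct loongarch_isa:

    /* Sketch of the M_OPT_UNSET sentinel idiom used above.  */
    #include <stdio.h>

    #define M_OPT_UNSET -1
    #define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPT_UNSET)

    struct isa { int base, fpu, simd; };

    int
    main (void)
    {
      struct isa isa = { M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET };
      isa.fpu = 2;  /* as if "-mfpu=64" had been parsed */

      printf ("fpu constrained: %d, simd constrained: %d\n",
              !M_OPT_ABSENT (isa.fpu), !M_OPT_ABSENT (isa.simd));
      return 0;
    }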
*/ -+/* index */ -+#define CPU_NATIVE 0 -+#define CPU_ABI_DEFAULT 1 -+#define CPU_LOONGARCH64 2 -+#define CPU_LA464 3 -+#define CPU_LA364 4 -+#define CPU_LA264 5 -+#define CPU_LA664 6 -+#define N_ARCH_TYPES 7 -+#define N_TUNE_TYPES 7 -+#define CPU_NONE 8 -+ -+/* parallel tables */ -+extern const char* loongarch_cpu_strings[]; -+extern struct loongarch_isa loongarch_cpu_default_isa[]; -+extern int loongarch_cpu_issue_rate[]; -+extern int loongarch_cpu_multipass_dfa_lookahead[]; -+ -+extern struct loongarch_cache loongarch_cpu_cache[]; -+extern struct loongarch_rtx_cost_data loongarch_cpu_rtx_cost_data[]; -+ -+#ifdef __cplusplus -+} -+#endif -+#endif /* LOONGARCH_DEF_H */ -diff --git a/gcc/config/loongarch/loongarch-driver.c b/gcc/config/loongarch/loongarch-driver.c -new file mode 100644 -index 000000000..1f56df84f ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-driver.c -@@ -0,0 +1,206 @@ -+/* Subroutines for the gcc driver. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tm.h" -+#include "obstack.h" -+#include "diagnostic-core.h" -+#include "opts.h" -+ -+#include "loongarch-opts.h" -+#include "loongarch-driver.h" -+ -+/* This flag is set to 1 if we believe that the user might be avoiding -+ linking (implicitly) against something from the startfile search paths. */ -+static int no_link = 0; -+ -+/* Use the public obstack from the gcc driver (defined in gcc.c). -+ This is for allocating space for the returned string. 
*/ -+extern struct obstack opts_obstack; -+ -+const char* -+la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED) -+{ -+ /* Initialize all fields of la_target to -1 */ -+ loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, -+ M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET); -+ return ""; -+} -+ -+const char* -+driver_set_no_link (int argc, const char **argv) -+{ -+ no_link = 1; -+ return ""; -+} -+ -+const char* -+driver_set_m_parm (int argc, const char **argv) -+{ -+ gcc_assert (argc == 2); -+ -+#define LARCH_DRIVER_PARSE_PARM(OPT_IDX, NAME, OPTSTR_LIST, \ -+ OPT_IDX_LO, OPT_IDX_HI) \ -+ if (strcmp (argv[0], OPTSTR_##NAME) == 0) \ -+ for (int i = (OPT_IDX_LO); i < (OPT_IDX_HI); i++) \ -+ { \ -+ if ((OPTSTR_LIST)[i] != 0) \ -+ if (strcmp (argv[1], (OPTSTR_LIST)[i]) == 0) \ -+ { \ -+ (OPT_IDX) = i; \ -+ return 0; \ -+ } \ -+ } -+ -+ LARCH_DRIVER_PARSE_PARM (la_target.abi.base, ABI_BASE, \ -+ loongarch_abi_base_strings, 0, N_ABI_BASE_OPTS) -+ -+ LARCH_DRIVER_PARSE_PARM (la_target.isa.fpu, ISA_EXT_FPU, \ -+ loongarch_isa_ext_strings, 0, N_ISA_EXT_FPU_TYPES) -+ -+ LARCH_DRIVER_PARSE_PARM (la_target.isa.simd, ISA_EXT_SIMD, \ -+ loongarch_isa_ext_strings, 0, N_ISA_EXT_TYPES) -+ -+ LARCH_DRIVER_PARSE_PARM (la_target.cpu_arch, ARCH, \ -+ loongarch_cpu_strings, 0, N_ARCH_TYPES) -+ -+ LARCH_DRIVER_PARSE_PARM (la_target.cpu_tune, TUNE, \ -+ loongarch_cpu_strings, 0, N_TUNE_TYPES) -+ -+ LARCH_DRIVER_PARSE_PARM (la_target.cmodel, CMODEL, \ -+ loongarch_cmodel_strings, 0, N_CMODEL_TYPES) -+ -+ gcc_unreachable (); -+} -+ -+static void -+driver_record_deferred_opts (struct loongarch_flags *flags) -+{ -+ unsigned int i; -+ cl_deferred_option *opt; -+ vec *v = (vec *) la_deferred_options; -+ -+ gcc_assert (flags); -+ -+ /* Initialize flags */ -+ flags->flt = M_OPT_UNSET; -+ flags->flt_str = NULL; -+ flags->sx[0] = flags->sx[1] = 0; -+ -+ int sx_flag_idx = 0; -+ -+ if (v) -+ FOR_EACH_VEC_ELT (*v, i, opt) -+ { -+ switch (opt->opt_index) -+ { -+ case OPT_mlsx: -+ flags->sx[sx_flag_idx++] = ISA_EXT_SIMD_LSX * (opt->value ? 1 : -1); -+ break; -+ -+ case OPT_mlasx: -+ flags->sx[sx_flag_idx++] = ISA_EXT_SIMD_LASX * (opt->value ? 1 : -1); -+ break; -+ -+ case OPT_msoft_float: -+ flags->flt = ISA_EXT_NONE; -+ flags->flt_str = OPTSTR_SOFT_FLOAT; -+ break; -+ -+ case OPT_msingle_float: -+ flags->flt = ISA_EXT_FPU32; -+ flags->flt_str = OPTSTR_SINGLE_FLOAT; -+ break; -+ -+ case OPT_mdouble_float: -+ flags->flt = ISA_EXT_FPU64; -+ flags->flt_str = OPTSTR_DOUBLE_FLOAT; -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ gcc_assert (sx_flag_idx <= 2); -+ } -+} -+ -+const char* -+driver_get_normalized_m_opts (int argc, const char **argv ATTRIBUTE_UNUSED) -+{ -+ if (argc != 0) -+ return " %eget_normalized_m_opts requires no argument.\n"; -+ -+ struct loongarch_flags flags; -+ driver_record_deferred_opts (&flags); -+ loongarch_config_target (&la_target, &flags, !no_link /* follow_multilib_list */); -+ -+ /* Output normalized option strings. */ -+ obstack_blank (&opts_obstack, 0); -+ -+#undef APPEND_LTR -+#define APPEND_LTR(S) \ -+ obstack_grow (&opts_obstack, (const void*) (S), \ -+ sizeof ((S)) / sizeof (char) -1) -+ -+#undef APPEND_VAL -+#define APPEND_VAL(S) \ -+ obstack_grow (&opts_obstack, (const void*) (S), strlen ((S))) -+ -+#undef APPEND_OPT -+#define APPEND_OPT(NAME) \ -+ APPEND_LTR (" %. 
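Together with the spec fragments in loongarch-driver.h below, these callbacks give the driver a single option-normalization pass. Assuming OPTSTR_ABI_BASE expands to "abi" (the usual spelling for -mabi), a command line such as gcc -mabi=lp64d -mlsx -c t.c is processed roughly as:

    %:driver_init()                     -> reset la_target to all M_OPT_UNSET
    %{c|S|E|nostdlib: %:set_no_link()}  -> -c sets no_link = 1
    %{mabi=*: %:set_m_parm(abi %*)}     -> driver_set_m_parm ("abi", "lp64d")
    -mlsx                               -> deferred; read back later by
                                           driver_record_deferred_opts
    %:get_normalized_m_opts()           -> loongarch_config_target, then the
                                           canonical -m options are re-emitted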
*/ -+ -+#ifndef LOONGARCH_DRIVER_H -+#define LOONGARCH_DRIVER_H -+ -+#include "loongarch-str.h" -+ -+extern const char* -+la_driver_init (int argc, const char **argv); -+ -+extern const char* -+driver_set_m_parm (int argc, const char **argv); -+ -+extern const char* -+driver_set_no_link (int argc, const char **argv); -+ -+extern const char* -+driver_get_normalized_m_opts (int argc, const char **argv); -+ -+#define EXTRA_SPEC_FUNCTIONS \ -+ { "driver_init", la_driver_init }, \ -+ { "set_m_parm", driver_set_m_parm }, \ -+ { "set_no_link", driver_set_no_link }, \ -+ { "get_normalized_m_opts", driver_get_normalized_m_opts }, -+ -+/* Pre-process ABI-related options. */ -+#define LA_SET_PARM_SPEC(NAME) \ -+ " %{m" OPTSTR_##NAME "=*: %:set_m_parm(" OPTSTR_##NAME " %*)}" \ -+ -+#define DRIVER_HANDLE_MACHINE_OPTIONS \ -+ " %:driver_init()" \ -+ " %{c|S|E|nostdlib: %:set_no_link()}" \ -+ " %{nostartfiles: %{nodefaultlibs: %:set_no_link()}}" \ -+ LA_SET_PARM_SPEC (ABI_BASE) \ -+ LA_SET_PARM_SPEC (ARCH) \ -+ LA_SET_PARM_SPEC (TUNE) \ -+ LA_SET_PARM_SPEC (ISA_EXT_FPU) \ -+ LA_SET_PARM_SPEC (ISA_EXT_SIMD) \ -+ LA_SET_PARM_SPEC (CMODEL) \ -+ " %:get_normalized_m_opts()" -+ -+#define DRIVER_SELF_SPECS \ -+ DRIVER_HANDLE_MACHINE_OPTIONS -+ -+/* ABI spec strings. */ -+#define ABI_GRLEN_SPEC \ -+ "%{mabi=lp64*:64}" \ -+ -+#define ABI_SPEC \ -+ "%{mabi=lp64d:lp64d}" \ -+ "%{mabi=lp64f:lp64f}" \ -+ "%{mabi=lp64s:lp64s}" \ -+ -+#endif /* LOONGARCH_DRIVER_H */ -diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def -index a10a025ba..1ef4e2dc8 100644 ---- a/gcc/config/loongarch/loongarch-ftypes.def -+++ b/gcc/config/loongarch/loongarch-ftypes.def -@@ -1,5 +1,7 @@ --/* Definitions of prototypes for LARCH built-in functions. -*- C -*- -- Copyright (C) 2007-2018 Free Software Foundation, Inc. -+/* Definitions of prototypes for LoongArch built-in functions. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Co. Ltd. -+ Based on MIPS target for GNU compiler. - - This file is part of GCC. - -@@ -18,11 +20,11 @@ along with GCC; see the file COPYING3. If not see - . */ - - /* Invoke DEF_LARCH_FTYPE (NARGS, LIST) for each prototype used by -- LARCH built-in functions, where: -+ LoongArch built-in functions, where: - - NARGS is the number of arguments. - LIST contains the return-type code followed by the codes for each -- argument type. -+ argument type. - - Argument- and return-type codes are either modes or one of the following: - -@@ -30,65 +32,55 @@ along with GCC; see the file COPYING3. If not see - INT for integer_type_node - POINTER for ptr_type_node - -- (we don't use PTR because that's a ANSI-compatibillity macro). -+ (we don't use PTR because that's a ANSI-compatibility macro). - - Please keep this list lexicographically sorted by the LIST argument. */ --DEF_LARCH_FTYPE (1, (DF, DF)) --DEF_LARCH_FTYPE (2, (DF, DF, DF)) --DEF_LARCH_FTYPE (1, (DF, V2DF)) --DEF_LARCH_FTYPE (1, (DF, V4DF)) - --DEF_LARCH_FTYPE (1, (DI, DI)) --DEF_LARCH_FTYPE (1, (DI, SI)) --DEF_LARCH_FTYPE (1, (DI, UQI)) --DEF_LARCH_FTYPE (1, (UDI, USI)) -+/* Non-vector builtin types. 
*/ -+ - DEF_LARCH_FTYPE (1, (UQI, USI)) --DEF_LARCH_FTYPE (1, (USI, UQI)) - DEF_LARCH_FTYPE (1, (UHI, USI)) --DEF_LARCH_FTYPE (2, (DI, DI, DI)) --DEF_LARCH_FTYPE (2, (DI, DI, SI)) --DEF_LARCH_FTYPE (2, (DI, DI, UQI)) -+DEF_LARCH_FTYPE (1, (USI, USI)) -+DEF_LARCH_FTYPE (1, (UDI, USI)) -+DEF_LARCH_FTYPE (1, (USI, UQI)) -+DEF_LARCH_FTYPE (1, (VOID, USI)) -+ -+DEF_LARCH_FTYPE (2, (VOID, UQI, USI)) -+DEF_LARCH_FTYPE (2, (VOID, UHI, USI)) -+DEF_LARCH_FTYPE (2, (VOID, USI, USI)) -+DEF_LARCH_FTYPE (2, (VOID, UDI, USI)) - DEF_LARCH_FTYPE (2, (VOID, DI, UQI)) - DEF_LARCH_FTYPE (2, (VOID, SI, UQI)) -+DEF_LARCH_FTYPE (2, (VOID, DI, DI)) -+DEF_LARCH_FTYPE (2, (SI, SI, UQI)) -+DEF_LARCH_FTYPE (2, (DI, DI, UQI)) -+DEF_LARCH_FTYPE (2, (SI, QI, SI)) -+DEF_LARCH_FTYPE (2, (SI, HI, SI)) -+DEF_LARCH_FTYPE (2, (SI, SI, SI)) -+DEF_LARCH_FTYPE (2, (SI, DI, SI)) -+DEF_LARCH_FTYPE (2, (USI, USI, USI)) - DEF_LARCH_FTYPE (2, (UDI, UDI, USI)) --DEF_LARCH_FTYPE (3, (DI, DI, SI, SI)) --DEF_LARCH_FTYPE (3, (DI, DI, USI, USI)) --DEF_LARCH_FTYPE (3, (DI, DI, DI, QI)) -+ -+DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) -+DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) -+DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) - DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI)) -+ -+/* Vector builtin types. */ -+ -+DEF_LARCH_FTYPE (1, (DF, V2DF)) -+DEF_LARCH_FTYPE (1, (DF, V4DF)) - DEF_LARCH_FTYPE (3, (DI, DI, V2HI, V2HI)) - DEF_LARCH_FTYPE (3, (DI, DI, V4QI, V4QI)) --DEF_LARCH_FTYPE (2, (DI, POINTER, SI)) --DEF_LARCH_FTYPE (2, (DI, SI, SI)) --DEF_LARCH_FTYPE (2, (DI, USI, USI)) - DEF_LARCH_FTYPE (2, (DI, V2DI, UQI)) - DEF_LARCH_FTYPE (2, (DI, V4DI, UQI)) - --DEF_LARCH_FTYPE (2, (INT, DF, DF)) --DEF_LARCH_FTYPE (2, (INT, SF, SF)) - DEF_LARCH_FTYPE (2, (INT, V2SF, V2SF)) - DEF_LARCH_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF)) - --DEF_LARCH_FTYPE (1, (SF, SF)) --DEF_LARCH_FTYPE (2, (SF, SF, SF)) - DEF_LARCH_FTYPE (1, (SF, V2SF)) - DEF_LARCH_FTYPE (1, (SF, V4SF)) - --DEF_LARCH_FTYPE (2, (SI, DI, SI)) --DEF_LARCH_FTYPE (2, (SI, POINTER, SI)) --DEF_LARCH_FTYPE (1, (SI, SI)) --DEF_LARCH_FTYPE (1, (USI, USI)) --DEF_LARCH_FTYPE (1, (SI, UDI)) --DEF_LARCH_FTYPE (2, (QI, QI, QI)) --DEF_LARCH_FTYPE (2, (HI, HI, HI)) --DEF_LARCH_FTYPE (2, (SI, QI, SI)) --DEF_LARCH_FTYPE (2, (SI, HI, SI)) --DEF_LARCH_FTYPE (2, (SI, SI, SI)) --DEF_LARCH_FTYPE (2, (SI, SI, UQI)) --DEF_LARCH_FTYPE (2, (USI, USI, USI)) --DEF_LARCH_FTYPE (3, (SI, SI, SI, SI)) --DEF_LARCH_FTYPE (3, (SI, SI, SI, QI)) --DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) --DEF_LARCH_FTYPE (1, (SI, UQI)) - DEF_LARCH_FTYPE (1, (SI, UV16QI)) - DEF_LARCH_FTYPE (1, (SI, UV32QI)) - DEF_LARCH_FTYPE (1, (SI, UV2DI)) -@@ -106,9 +98,7 @@ DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI)) - DEF_LARCH_FTYPE (2, (SI, V4SI, UQI)) - DEF_LARCH_FTYPE (2, (SI, V8SI, UQI)) - DEF_LARCH_FTYPE (2, (SI, V8HI, UQI)) --DEF_LARCH_FTYPE (1, (SI, VOID)) - --DEF_LARCH_FTYPE (2, (UDI, UDI, UDI)) - DEF_LARCH_FTYPE (2, (USI, V32QI, UQI)) - DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI)) - DEF_LARCH_FTYPE (2, (USI, V8SI, UQI)) -@@ -119,8 +109,6 @@ DEF_LARCH_FTYPE (2, (UDI, V4DI, UQI)) - DEF_LARCH_FTYPE (2, (USI, V16QI, UQI)) - DEF_LARCH_FTYPE (2, (USI, V4SI, UQI)) - DEF_LARCH_FTYPE (2, (USI, V8HI, UQI)) --DEF_LARCH_FTYPE (1, (USI, VOID)) -- - DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UQI)) - DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, USI)) - DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UV16QI)) -@@ -476,19 +464,6 @@ DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI)) - DEF_LARCH_FTYPE (1, (V8QI, V8QI)) - DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI)) - --DEF_LARCH_FTYPE (2, (VOID, SI, CVPOINTER)) 
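The .def file is an X-macro list: each consumer defines DEF_LARCH_FTYPE, includes the file, and gets one expansion per prototype. A self-contained sketch of the pattern (the enum and the FTYPE_NAME1 helper are illustrative only; the real consumers live in loongarch-builtins.c):

    /* X-macro consumption sketch: expanding a DEF_LARCH_FTYPE list into
       an enum of prototype codes.  */
    #include <stdio.h>

    #define FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B

    enum ftype
    {
    #define DEF_LARCH_FTYPE(NARGS, LIST) FTYPE_NAME##NARGS LIST,
      DEF_LARCH_FTYPE (1, (UQI, USI))   /* -> LARCH_UQI_FTYPE_USI, */
      DEF_LARCH_FTYPE (1, (UHI, USI))   /* -> LARCH_UHI_FTYPE_USI, */
    #undef DEF_LARCH_FTYPE
      LARCH_FTYPE_MAX
    };

    int
    main (void)
    {
      printf ("%d %d %d\n", LARCH_UQI_FTYPE_USI, LARCH_UHI_FTYPE_USI,
              LARCH_FTYPE_MAX);   /* prints "0 1 2" */
      return 0;
    }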
--DEF_LARCH_FTYPE (2, (VOID, SI, SI)) --DEF_LARCH_FTYPE (2, (VOID, DI, DI)) --DEF_LARCH_FTYPE (2, (VOID, UQI, SI)) --DEF_LARCH_FTYPE (1, (VOID, USI)) --DEF_LARCH_FTYPE (2, (VOID, USI, UQI)) --DEF_LARCH_FTYPE (1, (VOID, UHI)) --DEF_LARCH_FTYPE (2, (VOID, UQI, USI)) --DEF_LARCH_FTYPE (2, (VOID, UHI, USI)) --DEF_LARCH_FTYPE (2, (VOID, USI, USI)) --DEF_LARCH_FTYPE (2, (VOID, UDI, USI)) --DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) --DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) - DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI)) - DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI)) - DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, SI)) -@@ -648,36 +623,36 @@ DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI)) - DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI)) - - --DEF_LARCH_FTYPE(2,(V4DI,V16HI,V16HI)) --DEF_LARCH_FTYPE(2,(V4DI,UV4SI,V4SI)) --DEF_LARCH_FTYPE(2,(V8SI,UV16HI,V16HI)) --DEF_LARCH_FTYPE(2,(V16HI,UV32QI,V32QI)) --DEF_LARCH_FTYPE(2,(V4DI,UV8SI,V8SI)) --DEF_LARCH_FTYPE(3,(V4DI,V4DI,V16HI,V16HI)) --DEF_LARCH_FTYPE(2,(UV32QI,V32QI,UV32QI)) --DEF_LARCH_FTYPE(2,(UV16HI,V16HI,UV16HI)) --DEF_LARCH_FTYPE(2,(UV8SI,V8SI,UV8SI)) --DEF_LARCH_FTYPE(2,(UV4DI,V4DI,UV4DI)) --DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV4DI,V4DI)) --DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV8SI,V8SI)) --DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV16HI,V16HI)) --DEF_LARCH_FTYPE(3,(V16HI,V16HI,UV32QI,V32QI)) --DEF_LARCH_FTYPE(2,(V4DI,UV4DI,V4DI)) --DEF_LARCH_FTYPE(2,(V8SI,V32QI,V32QI)) --DEF_LARCH_FTYPE(2,(UV4DI,UV16HI,UV16HI)) --DEF_LARCH_FTYPE(2,(V4DI,UV16HI,V16HI)) --DEF_LARCH_FTYPE(3,(V8SI,V8SI,V32QI,V32QI)) --DEF_LARCH_FTYPE(3,(UV8SI,UV8SI,UV32QI,UV32QI)) --DEF_LARCH_FTYPE(3,(UV4DI,UV4DI,UV16HI,UV16HI)) --DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV32QI,V32QI)) --DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV16HI,V16HI)) --DEF_LARCH_FTYPE(2,(UV8SI,UV32QI,UV32QI)) --DEF_LARCH_FTYPE(2,(V8SI,UV32QI,V32QI)) -- --DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI)) --DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI)) --DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI)) --DEF_LARCH_FTYPE(4,(VOID,V2DI,CVPOINTER,SI,UQI)) -+DEF_LARCH_FTYPE (2, (V4DI, V16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV4SI, V4SI)) -+DEF_LARCH_FTYPE (2, (V8SI, UV16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (V16HI, UV32QI, V32QI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV8SI, V8SI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, V16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (UV32QI, V32QI, UV32QI)) -+DEF_LARCH_FTYPE (2, (UV16HI, V16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (UV8SI, V8SI, UV8SI)) -+DEF_LARCH_FTYPE (2, (UV4DI, V4DI, UV4DI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV4DI, V4DI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV8SI, V8SI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, V16HI)) -+DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, V32QI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV4DI, V4DI)) -+DEF_LARCH_FTYPE (2, (V8SI, V32QI, V32QI)) -+DEF_LARCH_FTYPE (2, (UV4DI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (2, (V4DI, UV16HI, V16HI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, V32QI, V32QI)) -+DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV16HI, UV16HI)) -+DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV32QI, V32QI)) -+DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV16HI, V16HI)) -+DEF_LARCH_FTYPE (2, (UV8SI, UV32QI, UV32QI)) -+DEF_LARCH_FTYPE (2, (V8SI, UV32QI, V32QI)) -+ -+DEF_LARCH_FTYPE (4, (VOID, V16QI, CVPOINTER, SI, UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V8HI, CVPOINTER, SI, UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V4SI, CVPOINTER, SI, UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V2DI, CVPOINTER, SI, UQI)) - - DEF_LARCH_FTYPE (2, (DI, V16QI, UQI)) - DEF_LARCH_FTYPE (2, (DI, V8HI, UQI)) -@@ -699,16 +674,16 
@@ DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, V16HI, USI)) - DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, V8SI, USI)) - DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, V4DI, USI)) - --DEF_LARCH_FTYPE(4,(VOID,V32QI,CVPOINTER,SI,UQI)) --DEF_LARCH_FTYPE(4,(VOID,V16HI,CVPOINTER,SI,UQI)) --DEF_LARCH_FTYPE(4,(VOID,V8SI,CVPOINTER,SI,UQI)) --DEF_LARCH_FTYPE(4,(VOID,V4DI,CVPOINTER,SI,UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V32QI, CVPOINTER, SI, UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V16HI, CVPOINTER, SI, UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V8SI, CVPOINTER, SI, UQI)) -+DEF_LARCH_FTYPE (4, (VOID, V4DI, CVPOINTER, SI, UQI)) - --DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI)) --DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER)) --DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER)) --DEF_LARCH_FTYPE(2,(V32QI,CVPOINTER,CVPOINTER)) --DEF_LARCH_FTYPE(3,(VOID,V32QI,CVPOINTER,CVPOINTER)) -+DEF_LARCH_FTYPE (1, (BOOLEAN, V16QI)) -+DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, CVPOINTER)) -+DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, CVPOINTER)) -+DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, CVPOINTER)) -+DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, CVPOINTER)) - - DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI)) - DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI)) -diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def -index fe5bc38d9..53392b484 100644 ---- a/gcc/config/loongarch/loongarch-modes.def -+++ b/gcc/config/loongarch/loongarch-modes.def -@@ -1,5 +1,7 @@ --/* LARCH extra machine modes. -- Copyright (C) 2003-2018 Free Software Foundation, Inc. -+/* LoongArch extra machine modes. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Co. Ltd. -+ Based on MIPS target for GNU compiler. - - This file is part of GCC. - -diff --git a/gcc/config/loongarch/loongarch-opts.c b/gcc/config/loongarch/loongarch-opts.c -new file mode 100644 -index 000000000..cf11f67d1 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-opts.c -@@ -0,0 +1,725 @@ -+#define IN_TARGET_CODE 1 -+ -+#include "config.h" -+#include "system.h" -+#include "coretypes.h" -+#include "tm.h" -+#include "obstack.h" -+#include "diagnostic-core.h" -+ -+#include "loongarch-cpu.h" -+#include "loongarch-opts.h" -+#include "loongarch-str.h" -+#include "loongarch-def.h" -+ -+struct loongarch_target la_target; -+ -+/* ABI-related configuration. */ -+#define ABI_COUNT (sizeof(abi_priority_list)/sizeof(struct loongarch_abi)) -+static const struct loongarch_abi -+abi_priority_list[] = { -+ {ABI_BASE_LP64D, ABI_EXT_BASE}, -+ {ABI_BASE_LP64F, ABI_EXT_BASE}, -+ {ABI_BASE_LP64S, ABI_EXT_BASE}, -+}; -+ -+/* Initialize enabled_abi_types from TM_MULTILIB_LIST. 
*/ -+#ifdef LA_DISABLE_MULTILIB -+#define MULTILIB_LIST_LEN 1 -+#else -+#define MULTILIB_LIST_LEN (sizeof (tm_multilib_list) / sizeof (int) / 2) -+static const int tm_multilib_list[] = { TM_MULTILIB_LIST }; -+#endif -+static int enabled_abi_types[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { 0 }; -+ -+#define isa_required(ABI) (abi_minimal_isa[(ABI).base][(ABI).ext]) -+extern "C" const struct loongarch_isa -+abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES]; -+ -+static inline int -+is_multilib_enabled (struct loongarch_abi abi) -+{ -+ return enabled_abi_types[abi.base][abi.ext]; -+} -+ -+static void -+init_enabled_abi_types () -+{ -+#ifdef LA_DISABLE_MULTILIB -+ enabled_abi_types[DEFAULT_ABI_BASE][DEFAULT_ABI_EXT] = 1; -+#else -+ int abi_base, abi_ext; -+ for (unsigned int i = 0; i < MULTILIB_LIST_LEN; i++) -+ { -+ abi_base = tm_multilib_list[i << 1]; -+ abi_ext = tm_multilib_list[(i << 1) + 1]; -+ enabled_abi_types[abi_base][abi_ext] = 1; -+ } -+#endif -+} -+ -+/* String processing. */ -+static struct obstack msg_obstack; -+#define APPEND_STRING(STR) obstack_grow (&msg_obstack, STR, strlen(STR)); -+#define APPEND1(CH) obstack_1grow(&msg_obstack, CH); -+ -+static const char* abi_str (struct loongarch_abi abi); -+static const char* isa_str (const struct loongarch_isa *isa, char separator); -+static const char* arch_str (const struct loongarch_target *target); -+static const char* multilib_enabled_abi_list (); /* Misc */ -+static struct loongarch_abi isa_default_abi (const struct loongarch_isa *isa); -+static int isa_base_compat_p (const struct loongarch_isa *set1, -+ const struct loongarch_isa *set2); -+static int isa_fpu_compat_p (const struct loongarch_isa *set1, -+ const struct loongarch_isa *set2); -+static int abi_compat_p (const struct loongarch_isa *isa, -+ struct loongarch_abi abi); -+static int abi_default_cpu_arch (struct loongarch_abi abi, struct loongarch_isa *isa); -+ -+/* Mandatory configure-time defaults. */ -+#ifndef DEFAULT_ABI_BASE -+#error missing definition of DEFAULT_ABI_BASE in ${tm_defines}. -+#endif -+ -+#ifndef DEFAULT_ABI_EXT -+#error missing definition of DEFAULT_ABI_EXT in ${tm_defines}. -+#endif -+ -+#ifndef DEFAULT_CPU_ARCH -+#error missing definition of DEFAULT_CPU_ARCH in ${tm_defines}. -+#endif -+ -+/* Optional configure-time defaults. */ -+#ifdef DEFAULT_CPU_TUNE -+static int with_default_tune = 1; -+#else -+#define DEFAULT_CPU_TUNE -1 -+static int with_default_tune = 0; -+#endif -+ -+#ifdef DEFAULT_ISA_EXT_FPU -+static int with_default_fpu = 1; -+#else -+#define DEFAULT_ISA_EXT_FPU -1 -+static int with_default_fpu = 0; -+#endif -+ -+#ifdef DEFAULT_ISA_EXT_SIMD -+static int with_default_simd = 1; -+#else -+#define DEFAULT_ISA_EXT_SIMD -1 -+static int with_default_simd = 0; -+#endif -+ -+ -+/* Initialize loongarch_target from separate option variables. */ -+ -+void -+loongarch_init_target (struct loongarch_target *target, -+ int cpu_arch, int cpu_tune, int fpu, int simd, -+ int abi_base, int abi_ext, int cmodel) -+{ -+ if (!target) -+ return; -+ target->cpu_arch = cpu_arch; -+ target->cpu_tune = cpu_tune; -+ target->isa.fpu = fpu; -+ target->isa.simd = simd; -+ target->abi.base = abi_base; -+ target->abi.ext = abi_ext; -+ target->cmodel = cmodel; -+} -+ -+ -+/* Handle combinations of -m parameters -+ (see loongarch.opt and loongarch-opts.h). 
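TM_MULTILIB_LIST is emitted by configure as a flat list of (abi_base, abi_ext) integer pairs; init_enabled_abi_types above simply folds it into a 2-D lookup table. A tiny sketch of that decoding, with invented pair values:

    /* Decoding a flat (abi_base, abi_ext) pair list into an enable table,
       as init_enabled_abi_types does.  Pair values are invented.  */
    #include <stdio.h>

    #define TM_MULTILIB_LIST 0, 0, 2, 0   /* e.g. lp64d/base, lp64s/base */
    static const int tm_multilib_list[] = { TM_MULTILIB_LIST };
    static int enabled_abi_types[3][1];

    int
    main (void)
    {
      unsigned n = sizeof (tm_multilib_list) / sizeof (int) / 2;
      for (unsigned i = 0; i < n; i++)
        enabled_abi_types[tm_multilib_list[i << 1]]
                         [tm_multilib_list[(i << 1) + 1]] = 1;

      for (int b = 0; b < 3; b++)
        printf ("abi base %d: %s\n", b,
                enabled_abi_types[b][0] ? "enabled" : "disabled");
      return 0;
    }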
*/ -+ -+void -+loongarch_config_target (struct loongarch_target *target, -+ struct loongarch_flags *flags, -+ int follow_multilib_list_p) -+{ -+ struct loongarch_target t; -+ if (!target) -+ return; -+ -+ /* Initialization */ -+ init_enabled_abi_types (); -+ obstack_init (&msg_obstack); -+ -+ struct { -+ int arch, tune, fpu, simd, abi_base, abi_ext, cmodel, abi_flt; -+ } constrained = { -+ M_OPT_ABSENT (target->cpu_arch) ? 0 : 1, -+ M_OPT_ABSENT (target->cpu_tune) ? 0 : 1, -+ M_OPT_ABSENT (target->isa.fpu) ? 0 : 1, -+ M_OPT_ABSENT (target->isa.simd) ? 0 : 1, -+ M_OPT_ABSENT (target->abi.base) ? 0 : 1, -+ M_OPT_ABSENT (target->abi.ext) ? 0 : 1, -+ M_OPT_ABSENT (target->cmodel) ? 0 : 1, -+ M_OPT_ABSENT (target->abi.base) ? 0 : 1, -+ }; -+ -+ /* 1. Target ABI */ -+ if (constrained.abi_base && target->abi.base >= N_ABI_BASE_TYPES) -+ /* Special treatment of legacy options ("-mabi=lp64") -+ in the GCC driver. */ -+ switch (target->abi.base) -+ { -+ case ABI_BASE_LP64: -+ t.abi.base = TO_LP64_ABI_BASE (DEFAULT_ABI_BASE); -+ constrained.abi_flt = 0; -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ else if (constrained.abi_base) -+ t.abi.base = target->abi.base; -+ else -+ t.abi.base = DEFAULT_ABI_BASE; -+ -+ t.abi.ext = constrained.abi_ext ? target->abi.ext : DEFAULT_ABI_EXT; -+ -+ /* Process -m*-float flags */ -+ if (flags && !M_OPT_ABSENT (flags->flt)) -+ { -+ /* Modifying the original "target" here makes it easier to write the -+ t.isa.fpu assignment below, because otherwise there would be three -+ levels of precedence (-m*-float / -mfpu / -march) to be handled -+ (now the first two are merged). */ -+ -+ target->isa.fpu = flags->flt; -+ constrained.fpu = 1; -+ -+ /* The target ISA is not ready yet, but (isa_required (t.abi) -+ + forced fpu) is enough for computing the forced base ABI. */ -+ -+ struct loongarch_isa force_isa = isa_required (t.abi); -+ force_isa.fpu = flags->flt; -+ -+ struct loongarch_abi force_abi; -+ force_abi.base = isa_default_abi (&force_isa).base; -+ -+ if (constrained.abi_base && constrained.abi_flt -+ && (t.abi.base != force_abi.base)) -+ { -+ force_abi.ext = t.abi.ext; -+ inform (UNKNOWN_LOCATION, -+ "%<-m%s%> overrides %<-m%s=%s%>, adjusting ABI to %qs", -+ flags->flt_str, OPTSTR_ABI_BASE, -+ loongarch_abi_base_strings[t.abi.base], -+ abi_str (force_abi)); -+ } -+ -+ t.abi.base = force_abi.base; -+ constrained.abi_flt = 1; -+ } -+ -+#ifdef LA_DISABLE_MULTILIB -+ if (follow_multilib_list_p) -+ if (t.abi.base != DEFAULT_ABI_BASE || t.abi.ext != DEFAULT_ABI_EXT) -+ { -+ static const struct loongarch_abi default_abi -+ = {DEFAULT_ABI_BASE, DEFAULT_ABI_EXT}; -+ -+ warning (0, "ABI changed (%qs to %qs) while multilib is disabled", -+ abi_str (default_abi), abi_str (t.abi)); -+ } -+#endif -+ -+ /* 2. Target CPU */ -+ t.cpu_arch = constrained.arch ? target->cpu_arch : DEFAULT_CPU_ARCH; -+ -+ /* If cpu_tune is set by neither -mtune nor --with-tune, -+ the current cpu_arch is used as its default. */ -+ t.cpu_tune = constrained.tune ? target->cpu_tune -+ : (constrained.arch ? target->cpu_arch : -+ (with_default_tune ? DEFAULT_CPU_TUNE : DEFAULT_CPU_ARCH)); -+ -+ -+ /* Handle -march/tune=native */ -+#ifdef __loongarch__ -+ /* For native compilers, gather local CPU information -+ and fill the "CPU_NATIVE" index of arrays defined in -+ loongarch-cpu.c. 
*/ -+ -+ fill_native_cpu_config (&t); -+ -+#else -+ if (t.cpu_arch == CPU_NATIVE) -+ fatal_error (UNKNOWN_LOCATION, -+ "%qs does not work on a cross compiler", -+ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); -+ -+ else if (t.cpu_tune == CPU_NATIVE) -+ fatal_error (UNKNOWN_LOCATION, -+ "%qs does not work on a cross compiler", -+ "-m" OPTSTR_TUNE "=" STR_CPU_NATIVE); -+#endif -+ -+ /* Handle -march/tune=abi-default */ -+ if (t.cpu_tune == CPU_ABI_DEFAULT) -+ t.cpu_tune = abi_default_cpu_arch (t.abi, NULL); -+ -+ if (t.cpu_arch == CPU_ABI_DEFAULT) -+ { -+ t.cpu_arch = abi_default_cpu_arch (t.abi, &(t.isa)); -+ loongarch_cpu_default_isa[t.cpu_arch] = t.isa; -+ } -+ -+ /* 3. Target base ISA */ -+config_target_isa: -+ -+ /* Get default ISA from "-march" or its default value. */ -+ t.isa = loongarch_cpu_default_isa[t.cpu_arch]; -+ -+ /* Apply incremental changes. */ -+ /* "-march=native" overrides the default FPU type. */ -+ -+ t.isa.fpu = constrained.fpu ? target->isa.fpu : -+ (constrained.arch ? t.isa.fpu : -+ (with_default_fpu ? DEFAULT_ISA_EXT_FPU : t.isa.fpu)); -+ -+ t.isa.simd = constrained.simd ? target->isa.simd : -+ (constrained.arch ? t.isa.simd : -+ (with_default_simd ? DEFAULT_ISA_EXT_SIMD : t.isa.simd)); -+ -+ /* Apply -m[no-]lsx and -m[no-]lasx flags. */ -+ if (flags) -+ for (int i = 0; i < 2; i++) -+ { -+ switch (SX_FLAG_TYPE (flags->sx[i])) -+ { -+ case ISA_EXT_SIMD_LSX: -+ constrained.simd = 1; -+ if (flags->sx[i] > 0 && t.isa.simd != ISA_EXT_SIMD_LASX) -+ t.isa.simd = ISA_EXT_SIMD_LSX; -+ else if (flags->sx[i] < 0) -+ t.isa.simd = ISA_EXT_NONE; -+ break; -+ -+ case ISA_EXT_SIMD_LASX: -+ constrained.simd = 1; -+ if (flags->sx[i] < 0 && t.isa.simd == ISA_EXT_SIMD_LASX) -+ t.isa.simd = ISA_EXT_SIMD_LSX; -+ else if (flags->sx[i] > 0) -+ t.isa.simd = ISA_EXT_SIMD_LASX; -+ break; -+ -+ case 0: -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ } -+ -+ /* All SIMD extensions imply a 64-bit FPU: -+ - silently adjust t.isa.fpu to "fpu64" if it is unconstrained. -+ - warn if -msingle-float / -msoft-float is on, -+ then disable SIMD extensions (done in driver). -+ - abort if -mfpu=0 / -mfpu=32 is forced. */ -+ -+ if (t.isa.simd != ISA_EXT_NONE && t.isa.fpu != ISA_EXT_FPU64) -+ { -+ if (!constrained.fpu) -+ { -+ /* As long as the arch-default "t.isa.simd" is set to non-zero -+ for an element "t" in loongarch_cpu_default_isa, "t.isa.fpu" -+ should be set to "ISA_EXT_FPU64" accordingly. Thus reaching -+ here must be the result of forcing -mlsx/-mlasx explicitly. */ -+ gcc_assert (constrained.simd); -+ -+ inform (UNKNOWN_LOCATION, -+ "enabling %qs promotes %<%s%s%> to %<%s%s%>", -+ loongarch_isa_ext_strings[t.isa.simd], -+ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[t.isa.fpu], -+ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]); -+ -+ t.isa.fpu = ISA_EXT_FPU64; -+ } -+ else if (flags && (flags->flt == ISA_EXT_NONE || flags->flt == ISA_EXT_FPU32)) -+ { -+ if (constrained.simd) -+ inform (UNKNOWN_LOCATION, -+ "%qs is disabled by %<-m%s%>, because it requires %<%s%s%>", -+ loongarch_isa_ext_strings[t.isa.simd], flags->flt_str, -+ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]); -+ -+ t.isa.simd = ISA_EXT_NONE; -+ } -+ else -+ { -+ /* -mfpu=0 / -mfpu=32 is set. */ -+ if (constrained.simd) -+ fatal_error (UNKNOWN_LOCATION, -+ "%<-m%s=%s%> conflicts with %qs, which requires %<%s%s%>", -+ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[t.isa.fpu], -+ loongarch_isa_ext_strings[t.isa.simd], -+ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]); -+ -+ /* Same as above. 
*/ -+ t.isa.simd = ISA_EXT_NONE; -+ } -+ } -+ -+ -+ /* 4. ABI-ISA compatibility */ -+ /* Note: -+ - There IS a unique default -march value for each ABI type -+ (config.gcc: triplet -> abi -> default arch). -+ -+ - If the base ABI is incompatible with the default arch, -+ try using the default -march it implies (and mark it -+ as "constrained" this time), then re-apply step 3. */ -+ -+ struct loongarch_abi abi_tmp; -+ const struct loongarch_isa* isa_min; -+ -+ abi_tmp = t.abi; -+ isa_min = &isa_required (abi_tmp); -+ -+ if (isa_base_compat_p (&t.isa, isa_min)); /* OK */ -+ else if (!constrained.arch) -+ { -+ /* Base architecture can only be implied by -march, -+ so we adjust that first if it is not constrained. */ -+ int fallback_arch = abi_default_cpu_arch (t.abi, NULL); -+ -+ if (t.cpu_arch == CPU_NATIVE) -+ warning (0, "your native CPU architecture (%qs) " -+ "does not support %qs ABI, falling back to %<-m%s=%s%>", -+ arch_str (&t), abi_str (t.abi), OPTSTR_ARCH, -+ loongarch_cpu_strings[fallback_arch]); -+ else -+ warning (0, "default CPU architecture (%qs) " -+ "does not support %qs ABI, falling back to %<-m%s=%s%>", -+ arch_str (&t), abi_str (t.abi), OPTSTR_ARCH, -+ loongarch_cpu_strings[fallback_arch]); -+ -+ t.cpu_arch = fallback_arch; -+ constrained.arch = 1; -+ goto config_target_isa; -+ } -+ else if (!constrained.abi_base) -+ { -+ /* If -march is given while -mabi is not, -+ try selecting another base ABI type. */ -+ abi_tmp.base = isa_default_abi (&t.isa).base; -+ } -+ else -+ goto fatal; -+ -+ if (isa_fpu_compat_p (&t.isa, isa_min)); /* OK */ -+ else if (!constrained.fpu) -+ t.isa.fpu = isa_min->fpu; -+ else if (!constrained.abi_base) -+ /* If -march is compatible with the default ABI -+ while -mfpu is not. */ -+ abi_tmp.base = isa_default_abi (&t.isa).base; -+ else -+ goto fatal; -+ -+ if (0) -+fatal: -+ fatal_error (UNKNOWN_LOCATION, -+ "unable to implement ABI %qs with instruction set %qs", -+ abi_str (t.abi), isa_str (&t.isa, '/')); -+ -+ -+ /* Using the fallback ABI. */ -+ if (abi_tmp.base != t.abi.base || abi_tmp.ext != t.abi.ext) -+ { -+ /* This flag is only set in the GCC driver. */ -+ if (follow_multilib_list_p) -+ { -+ -+ /* Continue falling back until we find a feasible ABI type -+ enabled by TM_MULTILIB_LIST. */ -+ if (!is_multilib_enabled (abi_tmp)) -+ { -+ for (unsigned int i = 0; i < ABI_COUNT; i++) -+ { -+ if (is_multilib_enabled (abi_priority_list[i]) -+ && abi_compat_p (&t.isa, abi_priority_list[i])) -+ { -+ abi_tmp = abi_priority_list[i]; -+ -+ warning (0, "ABI %qs cannot be implemented due to " -+ "limited instruction set %qs, " -+ "falling back to %qs", abi_str (t.abi), -+ isa_str (&t.isa, '/'), abi_str (abi_tmp)); -+ -+ goto fallback; -+ } -+ } -+ -+ /* Otherwise, keep using abi_tmp with a warning. 
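To make the interaction of steps 3 and 4 concrete, a few resolutions implied by the code above, assuming a configuration whose default arch provides fpu64 (e.g. loongarch64/lp64d):

  - -mlsx: SIMD is forced on; the arch default already provides fpu64,
    so no promotion message is issued.
  - -mlasx -msoft-float: the float flag forces the FPU to "none", so the
    SIMD extension is dropped with an inform, since LSX and LASX both
    require fpu64.
  - -mlsx -mfpu=32: fatal error; an explicitly constrained 32-bit FPU
    cannot satisfy an explicitly requested SIMD extension.
  - -march=loongarch64 -mabi=lp64s: accepted; isa_fpu_compat_p treats a
    soft-float ABI as implementable on any FPU.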
*/ -+#ifdef LA_DISABLE_MULTILIB -+ warning (0, "instruction set %qs cannot implement " -+ "default ABI %qs, falling back to %qs", -+ isa_str (&t.isa, '/'), abi_str (t.abi), -+ abi_str (abi_tmp)); -+#else -+ warning (0, "no multilib-enabled ABI (%qs) can be implemented " -+ "with instruction set %qs, falling back to %qs", -+ multilib_enabled_abi_list (), -+ isa_str (&t.isa, '/'), abi_str (abi_tmp)); -+#endif -+ } -+ } -+ -+fallback: -+ t.abi = abi_tmp; -+ } -+ else if (follow_multilib_list_p) -+ { -+ if (!is_multilib_enabled (t.abi)) -+ { -+ inform (UNKNOWN_LOCATION, -+ "ABI %qs is not enabled at configure-time, " -+ "the linker might report an error", abi_str (t.abi)); -+ -+ inform (UNKNOWN_LOCATION, "ABI with startfiles: %s", -+ multilib_enabled_abi_list ()); -+ } -+ } -+ -+ -+ /* 5. Target code model */ -+ t.cmodel = constrained.cmodel ? target->cmodel : CMODEL_NORMAL; -+ -+ /* Cleanup and return. */ -+ obstack_free (&msg_obstack, NULL); -+ *target = t; -+} -+ -+/* Returns the default ABI for the given instruction set. */ -+static inline struct loongarch_abi -+isa_default_abi (const struct loongarch_isa *isa) -+{ -+ struct loongarch_abi abi; -+ -+ switch (isa->fpu) -+ { -+ case ISA_EXT_FPU64: -+ if (isa->base == ISA_BASE_LA64V100) -+ abi.base = ABI_BASE_LP64D; -+ break; -+ -+ case ISA_EXT_FPU32: -+ if (isa->base == ISA_BASE_LA64V100) -+ abi.base = ABI_BASE_LP64F; -+ break; -+ -+ case ISA_EXT_NONE: -+ if (isa->base == ISA_BASE_LA64V100) -+ abi.base = ABI_BASE_LP64S; -+ break; -+ -+ default: -+ gcc_unreachable (); -+ } -+ -+ abi.ext = ABI_EXT_BASE; -+ return abi; -+} -+ -+/* Check if set2 is a subset of set1. */ -+static inline int -+isa_base_compat_p (const struct loongarch_isa *set1, -+ const struct loongarch_isa *set2) -+{ -+ switch (set2->base) -+ { -+ case ISA_BASE_LA64V100: -+ return (set1->base == ISA_BASE_LA64V100); -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+static inline int -+isa_fpu_compat_p (const struct loongarch_isa *set1, -+ const struct loongarch_isa *set2) -+{ -+ switch (set2->fpu) -+ { -+ case ISA_EXT_FPU64: -+ return set1->fpu == ISA_EXT_FPU64; -+ -+ case ISA_EXT_FPU32: -+ return set1->fpu == ISA_EXT_FPU32 || set1->fpu == ISA_EXT_FPU64; -+ -+ case ISA_EXT_NONE: -+ return 1; -+ -+ default: -+ gcc_unreachable (); -+ } -+ -+} -+ -+static inline int -+abi_compat_p (const struct loongarch_isa *isa, struct loongarch_abi abi) -+{ -+ int compatible = 1; -+ const struct loongarch_isa *isa2 = &isa_required (abi); -+ -+ /* Append conditionals for new ISA components below. */ -+ compatible = compatible && isa_base_compat_p (isa, isa2); -+ compatible = compatible && isa_fpu_compat_p (isa, isa2); -+ return compatible; -+} -+ -+/* The behavior of this function should be consistent -+ with config.gcc. */ -+static int -+abi_default_cpu_arch (struct loongarch_abi abi, -+ struct loongarch_isa *isa) -+{ -+ static struct loongarch_isa tmp; -+ if (!isa) -+ isa = &tmp; -+ -+ if (abi.ext == ABI_EXT_BASE) -+ switch (abi.base) -+ { -+ case ABI_BASE_LP64D: -+ case ABI_BASE_LP64F: -+ case ABI_BASE_LP64S: -+ *isa = isa_required (abi); -+ return CPU_LOONGARCH64; -+ } -+ gcc_unreachable (); -+} -+ -+static const char* -+abi_str (struct loongarch_abi abi) -+{ -+ /* "/base" can be omitted. 
*/ -+ if (abi.ext == ABI_EXT_BASE) -+ return (const char*) -+ obstack_copy0 (&msg_obstack, loongarch_abi_base_strings[abi.base], -+ strlen (loongarch_abi_base_strings[abi.base])); -+ else -+ { -+ APPEND_STRING (loongarch_abi_base_strings[abi.base]) -+ APPEND1 ('/') -+ APPEND_STRING (loongarch_abi_ext_strings[abi.ext]) -+ APPEND1 ('\0') -+ -+ return XOBFINISH (&msg_obstack, const char *); -+ } -+} -+ -+static const char* -+isa_str (const struct loongarch_isa *isa, char separator) -+{ -+ APPEND_STRING (loongarch_isa_base_strings[isa->base]) -+ APPEND1 (separator) -+ -+ if (isa->fpu == ISA_EXT_NONE) -+ { -+ APPEND_STRING ("no" OPTSTR_ISA_EXT_FPU) -+ } -+ else -+ { -+ APPEND_STRING (OPTSTR_ISA_EXT_FPU) -+ APPEND_STRING (loongarch_isa_ext_strings[isa->fpu]) -+ } -+ -+ switch (isa->simd) -+ { -+ case ISA_EXT_SIMD_LSX: -+ case ISA_EXT_SIMD_LASX: -+ APPEND1 (separator); -+ APPEND_STRING (loongarch_isa_ext_strings[isa->simd]); -+ break; -+ -+ default: -+ gcc_assert (isa->simd == 0); -+ } -+ APPEND1 ('\0') -+ -+ /* Add more here. */ -+ -+ return XOBFINISH (&msg_obstack, const char *); -+} -+ -+static const char* -+arch_str (const struct loongarch_target *target) -+{ -+ if (target->cpu_arch == CPU_NATIVE) -+ { -+ /* Describe a native CPU with unknown PRID. */ -+ const char* isa_string = isa_str (&target->isa, ','); -+ APPEND_STRING ("PRID: 0x") -+ APPEND_STRING (get_native_prid_str ()) -+ APPEND_STRING (", ISA features: ") -+ APPEND_STRING (isa_string) -+ } -+ else -+ APPEND_STRING (loongarch_cpu_strings[target->cpu_arch]); -+ -+ APPEND1 ('\0') -+ return XOBFINISH (&msg_obstack, const char *); -+} -+ -+static const char* -+multilib_enabled_abi_list () -+{ -+ int enabled_abi_idx[MULTILIB_LIST_LEN] = { 0 }; -+ const char* enabled_abi_str[MULTILIB_LIST_LEN] = { NULL }; -+ unsigned int j = 0; -+ -+ for (unsigned int i = 0; i < ABI_COUNT && j < MULTILIB_LIST_LEN; i++) -+ { -+ if (enabled_abi_types[abi_priority_list[i].base] -+ [abi_priority_list[i].ext]) -+ { -+ enabled_abi_idx[j++] = i; -+ } -+ } -+ -+ for (unsigned int k = 0; k < j; k++) -+ { -+ enabled_abi_str[k] = abi_str (abi_priority_list[enabled_abi_idx[k]]); -+ } -+ -+ for (unsigned int k = 0; k < j - 1; k++) -+ { -+ APPEND_STRING (enabled_abi_str[k]) -+ APPEND1 (',') -+ APPEND1 (' ') -+ } -+ APPEND_STRING (enabled_abi_str[j - 1]) -+ APPEND1 ('\0') -+ -+ return XOBFINISH (&msg_obstack, const char *); -+} -+ -+/* option status feedback for "gcc --help=target -Q" */ -+void -+loongarch_update_gcc_opt_status (struct loongarch_target *target, -+ struct gcc_options *opts, -+ struct gcc_options *opts_set) -+{ -+ (void) opts_set; -+ -+ /* status of -mabi */ -+ opts->x_la_opt_abi_base = target->abi.base; -+ -+ opts->x_target_flags |= -+ IS_LP64_ABI_BASE (target->abi.base) ? MASK_LP64 : 0; -+ -+ /* status of -march and -mtune */ -+ opts->x_la_opt_cpu_arch = target->cpu_arch; -+ opts->x_la_opt_cpu_tune = target->cpu_tune; -+ -+ /* status of -mfpu and -msimd */ -+ opts->x_la_opt_fpu = target->isa.fpu; -+ opts->x_la_opt_simd = target->isa.simd; -+} -diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h -index 21639fa74..33eb8b2da 100644 ---- a/gcc/config/loongarch/loongarch-opts.h -+++ b/gcc/config/loongarch/loongarch-opts.h -@@ -1,5 +1,6 @@ --/* Definitions for option handling for LARCH. -- Copyright (C) 1989-2018 Free Software Foundation, Inc. -+/* Definitions for loongarch-specific option handling. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. - - This file is part of GCC. 
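The diagnostic strings above are assembled on msg_obstack through the APPEND_STRING / APPEND1 macros and handed out with XOBFINISH, libiberty's typed wrapper around obstack_finish. A stand-alone sketch of the same pattern using the plain <obstack.h> API:

    /* Building a string incrementally on an obstack, as abi_str and
       isa_str do above.  */
    #include <obstack.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define obstack_chunk_alloc malloc
    #define obstack_chunk_free free

    int
    main (void)
    {
      struct obstack ob;
      obstack_init (&ob);

      obstack_grow (&ob, "lp64d", 5);   /* like APPEND_STRING */
      obstack_1grow (&ob, '/');         /* like APPEND1 */
      obstack_grow0 (&ob, "base", 4);   /* grow + terminating NUL */

      char *s = (char *) obstack_finish (&ob);
      printf ("%s\n", s);               /* prints "lp64d/base" */

      obstack_free (&ob, NULL);         /* releases everything at once */
      return 0;
    }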
- -@@ -17,18 +18,81 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - --#ifndef LARCH_OPTS_H --#define LARCH_OPTS_H -+#ifndef LOONGARCH_OPTS_H -+#define LOONGARCH_OPTS_H - --#define LARCH_ARCH_OPTION_NATIVE -1 -+#include "loongarch-def.h" - -+/* Target configuration */ -+extern struct loongarch_target la_target; - --enum loongarch_code_model { -- LARCH_CMODEL_NORMAL, -- LARCH_CMODEL_TINY, -- LARCH_CMODEL_TINY_STATIC, -- LARCH_CMODEL_LARGE, -- LARCH_CMODEL_EXTREME -+/* Flag status */ -+struct loongarch_flags { -+ int flt; const char* flt_str; -+#define SX_FLAG_TYPE(x) ((x) < 0 ? -(x) : (x)) -+ int sx[2]; - }; - -+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS) -+ -+/* Initialize loongarch_target from separate option variables. */ -+void -+loongarch_init_target (struct loongarch_target *target, -+ int cpu_arch, int cpu_tune, int fpu, int simd, -+ int abi_base, int abi_ext, int cmodel); -+ -+ -+/* Handler for "-m" option combinations, -+ shared by the driver and the compiler proper. */ -+void -+loongarch_config_target (struct loongarch_target *target, -+ struct loongarch_flags *flags, -+ int follow_multilib_list_p); -+ -+/* option status feedback for "gcc --help=target -Q" */ -+void -+loongarch_update_gcc_opt_status (struct loongarch_target *target, -+ struct gcc_options *opts, -+ struct gcc_options *opts_set); - #endif -+ -+ -+/* Macros for common conditional expressions used in loongarch.{c,h,md} */ -+#define TARGET_CMODEL_NORMAL (la_target.cmodel == CMODEL_NORMAL) -+#define TARGET_CMODEL_TINY (la_target.cmodel == CMODEL_TINY) -+#define TARGET_CMODEL_TINY_STATIC (la_target.cmodel == CMODEL_TINY_STATIC) -+#define TARGET_CMODEL_LARGE (la_target.cmodel == CMODEL_LARGE) -+#define TARGET_CMODEL_EXTREME (la_target.cmodel == CMODEL_EXTREME) -+ -+#define TARGET_HARD_FLOAT (la_target.isa.fpu != ISA_EXT_NONE) -+#define TARGET_HARD_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64D \ -+ || la_target.abi.base == ABI_BASE_LP64F) -+ -+#define TARGET_SOFT_FLOAT (la_target.isa.fpu == ISA_EXT_NONE) -+#define TARGET_SOFT_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64S) -+#define TARGET_SINGLE_FLOAT (la_target.isa.fpu == ISA_EXT_FPU32) -+#define TARGET_SINGLE_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64F) -+#define TARGET_DOUBLE_FLOAT (la_target.isa.fpu == ISA_EXT_FPU64) -+#define TARGET_DOUBLE_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64D) -+ -+#define TARGET_64BIT (la_target.isa.base == ISA_BASE_LA64V100) -+#define TARGET_ABI_LP64 (la_target.abi.base == ABI_BASE_LP64D \ -+ || la_target.abi.base == ABI_BASE_LP64F \ -+ || la_target.abi.base == ABI_BASE_LP64S) -+ -+#define ISA_HAS_LSX (la_target.isa.simd == ISA_EXT_SIMD_LSX \ -+ || la_target.isa.simd == ISA_EXT_SIMD_LASX) -+#define ISA_HAS_LASX (la_target.isa.simd == ISA_EXT_SIMD_LASX) -+ -+ -+/* TARGET_ macros for use in *.md template conditionals */ -+#define TARGET_uARCH_LA464 (la_target.cpu_tune == CPU_LA464) -+#define TARGET_uARCH_LA364 (la_target.cpu_tune == CPU_LA364) -+#define TARGET_uARCH_LA264 (la_target.cpu_tune == CPU_LA264) -+#define TARGET_uARCH_LA664 (la_target.cpu_tune == CPU_LA664) -+ -+/* Note: optimize_size may vary across functions, -+ while -m[no]-memcpy imposes a global constraint. 
*/ -+#define TARGET_DO_OPTIMIZE_BLOCK_MOVE_P loongarch_do_optimize_block_move_p() -+ -+#endif /* LOONGARCH_OPTS_H */ -diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h -index c36fdd37d..498d80514 100644 ---- a/gcc/config/loongarch/loongarch-protos.h -+++ b/gcc/config/loongarch/loongarch-protos.h -@@ -1,9 +1,7 @@ --/* Prototypes of target machine for GNU compiler. LARCH version. -+/* Prototypes of target machine for GNU compiler. LoongArch version. - Copyright (C) 1989-2018 Free Software Foundation, Inc. -- Contributed by A. Lichnewsky (lich@inria.inria.fr). -- Changed by Michael Meissner (meissner@osf.org). -- 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and -- Brendan Eich (brendan@microunity.com). -+ Contributed by Loongson Ltd. -+ Based on MIPS target for GNU compiler. - - This file is part of GCC. - -@@ -21,24 +19,8 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - --#ifndef GCC_LARCH_PROTOS_H --#define GCC_LARCH_PROTOS_H -- --/* Describes how a symbol is used. -- -- SYMBOL_CONTEXT_CALL -- The symbol is used as the target of a call instruction. -- -- SYMBOL_CONTEXT_LEA -- The symbol is used in a load-address operation. -- -- SYMBOL_CONTEXT_MEM -- The symbol is used as the address in a MEM. */ --enum loongarch_symbol_context { -- SYMBOL_CONTEXT_CALL, -- SYMBOL_CONTEXT_LEA, -- SYMBOL_CONTEXT_MEM --}; -+#ifndef GCC_LOONGARCH_PROTOS_H -+#define GCC_LOONGARCH_PROTOS_H - - /* Classifies a SYMBOL_REF, LABEL_REF or UNSPEC address. - -@@ -57,67 +39,30 @@ enum loongarch_symbol_type { - SYMBOL_GOT_DISP, - SYMBOL_TLS, - SYMBOL_TLSGD, -- SYMBOL_TLSLDM, -+ SYMBOL_TLSLDM - }; - #define NUM_SYMBOL_TYPES (SYMBOL_TLSLDM + 1) - --/* Classifies a type of call. -- -- LARCH_CALL_NORMAL -- A normal call or call_value pattern. -- -- LARCH_CALL_SIBCALL -- A sibcall or sibcall_value pattern. -- -- LARCH_CALL_EPILOGUE -- A call inserted in the epilogue. */ --enum loongarch_call_type { -- LARCH_CALL_NORMAL, -- LARCH_CALL_SIBCALL, -- LARCH_CALL_EPILOGUE --}; -- --/* Controls the conditions under which certain instructions are split. -- -- SPLIT_IF_NECESSARY -- Only perform splits that are necessary for correctness -- (because no unsplit version exists). -- -- SPLIT_FOR_SPEED -- Perform splits that are necessary for correctness or -- beneficial for code speed. -- -- SPLIT_FOR_SIZE -- Perform splits that are necessary for correctness or -- beneficial for code size. */ --enum loongarch_split_type { -- SPLIT_IF_NECESSARY, -- SPLIT_FOR_SPEED, -- SPLIT_FOR_SIZE --}; - extern const char *const loongarch_fp_conditions[16]; - --extern const char *loongarch_output_gpr_save (unsigned); -+/* Routines implemented in loongarch.c. 
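The trailing-comma removal after SYMBOL_TLSLDM above matters because NUM_SYMBOL_TYPES is derived from the last enumerator, keeping the enum and every table sized by it in sync. The idiom in miniature (sym_type and the name strings are invented for illustration):

#include <stdio.h>

enum sym_type { SYM_GOT_DISP, SYM_TLS, SYM_TLSGD, SYM_TLSLDM };
#define NUM_SYM_TYPES (SYM_TLSLDM + 1)

static const char *const sym_type_names[NUM_SYM_TYPES]
  = { "got_disp", "tls", "tlsgd", "tlsldm" };

int
main (void)
{
  for (int i = 0; i < NUM_SYM_TYPES; i++)
    printf ("%d -> %s\n", i, sym_type_names[i]);
  return 0;
}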
*/ -+extern rtx loongarch_emit_move (rtx, rtx); - extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int); - extern void loongarch_expand_prologue (void); - extern void loongarch_expand_epilogue (bool); - extern bool loongarch_can_use_return_insn (void); --extern rtx loongarch_function_value (const_tree, const_tree, enum machine_mode); --extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_context, -- enum loongarch_symbol_type *); -+ -+extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_type *); - extern int loongarch_regno_mode_ok_for_base_p (int, machine_mode, bool); --extern bool loongarch_stack_address_p (rtx, machine_mode); - extern int loongarch_address_insns (rtx, machine_mode, bool); - extern int loongarch_const_insns (rtx); - extern int loongarch_split_const_insns (rtx); - extern int loongarch_split_128bit_const_insns (rtx); - extern int loongarch_load_store_insns (rtx, rtx_insn *); - extern int loongarch_idiv_insns (machine_mode); --extern rtx loongarch_emit_move (rtx, rtx); - #ifdef RTX_CODE - extern void loongarch_emit_binary (enum rtx_code, rtx, rtx, rtx); - #endif --extern rtx loongarch_pic_base_register (rtx); - extern bool loongarch_split_symbol (rtx, rtx, machine_mode, rtx *); - extern rtx loongarch_unspec_address (rtx, enum loongarch_symbol_type); - extern rtx loongarch_strip_unspec_address (rtx); -@@ -126,9 +71,9 @@ extern bool loongarch_legitimize_move (machine_mode, rtx, rtx); - extern rtx loongarch_legitimize_call_address (rtx); - - extern rtx loongarch_subword (rtx, bool); --extern bool loongarch_split_move_p (rtx, rtx, enum loongarch_split_type); --extern void loongarch_split_move (rtx, rtx, enum loongarch_split_type, rtx); --extern bool loongarch_split_move_insn_p (rtx, rtx, rtx); -+extern bool loongarch_split_move_p (rtx, rtx); -+extern void loongarch_split_move (rtx, rtx, rtx); -+extern bool loongarch_split_move_insn_p (rtx, rtx); - extern void loongarch_split_move_insn (rtx, rtx, rtx); - extern void loongarch_split_128bit_move (rtx, rtx); - extern bool loongarch_split_128bit_move_p (rtx, rtx); -@@ -139,50 +84,29 @@ extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx); - extern void loongarch_split_lsx_fill_d (rtx, rtx); - extern const char *loongarch_output_move (rtx, rtx); - extern bool loongarch_cfun_has_cprestore_slot_p (void); --extern bool loongarch_cprestore_address_p (rtx, bool); - #ifdef RTX_CODE - extern void loongarch_expand_scc (rtx *); - extern bool loongarch_expand_int_vec_cmp (rtx *); - extern bool loongarch_expand_fp_vec_cmp (rtx *); - extern void loongarch_expand_conditional_branch (rtx *); --extern void loongarch_expand_conditional_move (rtx *); -+extern bool loongarch_expand_conditional_move_la464 (rtx *); - extern void loongarch_expand_conditional_trap (rtx); - #endif --extern bool loongarch_get_pic_call_symbol (rtx *, int); - extern void loongarch_set_return_address (rtx, rtx); - extern bool loongarch_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); --extern bool loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); - extern bool loongarch_expand_block_move (rtx, rtx, rtx); - --extern void loongarch_init_cumulative_args (CUMULATIVE_ARGS *, tree); - extern bool loongarch_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT, -- HOST_WIDE_INT, bool); -+ HOST_WIDE_INT, bool); - extern bool loongarch_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT, -- HOST_WIDE_INT); --extern bool loongarch_mem_fits_mode_p (machine_mode mode, rtx x); -+ HOST_WIDE_INT); - extern 
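loongarch_subword and the loongarch_split_move family above decompose a doubleword (128-bit) move into two word-sized moves. The underlying arithmetic, shown on plain integers rather than rtx (subword here is an illustrative stand-in, and unsigned __int128 is a GCC extension):

#include <assert.h>
#include <stdint.h>

static uint64_t
subword (unsigned __int128 v, int high_p)
{
  return high_p ? (uint64_t) (v >> 64) : (uint64_t) v;
}

int
main (void)
{
  unsigned __int128 v
    = ((unsigned __int128) 0x1122334455667788ULL << 64) | 0x99aabbccddeeff00ULL;
  assert (subword (v, 1) == 0x1122334455667788ULL);	/* high word */
  assert (subword (v, 0) == 0x99aabbccddeeff00ULL);	/* low word */
  return 0;
}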
HOST_WIDE_INT loongarch_debugger_offset (rtx, HOST_WIDE_INT); - --extern void loongarch_push_asm_switch (struct loongarch_asm_switch *); --extern void loongarch_pop_asm_switch (struct loongarch_asm_switch *); - extern void loongarch_output_external (FILE *, tree, const char *); - extern void loongarch_output_ascii (FILE *, const char *, size_t); --extern void loongarch_output_aligned_decl_common (FILE *, tree, const char *, -- unsigned HOST_WIDE_INT, -- unsigned int); --extern void loongarch_declare_common_object (FILE *, const char *, -- const char *, unsigned HOST_WIDE_INT, -- unsigned int, bool); --extern void loongarch_declare_object (FILE *, const char *, const char *, -- const char *, ...) ATTRIBUTE_PRINTF_4; --extern void loongarch_declare_object_name (FILE *, const char *, tree); --extern void loongarch_finish_declare_object (FILE *, tree, int, int); --extern void loongarch_set_text_contents_type (FILE *, const char *, -- unsigned long, bool); -- - extern bool loongarch_small_data_pattern_p (rtx); - extern rtx loongarch_rewrite_small_data (rtx); - extern rtx loongarch_return_addr (int, rtx); --extern bool loongarch_must_initialize_gp_p (void); - - extern bool loongarch_const_vector_same_val_p (rtx, machine_mode); - extern bool loongarch_const_vector_same_bytes_p (rtx, machine_mode); -@@ -194,26 +118,27 @@ extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode); - extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool); - extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT); - extern enum reg_class loongarch_secondary_reload_class (enum reg_class, -- machine_mode, -- rtx, bool); -+ machine_mode, -+ rtx, bool); - extern int loongarch_class_max_nregs (enum reg_class, machine_mode); - - extern machine_mode loongarch_hard_regno_caller_save_mode (unsigned int, -- unsigned int, -- machine_mode); -+ unsigned int, -+ machine_mode); - extern int loongarch_adjust_insn_length (rtx_insn *, int); - extern const char *loongarch_output_conditional_branch (rtx_insn *, rtx *, -- const char *, const char *); --extern const char *loongarch_output_order_conditional_branch (rtx_insn *, rtx *, -- bool); --extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, rtx *, -- bool); -+ const char *, -+ const char *); -+extern const char *loongarch_output_order_conditional_branch (rtx_insn *, -+ rtx *, -+ bool); -+extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, -+ rtx *, -+ bool); - extern const char *loongarch_output_division (const char *, rtx *); - extern const char *loongarch_lsx_output_division (const char *, rtx *); - extern const char *loongarch_output_probe_stack_range (rtx, rtx, rtx); - extern bool loongarch_hard_regno_rename_ok (unsigned int, unsigned int); --extern bool loongarch_linked_madd_p (rtx_insn *, rtx_insn *); --extern bool loongarch_store_data_bypass_p (rtx_insn *, rtx_insn *); - extern int loongarch_dspalu_bypass_p (rtx, rtx); - extern rtx loongarch_prefetch_cookie (rtx, rtx); - -@@ -226,9 +151,6 @@ extern const char *current_section_name (void); - extern unsigned int current_section_flags (void); - extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT); - --extern bool and_operands_ok (machine_mode, rtx, rtx); --extern bool loongarch_fmadd_bypass (rtx_insn *, rtx_insn *); -- - union loongarch_gen_fn_ptrs - { - rtx (*fn_8) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx); -@@ -239,25 +161,26 @@ union loongarch_gen_fn_ptrs - }; - - extern void loongarch_expand_atomic_qihi (union 
loongarch_gen_fn_ptrs, -- rtx, rtx, rtx, rtx, rtx); -+ rtx, rtx, rtx, rtx, rtx); - - extern void loongarch_expand_vector_init (rtx, rtx); - extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); -+extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx); -+extern void loongarch_expand_vec_perm_1 (rtx[]); -+extern void loongarch_expand_vector_extract (rtx, rtx, int); -+extern void loongarch_expand_vector_reduc (rtx (*)(rtx, rtx, rtx), rtx, rtx); - - extern int loongarch_ldst_scaled_shift (machine_mode); - extern bool loongarch_signed_immediate_p (unsigned HOST_WIDE_INT, int, int); - extern bool loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int); --extern bool loongarch_load_store_pair_p (bool, rtx *); --extern bool loongarch_movep_target_p (rtx, rtx); - extern bool loongarch_12bit_offset_address_p (rtx, machine_mode); - extern bool loongarch_14bit_shifted_offset_address_p (rtx, machine_mode); -+extern bool loongarch_base_index_address_p (rtx, machine_mode); - extern bool loongarch_9bit_offset_address_p (rtx, machine_mode); --extern bool lwsp_swsp_address_p (rtx, machine_mode); - extern rtx loongarch_expand_thread_pointer (rtx); - - extern bool loongarch_eh_uses (unsigned int); - extern bool loongarch_epilogue_uses (unsigned int); --extern int loongarch_trampoline_code_size (void); - extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool); - extern bool loongarch_la464_128_store_p (rtx[]); - extern bool loongarch_la464_128_load_p (rtx[]); -@@ -270,10 +193,6 @@ typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); - extern void loongarch_register_frame_header_opt (void); - extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); - --extern void loongarch_declare_function_name(FILE *, const char *, tree); --/* Routines implemented in loongarch-d.c */ --extern void loongarch_d_target_versions (void); -- - /* Routines implemented in loongarch-c.c. */ - void loongarch_cpu_cpp_builtins (cpp_reader *); - -@@ -281,10 +200,12 @@ extern void loongarch_init_builtins (void); - extern void loongarch_atomic_assign_expand_fenv (tree *, tree *, tree *); - extern tree loongarch_builtin_decl (unsigned int, bool); - extern rtx loongarch_expand_builtin (tree, rtx, rtx subtarget ATTRIBUTE_UNUSED, -- machine_mode, int); -+ machine_mode, int); - extern tree loongarch_builtin_vectorized_function (unsigned int, tree, tree); - extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int); - extern tree loongarch_build_builtin_va_list (void); -- - extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool); -+extern void loongarch_emit_swrsqrtsf (rtx, rtx, machine_mode, bool); -+extern void loongarch_emit_swdivsf (rtx, rtx, rtx, machine_mode); -+extern rtx loongarch_prefetch_cookie (rtx, rtx); - #endif /* ! GCC_LARCH_PROTOS_H */ -diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h -new file mode 100644 -index 000000000..aca3d667b ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-str.h -@@ -0,0 +1,68 @@ -+/* Generated automatically by "genstr" from "loongarch-strings". -+ Please do not edit this file directly. -+ -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ -+This file is part of GCC. -+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. 
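loongarch_12bit_offset_address_p and loongarch_14bit_shifted_offset_address_p above decide which load/store encoding a memory offset fits: LD.D/ST.D take a signed 12-bit displacement, while LDPTR.D/STPTR.D take a signed 14-bit displacement scaled by 4. The same range tests on plain integers (imm12_p and imm14x4_p are illustrative names):

#include <stdio.h>

static int
imm12_p (long x)
{
  return x >= -2048 && x <= 2047;
}

static int
imm14x4_p (long x)
{
  /* A signed 14-bit field shifted left by 2: multiple of 4 in
     [-32768, 32764].  */
  return (x & 3) == 0 && x >= -32768 && x <= 32764;
}

int
main (void)
{
  long off = 4096;
  printf ("%ld: ld.d %d, ldptr.d %d\n", off, imm12_p (off), imm14x4_p (off));
  return 0;
}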
-+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#ifndef LOONGARCH_STR_H -+#define LOONGARCH_STR_H -+ -+#define OPTSTR_ARCH "arch" -+#define OPTSTR_TUNE "tune" -+ -+#define STR_CPU_NATIVE "native" -+#define STR_CPU_ABI_DEFAULT "abi-default" -+#define STR_CPU_LOONGARCH64 "loongarch64" -+#define STR_CPU_LA464 "la464" -+#define STR_CPU_LA364 "la364" -+#define STR_CPU_LA264 "la264" -+#define STR_CPU_LA664 "la664" -+ -+#define STR_ISA_BASE_LA64V100 "la64" -+ -+#define OPTSTR_ISA_EXT_FPU "fpu" -+#define STR_NONE "none" -+#define STR_ISA_EXT_FPU0 "0" -+#define STR_ISA_EXT_FPU32 "32" -+#define STR_ISA_EXT_FPU64 "64" -+ -+#define OPTSTR_SOFT_FLOAT "soft-float" -+#define OPTSTR_SINGLE_FLOAT "single-float" -+#define OPTSTR_DOUBLE_FLOAT "double-float" -+ -+#define OPTSTR_ISA_EXT_SIMD "simd" -+#define STR_ISA_EXT_LSX "lsx" -+#define STR_ISA_EXT_LASX "lasx" -+ -+#define OPTSTR_ABI_BASE "abi" -+#define STR_ABI_BASE_LP64D "lp64d" -+#define STR_ABI_BASE_LP64F "lp64f" -+#define STR_ABI_BASE_LP64S "lp64s" -+#define STR_ABI_BASE_LP64 "lp64" -+ -+#define STR_ABI_EXT_BASE "base" -+ -+#define OPTSTR_CMODEL "cmodel" -+#define STR_CMODEL_NORMAL "normal" -+#define STR_CMODEL_TINY "tiny" -+#define STR_CMODEL_TS "tiny-static" -+#define STR_CMODEL_LARGE "large" -+#define STR_CMODEL_EXTREME "extreme" -+ -+#endif /* LOONGARCH_STR_H */ -diff --git a/gcc/config/loongarch/loongarch-tables.opt b/gcc/config/loongarch/loongarch-tables.opt -deleted file mode 100644 -index 80794b564..000000000 ---- a/gcc/config/loongarch/loongarch-tables.opt -+++ /dev/null -@@ -1,34 +0,0 @@ --; -*- buffer-read-only: t -*- --; Generated automatically by genopt.sh from loongarch-cpus.def. -- --; Copyright (C) 2011-2018 Free Software Foundation, Inc. --; --; This file is part of GCC. --; --; GCC is free software; you can redistribute it and/or modify it under --; the terms of the GNU General Public License as published by the Free --; Software Foundation; either version 3, or (at your option) any later --; version. --; --; GCC is distributed in the hope that it will be useful, but WITHOUT ANY --; WARRANTY; without even the implied warranty of MERCHANTABILITY or --; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --; for more details. --; --; You should have received a copy of the GNU General Public License --; along with GCC; see the file COPYING3. If not see --; . -- --Enum --Name(loongarch_arch_opt_value) Type(int) --Known LARCH CPUs (for use with the -march= and -mtune= options): -- --EnumValue --Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly -- --EnumValue --Enum(loongarch_arch_opt_value) String(loongarch64) Value(0) Canonical -- --EnumValue --Enum(loongarch_arch_opt_value) String(la464) Value(1) Canonical -- -diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h -new file mode 100644 -index 000000000..bb01f2d98 ---- /dev/null -+++ b/gcc/config/loongarch/loongarch-tune.h -@@ -0,0 +1,51 @@ -+/* Definitions for microarchitecture-related data structures. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ -+This file is part of GCC. 
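loongarch-str.h pins down one canonical spelling for every -m option value, and the deleted loongarch-tables.opt shows the other half of the scheme: a table mapping each spelling to an integer. A minimal lookup sketch of that table pattern (opt_entry, abi_table and the values are invented for illustration):

#include <stdio.h>
#include <string.h>

struct opt_entry { const char *name; int value; };

static const struct opt_entry abi_table[] =
  { { "lp64d", 0 }, { "lp64f", 1 }, { "lp64s", 2 } };

static int
lookup_abi (const char *s)
{
  for (size_t i = 0; i < sizeof abi_table / sizeof abi_table[0]; i++)
    if (strcmp (abi_table[i].name, s) == 0)
      return abi_table[i].value;
  return -1;			/* unknown ABI string */
}

int
main (void)
{
  printf ("lp64f -> %d, lp32 -> %d\n",
	  lookup_abi ("lp64f"), lookup_abi ("lp32"));
  return 0;
}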
-+ -+GCC is free software; you can redistribute it and/or modify -+it under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. -+ -+GCC is distributed in the hope that it will be useful, -+but WITHOUT ANY WARRANTY; without even the implied warranty of -+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+GNU General Public License for more details. -+ -+You should have received a copy of the GNU General Public License -+along with GCC; see the file COPYING3. If not see -+. */ -+ -+#ifndef LOONGARCH_TUNE_H -+#define LOONGARCH_TUNE_H -+ -+/* RTX costs of various operations on the different architectures. */ -+struct loongarch_rtx_cost_data -+{ -+ unsigned short fp_add; -+ unsigned short fp_mult_sf; -+ unsigned short fp_mult_df; -+ unsigned short fp_div_sf; -+ unsigned short fp_div_df; -+ unsigned short int_mult_si; -+ unsigned short int_mult_di; -+ unsigned short int_div_si; -+ unsigned short int_div_di; -+ unsigned short branch_cost; -+ unsigned short memory_latency; -+}; -+ -+/* Costs to use when optimizing for size. */ -+extern const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size; -+ -+/* Cache size record of known processor models. */ -+struct loongarch_cache { -+ int l1d_line_size; /* bytes */ -+ int l1d_size; /* KiB */ -+ int l2d_size; /* kiB */ -+ int simultaneous_prefetches; /* number of parallel prefetch */ -+}; -+ -+#endif /* LOONGARCH_TUNE_H */ -diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c -index e556f81e4..a1dde5a0f 100644 ---- a/gcc/config/loongarch/loongarch.c -+++ b/gcc/config/loongarch/loongarch.c -@@ -1,9 +1,7 @@ --/* Subroutines used for LARCH code generation. -- Copyright (C) 1989-2018 Free Software Foundation, Inc. -- Contributed by A. Lichnewsky, lich@inria.inria.fr. -- Changes by Michael Meissner, meissner@osf.org. -- 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and -- Brendan Eich, brendan@microunity.com. -+/* Subroutines used for LoongArch code generation. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Technology Co. Ltd.. -+ Based on MIPS and RISC-V target for GNU compiler. - - This file is part of GCC. - -@@ -63,8 +61,14 @@ along with GCC; see the file COPYING3. If not see - #include "target-globals.h" - #include "tree-pass.h" - #include "context.h" -+#include "shrink-wrap.h" - #include "builtins.h" - #include "rtl-iter.h" -+#include "cfgloop.h" -+#include "gimple-iterator.h" -+#include "tree-vectorizer.h" -+#include "params.h" -+#include "opts.h" - - /* This file should be included last. */ - #include "target-def.h" -@@ -76,48 +80,20 @@ along with GCC; see the file COPYING3. If not see - && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES) - - /* Extract the symbol or label from UNSPEC wrapper X. */ --#define UNSPEC_ADDRESS(X) \ -- XVECEXP (X, 0, 0) -+#define UNSPEC_ADDRESS(X) XVECEXP (X, 0, 0) - - /* Extract the symbol type from UNSPEC wrapper X. */ - #define UNSPEC_ADDRESS_TYPE(X) \ - ((enum loongarch_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST)) - --/* The maximum distance between the top of the stack frame and the -- value $sp has when we save and restore registers. --*/ --#define LARCH_MAX_FIRST_STACK_STEP 0x7f0 -- - /* True if INSN is a loongarch.md pattern or asm statement. */ - /* ??? This test exists through the compiler, perhaps it should be -- moved to rtl.h. */ -+ moved to rtl.h. 
*/ - #define USEFUL_INSN_P(INSN) \ - (NONDEBUG_INSN_P (INSN) \ - && GET_CODE (PATTERN (INSN)) != USE \ - && GET_CODE (PATTERN (INSN)) != CLOBBER) - --/* If INSN is a delayed branch sequence, return the first instruction -- in the sequence, otherwise return INSN itself. */ --#define SEQ_BEGIN(INSN) \ -- (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ -- ? as_a (XVECEXP (PATTERN (INSN), 0, 0)) \ -- : (INSN)) -- --/* Likewise for the last instruction in a delayed branch sequence. */ --#define SEQ_END(INSN) \ -- (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ -- ? as_a (XVECEXP (PATTERN (INSN), \ -- 0, \ -- XVECLEN (PATTERN (INSN), 0) - 1)) \ -- : (INSN)) -- --/* Execute the following loop body with SUBINSN set to each instruction -- between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */ --#define FOR_EACH_SUBINSN(SUBINSN, INSN) \ -- for ((SUBINSN) = SEQ_BEGIN (INSN); \ -- (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \ -- (SUBINSN) = NEXT_INSN (SUBINSN)) -- - /* True if bit BIT is set in VALUE. */ - #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0) - -@@ -127,54 +103,25 @@ along with GCC; see the file COPYING3. If not see - A natural register + offset address. The register satisfies - loongarch_valid_base_register_p and the offset is a const_arith_operand. - -+ ADDRESS_REG_REG -+ A base register indexed by (optionally scaled) register. -+ - ADDRESS_CONST_INT - A signed 16-bit constant address. - - ADDRESS_SYMBOLIC: - A constant symbolic address. */ --enum loongarch_address_type { -+enum loongarch_address_type -+{ - ADDRESS_REG, -+ ADDRESS_REG_REG, - ADDRESS_CONST_INT, - ADDRESS_SYMBOLIC - }; - - --/* A class used to control a comdat-style stub that we output in each -- translation unit that needs it. */ --class loongarch_one_only_stub { --public: -- virtual ~loongarch_one_only_stub () {} -- -- /* Return the name of the stub. */ -- virtual const char *get_name () = 0; -- -- /* Output the body of the function to asm_out_file. */ -- virtual void output_body () = 0; --}; -- --/* Tuning information that is automatically derived from other sources -- (such as the scheduler). */ --static struct { -- /* The architecture and tuning settings that this structure describes. */ -- enum processor arch; -- enum processor tune; -- -- /* True if the structure has been initialized. */ -- bool initialized_p; -- --} loongarch_tuning_info; -- --/* Information about an address described by loongarch_address_type. -- -- ADDRESS_CONST_INT -- No fields are used. -- -- ADDRESS_REG -- REG is the base register and OFFSET is the constant offset. -- -- ADDRESS_SYMBOLIC -- SYMBOL_TYPE is the type of symbol that the address references. */ --struct loongarch_address_info { -+struct loongarch_address_info -+{ - enum loongarch_address_type type; - rtx reg; - rtx offset; -@@ -184,224 +131,82 @@ struct loongarch_address_info { - /* Method to load immediate number fields. - - METHOD_NORMAL: -- load immediate number 0-31 bit -+ Load bit 0-31 of the immediate number. - - METHOD_LU32I: -- load imm 32-51 bit -+ Load bit 32-51 of the immediate number. - - METHOD_LU52I: -- load imm 52-63 bit -+ load bit 52-63 of the immediate number. - - METHOD_INSV: -- imm 0xfff00000fffffxxx -+ immediates like 0xfff00000fffffxxx - */ --enum loongarch_load_imm_method { -+enum loongarch_load_imm_method -+{ - METHOD_NORMAL, - METHOD_LU32I, - METHOD_LU52I, - METHOD_INSV - }; - --/* One stage in a constant building sequence. 
These sequences have -- the form: -- -- A = VALUE[0] -- A = A CODE[1] VALUE[1] -- A = A CODE[2] VALUE[2] -- ... -- -- where A is an accumulator, each CODE[i] is a binary rtl operation -- and each VALUE[i] is a constant integer. CODE[0] is undefined. */ --struct loongarch_integer_op { -+struct loongarch_integer_op -+{ - enum rtx_code code; -- unsigned HOST_WIDE_INT value; -+ HOST_WIDE_INT value; - enum loongarch_load_imm_method method; - }; - - /* The largest number of operations needed to load an integer constant. -- The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI. -- When the lowest bit is clear, we can try, but reject a sequence with -- an extra SLL at the end. */ --#define LARCH_MAX_INTEGER_OPS 9 -- --/* Costs of various operations on the different architectures. */ -- --struct loongarch_rtx_cost_data --{ -- unsigned short fp_add; -- unsigned short fp_mult_sf; -- unsigned short fp_mult_df; -- unsigned short fp_div_sf; -- unsigned short fp_div_df; -- unsigned short int_mult_si; -- unsigned short int_mult_di; -- unsigned short int_div_si; -- unsigned short int_div_di; -- unsigned short branch_cost; -- unsigned short memory_latency; --}; -- --/* Global variables for machine-dependent things. */ -- --/* The -G setting, or the configuration's default small-data limit if -- no -G option is given. */ --static unsigned int loongarch_small_data_threshold; -- --/* The number of file directives written by loongarch_output_filename. */ --int num_source_filenames; -- --/* The name that appeared in the last .file directive written by -- loongarch_output_filename, or "" if loongarch_output_filename hasn't -- written anything yet. */ --const char *current_function_file = ""; -+ The worst accepted case for 64-bit constants is LU12I.W,LU32I.D,LU52I.D,ORI -+ or LU12I.W,LU32I.D,LU52I.D,ADDI.D DECL_ASSEMBLER_NAME. */ -+#define LARCH_MAX_INTEGER_OPS 4 - - /* Arrays that map GCC register numbers to debugger register numbers. */ --int loongarch_dbx_regno[FIRST_PSEUDO_REGISTER]; - int loongarch_dwarf_regno[FIRST_PSEUDO_REGISTER]; - --/* The current instruction-set architecture. */ --enum processor loongarch_arch; --const struct loongarch_cpu_info *loongarch_arch_info; -- --/* The processor that we should tune the code for. */ --enum processor loongarch_tune; --const struct loongarch_cpu_info *loongarch_tune_info; -- --/* The ISA level associated with loongarch_arch. */ --int loongarch_isa; -- --/* The ISA revision level. */ --int loongarch_isa_rev; -- --/* Which cost information to use. */ --static const struct loongarch_rtx_cost_data *loongarch_cost; -- - /* Index [M][R] is true if register R is allowed to hold a value of mode M. */ --static bool loongarch_hard_regno_mode_ok_p[MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER]; -+static bool loongarch_hard_regno_mode_ok_p[MAX_MACHINE_MODE] -+ [FIRST_PSEUDO_REGISTER]; - - /* Index C is true if character C is a valid PRINT_OPERAND punctation - character. */ - static bool loongarch_print_operand_punct[256]; - --static GTY (()) int loongarch_output_filename_first_time = 1; -- --/* loongarch_use_pcrel_pool_p[X] is true if symbols of type X should be -- forced into a PC-relative constant pool. */ --bool loongarch_use_pcrel_pool_p[NUM_SYMBOL_TYPES]; -- --/* Cached value of can_issue_more. This is cached in loongarch_variable_issue hook -- and returned from loongarch_sched_reorder2. */ -+/* Cached value of can_issue_more. This is cached in loongarch_variable_issue -+ hook and returned from loongarch_sched_reorder2. 
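The new LARCH_MAX_INTEGER_OPS comment above encodes how a 64-bit constant splits across at most four instructions: ORI covers bits 0-11, LU12I.W bits 12-31, LU32I.D bits 32-51 and LU52I.D bits 52-63. A sketch of just the field split (the real sequence in loongarch.c must also account for sign-extension between the steps, which this omits):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t v = 0x0123456789abcdefULL;
  uint64_t lo12 = v & 0xfffULL;			/* ORI      */
  uint64_t lo32 = (v >> 12) & 0xfffffULL;	/* LU12I.W  */
  uint64_t hi20 = (v >> 32) & 0xfffffULL;	/* LU32I.D  */
  uint64_t hi12 = v >> 52;			/* LU52I.D  */
  /* The four fields partition the constant exactly.  */
  assert (((hi12 << 52) | (hi20 << 32) | (lo32 << 12) | lo12) == v);
  return 0;
}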
*/ - static int cached_can_issue_more; - - /* Index R is the smallest register class that contains register R. */ - const enum reg_class loongarch_regno_to_class[FIRST_PSEUDO_REGISTER] = { -- GR_REGS, GR_REGS, GR_REGS, GR_REGS, -- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -- SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, -- SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, -- SIBCALL_REGS, GR_REGS, GR_REGS, JALR_REGS, -- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, -- -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- FP_REGS, FP_REGS, FP_REGS, FP_REGS, -- ST_REGS, ST_REGS, ST_REGS, ST_REGS, -- ST_REGS, ST_REGS, ST_REGS, ST_REGS, -- FRAME_REGS, FRAME_REGS --}; -- --static tree loongarch_handle_interrupt_attr (tree *, tree, tree, int, bool *); --static tree loongarch_handle_use_shadow_register_set_attr (tree *, tree, tree, int, -- bool *); -- --/* The value of TARGET_ATTRIBUTE_TABLE. */ --static const struct attribute_spec loongarch_attribute_table[] = { -- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, -- affects_type_identity, handler, exclude } */ -- { "long_call", 0, 0, false, true, true, false, NULL, NULL }, -- { "short_call", 0, 0, false, true, true, false, NULL, NULL }, -- { "far", 0, 0, false, true, true, false, NULL, NULL }, -- { "near", 0, 0, false, true, true, false, NULL, NULL }, -- { "nocompression", 0, 0, true, false, false, false, NULL, NULL }, -- /* Allow functions to be specified as interrupt handlers */ -- { "interrupt", 0, 1, false, true, true, false, loongarch_handle_interrupt_attr, -- NULL }, -- { "use_shadow_register_set", 0, 1, false, true, true, false, -- loongarch_handle_use_shadow_register_set_attr, NULL }, -- { "keep_interrupts_masked", 0, 0, false, true, true, false, NULL, NULL }, -- { "use_debug_exception_return", 0, 0, false, true, true, false, NULL, NULL }, -- { NULL, 0, 0, false, false, false, false, NULL, NULL } --}; -- --/* A table describing all the processors GCC knows about; see -- loongarch-cpus.def for details. */ --static const struct loongarch_cpu_info loongarch_cpu_info_table[] = { --#define LARCH_CPU(NAME, CPU, ISA, FLAGS) \ -- { NAME, CPU, ISA, FLAGS }, --#include "loongarch-cpus.def" --#undef LARCH_CPU --}; -- --/* Default costs. If these are used for a processor we should look -- up the actual costs. */ --#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \ -- COSTS_N_INSNS (7), /* fp_mult_sf */ \ -- COSTS_N_INSNS (8), /* fp_mult_df */ \ -- COSTS_N_INSNS (23), /* fp_div_sf */ \ -- COSTS_N_INSNS (36), /* fp_div_df */ \ -- COSTS_N_INSNS (10), /* int_mult_si */ \ -- COSTS_N_INSNS (10), /* int_mult_di */ \ -- COSTS_N_INSNS (69), /* int_div_si */ \ -- COSTS_N_INSNS (69), /* int_div_di */ \ -- 2, /* branch_cost */ \ -- 4 /* memory_latency */ -- --/* Floating-point costs for processors without an FPU. Just assume that -- all floating-point libcalls are very expensive. */ --#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \ -- COSTS_N_INSNS (256), /* fp_mult_sf */ \ -- COSTS_N_INSNS (256), /* fp_mult_df */ \ -- COSTS_N_INSNS (256), /* fp_div_sf */ \ -- COSTS_N_INSNS (256) /* fp_div_df */ -- --/* Costs to use when optimizing for size. 
*/ --static const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size = { -- COSTS_N_INSNS (1), /* fp_add */ -- COSTS_N_INSNS (1), /* fp_mult_sf */ -- COSTS_N_INSNS (1), /* fp_mult_df */ -- COSTS_N_INSNS (1), /* fp_div_sf */ -- COSTS_N_INSNS (1), /* fp_div_df */ -- COSTS_N_INSNS (1), /* int_mult_si */ -- COSTS_N_INSNS (1), /* int_mult_di */ -- COSTS_N_INSNS (1), /* int_div_si */ -- COSTS_N_INSNS (1), /* int_div_di */ -- 2, /* branch_cost */ -- 4 /* memory_latency */ -+ GR_REGS, GR_REGS, GR_REGS, GR_REGS, -+ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, -+ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, -+ SIBCALL_REGS, JIRL_REGS, SIBCALL_REGS, SIBCALL_REGS, -+ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, -+ SIBCALL_REGS, GR_REGS, GR_REGS, JIRL_REGS, -+ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, -+ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, -+ -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FP_REGS, FP_REGS, FP_REGS, FP_REGS, -+ FCC_REGS, FCC_REGS, FCC_REGS, FCC_REGS, -+ FCC_REGS, FCC_REGS, FCC_REGS, FCC_REGS, -+ FRAME_REGS, FRAME_REGS - }; - --/* Costs to use when optimizing for speed, indexed by processor. */ --static const struct loongarch_rtx_cost_data -- loongarch_rtx_cost_data[NUM_PROCESSOR_VALUES] = { -- { /* loongarch */ -- DEFAULT_COSTS -- }, -- { /* loongarch64 */ -- DEFAULT_COSTS -- }, -- { /* la464 */ -- DEFAULT_COSTS -- } --}; -+/* Which cost information to use. */ -+static const struct loongarch_rtx_cost_data *loongarch_cost; - - /* Information about a single argument. */ --struct loongarch_arg_info { -+struct loongarch_arg_info -+{ - /* True if the argument is at least partially passed on the stack. */ - bool stack_p; - -@@ -419,21 +224,6 @@ struct loongarch_arg_info { - unsigned int fpr_offset; - }; - -- --/* Emit a move from SRC to DEST. Assume that the move expanders can -- handle all moves if !can_create_pseudo_p (). The distinction is -- important because, unlike emit_move_insn, the move expanders know -- how to force Pmode objects into the constant pool even when the -- constant pool address is not itself legitimate. */ -- --rtx --loongarch_emit_move (rtx dest, rtx src) --{ -- return (can_create_pseudo_p () -- ? emit_move_insn (dest, src) -- : emit_move_insn_1 (dest, src)); --} -- - /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at - least PARM_BOUNDARY bits of alignment, but will be given anything up - to PREFERRED_STACK_BOUNDARY bits if the type requires it. */ -@@ -470,7 +260,8 @@ loongarch_pass_mode_in_fpr_p (machine_mode mode) - return 0; - } - --typedef struct { -+typedef struct -+{ - const_tree type; - HOST_WIDE_INT offset; - } loongarch_aggregate_field; -@@ -480,18 +271,18 @@ typedef struct { - - static int - loongarch_flatten_aggregate_field (const_tree type, -- loongarch_aggregate_field fields[2], -- int n, HOST_WIDE_INT offset, -- const int use_vecarg_p) -+ loongarch_aggregate_field fields[2], int n, -+ HOST_WIDE_INT offset, -+ const int use_vecarg_p) - { - switch (TREE_CODE (type)) - { - case RECORD_TYPE: -- /* Can't handle incomplete types nor sizes that are not fixed. */ -- if (!COMPLETE_TYPE_P (type) -- || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST -- || !tree_fits_uhwi_p (TYPE_SIZE (type))) -- return -1; -+ /* Can't handle incomplete types nor sizes that are not fixed. 
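loongarch_rtx_cost_optimize_size above prices every operation at one instruction so that -Os decisions favor the shortest sequence; for speed a per-CPU table is selected into the single loongarch_cost pointer instead. A reduced sketch of that select-a-table pattern with two invented fields:

#include <stdio.h>

struct cost_data { unsigned short int_div_si; unsigned short branch_cost; };

static const struct cost_data cost_speed = { 69, 2 };
static const struct cost_data cost_size = { 1, 2 };

int
main (void)
{
  int optimize_size = 1;	/* stands in for the real -Os flag */
  const struct cost_data *cost = optimize_size ? &cost_size : &cost_speed;
  printf ("int_div_si costs %d, branch costs %d\n",
	  cost->int_div_si, cost->branch_cost);
  return 0;
}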
*/ -+ if (!COMPLETE_TYPE_P (type) -+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST -+ || !tree_fits_uhwi_p (TYPE_SIZE (type))) -+ return -1; - - for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f)) - if (TREE_CODE (f) == FIELD_DECL) -@@ -500,7 +291,8 @@ loongarch_flatten_aggregate_field (const_tree type, - return -1; - - HOST_WIDE_INT pos = offset + int_byte_position (f); -- n = loongarch_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos, 0); -+ n = loongarch_flatten_aggregate_field (TREE_TYPE (f), fields, n, -+ pos, 0); - if (n < 0) - return -1; - } -@@ -513,7 +305,8 @@ loongarch_flatten_aggregate_field (const_tree type, - tree index = TYPE_DOMAIN (type); - tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type)); - int n_subfields = loongarch_flatten_aggregate_field (TREE_TYPE (type), -- subfields, 0, offset, 0); -+ subfields, 0, -+ offset, 0); - - /* Can't handle incomplete types nor sizes that are not fixed. */ - if (n_subfields <= 0 -@@ -528,7 +321,7 @@ loongarch_flatten_aggregate_field (const_tree type, - return -1; - - n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) -- - tree_to_uhwi (TYPE_MIN_VALUE (index)); -+ - tree_to_uhwi (TYPE_MIN_VALUE (index)); - gcc_assert (n_elts >= 0); - - for (HOST_WIDE_INT i = 0; i < n_elts; i++) -@@ -566,11 +359,11 @@ loongarch_flatten_aggregate_field (const_tree type, - } - - default: -- if (n < 2 -+ if ((n < 2 - && ((SCALAR_FLOAT_TYPE_P (type) - && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG) - || (INTEGRAL_TYPE_P (type) -- && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)) -+ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD))) - || (use_vecarg_p && VECTOR_TYPE_P (type) - && ((ISA_HAS_LSX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LSX_REG) - || (ISA_HAS_LASX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LASX_REG)))) -@@ -589,8 +382,8 @@ loongarch_flatten_aggregate_field (const_tree type, - - static int - loongarch_flatten_aggregate_argument (const_tree type, -- loongarch_aggregate_field fields[2], -- const int use_vecarg_p) -+ loongarch_aggregate_field fields[2], -+ const int use_vecarg_p) - { - if (!type || !((TREE_CODE (type) == RECORD_TYPE) - || (use_vecarg_p && TREE_CODE (type) == VECTOR_TYPE))) -@@ -603,9 +396,9 @@ loongarch_flatten_aggregate_argument (const_tree type, - two floating-point registers. If so, populate FIELDS accordingly. */ - - static unsigned --loongarch_pass_aggregate_in_fpr_pair_p (const_tree type, -- loongarch_aggregate_field fields[2], -- const int use_vecarg_p) -+loongarch_pass_aggregate_num_fpr (const_tree type, -+ loongarch_aggregate_field fields[2], -+ const int use_vecarg_p) - { - int n = loongarch_flatten_aggregate_argument (type, fields, use_vecarg_p); - -@@ -616,13 +409,13 @@ loongarch_pass_aggregate_in_fpr_pair_p (const_tree type, - return n > 0 ? n : 0; - } - --/* See whether TYPE is a record whose fields should be returned in one or -+/* See whether TYPE is a record whose fields should be returned in one - floating-point register and one integer register. If so, populate - FIELDS accordingly. */ - - static bool - loongarch_pass_aggregate_in_fpr_and_gpr_p (const_tree type, -- loongarch_aggregate_field fields[2]) -+ loongarch_aggregate_field fields[2]) - { - unsigned num_int = 0, num_float = 0; - int n = loongarch_flatten_aggregate_argument (type, fields, 0); -@@ -640,20 +433,21 @@ loongarch_pass_aggregate_in_fpr_and_gpr_p (const_tree type, - when the value has mode VALUE_MODE and the type has TYPE_MODE. 
The - two modes may be different for structures like: - -- struct __attribute__((packed)) foo { float f; } -+ struct __attribute__((packed)) foo { float f; } - -- where the SFmode value "f" is passed in REGNO but the struct itself -- has mode BLKmode. */ -+ where the SFmode value "f" is passed in REGNO but the struct itself -+ has mode BLKmode. */ - - static rtx - loongarch_pass_fpr_single (machine_mode type_mode, unsigned regno, -- machine_mode value_mode) -+ machine_mode value_mode, -+ HOST_WIDE_INT offset) - { - rtx x = gen_rtx_REG (value_mode, regno); - - if (type_mode != value_mode) - { -- x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx); -+ x = gen_rtx_EXPR_LIST (VOIDmode, x, GEN_INT (offset)); - x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x)); - } - return x; -@@ -666,19 +460,16 @@ loongarch_pass_fpr_single (machine_mode type_mode, unsigned regno, - - static rtx - loongarch_pass_fpr_pair (machine_mode mode, unsigned regno1, -- machine_mode mode1, HOST_WIDE_INT offset1, -- unsigned regno2, machine_mode mode2, -- HOST_WIDE_INT offset2) -+ machine_mode mode1, HOST_WIDE_INT offset1, -+ unsigned regno2, machine_mode mode2, -+ HOST_WIDE_INT offset2) - { -- return gen_rtx_PARALLEL -- (mode, -- gen_rtvec (2, -- gen_rtx_EXPR_LIST (VOIDmode, -- gen_rtx_REG (mode1, regno1), -- GEN_INT (offset1)), -- gen_rtx_EXPR_LIST (VOIDmode, -- gen_rtx_REG (mode2, regno2), -- GEN_INT (offset2)))); -+ return gen_rtx_PARALLEL ( -+ mode, gen_rtvec (2, -+ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode1, regno1), -+ GEN_INT (offset1)), -+ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode2, regno2), -+ GEN_INT (offset2)))); - } - - /* Fill INFO with information about a single argument, and return an -@@ -689,9 +480,9 @@ loongarch_pass_fpr_pair (machine_mode mode, unsigned regno1, - returning the argument, or false if passing the argument. */ - - static rtx --loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS *cum, -- machine_mode mode, const_tree type, bool named, -- bool return_p) -+loongarch_get_arg_info (struct loongarch_arg_info *info, -+ const CUMULATIVE_ARGS *cum, machine_mode mode, -+ const_tree type, bool named, bool return_p) - { - unsigned num_bytes, num_words; - unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST; -@@ -713,21 +504,23 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * - unsigned gregno = gpr_base + info->gpr_offset; - - /* Pass one- or two-element floating-point aggregates in FPRs. 
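loongarch_flatten_aggregate_field above recursively walks a record and succeeds only when it flattens to at most two scalar fields, which is what lets a small struct travel in FPRs. A toy model of the same walk, with a hand-rolled node descriptor in place of GCC trees (kind, node and flatten are all invented):

#include <stdio.h>

enum kind { FLOAT_FIELD, RECORD };

struct node { enum kind k; int nchildren; const struct node *children; };

static int
flatten (const struct node *n, int count)
{
  if (count < 0)
    return -1;			/* failure already propagating */
  if (n->k == FLOAT_FIELD)
    return count < 2 ? count + 1 : -1;	/* more than two fields: give up */
  for (int i = 0; i < n->nchildren; i++)
    count = flatten (&n->children[i], count);
  return count;
}

int
main (void)
{
  /* struct { float f; struct { float g; } inner; } flattens to 2 FPRs.  */
  const struct node f = { FLOAT_FIELD, 0, 0 };
  const struct node inner = { RECORD, 1, &f };
  const struct node children[] = { f, inner };
  const struct node outer = { RECORD, 2, children };
  printf ("FPRs needed: %d\n", flatten (&outer, 0));
  return 0;
}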
*/ -- if ((info->num_fprs = loongarch_pass_aggregate_in_fpr_pair_p (type, fields, use_vecarg_p)) -+ if ((info->num_fprs -+ = loongarch_pass_aggregate_num_fpr (type, fields, use_vecarg_p)) - && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS) - switch (info->num_fprs) - { - case 1: - return loongarch_pass_fpr_single (mode, fregno, -- TYPE_MODE (fields[0].type)); -+ TYPE_MODE (fields[0].type), -+ fields[0].offset); - - case 2: - return loongarch_pass_fpr_pair (mode, fregno, -- TYPE_MODE (fields[0].type), -- fields[0].offset, -- fregno + 1, -- TYPE_MODE (fields[1].type), -- fields[1].offset); -+ TYPE_MODE (fields[0].type), -+ fields[0].offset, -+ fregno + 1, -+ TYPE_MODE (fields[1].type), -+ fields[1].offset); - - default: - gcc_unreachable (); -@@ -742,9 +535,10 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * - return gen_rtx_REG (mode, fregno); - - case MODE_COMPLEX_FLOAT: -- return loongarch_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0, -- fregno + 1, GET_MODE_INNER (mode), -- GET_MODE_UNIT_SIZE (mode)); -+ return loongarch_pass_fpr_pair (mode, fregno, -+ GET_MODE_INNER (mode), 0, -+ fregno + 1, GET_MODE_INNER (mode), -+ GET_MODE_UNIT_SIZE (mode)); - - default: - gcc_unreachable (); -@@ -761,10 +555,11 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * - if (!SCALAR_FLOAT_TYPE_P (fields[0].type)) - std::swap (fregno, gregno); - -- return loongarch_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type), -- fields[0].offset, -- gregno, TYPE_MODE (fields[1].type), -- fields[1].offset); -+ return loongarch_pass_fpr_pair (mode, fregno, -+ TYPE_MODE (fields[0].type), -+ fields[0].offset, gregno, -+ TYPE_MODE (fields[1].type), -+ fields[1].offset); - } - } - -@@ -791,7 +586,7 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * - - static rtx - loongarch_function_arg (cumulative_args_t cum_v, machine_mode mode, -- const_tree type, bool named) -+ const_tree type, bool named) - { - CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); - struct loongarch_arg_info info; -@@ -806,7 +601,7 @@ loongarch_function_arg (cumulative_args_t cum_v, machine_mode mode, - - static void - loongarch_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, -- const_tree type, bool named) -+ const_tree type, bool named) - { - CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); - struct loongarch_arg_info info; -@@ -825,11 +620,12 @@ loongarch_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, - - static int - loongarch_arg_partial_bytes (cumulative_args_t cum, -- machine_mode mode, tree type, bool named) -+ machine_mode mode, tree type, bool named) - { - struct loongarch_arg_info arg; - -- loongarch_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false); -+ loongarch_get_arg_info (&arg, get_cumulative_args (cum), -+ mode, type, named, false); - return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0; - } - -@@ -837,8 +633,9 @@ loongarch_arg_partial_bytes (cumulative_args_t cum, - VALTYPE is the return type and MODE is VOIDmode. For libcalls, - VALTYPE is null and MODE is the mode of the return value. 
*/ - --rtx --loongarch_function_value (const_tree type, const_tree func, machine_mode mode) -+static rtx -+loongarch_function_value_1 (const_tree type, const_tree func, -+ machine_mode mode) - { - struct loongarch_arg_info info; - CUMULATIVE_ARGS args; -@@ -854,15 +651,34 @@ loongarch_function_value (const_tree type, const_tree func, machine_mode mode) - mode = promote_function_mode (type, mode, &unsigned_p, func, 1); - } - -- memset (&args, 0, sizeof args); -+ memset (&args, 0, sizeof (args)); - return loongarch_get_arg_info (&info, &args, mode, type, true, true); - } - --/* Implement TARGET_PASS_BY_REFERENCE. */ -+ -+/* Implement TARGET_FUNCTION_VALUE. */ -+ -+static rtx -+loongarch_function_value (const_tree valtype, const_tree fn_decl_or_type, -+ bool outgoing ATTRIBUTE_UNUSED) -+{ -+ return loongarch_function_value_1 (valtype, fn_decl_or_type, VOIDmode); -+} -+ -+/* Implement TARGET_LIBCALL_VALUE. */ -+ -+static rtx -+loongarch_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED) -+{ -+ return loongarch_function_value_1 (NULL_TREE, NULL_TREE, mode); -+} -+ -+ -+/* Implement TARGET_PASS_BY_REFERENCE. */ - - static bool - loongarch_pass_by_reference (cumulative_args_t cum_v, machine_mode mode, -- const_tree type, bool named) -+ const_tree type, bool named) - { - HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode); - struct loongarch_arg_info info; -@@ -886,23 +702,25 @@ loongarch_pass_by_reference (cumulative_args_t cum_v, machine_mode mode, - /* Implement TARGET_RETURN_IN_MEMORY. */ - - static bool --loongarch_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) -+loongarch_return_in_memory (const_tree type, -+ const_tree fndecl ATTRIBUTE_UNUSED) - { - CUMULATIVE_ARGS args; - cumulative_args_t cum = pack_cumulative_args (&args); - - /* The rules for returning in memory are the same as for passing the - first named argument by reference. */ -- memset (&args, 0, sizeof args); -+ memset (&args, 0, sizeof (args)); - return loongarch_pass_by_reference (cum, TYPE_MODE (type), type, true); - } - - /* Implement TARGET_SETUP_INCOMING_VARARGS. */ - - static void --loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode, -- tree type, int *pretend_size ATTRIBUTE_UNUSED, -- int no_rtl) -+loongarch_setup_incoming_varargs (cumulative_args_t cum, -+ machine_mode mode, tree type, -+ int *pretend_size ATTRIBUTE_UNUSED, -+ int no_rtl) - { - CUMULATIVE_ARGS local_cum; - int gp_saved; -@@ -911,7 +729,8 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode, - argument. Advance a local copy of CUM past the last "real" named - argument, to find out how many registers are left over. */ - local_cum = *get_cumulative_args (cum); -- loongarch_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1); -+ loongarch_function_arg_advance (pack_cumulative_args (&local_cum), -+ mode, type, 1); - - /* Found out how many registers we need to save. 
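loongarch_return_in_memory above reuses the pass-by-reference rule for the first named argument, so the two ABI decisions cannot drift apart. Simplified to its size check (the real hook first tries FPR eligibility via the flattening code, which this sketch skips):

#include <stdio.h>

#define UNITS_PER_WORD 8

static int
pass_by_reference_p (long size)
{
  /* Variable-sized or larger-than-two-GPRs values go by reference.  */
  return size < 0 || size > 2 * UNITS_PER_WORD;
}

int
main (void)
{
  printf ("16-byte struct by reference: %d\n", pass_by_reference_p (16));
  printf ("24-byte struct by reference: %d\n", pass_by_reference_p (24));
  return 0;
}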
*/ - gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs; -@@ -920,12 +739,11 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode, - { - rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx, - REG_PARM_STACK_SPACE (cfun->decl) -- - gp_saved * UNITS_PER_WORD); -+ - gp_saved * UNITS_PER_WORD); - rtx mem = gen_frame_mem (BLKmode, ptr); - set_mem_alias_set (mem, get_varargs_alias_set ()); - -- move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST, -- mem, gp_saved); -+ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST, mem, gp_saved); - } - if (REG_PARM_STACK_SPACE (cfun->decl) == 0) - cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD; -@@ -941,8 +759,7 @@ loongarch_set_frame_expr (rtx frame_pattern) - - insn = get_last_insn (); - RTX_FRAME_RELATED_P (insn) = 1; -- REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, -- frame_pattern, -+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, frame_pattern, - REG_NOTES (insn)); - } - -@@ -963,8 +780,8 @@ static bool - loongarch_save_reg_p (unsigned int regno) - { - bool call_saved = !global_regs[regno] && !call_used_regs[regno]; -- bool might_clobber = crtl->saves_all_registers -- || df_regs_ever_live_p (regno); -+ bool might_clobber -+ = crtl->saves_all_registers || df_regs_ever_live_p (regno); - - if (call_saved && might_clobber) - return true; -@@ -978,15 +795,6 @@ loongarch_save_reg_p (unsigned int regno) - return false; - } - --/* Determine whether to call GPR save/restore routines. */ --static bool --loongarch_use_save_libcall (const struct loongarch_frame_info *frame) --{ -- // FIXME: if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed) -- return false; -- --} -- - /* Determine which GPR save/restore routine to call. */ - - static unsigned -@@ -998,43 +806,114 @@ loongarch_save_libcall_count (unsigned mask) - abort (); - } - -+/* Find an available register to be used as the dynamic realign argument -+ pointer register. Such a register will be written in the prologue and -+ used at the beginning of the function body, so it must not be -+ 1. a parameter passing register. -+ 2. the GOT pointer. -+ We reuse the static-chain register if it is available. Otherwise, we -+ use r15 for loongarch64 (there may be a better choice; TODO). -+ -+ Return: the regno of the chosen register. */ -+ -+static unsigned int -+find_drap_reg (void) -+{ -+ tree decl = cfun->decl; -+ /* Always use a callee-saved register if there are no caller-saved -+ registers. */ -+ /* Use r15 for a nested function or a function that needs a static chain. -+ Since a function with a tail call may use any caller-saved -+ registers in the epilogue, the DRAP must not use a caller-saved -+ register in that case. */ -+ if (DECL_STATIC_CHAIN (decl) -+ || crtl->tail_call_emit) -+ return DRAP_REGNUM; -+ -+ return STATIC_CHAIN_REGNUM; -+} -+ -+ -+/* Return the Dynamic Realign Argument Pointer RTX. Now there isn't any.
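find_drap_reg above picks the scratch register that will carry the pre-realignment argument pointer. What the prologue then does with it is plain alignment arithmetic: park the old pointer in the DRAP, round the stack pointer down. The same computation on integers (the values are arbitrary examples):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t sp = 0x7ffffff8;	/* incoming sp, only 8-byte aligned */
  uint64_t align = 32;		/* demanded by an over-aligned local */
  uint64_t drap = sp;		/* the DRAP keeps the pre-realign value */
  sp &= -align;			/* round down: clear the low lg(align) bits */
  assert (sp % align == 0 && drap - sp < align);
  return 0;
}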
*/ -+ -+static rtx -+loongarch_get_drap_rtx (void) -+{ -+ if (crtl->stack_alignment_needed <= STACK_BOUNDARY -+ || (get_frame_size () == 0 && crtl->args.size == 0)) -+ { -+ crtl->stack_realign_needed = false; -+ return NULL; -+ } -+ -+ if (loongarch_force_drap) -+ crtl->need_drap = true; -+ -+ if (stack_realign_drap) -+ { -+ /* Assign DRAP to vDRAP and returns vDRAP */ -+ unsigned int regno = find_drap_reg (); -+ rtx drap_vreg; -+ rtx arg_ptr; -+ rtx_insn *seq, *insn; -+ -+ arg_ptr = gen_rtx_REG (Pmode, regno); -+ crtl->drap_reg = arg_ptr; -+ -+ start_sequence (); -+ drap_vreg = copy_to_reg (arg_ptr); -+ seq = get_insns (); -+ end_sequence (); -+ -+ insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ())); -+ if (!optimize) -+ { -+ add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ } -+ return drap_vreg; -+ } -+ else -+ return NULL; -+} -+ - /* Populate the current function's loongarch_frame_info structure. - -- LARCH stack frames grown downward. High addresses are at the top. -- -- +-------------------------------+ -- | | -- | incoming stack arguments | -- | | -- +-------------------------------+ <-- incoming stack pointer -- | | -- | callee-allocated save area | -- | for arguments that are | -- | split between registers and | -- | the stack | -- | | -- +-------------------------------+ <-- arg_pointer_rtx -- | | -- | callee-allocated save area | -- | for register varargs | -- | | -- +-------------------------------+ <-- hard_frame_pointer_rtx; -- | | stack_pointer_rtx + gp_sp_offset -- | GPR save area | + UNITS_PER_WORD -- | | -- +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset -- | | + UNITS_PER_HWVALUE -- | FPR save area | -- | | -- +-------------------------------+ <-- frame_pointer_rtx (virtual) -- | | -- | local variables | -- | | -- P +-------------------------------+ -- | | -- | outgoing stack arguments | -- | | -- +-------------------------------+ <-- stack_pointer_rtx -+ LoongArch stack frames grown downward. High addresses are at the top. -+ -+ +-------------------------------+ -+ | | -+ | incoming stack arguments | -+ | | -+ +-------------------------------+ <-- incoming stack pointer -+ | | -+ | callee-allocated save area | -+ | for arguments that are | -+ | split between registers and | -+ | the stack | -+ | | -+ +-------------------------------+ <-- arg_pointer_rtx (virtual) -+ | | -+ | callee-allocated save area | -+ | for register varargs | -+ | | -+ +-------------------------------+ <-- hard_frame_pointer_rtx; -+ | | stack_pointer_rtx + gp_sp_offset -+ | GPR save area | + UNITS_PER_WORD -+ | | -+ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset -+ | | + UNITS_PER_HWVALUE -+ | FPR save area | -+ | | -+ +-------------------------------+ <-- frame_pointer_rtx (virtual) -+ | | -+ | local variables | -+ | | -+ P +-------------------------------+ -+ | | -+ | outgoing stack arguments | -+ | | -+ +-------------------------------+ <-- stack_pointer_rtx - - Dynamic stack allocations such as alloca insert data at point P. - They decrease stack_pointer_rtx but leave frame_pointer_rtx and -@@ -1050,58 +929,93 @@ loongarch_compute_frame_info (void) - frame = &cfun->machine->frame; - memset (frame, 0, sizeof (*frame)); - -- /* Find out which GPRs we need to save. */ -- for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -+ /* Find out which GPRs we need to save. 
*/ -+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -+ if (loongarch_save_reg_p (regno)) -+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; -+ -+ /* If this function calls eh_return, we must also save and restore the -+ EH data registers. */ -+ if (crtl->calls_eh_return) -+ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++) -+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; -+ -+ /* Find out which FPRs we need to save. This loop must iterate over -+ the same space as its companion in loongarch_for_each_saved_reg. */ -+ if (TARGET_HARD_FLOAT) -+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) - if (loongarch_save_reg_p (regno)) -- frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; -- -- /* If this function calls eh_return, we must also save and restore the -- EH data registers. */ -- if (crtl->calls_eh_return) -- for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++) -- frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; -- -- /* Find out which FPRs we need to save. This loop must iterate over -- the same space as its companion in loongarch_for_each_saved_reg. */ -- if (TARGET_HARD_FLOAT) -- for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) -- if (loongarch_save_reg_p (regno)) -- frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++; -- -- /* At the bottom of the frame are any outgoing stack arguments. */ -- offset = LARCH_STACK_ALIGN (crtl->outgoing_args_size); -- /* Next are local stack variables. */ -- offset += LARCH_STACK_ALIGN (get_frame_size ()); -- /* The virtual frame pointer points above the local variables. */ -+ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++; -+ -+ /* Treat drap reg as a callee-saved reg. */ -+ if (stack_realign_drap) -+ frame->mask |= 1 << (find_drap_reg ()), num_x_saved++; -+ -+ /* At the bottom of the frame are any outgoing stack arguments. */ -+ offset = LARCH_STACK_ALIGN2 (crtl->outgoing_args_size); -+ /* Next are local stack variables. */ -+ offset += LARCH_STACK_ALIGN2 (get_frame_size ()); -+ /* The virtual frame pointer points above the local variables. */ - frame->frame_pointer_offset = offset; -- /* Next are the callee-saved FPRs. */ -+ /* Next are the callee-saved FPRs. */ - if (frame->fmask) -- offset += LARCH_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG); -- frame->fp_sp_offset = offset - UNITS_PER_FP_REG; -- /* Next are the callee-saved GPRs. */ -+ { -+ if (crtl->stack_realign_needed) -+ offset += num_f_saved * UNITS_PER_FP_REG; -+ else -+ offset += LARCH_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG); -+ frame->fp_sp_offset = offset - UNITS_PER_FP_REG; -+ } -+ else -+ frame->fp_sp_offset = offset; -+ /* Next are the callee-saved GPRs. */ - if (frame->mask) - { -- unsigned x_save_size = LARCH_STACK_ALIGN (num_x_saved * UNITS_PER_WORD); -- unsigned num_save_restore = 1 + loongarch_save_libcall_count (frame->mask); -+ unsigned x_save_size; -+ if (crtl->stack_realign_needed) -+ x_save_size = num_x_saved * UNITS_PER_WORD; -+ else -+ x_save_size = LARCH_STACK_ALIGN (num_x_saved * UNITS_PER_WORD); -+ unsigned num_save_restore -+ = 1 + loongarch_save_libcall_count (frame->mask); - - /* Only use save/restore routines if they don't alter the stack size. 
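loongarch_compute_frame_info above accumulates the frame from the bottom up, aligning each region boundary and recording the save-area offsets as it goes. The bookkeeping on example region sizes (ALIGN_UP mirrors the LARCH_STACK_ALIGN idea; every size below is arbitrary):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & -(a))

int
main (void)
{
  long offset = 0;
  offset += ALIGN_UP (24, 16);		/* outgoing stack arguments */
  offset += ALIGN_UP (40, 16);		/* local variables */
  long frame_pointer_offset = offset;	/* virtual fp sits above locals */
  offset += ALIGN_UP (2 * 8, 16);	/* two callee-saved FPRs */
  long fp_sp_offset = offset - 8;	/* highest FPR save slot */
  offset += ALIGN_UP (3 * 8, 16);	/* three callee-saved GPRs */
  long gp_sp_offset = offset - 8;	/* highest GPR save slot */
  printf ("total %ld, fp save at %ld, gp save at %ld, vfp at %ld\n",
	  offset, fp_sp_offset, gp_sp_offset, frame_pointer_offset);
  return 0;
}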
*/ - if (LARCH_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size) - frame->save_libcall_adjustment = x_save_size; - - offset += x_save_size; -+ frame->gp_sp_offset = offset - UNITS_PER_WORD; - } -- frame->gp_sp_offset = offset - UNITS_PER_WORD; -- /* The hard frame pointer points above the callee-saved GPRs. */ -- frame->hard_frame_pointer_offset = offset; -- /* Above the hard frame pointer is the callee-allocated varags save area. */ -- offset += LARCH_STACK_ALIGN (cfun->machine->varargs_size); -+ else -+ frame->gp_sp_offset = offset; -+ -+ /* The hard frame pointer points above the callee-saved GPRs. */ -+ if (crtl->stack_realign_needed) -+ frame->hard_frame_pointer_offset = frame->gp_sp_offset; /* For dwarf. */ -+ else -+ frame->hard_frame_pointer_offset = offset; -+ -+ /* Realign here for saving space if crtl->stack_realign_needed is true. */ -+ if (stack_realign_drap) -+ offset = LARCH_STACK_ALIGN2 (offset); -+ else if (stack_realign_fp) -+ offset = LARCH_STACK_ALIGN (offset); -+ -+ /* Above the hard frame pointer is the callee-allocated varags save area. */ -+ if (stack_realign_fp) -+ offset += LARCH_STACK_ALIGN (cfun->machine->varargs_size); -+ else -+ offset += LARCH_STACK_ALIGN2 (cfun->machine->varargs_size); - /* Next is the callee-allocated area for pretend stack arguments. */ -- offset += LARCH_STACK_ALIGN (crtl->args.pretend_args_size); -+ if (stack_realign_fp) -+ offset += LARCH_STACK_ALIGN (crtl->args.pretend_args_size); -+ else -+ offset += LARCH_STACK_ALIGN2 (crtl->args.pretend_args_size); - /* Arg pointer must be below pretend args, but must be above alignment - padding. */ - frame->arg_pointer_offset = offset - crtl->args.pretend_args_size; - frame->total_size = offset; -- /* Next points the incoming stack pointer and any incoming arguments. */ -+ /* Next points the incoming stack pointer and any incoming arguments. */ - - /* Only use save/restore routines when the GPRs are atop the frame. */ - if (frame->hard_frame_pointer_offset != frame->total_size) -@@ -1117,8 +1031,6 @@ loongarch_initial_elimination_offset (int from, int to) - { - HOST_WIDE_INT src, dest; - -- loongarch_compute_frame_info (); -- - if (to == HARD_FRAME_POINTER_REGNUM) - dest = cfun->machine->frame.hard_frame_pointer_offset; - else if (to == STACK_POINTER_REGNUM) -@@ -1145,8 +1057,8 @@ typedef void (*loongarch_save_restore_fn) (rtx, rtx); - stack pointer. */ - - static void --loongarch_save_restore_reg (machine_mode mode, int regno, -- HOST_WIDE_INT offset, loongarch_save_restore_fn fn) -+loongarch_save_restore_reg (machine_mode mode, int regno, HOST_WIDE_INT offset, -+ loongarch_save_restore_fn fn) - { - rtx mem; - -@@ -1159,12 +1071,29 @@ loongarch_save_restore_reg (machine_mode mode, int regno, - of the frame. */ - - static void --loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn fn) -+loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, -+ loongarch_save_restore_fn fn) - { - HOST_WIDE_INT offset; - -- /* Save the link register and s-registers. */ -+ /* Save the link register and s-registers. */ - offset = cfun->machine->frame.gp_sp_offset - sp_offset; -+ -+ /* The drap reg and fp reg have been saved in loongarch_expand_prologue -+ * when stack_realign_drap is true. */ -+ if (stack_realign_drap) -+ offset -= UNITS_PER_WORD * cfun->machine->frame.gpr_saved_num; -+ -+ /* Save fp reg first for access incoming-args in stack easily -+ * when stack_realign_fp is true. 
*/ -+ if (stack_realign_fp) -+ { -+ loongarch_save_restore_reg (word_mode, HARD_FRAME_POINTER_REGNUM, -+ offset, fn); -+ cfun->machine->frame.mask &= (~(1LL << HARD_FRAME_POINTER_REGNUM)); -+ offset -= UNITS_PER_WORD; -+ } -+ - for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) - if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) - { -@@ -1172,6 +1101,10 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn - offset -= UNITS_PER_WORD; - } - -+ /* Undo. */ -+ if (stack_realign_fp) -+ cfun->machine->frame.mask |= (1LL << HARD_FRAME_POINTER_REGNUM); -+ - /* This loop must iterate over the same space as its companion in - loongarch_compute_frame_info. */ - offset = cfun->machine->frame.fp_sp_offset - sp_offset; -@@ -1185,6 +1118,19 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn - } - } - -+/* Emit a move from SRC to DEST. Assume that the move expanders can -+ handle all moves if !can_create_pseudo_p (). The distinction is -+ important because, unlike emit_move_insn, the move expanders know -+ how to force Pmode objects into the constant pool even when the -+ constant pool address is not itself legitimate. */ -+ -+rtx -+loongarch_emit_move (rtx dest, rtx src) -+{ -+ return (can_create_pseudo_p () ? emit_move_insn (dest, src) -+ : emit_move_insn_1 (dest, src)); -+} -+ - /* Save register REG to MEM. Make the instruction frame-related. */ - - static void -@@ -1207,575 +1153,690 @@ loongarch_restore_reg (rtx reg, rtx mem) - RTX_FRAME_RELATED_P (insn) = 1; - } - --/* Return the code to invoke the GPR save routine. */ -- --const char * --loongarch_output_gpr_save (unsigned mask) --{ -- static char s[32]; -- unsigned n = loongarch_save_libcall_count (mask); -- -- ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__loongarch_save_%u", n); -- gcc_assert ((size_t) bytes < sizeof (s)); -- -- return s; --} -- --#define IMM_BITS 12 -- --#define IMM_REACH (1LL << IMM_BITS) -- - /* For stack frames that can't be allocated with a single ADDI instruction, - compute the best value to initially allocate. It must at a minimum -- allocate enough space to spill the callee-saved registers. If TARGET_RVC, -- try to pick a value that will allow compression of the register saves -- without adding extra instructions. */ -+ allocate enough space to spill the callee-saved registers. */ - - static HOST_WIDE_INT - loongarch_first_stack_step (struct loongarch_frame_info *frame) - { -- if (SMALL_OPERAND (frame->total_size)) -+ -+ /* Only for fpr/gpr saved regs first when stack_realign_fp is true. */ -+ if (stack_realign_fp) -+ return frame->total_size - frame->frame_pointer_offset; -+ -+ HOST_WIDE_INT realign_size = crtl->stack_alignment_needed / BITS_PER_UNIT; -+ -+ if (IMM12_OPERAND (frame->total_size)) - return frame->total_size; - -- HOST_WIDE_INT min_first_step = -- LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset); -+ HOST_WIDE_INT min_first_step -+ = LARCH_STACK_ALIGN2 (frame->total_size - frame->fp_sp_offset); - HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8; - HOST_WIDE_INT min_second_step = frame->total_size - max_first_step; -- gcc_assert (min_first_step <= max_first_step); - - /* As an optimization, use the least-significant bits of the total frame -- size, so that the second adjustment step is just LUI + ADD. */ -- if (!SMALL_OPERAND (min_second_step) -+ size, so that the second adjustment step is just LU12I + ADD. 
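A hypothetical figure (frame size assumed): with IMM_REACH = 1 << 12 = 4096 and frame->total_size = 5000, total_size % IMM_REACH = 904, which is below IMM_REACH / 2, so the first step frees 904 bytes and the remaining 4096 are exactly one lu12i.w of 1 plus an add.d rather than a longer constant load -- provided 904 also clears min_first_step.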
*/ -+ if (!IMM12_OPERAND (min_second_step) - && frame->total_size % IMM_REACH < IMM_REACH / 2 - && frame->total_size % IMM_REACH >= min_first_step) - return frame->total_size % IMM_REACH; - -- return max_first_step; --} -- --static rtx --loongarch_adjust_libcall_cfi_prologue () --{ -- rtx dwarf = NULL_RTX; -- rtx adjust_sp_rtx, reg, mem, insn; -- int saved_size = cfun->machine->frame.save_libcall_adjustment; -- int offset; -- -- for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -- if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) -- { -- /* The save order is ra, s0 to s8. */ -- if (regno == RETURN_ADDR_REGNUM) -- offset = saved_size - UNITS_PER_WORD; -- else -- offset = saved_size - ((regno - S0_REGNUM + 2) * UNITS_PER_WORD); -- -- reg = gen_rtx_REG (SImode, regno); -- mem = gen_frame_mem (SImode, plus_constant (Pmode, -- stack_pointer_rtx, -- offset)); -- -- insn = gen_rtx_SET (mem, reg); -- dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf); -- } -- -- /* Debug info for adjust sp. */ -- adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, -- stack_pointer_rtx, GEN_INT (-saved_size)); -- dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, -- dwarf); -- return dwarf; -+ return crtl->stack_realign_needed ? (max_first_step < realign_size -+ ? realign_size -+ : ROUND_DOWN (max_first_step, -+ realign_size)) -+ : max_first_step; - } - - static void - loongarch_emit_stack_tie (void) - { -- if (Pmode == SImode) -- emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx)); -- else -- emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx)); --} -- --/* Return nonzero if this function is known to have a null epilogue. -- This allows the optimizer to omit jumps to jumps if no stack -- was created. */ -- --bool --loongarch_can_use_return_insn (void) --{ -- return reload_completed && cfun->machine->frame.total_size == 0; -+ emit_insn (PMODE_INSN (gen_stack_tie, -+ (stack_pointer_rtx, hard_frame_pointer_rtx))); - } - --static rtx --loongarch_adjust_libcall_cfi_epilogue () --{ -- rtx dwarf = NULL_RTX; -- rtx adjust_sp_rtx, reg; -- int saved_size = cfun->machine->frame.save_libcall_adjustment; -- -- /* Debug info for adjust sp. */ -- adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, -- stack_pointer_rtx, GEN_INT (saved_size)); -- dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, -- dwarf); -- -- for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) -- if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) -- { -- reg = gen_rtx_REG (SImode, regno); -- dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf); -- } -+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) - -- return dwarf; --} -+#if PROBE_INTERVAL > 16384 -+#error Cannot use indexed addressing mode for stack probing -+#endif - --/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P -- says which. */ -+/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE, -+ inclusive. These are offsets from the current stack pointer. */ - --void --loongarch_expand_epilogue (bool sibcall_p) -+static void -+loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size) - { -- /* Split the frame into two. STEP1 is the amount of stack we should -- deallocate before restoring the registers. STEP2 is the amount we -- should deallocate afterwards. -+ HOST_WIDE_INT rounded_size; -+ rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); -+ rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); - -- Start off by assuming that no registers need to be restored. 
*/ -- struct loongarch_frame_info *frame = &cfun->machine->frame; -- unsigned mask = frame->mask; -- HOST_WIDE_INT step1 = frame->total_size; -- HOST_WIDE_INT step2 = 0; -- bool use_restore_libcall = !sibcall_p && loongarch_use_save_libcall (frame); -- rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); -- rtx insn; -+ size = size + first; -+ /* Sanity check for the addressing mode we're going to use. */ -+ gcc_assert (first <= 16384); - -- /* We need to add memory barrier to prevent read from deallocated stack. */ -- bool need_barrier_p = (get_frame_size () -- + cfun->machine->frame.arg_pointer_offset) != 0; -+ /* Step 1: round SIZE to the previous multiple of the interval. */ - -- if (!sibcall_p && loongarch_can_use_return_insn ()) -- { -- emit_jump_insn (gen_return ()); -- return; -- } -+ rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); - -- /* Move past any dynamic stack allocations. */ -- if (cfun->calls_alloca) -+ /* Step 2: compute initial and final value of the loop counter. */ -+ -+ emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); -+ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ -+ if (rounded_size != 0) - { -- /* Emit a barrier to prevent loads from a deallocated stack. */ -- loongarch_emit_stack_tie (); -- need_barrier_p = false; -+ emit_move_insn (r12, GEN_INT (rounded_size)); -+ emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, -+ stack_pointer_rtx, r12))); - -- rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); -- if (!SMALL_OPERAND (INTVAL (adjust))) -- { -- loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); -- adjust = N_LARCH_PROLOGUE_TEMP (Pmode); -- } -+ /* Step 3: the loop - -- insn = emit_insn ( -- gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, -- adjust)); -+ do -+ { -+ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL -+ probe at TEST_ADDR -+ } -+ while (TEST_ADDR != LAST_ADDR) - -- rtx dwarf = NULL_RTX; -- rtx cfa_adjust_value = gen_rtx_PLUS ( -- Pmode, hard_frame_pointer_rtx, -- GEN_INT (-frame->hard_frame_pointer_offset)); -- rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value); -- dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf); -- RTX_FRAME_RELATED_P (insn) = 1; -+ probes at FIRST + N * PROBE_INTERVAL for values of N from 1 -+ until it is equal to ROUNDED_SIZE. */ - -- REG_NOTES (insn) = dwarf; -+ emit_insn (PMODE_INSN (gen_probe_stack_range, (stack_pointer_rtx, -+ stack_pointer_rtx, r12, r14))); - } - -- /* If we need to restore registers, deallocate as much stack as -- possible in the second step without going out of range. */ -- if ((frame->mask | frame->fmask) != 0) -- { -- step2 = loongarch_first_stack_step (frame); -- step1 -= step2; -- } -+ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time -+ that SIZE is equal to ROUNDED_SIZE. */ - -- /* Set TARGET to BASE + STEP1. */ -- if (step1 > 0) -+ if (size != rounded_size) - { -- /* Emit a barrier to prevent loads from a deallocated stack. */ -- loongarch_emit_stack_tie (); -- need_barrier_p = false; -- -- /* Get an rtx for STEP1 that we can add to BASE. 
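If STEP1 fits a signed 12-bit immediate it folds into a single addi; a hypothetical step1 = 0x12340 (value assumed) is far outside [-2048, 2047], so the fallback below first materialises it in the prologue temporary register and applies it with a register-register add.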
*/ -- rtx adjust = GEN_INT (step1); -- if (!SMALL_OPERAND (step1)) -+ if (size - rounded_size >= PROBE_INTERVAL/2) - { -- loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); -- adjust = N_LARCH_PROLOGUE_TEMP (Pmode); -+ emit_move_insn (r14, GEN_INT (size - rounded_size)); -+ emit_insn (gen_rtx_SET (stack_pointer_rtx, gen_rtx_MINUS (Pmode, -+ stack_pointer_rtx, -+ r14))); - } -+ else -+ emit_insn (gen_rtx_SET (stack_pointer_rtx, gen_rtx_PLUS (Pmode, -+ stack_pointer_rtx, -+ GEN_INT (rounded_size - size)))); - -- insn = emit_insn ( -- gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust)); -- -- rtx dwarf = NULL_RTX; -- rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, -- GEN_INT (step2)); -- -- dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); -- RTX_FRAME_RELATED_P (insn) = 1; -- -- REG_NOTES (insn) = dwarf; - } - -- if (use_restore_libcall) -- frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ -- -- /* Restore the registers. */ -- loongarch_for_each_saved_reg (frame->total_size - step2, loongarch_restore_reg); -- -- if (use_restore_libcall) -+ if (first) - { -- frame->mask = mask; /* Undo the above fib. */ -- gcc_assert (step2 >= frame->save_libcall_adjustment); -- step2 -= frame->save_libcall_adjustment; -+ emit_move_insn (r12, GEN_INT (first)); -+ emit_insn (gen_rtx_SET (stack_pointer_rtx, gen_rtx_PLUS (Pmode, -+ stack_pointer_rtx, r12))); - } - -- if (need_barrier_p) -- loongarch_emit_stack_tie (); -+ /* Make sure nothing is scheduled before we are done. */ -+ emit_insn (gen_blockage ()); -+} - -- /* Deallocate the final bit of the frame. */ -- if (step2 > 0) -- { -- insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -- GEN_INT (step2))); -+/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are -+ absolute addresses. */ -+const char * -+loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) -+{ -+ static int labelno = 0; -+ char loop_lab[32], tmp[64]; -+ rtx xops[3]; - -- rtx dwarf = NULL_RTX; -- rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, -- const0_rtx); -- dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); -- RTX_FRAME_RELATED_P (insn) = 1; -+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++); - -- REG_NOTES (insn) = dwarf; -+ /* Loop. */ -+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab); -+ -+ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ -+ xops[0] = reg1; -+ xops[1] = GEN_INT (-PROBE_INTERVAL); -+ xops[2] = reg3; -+ if (TARGET_64BIT) -+ output_asm_insn ("sub.d\t%0,%0,%2", xops); -+ else -+ output_asm_insn ("sub.w\t%0,%0,%2", xops); -+ -+ /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */ -+ xops[1] = reg2; -+ strcpy (tmp, "bne\t%0,%1,"); -+ if (TARGET_64BIT) -+ output_asm_insn ("st.d\t$r0,%0,0", xops); -+ else -+ output_asm_insn ("st.w\t$r0,%0,0", xops); -+ output_asm_insn (strcat (tmp, &loop_lab[1]), xops); -+ -+ return ""; -+} -+ -+/* Expand the "prologue" pattern. */ -+ -+void -+loongarch_expand_prologue (void) -+{ -+ struct loongarch_frame_info *frame; -+ HOST_WIDE_INT size; -+ rtx insn; -+ HOST_WIDE_INT realign_size; -+ HOST_WIDE_INT offset; -+ unsigned mask; -+ HOST_WIDE_INT saved_gpr_num = 0; -+ -+ /* Finalize crtl->stack_realign_needed and frame_pointer_needed flags. */ -+ if((crtl->stack_realign_needed || (!flag_omit_frame_pointer && optimize)) && loongarch_stack_realign) -+ { -+ unsigned int incoming_stack_boundary -+ = (crtl->parm_stack_boundary > PREFERRED_STACK_BOUNDARY -+ ? 
crtl->parm_stack_boundary : PREFERRED_STACK_BOUNDARY); -+ unsigned int stack_alignment -+ = (crtl->is_leaf -+ ? crtl->max_used_stack_slot_alignment -+ : crtl->stack_alignment_needed); -+ unsigned int stack_realign -+ = (incoming_stack_boundary < stack_alignment); -+ -+ if ((get_frame_size () + crtl->outgoing_args_size) == 0 -+ && (crtl->args.size == 0) -+ && frame_pointer_needed -+ && crtl->is_leaf -+ && crtl->sp_is_unchanging -+ && !cfun->calls_alloca -+ && !crtl->calls_eh_return -+ && !(STACK_CHECK_MOVING_SP -+ && flag_stack_check -+ && flag_exceptions -+ && cfun->can_throw_non_call_exceptions)) -+ { -+ /* If drap has been set, but it actually isn't live at the -+ start of the function, there is no reason to set it up. */ -+ if (crtl->drap_reg) -+ { -+ basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; -+ if (! REGNO_REG_SET_P (DF_LR_IN (bb), -+ REGNO (crtl->drap_reg))) -+ { -+ crtl->drap_reg = NULL_RTX; -+ crtl->need_drap = false; -+ } -+ } -+ frame_pointer_needed = false; -+ crtl->stack_realign_needed = false; -+ crtl->max_used_stack_slot_alignment = incoming_stack_boundary; -+ crtl->stack_alignment_needed = incoming_stack_boundary; -+ crtl->stack_alignment_estimated = incoming_stack_boundary; -+ if (crtl->preferred_stack_boundary > incoming_stack_boundary) -+ crtl->preferred_stack_boundary = incoming_stack_boundary; -+ -+ df_finish_pass (true); -+ df_scan_alloc (NULL); -+ df_scan_blocks (); -+ df_compute_regs_ever_live (true); -+ df_analyze (); -+ loongarch_compute_frame_info(); -+ } -+ } -+ -+ frame = &cfun->machine->frame; -+ size = frame->total_size; -+ -+ mask = frame->mask; -+ -+ realign_size = crtl->stack_alignment_needed / BITS_PER_UNIT; -+ -+ if (flag_stack_usage_info) -+ { -+ if (stack_realign_drap) -+ { -+ current_function_dynamic_stack_size += crtl->stack_alignment_needed / BITS_PER_UNIT; -+ } -+ current_function_static_stack_size = size; - } - -- if (use_restore_libcall) -+ /* When stack_realign_drap is true, save current sp in drap-reg then realign. */ -+ if (stack_realign_drap) - { -- rtx dwarf = loongarch_adjust_libcall_cfi_epilogue (); -- insn = emit_insn (gen_gpr_restore (GEN_INT (loongarch_save_libcall_count (mask)))); -+ rtx tmp_reg = plus_constant (Pmode, stack_pointer_rtx, 0); -+ insn = emit_insn (gen_rtx_SET (crtl->drap_reg, tmp_reg)); - RTX_FRAME_RELATED_P (insn) = 1; -- REG_NOTES (insn) = dwarf; - -- emit_jump_insn (gen_gpr_restore_return (ra)); -- return; -+ int log2_realigned_bytes = exact_log2 (realign_size); -+ tmp_reg = gen_rtx_REG (Pmode, GP_REG_FIRST); -+ insn = emit_insn (gen_insvdi (stack_pointer_rtx, -+ GEN_INT (log2_realigned_bytes), -+ const0_rtx, -+ tmp_reg)); -+ insn = gen_anddi3 (stack_pointer_rtx, -+ stack_pointer_rtx, -+ GEN_INT (-realign_size)); -+ loongarch_set_frame_expr (insn); - } - -- /* Add in the __builtin_eh_return stack adjustment. */ -- if (crtl->calls_eh_return) -- emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -- EH_RETURN_STACKADJ_RTX)); -+ /* Save the registers. */ -+ if ((frame->mask | frame->fmask) != 0) -+ { -+ HOST_WIDE_INT step1 = MIN (size, loongarch_first_stack_step (frame)); - -- if (!sibcall_p) -- emit_jump_insn (gen_simple_return_internal (ra)); --} -+ /* Save fp first for dwarf. 
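(Presumably: the CFA is still being described relative to the incoming sp at this point, so $fp must land in its save slot, and the frame pointer be established, before the alignment "and" below destroys the low bits of sp; otherwise the DWARF unwind info for this region could not locate the saved registers.)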
*/ -+ if (stack_realign_drap) -+ { -+ gcc_assert (step1 % realign_size == 0); -+ if (frame->mask & (1LL << HARD_FRAME_POINTER_REGNUM)) -+ { -+ emit_insn (gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, -+ GEN_INT (-(frame->total_size -+ - frame->gp_sp_offset)))); -+ step1 -= (frame->total_size - frame->gp_sp_offset); -+ loongarch_save_restore_reg (word_mode, HARD_FRAME_POINTER_REGNUM, -+ 0, loongarch_save_reg); -+ cfun->machine->frame.mask -+ = frame->mask & ~(1LL << HARD_FRAME_POINTER_REGNUM); -+ saved_gpr_num ++; -+ } -+ /* Set up the frame pointer, if we're using one. */ -+ if (frame_pointer_needed) -+ { -+ insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx); -+ RTX_FRAME_RELATED_P (insn) = 1; - -- --static rtx loongarch_find_pic_call_symbol (rtx_insn *, rtx, bool); --static int loongarch_register_move_cost (machine_mode, reg_class_t, -- reg_class_t); -- --/* Predicates to test for presence of "near"/"short_call" and "far"/"long_call" -- attributes on the given TYPE. */ -+ loongarch_emit_stack_tie (); -+ } -+ } - --static bool --loongarch_near_type_p (const_tree type) --{ -- return (lookup_attribute ("short_call", TYPE_ATTRIBUTES (type)) != NULL -- || lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL); --} -+ if (!IMM12_OPERAND (-step1) && stack_realign_drap) -+ { -+ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-step1)); -+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ LARCH_PROLOGUE_TEMP (Pmode))); - --static bool --loongarch_far_type_p (const_tree type) --{ -- return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL -- || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL); --} -+ /* Describe the effect of the previous instructions. */ -+ insn = plus_constant (Pmode, stack_pointer_rtx, -step1); -+ insn = gen_rtx_SET (stack_pointer_rtx, insn); -+ loongarch_set_frame_expr (insn); -+ } -+ else -+ { -+ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ GEN_INT (-step1)); -+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -+ } - -+ if (saved_gpr_num && stack_realign_drap) -+ size -= (step1 + frame->total_size - frame->gp_sp_offset); -+ else -+ size -= step1; - --/* Check if the interrupt attribute is set for a function. */ -+ if (stack_realign_drap && (frame->mask & (1LL << find_drap_reg ()))) -+ { -+ offset = cfun->machine->frame.gp_sp_offset - size -+ - UNITS_PER_WORD * saved_gpr_num; -+ loongarch_save_restore_reg (word_mode, find_drap_reg (), -+ offset, loongarch_save_reg); -+ cfun->machine->frame.mask -+ = frame->mask & ~(1LL << (find_drap_reg ())); -+ saved_gpr_num ++; -+ } - --static bool --loongarch_interrupt_type_p (tree type) --{ -- return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL; --} -+ cfun->machine->frame.gpr_saved_num = saved_gpr_num; -+ loongarch_for_each_saved_reg (size, loongarch_save_reg); -+ cfun->machine->frame.mask = mask; -+ } - --/* Implement TARGET_COMP_TYPE_ATTRIBUTES. */ -+ /* Set up the frame pointer, if we're using one. */ -+ if (frame_pointer_needed && !stack_realign_drap) -+ { -+ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, -+ GEN_INT (frame->hard_frame_pointer_offset - size)); -+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; - --static int --loongarch_comp_type_attributes (const_tree type1, const_tree type2) --{ -- /* Disallow mixed near/far attributes. 
*/ -- if (loongarch_far_type_p (type1) && loongarch_near_type_p (type2)) -- return 0; -- if (loongarch_near_type_p (type1) && loongarch_far_type_p (type2)) -- return 0; -- return 1; --} -+ loongarch_emit_stack_tie (); -+ } - --/* Implement TARGET_INSERT_ATTRIBUTES. */ -+ /* Stack realign when stack_realign_fp is true. */ -+ if (stack_realign_fp) -+ { -+ int log2_realigned_bytes = exact_log2 (realign_size); -+ rtx tmp_reg = gen_rtx_REG (Pmode, GP_REG_FIRST); -+ insn = emit_insn (gen_insvdi (stack_pointer_rtx, -+ GEN_INT (log2_realigned_bytes), -+ const0_rtx, -+ tmp_reg)); -+ insn = gen_anddi3 (stack_pointer_rtx, -+ stack_pointer_rtx, -+ GEN_INT (-realign_size)); -+ loongarch_set_frame_expr (insn); -+ } - --static void --loongarch_insert_attributes (tree decl, tree *attributes) --{ --} -+ /* Allocate the rest of the frame. */ -+ if ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK -+ || flag_stack_clash_protection) -+ && size > 0) -+ { -+ loongarch_emit_probe_stack_range (get_stack_check_protect (), size); - --/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */ -+ /* Describe the effect of the previous instructions. */ -+ insn = plus_constant (Pmode, stack_pointer_rtx, -size); -+ insn = gen_rtx_SET (stack_pointer_rtx, insn); -+ loongarch_set_frame_expr (insn); -+ } -+ else -+ { -+ if (size > 0) -+ { -+ if (stack_realign_drap) -+ gcc_assert (size % realign_size == 0); - --static tree --loongarch_merge_decl_attributes (tree olddecl, tree newdecl) --{ -- return merge_attributes (DECL_ATTRIBUTES (olddecl), -- DECL_ATTRIBUTES (newdecl)); -+ if (IMM12_OPERAND (-size)) -+ { -+ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ GEN_INT (-size)); -+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -+ } -+ else -+ { -+ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); -+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ LARCH_PROLOGUE_TEMP (Pmode))); -+ -+ /* Describe the effect of the previous instructions. */ -+ insn = plus_constant (Pmode, stack_pointer_rtx, -size); -+ insn = gen_rtx_SET (stack_pointer_rtx, insn); -+ loongarch_set_frame_expr (insn); -+ } -+ } -+ } - } - --/* Implement TARGET_CAN_INLINE_P. */ -+/* Return nonzero if this function is known to have a null epilogue. -+ This allows the optimizer to omit jumps to jumps if no stack -+ was created. */ - --static bool --loongarch_can_inline_p (tree caller, tree callee) -+bool -+loongarch_can_use_return_insn (void) - { -- return default_target_can_inline_p (caller, callee); -+ return reload_completed && cfun->machine->frame.total_size == 0; - } - --/* Handle an "interrupt" attribute with an optional argument. */ -+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P -+ says which. */ - --static tree --loongarch_handle_interrupt_attr (tree *node ATTRIBUTE_UNUSED, tree name, tree args, -- int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) -+void -+loongarch_expand_epilogue (bool sibcall_p) - { -- /* Check for an argument. */ -- if (is_attribute_p ("interrupt", name) && args != NULL) -+ /* Split the frame into two. STEP1 is the amount of stack we should -+ deallocate before restoring the registers. STEP2 is the amount we -+ should deallocate afterwards. -+ -+ Start off by assuming that no registers need to be restored. 
*/ -+ struct loongarch_frame_info *frame = &cfun->machine->frame; -+ unsigned mask = frame->mask; -+ HOST_WIDE_INT step1 = frame->total_size; -+ HOST_WIDE_INT step2 = 0; -+ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); -+ rtx insn; -+ HOST_WIDE_INT offset; -+ HOST_WIDE_INT saved_gpr_num = 0; -+ -+ /* We need to add memory barrier to prevent read from deallocated stack. */ -+ bool need_barrier_p -+ = (get_frame_size () + cfun->machine->frame.arg_pointer_offset) != 0; -+ -+ if (!sibcall_p && loongarch_can_use_return_insn ()) - { -- tree cst; -+ emit_jump_insn (gen_return ()); -+ return; -+ } - -- cst = TREE_VALUE (args); -- if (TREE_CODE (cst) != STRING_CST) -- { -- warning (OPT_Wattributes, -- "%qE attribute requires a string argument", -- name); -- *no_add_attrs = true; -- } -- else if (strcmp (TREE_STRING_POINTER (cst), "eic") != 0 -- && strncmp (TREE_STRING_POINTER (cst), "vector=", 7) != 0) -- { -- warning (OPT_Wattributes, -- "argument to %qE attribute is neither eic, nor " -- "vector=", name); -- *no_add_attrs = true; -- } -- else if (strncmp (TREE_STRING_POINTER (cst), "vector=", 7) == 0) -- { -- const char *arg = TREE_STRING_POINTER (cst) + 7; -+ if (!stack_realign_fp) -+ { -+ /* Move past any dynamic stack allocations. */ -+ if (cfun->calls_alloca) -+ { -+ /* Emit a barrier to prevent loads from a deallocated stack. */ -+ loongarch_emit_stack_tie (); -+ need_barrier_p = false; - -- /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5. */ -- if (strlen (arg) != 3 -- || (arg[0] != 's' && arg[0] != 'h') -- || arg[1] != 'w' -- || (arg[0] == 's' && arg[2] != '0' && arg[2] != '1') -- || (arg[0] == 'h' && (arg[2] < '0' || arg[2] > '5'))) -+ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); -+ if (!IMM12_OPERAND (INTVAL (adjust))) - { -- warning (OPT_Wattributes, -- "interrupt vector to %qE attribute is not " -- "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)", -- name); -- *no_add_attrs = true; -+ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), adjust); -+ adjust = LARCH_PROLOGUE_TEMP (Pmode); - } -- } - -- return NULL_TREE; -- } -+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, -+ hard_frame_pointer_rtx, -+ adjust)); - -- return NULL_TREE; --} -+ if (!(stack_realign_drap)) -+ { -+ rtx dwarf = NULL_RTX; -+ rtx minus_offset = GEN_INT (-frame->hard_frame_pointer_offset); -+ rtx cfa_adjust_value = gen_rtx_PLUS (Pmode, -+ hard_frame_pointer_rtx, -+ minus_offset); - --/* Handle a "use_shadow_register_set" attribute with an optional argument. */ -+ rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value); -+ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf); -+ RTX_FRAME_RELATED_P (insn) = 1; - --static tree --loongarch_handle_use_shadow_register_set_attr (tree *node ATTRIBUTE_UNUSED, -- tree name, tree args, -- int flags ATTRIBUTE_UNUSED, -- bool *no_add_attrs) --{ -- /* Check for an argument. */ -- if (is_attribute_p ("use_shadow_register_set", name) && args != NULL) -- { -- tree cst; -+ REG_NOTES (insn) = dwarf; -+ } -+ } - -- cst = TREE_VALUE (args); -- if (TREE_CODE (cst) != STRING_CST) -- { -- warning (OPT_Wattributes, -- "%qE attribute requires a string argument", -- name); -- *no_add_attrs = true; -- } -- else if (strcmp (TREE_STRING_POINTER (cst), "intstack") != 0) -- { -- warning (OPT_Wattributes, -- "argument to %qE attribute is not intstack", name); -- *no_add_attrs = true; -- } -+ /* If we need to restore registers, deallocate as much stack as -+ possible in the second step without going out of range. 
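"Out of range" here means beyond the signed 12-bit sp-relative offsets used to reload the save slots. A sketch with assumed numbers: if frame->total_size = 12288 and loongarch_first_stack_step yields 2032, then step1 = 12288 - 2032 = 10256 is released first, and every register is restored at an offset within [0, 2032) of the final sp before the last 2032 bytes go away.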
*/ -+ if ((frame->mask | frame->fmask) != 0) -+ { -+ step2 = loongarch_first_stack_step (frame); -+ step1 -= step2; -+ } - -- return NULL_TREE; -- } -+ /* Set TARGET to BASE + STEP1. */ -+ if (step1 > 0) -+ { -+ /* Emit a barrier to prevent loads from a deallocated stack. */ -+ loongarch_emit_stack_tie (); -+ need_barrier_p = false; - -- return NULL_TREE; --} -- --/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR -- and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */ -+ /* Get an rtx for STEP1 that we can add to BASE. */ -+ rtx adjust = GEN_INT (step1); -+ if (!IMM12_OPERAND (step1)) -+ { -+ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), adjust); -+ adjust = LARCH_PROLOGUE_TEMP (Pmode); -+ } - --static void --loongarch_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr) --{ -- if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))) -+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, -+ adjust)); -+ -+ rtx dwarf = NULL_RTX; -+ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, -+ stack_pointer_rtx, -+ GEN_INT (step2)); -+ -+ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ -+ REG_NOTES (insn) = dwarf; -+ } -+ -+ /* Restore drap reg and fp reg first when stack_realign_drap is true. */ -+ if (stack_realign_drap) -+ { -+ if (frame->mask & (1LL << HARD_FRAME_POINTER_REGNUM)) -+ { -+ offset = cfun->machine->frame.gp_sp_offset -+ - (frame->total_size - step2) ; -+ loongarch_save_restore_reg (word_mode, HARD_FRAME_POINTER_REGNUM, -+ offset, loongarch_restore_reg); -+ cfun->machine->frame.mask -+ = frame->mask & ~(1LL << HARD_FRAME_POINTER_REGNUM); -+ saved_gpr_num ++; -+ } -+ if (frame->mask & (1LL << find_drap_reg ())) -+ { -+ offset = cfun->machine->frame.gp_sp_offset -+ - (frame->total_size - step2) - UNITS_PER_WORD * saved_gpr_num; -+ loongarch_save_restore_reg (word_mode, find_drap_reg (), -+ offset, loongarch_restore_reg); -+ cfun->machine->frame.mask -+ = frame->mask & ~(1LL << (find_drap_reg ())); -+ saved_gpr_num ++; -+ } -+ cfun->machine->frame.gpr_saved_num = saved_gpr_num; -+ } -+ } -+ else /* stack_realign_fp. */ - { -- *base_ptr = XEXP (x, 0); -- *offset_ptr = INTVAL (XEXP (x, 1)); -+ /* If we need to restore registers, deallocate as much stack as -+ possible in the second step without going out of range. */ -+ if ((frame->mask | frame->fmask) != 0) -+ { -+ step2 = loongarch_first_stack_step (frame); -+ rtx tmp_reg = plus_constant (Pmode, -+ hard_frame_pointer_rtx, -+ -(frame->hard_frame_pointer_offset -+ - frame->frame_pointer_offset)); -+ insn = emit_insn (gen_rtx_SET (stack_pointer_rtx, tmp_reg)); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ } - } -- else -+ -+ /* Restore the registers. */ -+ loongarch_for_each_saved_reg (frame->total_size - step2, -+ loongarch_restore_reg); -+ -+ cfun->machine->frame.mask = mask; -+ -+ if (need_barrier_p) -+ loongarch_emit_stack_tie (); -+ -+ /* Deallocate the final bit of the frame. 
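Note the asymmetry below: on the drap path sp cannot be recomputed by adding STEP2 back, because the earlier align-down discarded an unknown number of bytes, so the epilogue instead copies the preserved drap register into sp with an add of 0.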
*/ -+ if (step2 > 0) - { -- *base_ptr = x; -- *offset_ptr = 0; -+ if (stack_realign_drap) -+ { -+ rtx tmp_reg = gen_rtx_REG (Pmode, find_drap_reg ()); -+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, -+ tmp_reg, -+ const0_rtx)); -+ } -+ else -+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, -+ stack_pointer_rtx, -+ GEN_INT (step2))); -+ -+ rtx dwarf = NULL_RTX; -+ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, const0_rtx); -+ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); -+ RTX_FRAME_RELATED_P (insn) = 1; -+ -+ REG_NOTES (insn) = dwarf; - } -+ -+ /* Add in the __builtin_eh_return stack adjustment. */ -+ if (crtl->calls_eh_return) -+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -+ EH_RETURN_STACKADJ_RTX)); -+ -+ if (!sibcall_p) -+ emit_jump_insn (gen_simple_return_internal (ra)); - } -- --static unsigned int loongarch_build_integer (struct loongarch_integer_op *, -- unsigned HOST_WIDE_INT); -+ -+#define LU32I_B (0xfffffULL << 32) -+#define LU52I_B (0xfffULL << 52) - - /* Fill CODES with a sequence of rtl operations to load VALUE. -- Return the number of operations needed. -- Split interger in loongarch_output_move. */ -+ Return the number of operations needed. */ - - static unsigned int - loongarch_build_integer (struct loongarch_integer_op *codes, -- unsigned HOST_WIDE_INT value) -+ HOST_WIDE_INT value) -+ - { -- uint32_t hi32, lo32; -- char all0_bit_vec, sign_bit_vec, allf_bit_vec, paritial_is_sext_of_prev; - unsigned int cost = 0; - -- lo32 = value & 0xffffffff; -- hi32 = value >> 32; -- -- all0_bit_vec = (((hi32 & 0xfff00000) == 0) << 3) -- | (((hi32 & 0x000fffff) == 0) << 2) -- | (((lo32 & 0xfffff000) == 0) << 1) -- | ((lo32 & 0x00000fff) == 0); -- sign_bit_vec = (((hi32 & 0x80000000) != 0) << 3) -- | (((hi32 & 0x00080000) != 0) << 2) -- | (((lo32 & 0x80000000) != 0) << 1) -- | ((lo32 & 0x00000800) != 0); -- allf_bit_vec = (((hi32 & 0xfff00000) == 0xfff00000) << 3) -- | (((hi32 & 0x000fffff) == 0x000fffff) << 2) -- | (((lo32 & 0xfffff000) == 0xfffff000) << 1) -- | ((lo32 & 0x00000fff) == 0x00000fff); -- paritial_is_sext_of_prev = (all0_bit_vec ^ allf_bit_vec) -- & (all0_bit_vec ^ (sign_bit_vec << 1)); -- -- do -- { -- if (paritial_is_sext_of_prev == 0x7) -- { -- codes[0].code = UNKNOWN; -- codes[0].method = METHOD_LU52I; -- codes[0].value = value & 0xfff0000000000000; -- cost++; -- break; -- } -- if ((all0_bit_vec & 0x3) == 0x2) -- { -- codes[cost].code = UNKNOWN; -- codes[cost].method = METHOD_NORMAL; -- codes[cost].value = value & 0xfff; -- cost++; -- } -- else -- { -- switch (paritial_is_sext_of_prev & 0x3) -- { -- case 0: -- codes[cost].code = UNKNOWN; -- codes[cost].method = METHOD_NORMAL; -- codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000; -- cost++; -- codes[cost].code = IOR; -- codes[cost].method = METHOD_NORMAL; -- codes[cost].value = value & 0xfff; -- cost++; -- break; -- case 1: -- codes[cost].code = UNKNOWN; -- codes[cost].method = METHOD_NORMAL; -- codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000; -- cost++; -- break; -- case 2: -- codes[cost].code = UNKNOWN; -- codes[cost].method = METHOD_NORMAL; -- codes[cost].value = (HOST_WIDE_INT)value << 52 >> 52; -- cost++; -- break; -- case 3: -- codes[cost].code = UNKNOWN; -- codes[cost].method = METHOD_NORMAL; -- codes[cost].value = 0; -+ /* Get the lower 32 bits of the value. 
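A hypothetical end-to-end case (constant assumed, immediates written unsigned for readability): value = 0x123456789abcdef0 takes the maximal four-operation sequence built below,

      lu12i.w  $t0, 0x9abcd        bits 12..31, sign-extended
      ori      $t0, $t0, 0xef0     bits 0..11
      lu32i.d  $t0, 0x45678        bits 32..51
      lu52i.d  $t0, $t0, 0x123     bits 52..63

whereas 0xfffffffffffff000 stops after the single lu12i.w, since bits 32..63 are recognised as the sign-extension of the low word.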
*/ -+ HOST_WIDE_INT low_part = (int32_t)value; -+ -+ if (IMM12_OPERAND (low_part) || IMM12_OPERAND_UNSIGNED (low_part)) -+ { -+ /* The value of the lower 32 bit be loaded with one instruction. -+ lu12i.w. */ -+ codes[0].code = UNKNOWN; -+ codes[0].method = METHOD_NORMAL; -+ codes[0].value = low_part; -+ cost++; -+ } -+ else -+ { -+ /* lu12i.w + ior. */ -+ codes[0].code = UNKNOWN; -+ codes[0].method = METHOD_NORMAL; -+ codes[0].value = low_part & ~(IMM_REACH - 1); -+ cost++; -+ HOST_WIDE_INT iorv = low_part & (IMM_REACH - 1); -+ if (iorv != 0) -+ { -+ codes[1].code = IOR; -+ codes[1].method = METHOD_NORMAL; -+ codes[1].value = iorv; - cost++; -- break; -- default: -- gcc_unreachable (); - } -- } -+ } - -- if (((value & 0xfffffffffffff800) ^ 0xfff00000fffff800) == 0) -+ if (TARGET_64BIT) -+ { -+ bool lu32i[2] = {(value & LU32I_B) == 0, (value & LU32I_B) == LU32I_B}; -+ bool lu52i[2] = {(value & LU52I_B) == 0, (value & LU52I_B) == LU52I_B}; -+ -+ int sign31 = (value & (HOST_WIDE_INT_1U << 31)) >> 31; -+ int sign51 = (value & (HOST_WIDE_INT_1U << 51)) >> 51; -+ /* Determine whether the upper 32 bits are sign-extended from the lower -+ 32 bits. If it is, the instructions to load the high order can be -+ ommitted. */ -+ if (lu32i[sign31] && lu52i[sign31]) -+ return cost; -+ /* Determine whether bits 32-51 are sign-extended from the lower 32 -+ bits. If so, directly load 52-63 bits. */ -+ else if (lu32i[sign31]) - { -- codes[cost].method = METHOD_INSV; -- cost++; -- break; -+ codes[cost].method = METHOD_LU52I; -+ codes[cost].value = value & LU52I_B; -+ return cost + 1; - } - -- switch (paritial_is_sext_of_prev >> 2) -- { -- case 0: -- codes[cost].method = METHOD_LU32I; -- codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000; -- cost++; -- case 1: -+ codes[cost].method = METHOD_LU32I; -+ codes[cost].value = (value & LU32I_B) | (sign51 ? LU52I_B : 0); -+ cost++; -+ -+ /* Determine whether the 52-61 bits are sign-extended from the low order, -+ and if not, load the 52-61 bits. */ -+ if (!lu52i[(value & (HOST_WIDE_INT_1U << 51)) >> 51]) -+ { - codes[cost].method = METHOD_LU52I; -- codes[cost].value = value & 0xfff0000000000000; -- cost++; -- break; -- case 2: -- codes[cost].method = METHOD_LU32I; -- codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000; -- cost++; -- break; -- case 3: -- break; -- default: -- gcc_unreachable (); -- } -+ codes[cost].value = value & LU52I_B; -+ cost++; -+ } - } -- while (0); -+ -+ gcc_assert (cost <= LARCH_MAX_INTEGER_OPS); - - return cost; - } -- -+ - /* Fill CODES with a sequence of rtl operations to load VALUE. - Return the number of operations needed. -- Split interger in loongarch_output_move. */ -+ Split interger in loongarch_output_move. */ - - static unsigned int - loongarch_integer_cost (HOST_WIDE_INT value) - { - struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; -- return loongarch_build_integer(codes, value); -+ return loongarch_build_integer (codes, value); - } - - /* Implement TARGET_LEGITIMATE_CONSTANT_P. */ -@@ -1785,14 +1846,13 @@ loongarch_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x) - { - return loongarch_const_insns (x) > 0; - } -- - - /* Return true if X is a thread-local symbol. 
*/ - - static bool - loongarch_tls_symbol_p (rtx x) - { -- return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0; -+ return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0; - } - - /* Return true if SYMBOL_REF X is associated with a global symbol -@@ -1809,9 +1869,6 @@ loongarch_global_symbol_p (const_rtx x) - if (!decl) - return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); - -- /* Weakref symbols are not TREE_PUBLIC, but their targets are global -- or weak symbols. Relocations in the object file will be against -- the target symbol, so it's that symbol's binding that matters here. */ - return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl)); - } - -@@ -1826,9 +1883,6 @@ loongarch_global_symbol_noweak_p (const_rtx x) - if (!decl) - return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); - -- /* Weakref symbols are not TREE_PUBLIC, but their targets are global -- or weak symbols. Relocations in the object file will be against -- the target symbol, so it's that symbol's binding that matters here. */ - return DECL_P (decl) && TREE_PUBLIC (decl); - } - -@@ -1841,7 +1895,6 @@ loongarch_weak_symbol_p (const_rtx x) - return DECL_P (decl) && DECL_WEAK (decl); - } - -- - /* Return true if SYMBOL_REF X binds locally. */ - - bool -@@ -1850,9 +1903,8 @@ loongarch_symbol_binds_local_p (const_rtx x) - if (GET_CODE (x) == LABEL_REF) - return false; - -- return (SYMBOL_REF_DECL (x) -- ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) -- : SYMBOL_REF_LOCAL_P (x)); -+ return (SYMBOL_REF_DECL (x) ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) -+ : SYMBOL_REF_LOCAL_P (x)); - } - - /* Return true if OP is a constant vector with the number of units in MODE, -@@ -1995,38 +2047,34 @@ loongarch_const_vector_shuffle_set_p (rtx op, machine_mode mode) - static bool - loongarch_rtx_constant_in_small_data_p (machine_mode mode) - { -- return (GET_MODE_SIZE (mode) <= loongarch_small_data_threshold); -+ return (GET_MODE_SIZE (mode) <= g_switch_value); - } - - /* Return the method that should be used to access SYMBOL_REF or -- LABEL_REF X in context CONTEXT. */ -+ LABEL_REF X. */ - - static enum loongarch_symbol_type --loongarch_classify_symbol (const_rtx x, enum loongarch_symbol_context context) -+loongarch_classify_symbol (const_rtx x) - { -- if (TARGET_RTP_PIC) -- return SYMBOL_GOT_DISP; -- - if (GET_CODE (x) == LABEL_REF) -- { -- return SYMBOL_GOT_DISP; -- } -+ return SYMBOL_GOT_DISP; - -- gcc_assert (GET_CODE (x) == SYMBOL_REF); -+ gcc_assert (SYMBOL_REF_P (x)); - - if (SYMBOL_REF_TLS_MODEL (x)) - return SYMBOL_TLS; - -- if (GET_CODE (x) == SYMBOL_REF) -+ if (SYMBOL_REF_P (x)) - return SYMBOL_GOT_DISP; -+ -+ return SYMBOL_GOT_DISP; - } - --/* Return true if X is a symbolic constant that can be used in context -- CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */ -+/* Return true if X is a symbolic constant. If it is, -+ store the type of the symbol in *SYMBOL_TYPE. 
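For instance (symbol assumed): x = (const (plus (symbol_ref "foo") (const_int 4))) splits into the bare symbol plus offset 4; the symbol classifies as SYMBOL_GOT_DISP and the offset is then vetted by the per-type check in the switch below.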
*/ - - bool --loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, -- enum loongarch_symbol_type *symbol_type) -+loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type) - { - rtx offset; - -@@ -2036,9 +2084,9 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, - *symbol_type = UNSPEC_ADDRESS_TYPE (x); - x = UNSPEC_ADDRESS (x); - } -- else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) -+ else if (SYMBOL_REF_P (x) || GET_CODE (x) == LABEL_REF) - { -- *symbol_type = loongarch_classify_symbol (x, context); -+ *symbol_type = loongarch_classify_symbol (x); - if (*symbol_type == SYMBOL_TLS) - return true; - } -@@ -2052,8 +2100,6 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, - relocations. */ - switch (*symbol_type) - { -- /* Fall through. */ -- - case SYMBOL_GOT_DISP: - case SYMBOL_TLSGD: - case SYMBOL_TLSLDM: -@@ -2062,17 +2108,25 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, - } - gcc_unreachable (); - } -- --/* Like loongarch_symbol_insns We rely on the fact that, in the worst case. */ -+ -+/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed -+ to load symbols of type TYPE into a register. Return 0 if the given -+ type of symbol cannot be used as an immediate operand. -+ -+ Otherwise, return the number of instructions needed to load or store -+ values of mode MODE to or from addresses of type TYPE. Return 0 if -+ the given type of symbol is not valid in addresses. */ - - static int --loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) -+loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) - { -- if (loongarch_use_pcrel_pool_p[(int) type]) -- { -- /* The constant must be loaded and then dereferenced. */ -- return 0; -- } -+ /* LSX LD.* and ST.* cannot support loading symbols via an immediate -+ operand. */ -+ if (LSX_SUPPORTED_MODE_P (mode)) -+ return 0; -+ -+ if (LASX_SUPPORTED_MODE_P (mode)) -+ return 0; - - switch (type) - { -@@ -2082,8 +2136,6 @@ loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) - if (mode != MAX_MACHINE_MODE) - return 0; - -- /* Fall through. */ -- - return 3; - - case SYMBOL_TLSGD: -@@ -2097,30 +2149,6 @@ loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) - gcc_unreachable (); - } - --/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed -- to load symbols of type TYPE into a register. Return 0 if the given -- type of symbol cannot be used as an immediate operand. -- -- Otherwise, return the number of instructions needed to load or store -- values of mode MODE to or from addresses of type TYPE. Return 0 if -- the given type of symbol is not valid in addresses. -- -- In both cases, instruction counts are based off BASE_INSN_LENGTH. */ -- --static int --loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) --{ -- /* LSX LD.* and ST.* cannot support loading symbols via an immediate -- operand. */ -- if (LSX_SUPPORTED_MODE_P (mode)) -- return 0; -- -- if (LASX_SUPPORTED_MODE_P (mode)) -- return 0; -- -- return loongarch_symbol_insns_1 (type, mode) * (1); --} -- - /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */ - - static bool -@@ -2129,11 +2157,6 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) - enum loongarch_symbol_type type; - rtx base, offset; - -- /* There is no assembler syntax for expressing an address-sized -- high part. 
*/ -- if (GET_CODE (x) == HIGH) -- return true; -- - /* As an optimization, reject constants that loongarch_legitimize_move - can expand inline. - -@@ -2147,16 +2170,12 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) - return true; - - split_const (x, &base, &offset); -- if (loongarch_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)) -+ if (loongarch_symbolic_constant_p (base, &type)) - { -- /* See whether we explicitly want these symbols in the pool. */ -- if (loongarch_use_pcrel_pool_p[(int) type]) -- return false; -- - /* The same optimization as for CONST_INT. */ -- if (SMALL_INT (offset) && loongarch_symbol_insns (type, MAX_MACHINE_MODE) > 0) -+ if (IMM12_INT (offset) -+ && loongarch_symbol_insns (type, MAX_MACHINE_MODE) > 0) - return true; -- - } - - /* TLS symbols must be computed by loongarch_legitimize_move. */ -@@ -2166,22 +2185,13 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) - return false; - } - --/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for -- constants when we're using a per-function constant pool. */ -- --static bool --loongarch_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED, -- const_rtx x ATTRIBUTE_UNUSED) --{ -- return 1; --} -- - /* Return true if register REGNO is a valid base register for mode MODE. - STRICT_P is true if REG_OK_STRICT is in effect. */ - - int --loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, -- bool strict_p) -+loongarch_regno_mode_ok_for_base_p (int regno, -+ machine_mode mode ATTRIBUTE_UNUSED, -+ bool strict_p) - { - if (!HARD_REGISTER_NUM_P (regno)) - { -@@ -2196,7 +2206,6 @@ loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, - if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM) - return true; - -- - return GP_REG_P (regno); - } - -@@ -2206,7 +2215,7 @@ loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, - static bool - loongarch_valid_base_register_p (rtx x, machine_mode mode, bool strict_p) - { -- if (!strict_p && GET_CODE (x) == SUBREG) -+ if (!strict_p && SUBREG_P (x)) - x = SUBREG_REG (x); - - return (REG_P (x) -@@ -2220,8 +2229,8 @@ static bool - loongarch_valid_offset_p (rtx x, machine_mode mode) - { - /* Check that X is a signed 12-bit number, -- * or check that X is a signed 16-bit number -- * and offset 4 byte aligned */ -+ or check that X is a signed 16-bit number -+ and offset 4 byte aligned. */ - if (!(const_arith_operand (x, Pmode) - || ((mode == E_SImode || mode == E_DImode) - && const_imm16_operand (x, Pmode) -@@ -2231,7 +2240,7 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) - /* We may need to split multiword moves, so make sure that every word - is accessible. */ - if (GET_MODE_SIZE (mode) > UNITS_PER_WORD -- && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) -+ && !IMM12_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) - return false; - - /* LSX LD.* and ST.* supports 10-bit signed offsets. 
*/ -@@ -2248,13 +2257,42 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) - return true; - } - -+static bool -+loongarch_valid_index_p (struct loongarch_address_info *info, rtx x, -+ machine_mode mode, bool strict_p) -+{ -+ rtx index; -+ -+ if ((REG_P (x) || SUBREG_P (x)) -+ && GET_MODE (x) == Pmode) -+ { -+ index = x; -+ } -+ else -+ return false; -+ -+ if (!strict_p -+ && SUBREG_P (index) -+ && contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))]) -+ index = SUBREG_REG (index); -+ -+ if (loongarch_valid_base_register_p (index, mode, strict_p)) -+ { -+ info->type = ADDRESS_REG_REG; -+ info->offset = index; -+ return true; -+ } -+ -+ return false; -+} -+ - /* Return true if X is a valid address for machine mode MODE. If it is, - fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in - effect. */ - - static bool - loongarch_classify_address (struct loongarch_address_info *info, rtx x, -- machine_mode mode, bool strict_p) -+ machine_mode mode, bool strict_p) - { - switch (GET_CODE (x)) - { -@@ -2266,21 +2304,26 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x, - return loongarch_valid_base_register_p (info->reg, mode, strict_p); - - case PLUS: -+/* -+ if (loongarch_valid_base_register_p (XEXP (x, 0), mode, strict_p) -+ && loongarch_valid_index_p (info, XEXP (x, 1), mode, strict_p)) -+ { -+ info->reg = XEXP (x, 0); -+ return true; -+ } -+ -+ if (loongarch_valid_base_register_p (XEXP (x, 1), mode, strict_p) -+ && loongarch_valid_index_p (info, XEXP (x, 0), mode, strict_p)) -+ { -+ info->reg = XEXP (x, 1); -+ return true; -+ } -+*/ - info->type = ADDRESS_REG; - info->reg = XEXP (x, 0); - info->offset = XEXP (x, 1); - return (loongarch_valid_base_register_p (info->reg, mode, strict_p) - && loongarch_valid_offset_p (info->offset, mode)); -- #if 0 -- case LABEL_REF: -- case SYMBOL_REF: -- info->type = ADDRESS_SYMBOLIC; -- return (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM, -- &info->symbol_type) -- && loongarch_symbol_insns (info->symbol_type, mode) > 0 -- && !loongarch_split_p[info->symbol_type]); -- -- #endif - default: - return false; - } -@@ -2296,39 +2339,21 @@ loongarch_legitimate_address_p (machine_mode mode, rtx x, bool strict_p) - return loongarch_classify_address (&addr, x, mode, strict_p); - } - --/* Return true if X is a legitimate $sp-based address for mode MODE. */ -- --bool --loongarch_stack_address_p (rtx x, machine_mode mode) --{ -- struct loongarch_address_info addr; -- -- return (loongarch_classify_address (&addr, x, mode, false) -- && addr.type == ADDRESS_REG -- && addr.reg == stack_pointer_rtx); --} -- --/* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load -- indexed address instruction. Note that such addresses are -- not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P -- sense, because their use is so restricted. */ -+/* Return true if ADDR matches the pattern for the indexed address -+ instruction. */ - - static bool --loongarch_lx_address_p (rtx addr, machine_mode mode) -+loongarch_index_address_p (rtx addr, machine_mode mode ATTRIBUTE_UNUSED) - { - if (GET_CODE (addr) != PLUS - || !REG_P (XEXP (addr, 0)) - || !REG_P (XEXP (addr, 1))) - return false; -- if (LSX_SUPPORTED_MODE_P (mode)) -- return true; -- return false; -+ return true; - } -- - - /* Return the number of instructions needed to load or store a value -- of mode MODE at address X, assuming that BASE_INSN_LENGTH is the -- length of one instruction. Return 0 if X isn't valid for MODE. -+ of mode MODE at address X. 
Return 0 if X isn't valid for MODE. - Assume that multiword moves may need to be split into word moves - if MIGHT_SPLIT_P, otherwise assume that a single load or store is - enough. */ -@@ -2338,7 +2363,8 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) - { - struct loongarch_address_info addr; - int factor; -- bool lsx_p = (!might_split_p && (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))); -+ bool lsx_p = (!might_split_p && -+ (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))); - - if (!loongarch_classify_address (&addr, x, mode, false)) - return 0; -@@ -2367,6 +2393,9 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) - } - return factor; - -+ case ADDRESS_REG_REG: -+ return lsx_p ? 0 : factor; -+ - case ADDRESS_CONST_INT: - return lsx_p ? 0 : factor; - -@@ -2380,7 +2409,8 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) - shifted left SHIFT bits before being used. */ - - bool --loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) -+loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, -+ int shift = 0) - { - return (x & ((1 << shift) - 1)) == 0 && x < ((unsigned) 1 << (shift + bits)); - } -@@ -2389,7 +2419,8 @@ loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = - shifted left SHIFT bits before being used. */ - - bool --loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) -+loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, -+ int shift = 0) - { - x += 1 << (bits + shift - 1); - return loongarch_unsigned_immediate_p (x, bits, shift); -@@ -2408,20 +2439,6 @@ loongarch_ldst_scaled_shift (machine_mode mode) - return shift; - } - --/* Return true if X is a legitimate address that conforms to the requirements -- for a microLARCH LWSP or SWSP insn. */ -- --bool --lwsp_swsp_address_p (rtx x, machine_mode mode) --{ -- struct loongarch_address_info addr; -- -- return (loongarch_classify_address (&addr, x, mode, false) -- && addr.type == ADDRESS_REG -- && REGNO (addr.reg) == STACK_POINTER_REGNUM -- && uw5_operand (addr.offset, mode)); --} -- - /* Return true if X is a legitimate address with a 12-bit offset. - MODE is the mode of the value being accessed. */ - -@@ -2433,54 +2450,47 @@ loongarch_12bit_offset_address_p (rtx x, machine_mode mode) - return (loongarch_classify_address (&addr, x, mode, false) - && addr.type == ADDRESS_REG - && CONST_INT_P (addr.offset) -- && ULARCH_12BIT_OFFSET_P (INTVAL (addr.offset))); -+ && LARCH_U12BIT_OFFSET_P (INTVAL (addr.offset))); - } - --/* Return true if X is a legitimate address with a 9-bit offset. -+/* Return true if X is a legitimate address with a 14-bit offset shifted 2. - MODE is the mode of the value being accessed. */ - - bool --loongarch_9bit_offset_address_p (rtx x, machine_mode mode) -+loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) - { - struct loongarch_address_info addr; - - return (loongarch_classify_address (&addr, x, mode, false) - && addr.type == ADDRESS_REG - && CONST_INT_P (addr.offset) -- && LARCH_9BIT_OFFSET_P (INTVAL (addr.offset))); -+ && LARCH_16BIT_OFFSET_P (INTVAL (addr.offset)) -+ && LARCH_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); - } - --/* Return true if X is a legitimate address with a 14-bit offset shifted 2. -- MODE is the mode of the value being accessed. 
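Concretely (instruction names assumed from the base ISA, not from this hunk): this shape matches ldptr.w/ldptr.d and stptr.*, whose 14-bit immediate is scaled by 4, so the byte offset must be a multiple of 4 within [-32768, 32764]; 32764 satisfies both predicates used below, while 32766 fails the shift-2 one.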
*/ -- - bool --loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) -+loongarch_base_index_address_p (rtx x, machine_mode mode) - { - struct loongarch_address_info addr; - - return (loongarch_classify_address (&addr, x, mode, false) -- && addr.type == ADDRESS_REG -- && CONST_INT_P (addr.offset) -- && LISA_16BIT_OFFSET_P (INTVAL (addr.offset)) -- && LISA_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); -+ && addr.type == ADDRESS_REG_REG -+ && REG_P (addr.offset)); - } - -- - /* Return the number of instructions needed to load constant X, -- assuming that BASE_INSN_LENGTH is the length of one instruction. - Return 0 if X isn't a valid constant. */ - - int - loongarch_const_insns (rtx x) - { -- struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; - enum loongarch_symbol_type symbol_type; - rtx offset; - - switch (GET_CODE (x)) - { - case CONST_INT: -- return loongarch_build_integer (codes, INTVAL (x)); -+ return loongarch_integer_cost (INTVAL (x)); - - case CONST_VECTOR: - if ((ISA_HAS_LSX || ISA_HAS_LASX) -@@ -2488,19 +2498,18 @@ loongarch_const_insns (rtx x) - return 1; - /* Fall through. */ - case CONST_DOUBLE: -- /* Allow zeros for normal mode, where we can use $0. */ - return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0; - - case CONST: - /* See if we can refer to X directly. */ -- if (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type)) -+ if (loongarch_symbolic_constant_p (x, &symbol_type)) - return loongarch_symbol_insns (symbol_type, MAX_MACHINE_MODE); - - /* Otherwise try splitting the constant into a base and offset. -- If the offset is a 16-bit value, we can load the base address -- into a register and then use (D)ADDIU to add in the offset. -+ If the offset is a 12-bit value, we can load the base address -+ into a register and then use ADDI.{W/D} to add in the offset. - If the offset is larger, we can load the base and offset -- into separate registers and add them together with (D)ADDU. -+ into separate registers and add them together with ADD.{W/D}. - However, the latter is only possible before reload; during - and after reload, we must have the option of forcing the - constant into the pool instead. */ -@@ -2510,18 +2519,18 @@ loongarch_const_insns (rtx x) - int n = loongarch_const_insns (x); - if (n != 0) - { -- if (SMALL_INT (offset)) -+ if (IMM12_INT (offset)) - return n + 1; - else if (!targetm.cannot_force_const_mem (GET_MODE (x), x)) -- return n + 1 + loongarch_build_integer (codes, INTVAL (offset)); -+ return n + 1 + loongarch_integer_cost (INTVAL (offset)); - } - } - return 0; - - case SYMBOL_REF: - case LABEL_REF: -- return loongarch_symbol_insns (loongarch_classify_symbol (x, SYMBOL_CONTEXT_LEA), -- MAX_MACHINE_MODE); -+ return loongarch_symbol_insns ( -+ loongarch_classify_symbol (x), MAX_MACHINE_MODE); - - default: - return 0; -@@ -2530,8 +2539,7 @@ loongarch_const_insns (rtx x) - - /* X is a doubleword constant that can be handled by splitting it into - two words and loading each word separately. Return the number of -- instructions required to do this, assuming that BASE_INSN_LENGTH -- is the length of one instruction. */ -+ instructions required to do this. */ - - int - loongarch_split_const_insns (rtx x) -@@ -2565,8 +2573,7 @@ loongarch_subword_at_byte (rtx op, unsigned int byte) - } - - /* Return the number of instructions needed to implement INSN, -- given that it loads from or stores to MEM. Assume that -- BASE_INSN_LENGTH is the length of one instruction. */ -+ given that it loads from or stores to MEM. 
*/ - - int - loongarch_load_store_insns (rtx mem, rtx_insn *insn) -@@ -2583,18 +2590,18 @@ loongarch_load_store_insns (rtx mem, rtx_insn *insn) - if (might_split_p) - { - set = single_set (insn); -- if (set && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn)) -+ if (set -+ && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set))) - might_split_p = false; - } - - return loongarch_address_insns (XEXP (mem, 0), mode, might_split_p); - } - --/* Return the number of instructions needed for an integer division, -- assuming that BASE_INSN_LENGTH is the length of one instruction. */ -+/* Return the number of instructions needed for an integer division. */ - - int --loongarch_idiv_insns (machine_mode mode) -+loongarch_idiv_insns (machine_mode mode ATTRIBUTE_UNUSED) - { - int count; - -@@ -2605,7 +2612,6 @@ loongarch_idiv_insns (machine_mode mode) - return count; - } - -- - /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */ - - void -@@ -2619,7 +2625,8 @@ loongarch_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1) - of mode MODE. Return that new register. */ - - static rtx --loongarch_force_binary (machine_mode mode, enum rtx_code code, rtx op0, rtx op1) -+loongarch_force_binary (machine_mode mode, enum rtx_code code, rtx op0, -+ rtx op1) - { - rtx reg; - -@@ -2643,13 +2650,12 @@ loongarch_force_temporary (rtx dest, rtx value) - } - } - -- - /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE, - then add CONST_INT OFFSET to the result. */ - - static rtx - loongarch_unspec_address_offset (rtx base, rtx offset, -- enum loongarch_symbol_type symbol_type) -+ enum loongarch_symbol_type symbol_type) - { - base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), - UNSPEC_ADDRESS_FIRST + symbol_type); -@@ -2684,42 +2690,20 @@ loongarch_strip_unspec_address (rtx op) - return op; - } - -- --/* Return a base register that holds pic_offset_table_rtx. -- TEMP, if nonnull, is a scratch Pmode base register. */ -- --rtx --loongarch_pic_base_register (rtx temp) --{ -- return pic_offset_table_rtx; -- --} -- --/* If SRC is the RHS of a load_call insn, return the underlying symbol -- reference. Return NULL_RTX otherwise. */ -- --static rtx --loongarch_strip_unspec_call (rtx src) --{ -- if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL) -- return loongarch_strip_unspec_address (XVECEXP (src, 0, 1)); -- return NULL_RTX; --} -- - /* Return a legitimate address for REG + OFFSET. TEMP is as for - loongarch_force_temporary; it is only needed when OFFSET is not a -- SMALL_OPERAND. */ -+ IMM12_OPERAND. */ - - static rtx - loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset) - { -- if (!SMALL_OPERAND (offset)) -+ if (!IMM12_OPERAND (offset)) - { - rtx high; - -- /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. -- The addition inside the macro CONST_HIGH_PART may cause an -- overflow, so we need to force a sign-extension check. */ -+ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH. -+ The addition inside the macro CONST_HIGH_PART may cause an -+ overflow, so we need to force a sign-extension check. */ - high = gen_int_mode (CONST_HIGH_PART (offset), Pmode); - offset = CONST_LOW_PART (offset); - high = loongarch_force_temporary (temp, high); -@@ -2727,49 +2711,40 @@ loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset) - } - return plus_constant (Pmode, reg, offset); - } -- -+ - /* The __tls_get_attr symbol. 
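(Summarising the helpers that follow, as wired up in loongarch_legitimize_tls_address: GD and LD load a GOT entry and call __tls_get_addr through call_value_internal; IE loads the tp-relative offset with la.tls.ie and adds $tp; LE materialises it with la.tls.le and adds $tp.)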
*/ --static GTY(()) rtx loongarch_tls_symbol; -+static GTY (()) rtx loongarch_tls_symbol; - - /* Load an entry from the GOT for a TLS GD access. */ - --static rtx loongarch_got_load_tls_gd (rtx dest, rtx sym) -+static rtx -+loongarch_got_load_tls_gd (rtx dest, rtx sym) - { -- if (Pmode == DImode) -- return gen_got_load_tls_gddi (dest, sym); -- else -- return gen_got_load_tls_gdsi (dest, sym); -+ return PMODE_INSN (gen_got_load_tls_gd, (dest, sym)); - } - - /* Load an entry from the GOT for a TLS LD access. */ - --static rtx loongarch_got_load_tls_ld (rtx dest, rtx sym) -+static rtx -+loongarch_got_load_tls_ld (rtx dest, rtx sym) - { -- if (Pmode == DImode) -- return gen_got_load_tls_lddi (dest, sym); -- else -- return gen_got_load_tls_ldsi (dest, sym); -+ return PMODE_INSN (gen_got_load_tls_ld, (dest, sym)); - } - -- - /* Load an entry from the GOT for a TLS IE access. */ - --static rtx loongarch_got_load_tls_ie (rtx dest, rtx sym) -+static rtx -+loongarch_got_load_tls_ie (rtx dest, rtx sym) - { -- if (Pmode == DImode) -- return gen_got_load_tls_iedi (dest, sym); -- else -- return gen_got_load_tls_iesi (dest, sym); -+ return PMODE_INSN (gen_got_load_tls_ie, (dest, sym)); - } - - /* Add in the thread pointer for a TLS LE access. */ - --static rtx loongarch_got_load_tls_le (rtx dest, rtx sym) -+static rtx -+loongarch_got_load_tls_le (rtx dest, rtx sym) - { -- if (Pmode == DImode) -- return gen_got_load_tls_ledi (dest, sym); -- else -- return gen_got_load_tls_lesi (dest, sym); -+ return PMODE_INSN (gen_got_load_tls_le, (dest, sym)); - } - - /* Return an instruction sequence that calls __tls_get_addr. SYM is -@@ -2799,7 +2774,8 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) - else - gcc_unreachable (); - -- insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, const0_rtx)); -+ insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, -+ const0_rtx)); - RTL_CONST_CALL_P (insn) = 1; - use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); - insn = get_insns (); -@@ -2820,12 +2796,6 @@ loongarch_legitimize_tls_address (rtx loc) - enum tls_model model = SYMBOL_REF_TLS_MODEL (loc); - rtx_insn *insn; - -- /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */ -- #if 0 -- if (!flag_pic) -- model = TLS_MODEL_LOCAL_EXEC; -- #endif -- - switch (model) - { - case TLS_MODEL_LOCAL_DYNAMIC: -@@ -2843,7 +2813,7 @@ loongarch_legitimize_tls_address (rtx loc) - break; - - case TLS_MODEL_INITIAL_EXEC: -- /* la.tls.ie; tp-relative add */ -+ /* la.tls.ie; tp-relative add */ - tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); - tmp = gen_reg_rtx (Pmode); - emit_insn (loongarch_got_load_tls_ie (tmp, loc)); -@@ -2852,7 +2822,7 @@ loongarch_legitimize_tls_address (rtx loc) - break; - - case TLS_MODEL_LOCAL_EXEC: -- /* la.tls.le; tp-relative add */ -+ /* la.tls.le; tp-relative add */ - tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); - tmp = gen_reg_rtx (Pmode); - emit_insn (loongarch_got_load_tls_le (tmp, loc)); -@@ -2865,7 +2835,7 @@ loongarch_legitimize_tls_address (rtx loc) - } - return dest; - } -- -+ - rtx - loongarch_legitimize_call_address (rtx addr) - { -@@ -2877,7 +2847,25 @@ loongarch_legitimize_call_address (rtx addr) - } - return addr; - } -- -+ -+/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR -+ and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. 
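
The PMODE_INSN wrapper that replaces the open-coded Pmode == DImode branches above is, in its usual GCC form, a token-pasting macro; its exact definition lives in the LoongArch headers, so treat the shape below as an assumption. The dummy gen_* functions and the two-value mode enum exist only to make the sketch compile.

#include <stdio.h>

enum machine_mode { SImode, DImode };
static const enum machine_mode Pmode = DImode;

static const char *gen_got_load_tls_gdsi (void) { return "got_load_tls_gd (SI)"; }
static const char *gen_got_load_tls_gddi (void) { return "got_load_tls_gd (DI)"; }

/* Paste the pointer-mode suffix onto a gen_* name, as the hunk's
   gen_got_load_tls_gddi / ...gdsi pairing suggests.  */
#define PMODE_INSN(NAME, ARGS) \
  (Pmode == SImode ? NAME##si ARGS : NAME##di ARGS)

int main (void)
{
  puts (PMODE_INSN (gen_got_load_tls_gd, ()));  /* picks the DI variant */
  return 0;
}
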
*/ -+ -+static void -+loongarch_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr) -+{ -+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))) -+ { -+ *base_ptr = XEXP (x, 0); -+ *offset_ptr = INTVAL (XEXP (x, 1)); -+ } -+ else -+ { -+ *base_ptr = x; -+ *offset_ptr = 0; -+ } -+} -+ - /* If X is not a valid address for mode MODE, force it into a register. */ - - static rtx -@@ -2895,7 +2883,7 @@ loongarch_force_address (rtx x, machine_mode mode) - - static rtx - loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, -- machine_mode mode) -+ machine_mode mode) - { - rtx base, addr; - HOST_WIDE_INT offset; -@@ -2941,28 +2929,30 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value) - } - else - x = force_reg (mode, x); -+ - switch (codes[i].method) - { - case METHOD_NORMAL: -- x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value)); -+ x = gen_rtx_fmt_ee (codes[i].code, mode, x, -+ GEN_INT (codes[i].value)); - break; - case METHOD_LU32I: -- emit_insn (gen_rtx_SET (x, gen_rtx_IOR (DImode, -- gen_rtx_ZERO_EXTEND (DImode, -- gen_rtx_SUBREG (SImode, x, 0)), -- GEN_INT (codes[i].value)))); -+ emit_insn ( -+ gen_rtx_SET (x, -+ gen_rtx_IOR (DImode, -+ gen_rtx_ZERO_EXTEND ( -+ DImode, gen_rtx_SUBREG (SImode, x, 0)), -+ GEN_INT (codes[i].value)))); - break; - case METHOD_LU52I: -- emit_insn (gen_lu52i_d (x, x, -- GEN_INT (0xfffffffffffff), -- GEN_INT (codes[i].value))); -+ emit_insn (gen_lu52i_d (x, x, GEN_INT (0xfffffffffffff), -+ GEN_INT (codes[i].value))); - break; - case METHOD_INSV: -- emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, -- x, -- GEN_INT (20), -- GEN_INT (32)), -- gen_rtx_REG (DImode, 0))); -+ emit_insn ( -+ gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, x, GEN_INT (20), -+ GEN_INT (32)), -+ gen_rtx_REG (DImode, 0))); - break; - default: - gcc_unreachable (); -@@ -2997,7 +2987,7 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) - - /* If we have (const (plus symbol offset)), and that expression cannot - be forced into memory, load the symbol first and add in the offset. -- prefer to do this even if the constant _can_ be forced into memory, -+ prefer to do this even if the constant _can_ be forced into memory, - as it usually produces better code. */ - split_const (src, &base, &offset); - if (offset != const0_rtx -@@ -3005,7 +2995,8 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) - || (can_create_pseudo_p ()))) - { - base = loongarch_force_temporary (dest, base); -- loongarch_emit_move (dest, loongarch_add_offset (NULL, base, INTVAL (offset))); -+ loongarch_emit_move (dest, -+ loongarch_add_offset (NULL, base, INTVAL (offset))); - return; - } - -@@ -3020,7 +3011,6 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) - bool - loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) - { -- - if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode)) - { - loongarch_emit_move (dest, force_reg (mode, src)); -@@ -3029,10 +3019,9 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) - - /* Both src and dest are non-registers; one special case is supported where - the source is (const_int 0) and the store can source the zero register. -- LSX and lasx are never able to source the zero register directly in -+ LSX and LASX are never able to source the zero register directly in - memory operations. 
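
The METHOD_LU32I and METHOD_LU52I cases above exist because a 64-bit constant is assembled from 12- and 20-bit fields across up to four instructions. Here is an arithmetic model of the lu12i.w / ori / lu32i.d / lu52i.d sequence; the field placements follow the instruction descriptions, and the sign-extension details are this sketch's reading of them.

#include <assert.h>
#include <stdint.h>

static uint64_t
build_const64 (uint64_t want)
{
  uint64_t r;
  /* lu12i.w rd, si20 : rd = sign_extend (si20 << 12).  */
  r = (uint64_t) ((int64_t) ((want & 0xfffff000ull) << 32) >> 32);
  /* ori rd, rd, ui12 : fills bits 11..0.  */
  r |= want & 0xfffull;
  /* lu32i.d rd, si20 : rewrites bits 51..32, sign-extending upward.  */
  r = (r & 0x00000000ffffffffull) | (want & 0x000fffff00000000ull);
  r = (uint64_t) ((int64_t) (r << 12) >> 12);
  /* lu52i.d rd, rd, si12 : rewrites bits 63..52.  */
  r = (r & 0x000fffffffffffffull) | (want & 0xfff0000000000000ull);
  return r;
}

int main (void)
{
  assert (build_const64 (0x123456789abcdef0ull) == 0x123456789abcdef0ull);
  assert (build_const64 (0xffffffff80000000ull) == 0xffffffff80000000ull);
  return 0;
}
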
*/ -- if (!register_operand (dest, mode) -- && !register_operand (src, mode) -+ if (!register_operand (dest, mode) && !register_operand (src, mode) - && (!const_0_operand (src, mode) - || LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))) - { -@@ -3049,40 +3038,26 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) - return true; - } - -- if ((GET_CODE (src) == SYMBOL_REF || GET_CODE (src) == LABEL_REF) -- && symbolic_operand (src, VOIDmode) -- && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)) -- { -- rtx temp = gen_reg_rtx (GET_MODE (dest)); -- rtx x = gen_rtx_UNSPEC_VOLATILE (GET_MODE (dest), gen_rtvec (1, src), UNSPECV_MOVE_EXTREME); -- temp = gen_rtx_USE(VOIDmode, temp); -- temp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec(2, gen_rtx_SET (dest, x), temp)); -- emit_insn (temp); -- return true; -- } -- - return false; - } - --/* Return true if OP refers to small data symbols directly, not through -- a LO_SUM. CONTEXT is the context in which X appears. */ -+/* Return true if OP refers to small data symbols directly. */ - - static int --loongarch_small_data_pattern_1 (rtx x, enum loongarch_symbol_context context) -+loongarch_small_data_pattern_1 (rtx x) - { - subrtx_var_iterator::array_type array; - FOR_EACH_SUBRTX_VAR (iter, array, x, ALL) - { - rtx x = *iter; - -- /* Ignore things like "g" constraints in asms. We make no particular -- guarantee about which symbolic constants are acceptable as asm operands -- versus which must be forced into a GPR. */ -+ /* We make no particular guarantee about which symbolic constants are -+ acceptable as asm operands versus which must be forced into a GPR. */ - if (GET_CODE (x) == ASM_OPERANDS) - iter.skip_subrtxes (); - else if (MEM_P (x)) - { -- if (loongarch_small_data_pattern_1 (XEXP (x, 0), SYMBOL_CONTEXT_MEM)) -+ if (loongarch_small_data_pattern_1 (XEXP (x, 0))) - return true; - iter.skip_subrtxes (); - } -@@ -3090,20 +3065,19 @@ loongarch_small_data_pattern_1 (rtx x, enum loongarch_symbol_context context) - return false; - } - --/* Return true if OP refers to small data symbols directly, not through -- a LO_SUM. */ -+/* Return true if OP refers to small data symbols directly. */ - - bool - loongarch_small_data_pattern_p (rtx op) - { -- return loongarch_small_data_pattern_1 (op, SYMBOL_CONTEXT_LEA); -+ return loongarch_small_data_pattern_1 (op); - } - - /* Rewrite *LOC so that it refers to small data using explicit -- relocations. CONTEXT is the context in which *LOC appears. */ -+ relocation. */ - - static void --loongarch_rewrite_small_data_1 (rtx *loc, enum loongarch_symbol_context context) -+loongarch_rewrite_small_data_1 (rtx *loc) - { - subrtx_ptr_iterator::array_type array; - FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL) -@@ -3111,7 +3085,7 @@ loongarch_rewrite_small_data_1 (rtx *loc, enum loongarch_symbol_context context) - rtx *loc = *iter; - if (MEM_P (*loc)) - { -- loongarch_rewrite_small_data_1 (&XEXP (*loc, 0), SYMBOL_CONTEXT_MEM); -+ loongarch_rewrite_small_data_1 (&XEXP (*loc, 0)); - iter.skip_subrtxes (); - } - } -@@ -3124,15 +3098,15 @@ rtx - loongarch_rewrite_small_data (rtx pattern) - { - pattern = copy_insn (pattern); -- loongarch_rewrite_small_data_1 (&pattern, SYMBOL_CONTEXT_LEA); -+ loongarch_rewrite_small_data_1 (&pattern); - return pattern; - } -- -+ - /* The cost of loading values from the constant pool. It should be - larger than the cost of any constant we want to synthesize inline. 
*/ - #define CONSTANT_POOL_COST COSTS_N_INSNS (8) - --/* Return true if there is a instruction that implements CODE -+/* Return true if there is a instruction that implements CODE - and if that instruction accepts X as an immediate operand. */ - - static int -@@ -3148,20 +3122,19 @@ loongarch_immediate_operand_p (int code, HOST_WIDE_INT x) - - case ROTATE: - case ROTATERT: -- /* Likewise rotates, if the target supports rotates at all. */ - return true; - - case AND: - case IOR: - case XOR: - /* These instructions take 12-bit unsigned immediates. */ -- return SMALL_OPERAND_UNSIGNED (x); -+ return IMM12_OPERAND_UNSIGNED (x); - - case PLUS: - case LT: - case LTU: - /* These instructions take 12-bit signed immediates. */ -- return SMALL_OPERAND (x); -+ return IMM12_OPERAND (x); - - case EQ: - case NE: -@@ -3178,11 +3151,11 @@ loongarch_immediate_operand_p (int code, HOST_WIDE_INT x) - - case LE: - /* We add 1 to the immediate and use SLT. */ -- return SMALL_OPERAND (x + 1); -+ return IMM12_OPERAND (x + 1); - - case LEU: - /* Likewise SLTU, but reject the always-true case. */ -- return SMALL_OPERAND (x + 1) && x + 1 != 0; -+ return IMM12_OPERAND (x + 1) && x + 1 != 0; - - case SIGN_EXTRACT: - case ZERO_EXTRACT: -@@ -3219,7 +3192,8 @@ loongarch_binary_cost (rtx x, int single_cost, int double_cost, bool speed) - static int - loongarch_fp_mult_cost (machine_mode mode) - { -- return mode == DFmode ? loongarch_cost->fp_mult_df : loongarch_cost->fp_mult_sf; -+ return mode == DFmode ? loongarch_cost->fp_mult_df -+ : loongarch_cost->fp_mult_sf; - } - - /* Return the cost of floating-point divisions of mode MODE. */ -@@ -3227,23 +3201,20 @@ loongarch_fp_mult_cost (machine_mode mode) - static int - loongarch_fp_div_cost (machine_mode mode) - { -- return mode == DFmode ? loongarch_cost->fp_div_df : loongarch_cost->fp_div_sf; -+ return mode == DFmode ? loongarch_cost->fp_div_df -+ : loongarch_cost->fp_div_sf; - } - - /* Return the cost of sign-extending OP to mode MODE, not including the - cost of OP itself. */ - - static int --loongarch_sign_extend_cost (machine_mode mode, rtx op) -+loongarch_sign_extend_cost (rtx op) - { - if (MEM_P (op)) - /* Extended loads are as cheap as unextended ones. */ - return 0; - -- if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) -- /* A sign extension from SImode to DImode in 64-bit mode is free. */ -- return 0; -- - return COSTS_N_INSNS (1); - } - -@@ -3251,16 +3222,12 @@ loongarch_sign_extend_cost (machine_mode mode, rtx op) - cost of OP itself. */ - - static int --loongarch_zero_extend_cost (machine_mode mode, rtx op) -+loongarch_zero_extend_cost (rtx op) - { - if (MEM_P (op)) - /* Extended loads are as cheap as unextended ones. */ - return 0; - -- if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) -- /* We need a shift left by 32 bits and a shift right by 32 bits. */ -- return COSTS_N_INSNS (2); -- - /* We can use ANDI. */ - return COSTS_N_INSNS (1); - } -@@ -3281,15 +3248,15 @@ loongarch_set_reg_reg_cost (machine_mode mode) - { - switch (GET_MODE_CLASS (mode)) - { -- case MODE_FCC: -- return loongarch_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (FCCmode)); -+ case MODE_CC: -+ return loongarch_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (CCmode)); - - case MODE_FLOAT: - case MODE_COMPLEX_FLOAT: - case MODE_VECTOR_FLOAT: - if (TARGET_HARD_FLOAT) - return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE); -- /* Fall through */ -+ /* Fall through. 
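
The LE and LEU cases above encode a small identity: x <= imm is x < imm + 1, so SLT/SLTU can serve as long as imm + 1 still fits the signed 12-bit field, and the unsigned all-ones immediate (where imm + 1 wraps to zero, an always-true test) is rejected. A standalone model with a couple of boundary checks:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool imm12_p (int64_t x) { return x >= -2048 && x <= 2047; }

static bool le_as_slt_ok (int64_t imm)  { return imm12_p (imm + 1); }
static bool leu_as_sltu_ok (int64_t imm)
{
  return imm12_p (imm + 1) && imm + 1 != 0;
}

int main (void)
{
  assert (le_as_slt_ok (2046));     /* x <= 2046  ->  x < 2047 */
  assert (!le_as_slt_ok (2047));    /* 2048 no longer fits 12 bits */
  assert (!leu_as_sltu_ok (-1));    /* x <=u all-ones is always true */
  return 0;
}
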
*/ - - default: - return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_WORD); -@@ -3300,20 +3267,13 @@ loongarch_set_reg_reg_cost (machine_mode mode) - - static bool - loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, -- int opno ATTRIBUTE_UNUSED, int *total, bool speed) -+ int opno ATTRIBUTE_UNUSED, int *total, bool speed) - { - int code = GET_CODE (x); - bool float_mode_p = FLOAT_MODE_P (mode); - int cost; - rtx addr; - -- /* The cost of a COMPARE is hard to define for LARCH. COMPAREs don't -- appear in the instruction stream, and the cost of a comparison is -- really the cost of the branch or scc condition. At the time of -- writing, GCC only uses an explicit outer COMPARE code when optabs -- is testing whether a constant is expensive enough to force into a -- register. We want optabs to pass such constants through the LARCH -- expanders instead, so make all constants very cheap here. */ - if (outer_code == COMPARE) - { - gcc_assert (CONSTANT_P (x)); -@@ -3324,68 +3284,34 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - switch (code) - { - case CONST_INT: -- /* Treat *clear_upper32-style ANDs as having zero cost in the -- second operand. The cost is entirely in the first operand. -- -- ??? This is needed because we would otherwise try to CSE -- the constant operand. Although that's the right thing for -- instructions that continue to be a register operation throughout -- compilation, it is disastrous for instructions that could -- later be converted into a memory operation. */ -- if (TARGET_64BIT -- && outer_code == AND -- && UINTVAL (x) == 0xffffffff) -+ if (TARGET_64BIT && outer_code == AND && UINTVAL (x) == 0xffffffff) - { - *total = 0; - return true; - } - -- /* When not optimizing for size, we care more about the cost -- of hot code, and hot code is often in a loop. If a constant -- operand needs to be forced into a register, we will often be -- able to hoist the constant load out of the loop, so the load -- should not contribute to the cost. */ -- if (speed || loongarch_immediate_operand_p (outer_code, INTVAL (x))) -- { -- *total = 0; -- return true; -- } -+ /* When not optimizing for size, we care more about the cost -+ of hot code, and hot code is often in a loop. If a constant -+ operand needs to be forced into a register, we will often be -+ able to hoist the constant load out of the loop, so the load -+ should not contribute to the cost. */ -+ if (speed || loongarch_immediate_operand_p (outer_code, INTVAL (x))) -+ { -+ *total = 0; -+ return true; -+ } - /* Fall through. */ - - case CONST: - case SYMBOL_REF: - case LABEL_REF: - case CONST_DOUBLE: -- if (force_to_mem_operand (x, VOIDmode)) -- { -- *total = COSTS_N_INSNS (1); -- return true; -- } - cost = loongarch_const_insns (x); - if (cost > 0) - { -- /* If the constant is likely to be stored in a GPR, SETs of -- single-insn constants are as cheap as register sets; we -- never want to CSE them. -- -- Don't reduce the cost of storing a floating-point zero in -- FPRs. If we have a zero in an FPR for other reasons, we -- can get better cfg-cleanup and delayed-branch results by -- using it consistently, rather than using $0 sometimes and -- an FPR at other times. Also, moves between floating-point -- registers are sometimes cheaper than MOVGR2FR.W/MOVGR2FR.D $0. */ -- if (cost == 1 -- && outer_code == SET -+ if (cost == 1 && outer_code == SET - && !(float_mode_p && TARGET_HARD_FLOAT)) - cost = 0; -- /* When code loads a constant N>1 times, we rarely -- want to CSE the constant itself. 
It is usually better to -- have N copies of the last operation in the sequence and one -- shared copy of the other operations. -- -- Also, if we have a CONST_INT, we don't know whether it is -- for a word or doubleword operation, so we cannot rely on -- the result of loongarch_build_integer. */ - else if ((outer_code == SET || GET_MODE (x) == VOIDmode)) - cost = 1; - *total = COSTS_N_INSNS (cost); -@@ -3399,16 +3325,16 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - /* If the address is legitimate, return the number of - instructions it needs. */ - addr = XEXP (x, 0); -- cost = loongarch_address_insns (addr, mode, true); -- if (cost > 0) -+ /* Check for a scaled indexed address. */ -+ if (loongarch_index_address_p (addr, mode)) - { -- *total = COSTS_N_INSNS (cost + 1); -+ *total = COSTS_N_INSNS (2); - return true; - } -- /* Check for a scaled indexed address. */ -- if (loongarch_lx_address_p (addr, mode)) -+ cost = loongarch_address_insns (addr, mode, true); -+ if (cost > 0) - { -- *total = COSTS_N_INSNS (2); -+ *total = COSTS_N_INSNS (cost + 1); - return true; - } - /* Otherwise use the default handling. */ -@@ -3425,34 +3351,31 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - case AND: - /* Check for a *clear_upper32 pattern and treat it like a zero - extension. See the pattern's comment for details. */ -- if (TARGET_64BIT -- && mode == DImode -- && CONST_INT_P (XEXP (x, 1)) -+ if (TARGET_64BIT && mode == DImode && CONST_INT_P (XEXP (x, 1)) - && UINTVAL (XEXP (x, 1)) == 0xffffffff) - { -- *total = (loongarch_zero_extend_cost (mode, XEXP (x, 0)) -+ *total = (loongarch_zero_extend_cost (XEXP (x, 0)) - + set_src_cost (XEXP (x, 0), mode, speed)); - return true; - } - /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in - a single instruction. */ -- if (GET_CODE (XEXP (x, 0)) == NOT -- && GET_CODE (XEXP (x, 1)) == NOT) -+ if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT) - { - cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1; -- *total = (COSTS_N_INSNS (cost) -+ *total = (COSTS_N_INSNS (cost) - + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed) - + set_src_cost (XEXP (XEXP (x, 1), 0), mode, speed)); - return true; - } -- -+ - /* Fall through. */ - - case IOR: - case XOR: - /* Double-word operations use two single-word operations. 
*/ - *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2), -- speed); -+ speed); - return true; - - case ASHIFT: -@@ -3461,18 +3384,18 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - case ROTATE: - case ROTATERT: - if (CONSTANT_P (XEXP (x, 1))) -- *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), -- speed); -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), -+ COSTS_N_INSNS (4), speed); - else -- *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12), -- speed); -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), -+ COSTS_N_INSNS (12), speed); - return true; - - case ABS: - if (float_mode_p) -- *total = loongarch_cost->fp_add; -+ *total = loongarch_cost->fp_add; - else -- *total = COSTS_N_INSNS (4); -+ *total = COSTS_N_INSNS (4); - return false; - - case LT: -@@ -3500,7 +3423,7 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - return false; - } - *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), -- speed); -+ speed); - return true; - - case MINUS: -@@ -3512,13 +3435,12 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - } - - /* If it's an add + mult (which is equivalent to shift left) and -- it's immediate operand satisfies const_immlsa_operand predicate. */ -- if (((ISA_HAS_LSA && mode == SImode) -- || (ISA_HAS_DLSA && mode == DImode)) -+ it's immediate operand satisfies const_immalsl_operand predicate. */ -+ if ((mode == SImode || (TARGET_64BIT && mode == DImode)) - && GET_CODE (XEXP (x, 0)) == MULT) - { - rtx op2 = XEXP (XEXP (x, 0), 1); -- if (const_immlsa_operand (op2, mode)) -+ if (const_immalsl_operand (op2, mode)) - { - *total = (COSTS_N_INSNS (1) - + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed) -@@ -3529,9 +3451,8 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - - /* Double-word operations require three single-word operations and - an SLTU. */ -- *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), -- COSTS_N_INSNS (4), -- speed); -+ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), -+ speed); - return true; - - case NEG: -@@ -3549,9 +3470,6 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - if (float_mode_p) - *total = loongarch_fp_mult_cost (mode); - else if (mode == DImode && !TARGET_64BIT) -- /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions, -- where the mulsidi3 always includes an MFHI and an MFLO. */ -- // FIXED ME??? - *total = (speed - ? loongarch_cost->int_mult_si * 3 + 6 - : COSTS_N_INSNS (7)); -@@ -3566,7 +3484,6 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - case DIV: - /* Check for a reciprocal. */ - if (float_mode_p -- && ISA_HAS_FP_RECIP_RSQRT (mode) - && flag_unsafe_math_optimizations - && XEXP (x, 0) == CONST1_RTX (mode)) - { -@@ -3597,17 +3514,17 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - *total = COSTS_N_INSNS (loongarch_idiv_insns (mode)); - } - else if (mode == DImode) -- *total = loongarch_cost->int_div_di; -+ *total = loongarch_cost->int_div_di; - else - *total = loongarch_cost->int_div_si; - return false; - - case SIGN_EXTEND: -- *total = loongarch_sign_extend_cost (mode, XEXP (x, 0)); -+ *total = loongarch_sign_extend_cost (XEXP (x, 0)); - return false; - - case ZERO_EXTEND: -- *total = loongarch_zero_extend_cost (mode, XEXP (x, 0)); -+ *total = loongarch_zero_extend_cost (XEXP (x, 0)); - return false; - case TRUNCATE: - /* Costings for highpart multiplies. 
Matching patterns of the form: -@@ -3617,11 +3534,11 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - (const_int 32) - */ - if ((GET_CODE (XEXP (x, 0)) == ASHIFTRT -- || GET_CODE (XEXP (x, 0)) == LSHIFTRT) -+ || GET_CODE (XEXP (x, 0)) == LSHIFTRT) - && CONST_INT_P (XEXP (XEXP (x, 0), 1)) - && ((INTVAL (XEXP (XEXP (x, 0), 1)) == 32 - && GET_MODE (XEXP (x, 0)) == DImode) -- || (ISA_HAS_DMUL -+ || (TARGET_64BIT - && INTVAL (XEXP (XEXP (x, 0), 1)) == 64 - && GET_MODE (XEXP (x, 0)) == TImode)) - && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT -@@ -3643,13 +3560,13 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - for (int i = 0; i < 2; ++i) - { - rtx op = XEXP (XEXP (XEXP (x, 0), 0), i); -- if (ISA_HAS_DMUL -+ if (TARGET_64BIT - && GET_CODE (op) == ZERO_EXTEND - && GET_MODE (op) == DImode) - *total += rtx_cost (op, DImode, MULT, i, speed); - else -- *total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op), -- 0, speed); -+ *total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op), 0, -+ speed); - } - - return true; -@@ -3684,58 +3601,168 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, - - static int - loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, -- tree vectype, -- int misalign ATTRIBUTE_UNUSED) -+ tree vectype, -+ int misalign ATTRIBUTE_UNUSED) - { -- unsigned elements; -- -+ int elements; - switch (type_of_cost) - { -- case scalar_stmt: -- case scalar_load: -- case vector_stmt: -- case vector_load: -- case vec_to_scalar: -- case scalar_to_vec: -- case cond_branch_not_taken: -- case vec_perm: -- case vec_promote_demote: -- case scalar_store: -- case vector_store: -- return 1; -- -- case unaligned_load: -- case vector_gather_load: -- return 2; -+ case scalar_stmt: -+ case vector_stmt: -+ case vec_to_scalar: -+ case scalar_to_vec: -+ case vec_perm: -+ case vec_promote_demote: -+ return 1; - -- case unaligned_store: -- case vector_scatter_store: -- return 10; -+ case scalar_store: -+ case scalar_load: -+ return 3; - -- case cond_branch_taken: -- return 3; -+ case vector_store: -+ case vector_load: -+ return loongarch_vector_access_cost; - -- case vec_construct: -- elements = TYPE_VECTOR_SUBPARTS (vectype); -- return elements / 2 + 1; -+ case unaligned_load: -+ case unaligned_store: -+ case vector_gather_load: -+ case vector_scatter_store: -+ return 5; - -- default: -- gcc_unreachable (); -- } --} -+ case cond_branch_taken: -+ return 4; -+ -+ case cond_branch_not_taken: -+ return 2; -+ -+ case vec_construct: -+ { -+ elements = TYPE_VECTOR_SUBPARTS (vectype); -+ if (ISA_HAS_LASX) -+ return elements + 1; -+ else -+ return elements; -+ } -+ -+ default: -+ gcc_unreachable (); -+ } -+} -+ -+/* Implement targetm.vectorize.add_stmt_cost. */ -+static unsigned -+loongarch_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, -+ struct _stmt_vec_info *stmt_info, int misalign, -+ enum vect_cost_model_location where) -+{ -+ unsigned *cost = (unsigned *) data; -+ unsigned retval = 0; -+ -+ tree vectype = stmt_info ? 
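
The TRUNCATE case above is recognizing the highpart-multiply shape, i.e. the RTL quoted in the comment. In C it is the familiar widen-multiply-then-shift idiom, which this cost code wants priced as a single highpart multiply rather than a full widening multiply plus shift; mulh_w below is only a behavioral model of that shape.

#include <assert.h>
#include <stdint.h>

/* (truncate (lshiftrt (mult (sign_extend a) (sign_extend b)) 32))  */
static int32_t
mulh_w (int32_t a, int32_t b)
{
  return (int32_t) (((int64_t) a * (int64_t) b) >> 32);
}

int main (void)
{
  assert (mulh_w (INT32_MIN, 2) == -1);      /* high word of -2^32 */
  assert (mulh_w (0x10000, 0x10000) == 1);   /* high word of 2^32 */
  return 0;
}
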
stmt_vectype (stmt_info) : NULL_TREE; -+ int stmt_cost = - 1; -+ -+ if ((kind == vector_stmt || kind == scalar_stmt) -+ && stmt_info -+ && stmt_info->stmt && gimple_code (stmt_info->stmt) == GIMPLE_ASSIGN) -+ { -+ tree_code subcode = gimple_assign_rhs_code (stmt_info->stmt); -+ bool fp = false; -+ machine_mode mode = TImode; -+ -+ if (vectype != NULL) -+ { -+ fp = FLOAT_TYPE_P (vectype); -+ mode = TYPE_MODE (vectype); -+ } -+ -+ switch (subcode) -+ { -+ case PLUS_EXPR: -+ case POINTER_PLUS_EXPR: -+ case MINUS_EXPR: -+ case MULT_EXPR: -+ case WIDEN_MULT_EXPR: -+ case MULT_HIGHPART_EXPR: -+ stmt_cost = fp ? 2 : 1; -+ break; -+ -+ case TRUNC_DIV_EXPR: -+ case CEIL_DIV_EXPR: -+ case FLOOR_DIV_EXPR: -+ case ROUND_DIV_EXPR: -+ case TRUNC_MOD_EXPR: -+ case CEIL_MOD_EXPR: -+ case FLOOR_MOD_EXPR: -+ case RDIV_EXPR: -+ case ROUND_MOD_EXPR: -+ case EXACT_DIV_EXPR: -+ stmt_cost = fp ? 4 : 1; -+ break; -+ -+ case NOP_EXPR: -+ /* Only sign-conversions are free. */ -+ if (tree_nop_conversion_p -+ (TREE_TYPE (gimple_assign_lhs (stmt_info->stmt)), -+ TREE_TYPE (gimple_assign_rhs1 (stmt_info->stmt)))) -+ stmt_cost = 0; -+ break; -+ -+ default: -+ break; -+ } -+ } -+ if (kind == vec_construct -+ && stmt_info -+ && (STMT_VINFO_TYPE (stmt_info) == load_vec_info_type -+ || STMT_VINFO_TYPE (stmt_info) == store_vec_info_type) -+ && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) == VMAT_ELEMENTWISE -+ && TREE_CODE (DR_STEP (STMT_VINFO_DATA_REF (stmt_info))) != INTEGER_CST) -+ { -+ stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype, misalign); -+ stmt_cost *= TYPE_VECTOR_SUBPARTS (vectype); -+ } -+ if (stmt_cost == -1) -+ stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype, misalign); -+ -+ /* Statements in an inner loop relative to the loop being -+ vectorized are weighted more heavily. The value here is -+ arbitrary and could potentially be improved with analysis. */ -+ if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) -+ count *= 50; /* FIXME. */ -+ -+ retval = (unsigned) (count * stmt_cost); -+ -+ cost[where] += retval; -+ -+ return retval; -+} -+ -+static bool -+loongarch_builtin_support_vector_misalignment(machine_mode mode, const_tree type, -+ int misalignment, bool is_packed) -+{ -+ if ((ISA_HAS_LSX || ISA_HAS_LASX) && STRICT_ALIGNMENT) -+ { -+ if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing) -+ return false; -+ if (misalignment == -1) -+ return false; -+ } -+ return default_builtin_support_vector_misalignment (mode, type, misalignment, -+ is_packed); -+} - - - /* Implement TARGET_ADDRESS_COST. */ - - static int - loongarch_address_cost (rtx addr, machine_mode mode, -- addr_space_t as ATTRIBUTE_UNUSED, -- bool speed ATTRIBUTE_UNUSED) -+ addr_space_t as ATTRIBUTE_UNUSED, -+ bool speed ATTRIBUTE_UNUSED) - { - return loongarch_address_insns (addr, mode, false); - } - -- - /* Return one word of double-word value OP, taking into account the fixed - endianness of certain registers. HIGH_P is true to select the high part, - false to select the low part. */ -@@ -3743,24 +3770,16 @@ loongarch_address_cost (rtx addr, machine_mode mode, - rtx - loongarch_subword (rtx op, bool high_p) - { -- unsigned int byte, offset; -+ unsigned int byte; - machine_mode mode; - -+ byte = high_p ? UNITS_PER_WORD : 0; - mode = GET_MODE (op); - if (mode == VOIDmode) - mode = TARGET_64BIT ? TImode : DImode; - -- if (high_p) -- byte = UNITS_PER_WORD; -- else -- byte = 0; -- - if (FP_REG_RTX_P (op)) -- { -- /* Paired FPRs are always ordered little-endian. 
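
loongarch_add_stmt_cost above boils down to: pick a per-statement cost, scale inner-loop body statements by the hard-coded factor of 50, and accumulate into a per-location bucket. The skeleton below keeps just that shape; the enum and the bare-integer parameters are simplified stand-ins for GCC's vectorizer types, not the real ones.

#include <stdio.h>

enum where { PROLOGUE, BODY, EPILOGUE, N_WHERE };

static unsigned
add_stmt_cost (unsigned cost[N_WHERE], int count, int stmt_cost,
               enum where where, int in_inner_loop)
{
  if (where == BODY && in_inner_loop)
    count *= 50;                     /* weight inner-loop statements heavily */
  unsigned retval = (unsigned) (count * stmt_cost);
  cost[where] += retval;             /* accumulate per location */
  return retval;
}

int main (void)
{
  unsigned cost[N_WHERE] = { 0, 0, 0 };
  add_stmt_cost (cost, 1, 3, BODY, 1);  /* e.g. one scalar_load (cost 3) */
  printf ("body cost = %u\n", cost[BODY]);  /* prints 150 */
  return 0;
}
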
*/ -- offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0); -- return gen_rtx_REG (word_mode, REGNO (op) + offset); -- } -+ return gen_rtx_REG (word_mode, REGNO (op) + high_p); - - if (MEM_P (op)) - return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); -@@ -3768,11 +3787,10 @@ loongarch_subword (rtx op, bool high_p) - return simplify_gen_subreg (word_mode, op, mode, byte); - } - --/* Return true if a move from SRC to DEST should be split into two. -- SPLIT_TYPE describes the split condition. */ -+/* Return true if a move from SRC to DEST should be split into two. */ - - bool --loongarch_split_move_p (rtx dest, rtx src, enum loongarch_split_type split_type) -+loongarch_split_move_p (rtx dest, rtx src) - { - /* FPR-to-FPR moves can be done in a single instruction, if they're - allowed at all. */ -@@ -3801,19 +3819,18 @@ loongarch_split_move_p (rtx dest, rtx src, enum loongarch_split_type split_type) - return size > UNITS_PER_WORD; - } - --/* Split a move from SRC to DEST, given that loongarch_split_move_p holds. -- SPLIT_TYPE describes the split condition. */ -+/* Split a move from SRC to DEST, given that loongarch_split_move_p holds. */ - - void --loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, rtx insn_) -+loongarch_split_move (rtx dest, rtx src, rtx insn_) - { - rtx low_dest; - -- gcc_checking_assert (loongarch_split_move_p (dest, src, split_type)); -+ gcc_checking_assert (loongarch_split_move_p (dest, src)); - if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) - loongarch_split_128bit_move (dest, src); - else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) -- loongarch_split_256bit_move (dest, src); -+ loongarch_split_256bit_move (dest, src); - else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) - { - if (!TARGET_64BIT && GET_MODE (dest) == DImode) -@@ -3830,23 +3847,24 @@ loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, r - /* The operation can be split into two normal moves. Decide in - which order to do them. */ - low_dest = loongarch_subword (dest, false); -- if (REG_P (low_dest) -- && reg_overlap_mentioned_p (low_dest, src)) -+ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src)) - { -- loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); -+ loongarch_emit_move (loongarch_subword (dest, true), -+ loongarch_subword (src, true)); - loongarch_emit_move (low_dest, loongarch_subword (src, false)); - } - else - { - loongarch_emit_move (low_dest, loongarch_subword (src, false)); -- loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); -+ loongarch_emit_move (loongarch_subword (dest, true), -+ loongarch_subword (src, true)); - } - } - - /* This is a hack. See if the next insn uses DEST and if so, see if we - can forward SRC for DEST. This is most useful if the next insn is a -- simple store. */ -- rtx_insn *insn = (rtx_insn *)insn_; -+ simple store. 
*/ -+ rtx_insn *insn = (rtx_insn *) insn_; - struct loongarch_address_info addr = {}; - if (insn) - { -@@ -3859,7 +3877,8 @@ loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, r - if (MEM_P (src)) - { - rtx tmp = XEXP (src, 0); -- loongarch_classify_address (&addr, tmp, GET_MODE (tmp), true); -+ loongarch_classify_address (&addr, tmp, GET_MODE (tmp), -+ true); - if (addr.reg && !reg_overlap_mentioned_p (dest, addr.reg)) - validate_change (next, &SET_SRC (set), src, false); - } -@@ -3870,24 +3889,6 @@ loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, r - } - } - --/* Return the split type for instruction INSN. */ -- --static enum loongarch_split_type --loongarch_insn_split_type (rtx insn) --{ -- basic_block bb = BLOCK_FOR_INSN (insn); -- if (bb) -- { -- if (optimize_bb_for_speed_p (bb)) -- return SPLIT_FOR_SPEED; -- else -- return SPLIT_FOR_SIZE; -- } -- /* Once CFG information has been removed, we should trust the optimization -- decisions made by previous passes and only split where necessary. */ -- return SPLIT_IF_NECESSARY; --} -- - /* Return true if a 128-bit move from SRC to DEST should be split. */ - - bool -@@ -3974,10 +3975,10 @@ loongarch_split_128bit_move (rtx dest, rtx src) - s = loongarch_subword_at_byte (src, byte); - if (!TARGET_64BIT) - emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest, -- GEN_INT (1 << index))); -+ GEN_INT (1 << index))); - else - emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest, -- GEN_INT (1 << index))); -+ GEN_INT (1 << index))); - } - } - else if (FP_REG_RTX_P (src)) -@@ -4200,28 +4201,93 @@ loongarch_split_lsx_fill_d (rtx dest, rtx src) - emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1))); - emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3))); - } -- -+ - /* Return true if a move from SRC to DEST in INSN should be split. */ - - bool --loongarch_split_move_insn_p (rtx dest, rtx src, rtx insn) -+loongarch_split_move_insn_p (rtx dest, rtx src) - { -- return loongarch_split_move_p (dest, src, loongarch_insn_split_type (insn)); -+ return loongarch_split_move_p (dest, src); - } - --/* Split a move from SRC to DEST in INSN, given that loongarch_split_move_insn_p -- holds. */ -+/* Split a move from SRC to DEST in INSN, given that -+ loongarch_split_move_insn_p holds. */ - - void - loongarch_split_move_insn (rtx dest, rtx src, rtx insn) - { -- loongarch_split_move (dest, src, loongarch_insn_split_type (insn), insn); -+ loongarch_split_move (dest, src, insn); - } -- - --/* Forward declaration. Used below */ -+/* Implement TARGET_CONSTANT_ALIGNMENT. 
*/ -+ - static HOST_WIDE_INT --loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align); -+loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align) -+{ -+ if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR) -+ return MAX (align, BITS_PER_WORD); -+ return align; -+} -+ -+const char * -+loongarch_output_move_index (rtx x, machine_mode mode, bool ldr) -+{ -+ int index = exact_log2 (GET_MODE_SIZE (mode)); -+ if (!IN_RANGE (index, 0, 3)) -+ return NULL; -+ -+ struct loongarch_address_info info; -+ if ((loongarch_classify_address (&info, x, mode, false) -+ && !(info.type == ADDRESS_REG_REG)) -+ || !loongarch_legitimate_address_p (mode, x, false)) -+ return NULL; -+ -+ const char *const insn[][4] = -+ { -+ { -+ "stx.b\t%z1,%0", -+ "stx.h\t%z1,%0", -+ "stx.w\t%z1,%0", -+ "stx.d\t%z1,%0", -+ }, -+ { -+ "ldx.bu\t%0,%1", -+ "ldx.hu\t%0,%1", -+ "ldx.w\t%0,%1", -+ "ldx.d\t%0,%1", -+ } -+ }; -+ -+ return insn[ldr][index]; -+} -+ -+const char * -+loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr) -+{ -+ int index = exact_log2 (GET_MODE_SIZE (mode)); -+ if (!IN_RANGE (index, 2, 3)) -+ return NULL; -+ -+ struct loongarch_address_info info; -+ if ((loongarch_classify_address (&info, x, mode, false) -+ && !(info.type == ADDRESS_REG_REG)) -+ || !loongarch_legitimate_address_p (mode, x, false)) -+ return NULL; -+ -+ const char *const insn[][2] = -+ { -+ { -+ "fstx.s\t%1,%0", -+ "fstx.d\t%1,%0" -+ }, -+ { -+ "fldx.s\t%0,%1", -+ "fldx.d\t%0,%1" -+ } -+ }; -+ -+ return insn[ldr][index-2]; -+} - - /* Return the appropriate instructions to move SRC into DEST. Assume - that SRC is operand 1 and DEST is operand 0. */ -@@ -4235,9 +4301,8 @@ loongarch_output_move (rtx dest, rtx src) - bool dbl_p = (GET_MODE_SIZE (mode) == 8); - bool lsx_p = LSX_SUPPORTED_MODE_P (mode); - bool lasx_p = LASX_SUPPORTED_MODE_P (mode); -- enum loongarch_symbol_type symbol_type; - -- if (loongarch_split_move_p (dest, src, SPLIT_IF_NECESSARY)) -+ if (loongarch_split_move_p (dest, src)) - return "#"; - - if ((lsx_p || lasx_p) -@@ -4246,7 +4311,7 @@ loongarch_output_move (rtx dest, rtx src) - && CONST_INT_P (CONST_VECTOR_ELT (src, 0))) - { - gcc_assert (loongarch_const_vector_same_int_p (src, mode, -512, 511)); -- if(lsx_p || lasx_p) -+ if (lsx_p || lasx_p) - { - switch (GET_MODE_SIZE (mode)) - { -@@ -4254,7 +4319,8 @@ loongarch_output_move (rtx dest, rtx src) - return "vrepli.%v0\t%w0,%E1"; - case 32: - return "xvrepli.%v0\t%u0,%E1"; -- default: gcc_unreachable (); -+ default: -+ gcc_unreachable (); - } - } - } -@@ -4278,77 +4344,98 @@ loongarch_output_move (rtx dest, rtx src) - return "vrepli.b\t%w0,0"; - case 32: - return "xvrepli.b\t%u0,0"; -- default: gcc_unreachable (); -+ default: -+ gcc_unreachable (); - } - } - - return dbl_p ? 
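
The template tables in loongarch_output_move_index above are indexed by exact_log2 of the access size (rows b/h/w/d) and a load/store flag. The helper below reproduces only the table lookup, with the size-to-index switch standing in for exact_log2 and no address-legitimacy checks.

#include <stdio.h>

static const char *
move_index_template (unsigned size, int is_load)
{
  static const char *const insn[2][4] = {
    { "stx.b\t%z1,%0", "stx.h\t%z1,%0", "stx.w\t%z1,%0", "stx.d\t%z1,%0" },
    { "ldx.bu\t%0,%1", "ldx.hu\t%0,%1", "ldx.w\t%0,%1", "ldx.d\t%0,%1" },
  };
  int index;
  switch (size)                 /* exact_log2: 1,2,4,8 -> 0..3 */
    {
    case 1: index = 0; break;
    case 2: index = 1; break;
    case 4: index = 2; break;
    case 8: index = 3; break;
    default: return NULL;       /* no indexed form for this size */
    }
  return insn[is_load][index];
}

int main (void)
{
  printf ("%s\n", move_index_template (4, 1));  /* ldx.w\t%0,%1 */
  return 0;
}
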
"movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1"; - } - } -- if (dest_code == MEM) -+ if (dest_code == MEM) - { -+ const char *insn = NULL; -+ insn = loongarch_output_move_index (XEXP (dest, 0), GET_MODE (dest), -+ false); -+ if (insn) -+ return insn; -+ - rtx offset = XEXP (dest, 0); -- if (GET_CODE(offset) == PLUS) -- offset = XEXP(offset, 1); -+ if (GET_CODE (offset) == PLUS) -+ offset = XEXP (offset, 1); -+ else -+ offset = const0_rtx; - switch (GET_MODE_SIZE (mode)) - { -- case 1: return "st.b\t%z1,%0"; -- case 2: return "st.h\t%z1,%0"; -- case 4: -- if (const_arith_operand (offset, Pmode)) -- return "st.w\t%z1,%0"; -- else -- return "stptr.w\t%z1,%0"; -- case 8: -- if (const_arith_operand (offset, Pmode)) -- return "st.d\t%z1,%0"; -- else -- return "stptr.d\t%z1,%0"; -- default: gcc_unreachable (); -- } -+ case 1: -+ return "st.b\t%z1,%0"; -+ case 2: -+ return "st.h\t%z1,%0"; -+ case 4: -+ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) -+ return "st.w\t%z1,%0"; -+ else -+ return "stptr.w\t%z1,%0"; -+ case 8: -+ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) -+ return "st.d\t%z1,%0"; -+ else -+ return "stptr.d\t%z1,%0"; -+ default: -+ gcc_unreachable (); -+ } - } - } - if (dest_code == REG && GP_REG_P (REGNO (dest))) - { - if (src_code == REG) -- { -- if (FP_REG_P (REGNO (src))) -- { -- gcc_assert (!lsx_p); -- return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; -- } -- } -+ if (FP_REG_P (REGNO (src))) -+ { -+ gcc_assert (!lsx_p && !lasx_p); -+ return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; -+ } - - if (src_code == MEM) - { -+ const char *insn = NULL; -+ insn = loongarch_output_move_index (XEXP (src, 0), GET_MODE (src), -+ true); -+ if (insn) -+ return insn; -+ - rtx offset = XEXP (src, 0); -- if (GET_CODE(offset) == PLUS) -- offset = XEXP(offset, 1); -+ if (GET_CODE (offset) == PLUS) -+ offset = XEXP (offset, 1); -+ else -+ offset = const0_rtx; - switch (GET_MODE_SIZE (mode)) - { -- case 1: return "ld.bu\t%0,%1"; -- case 2: return "ld.hu\t%0,%1"; -- case 4: -- if (const_arith_operand (offset, Pmode)) -- return "ld.w\t%0,%1"; -- else -- return "ldptr.w\t%0,%1"; -- case 8: -- if (const_arith_operand (offset, Pmode)) -- return "ld.d\t%0,%1"; -- else -- return "ldptr.d\t%0,%1"; -- default: gcc_unreachable (); -+ case 1: -+ return "ld.bu\t%0,%1"; -+ case 2: -+ return "ld.hu\t%0,%1"; -+ case 4: -+ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) -+ return "ld.w\t%0,%1"; -+ else -+ return "ldptr.w\t%0,%1"; -+ case 8: -+ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) -+ return "ld.d\t%0,%1"; -+ else -+ return "ldptr.d\t%0,%1"; -+ default: -+ gcc_unreachable (); - } - } -- -+ - if (src_code == CONST_INT) - { -- if (LUI_INT (src)) -+ if (LU12I_INT (src)) - return "lu12i.w\t%0,%1>>12\t\t\t# %X1"; -- else if (SMALL_INT (src)) -+ else if (IMM12_INT (src)) - return "addi.w\t%0,$r0,%1\t\t\t# %X1"; -- else if (SMALL_INT_UNSIGNED (src)) -+ else if (IMM12_INT_UNSIGNED (src)) - return "ori\t%0,$r0,%1\t\t\t# %X1"; - else if (LU52I_INT (src)) - return "lu52i.d\t%0,$r0,%X1>>52\t\t\t# %1"; -@@ -4358,56 +4445,51 @@ loongarch_output_move (rtx dest, rtx src) - - if (symbolic_operand (src, VOIDmode)) - { -- -- switch (loongarch_cmodel_var) -+ if ((TARGET_CMODEL_TINY && (!loongarch_global_symbol_p (src) -+ || loongarch_symbol_binds_local_p (src))) -+ || (TARGET_CMODEL_TINY_STATIC && !loongarch_weak_symbol_p (src))) - { -- case LARCH_CMODEL_TINY: -- do -+ /* The symbol must be aligned to 4 byte. 
*/ -+ unsigned int align; -+ -+ if (GET_CODE (src) == LABEL_REF) -+ align = 32 /* Whatever. */; -+ else if (CONSTANT_POOL_ADDRESS_P (src)) -+ align = GET_MODE_ALIGNMENT (get_pool_mode (src)); -+ else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) - { -- if (loongarch_global_symbol_p (src) -- && !loongarch_symbol_binds_local_p (src)) -- break; -- case LARCH_CMODEL_TINY_STATIC: -- if (loongarch_weak_symbol_p (src)) -- break; -- -- /* The symbol must be aligned to 4 byte. */ -- unsigned int align; -- -- if (GET_CODE (src) == LABEL_REF) -- align = 128 /* whatever */; -- /* copy from aarch64 */ -- else if (CONSTANT_POOL_ADDRESS_P (src)) -- align = GET_MODE_ALIGNMENT (get_pool_mode (src)); -- else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) -- { -- tree exp = SYMBOL_REF_DECL (src); -- align = TYPE_ALIGN (TREE_TYPE (exp)); -- align = loongarch_constant_alignment (exp, align); -- } -- else if (SYMBOL_REF_DECL (src)) -- align = DECL_ALIGN (SYMBOL_REF_DECL (src)); -- else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) -- && SYMBOL_REF_BLOCK (src) != NULL) -- align = SYMBOL_REF_BLOCK (src)->alignment; -- else -- align = BITS_PER_UNIT; -- -- if (align % (4 * 8) == 0) -- return "pcaddi\t%0,%%pcrel(%1)>>2"; -+ tree exp = SYMBOL_REF_DECL (src); -+ align = TYPE_ALIGN (TREE_TYPE (exp)); -+ align = loongarch_constant_alignment (exp, align); - } -- while (0); -- case LARCH_CMODEL_NORMAL: -- case LARCH_CMODEL_LARGE: -+ else if (SYMBOL_REF_DECL (src)) -+ align = DECL_ALIGN (SYMBOL_REF_DECL (src)); -+ else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) -+ && SYMBOL_REF_BLOCK (src) != NULL) -+ align = SYMBOL_REF_BLOCK (src)->alignment; -+ else -+ align = BITS_PER_UNIT; -+ -+ if (align % (4 * 8) == 0) -+ return "pcaddi\t%0,%%pcrel(%1)>>2"; -+ } -+ if (TARGET_CMODEL_TINY -+ || TARGET_CMODEL_TINY_STATIC -+ || TARGET_CMODEL_NORMAL -+ || TARGET_CMODEL_LARGE) -+ { - if (!loongarch_global_symbol_p (src) - || loongarch_symbol_binds_local_p (src)) - return "la.local\t%0,%1"; - else - return "la.global\t%0,%1"; -- case LARCH_CMODEL_EXTREME: -- default: -+ } -+ if (TARGET_CMODEL_EXTREME) -+ { -+ sorry ("Normal symbol loading not implemented in extreme mode."); - gcc_unreachable (); - } -+ - } - } - if (src_code == REG && FP_REG_P (REGNO (src))) -@@ -4416,14 +4498,14 @@ loongarch_output_move (rtx dest, rtx src) - { - if (lsx_p || lasx_p) - { -- - switch (GET_MODE_SIZE (mode)) - { - case 16: - return "vori.b\t%w0,%w1,0"; - case 32: - return "xvori.b\t%u0,%u1,0"; -- default: gcc_unreachable (); -+ default: -+ gcc_unreachable (); - } - } - else -@@ -4434,16 +4516,22 @@ loongarch_output_move (rtx dest, rtx src) - { - if (lsx_p || lasx_p) - { -- - switch (GET_MODE_SIZE (mode)) - { - case 16: - return "vst\t%w1,%0"; - case 32: - return "xvst\t%u1,%0"; -- default: gcc_unreachable (); -+ default: -+ gcc_unreachable (); - } - } -+ const char *insn = NULL; -+ insn = loongarch_output_move_index_float (XEXP (dest, 0), -+ GET_MODE (dest), -+ false); -+ if (insn) -+ return insn; - - return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0"; - } -@@ -4460,17 +4548,25 @@ loongarch_output_move (rtx dest, rtx src) - return "vld\t%w0,%1"; - case 32: - return "xvld\t%u0,%1"; -- default: gcc_unreachable (); -+ default: -+ gcc_unreachable (); - } - } -+ const char *insn = NULL; -+ insn = loongarch_output_move_index_float (XEXP (src, 0), -+ GET_MODE (src), -+ true); -+ if (insn) -+ return insn; -+ - return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1"; - } - } - gcc_unreachable (); - } -- -+ - /* Return true if CMP1 is a suitable second operand for integer ordering -- test CODE. 
See also the *sCC patterns in loongarch.md. */ -+ test CODE. */ - - static bool - loongarch_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) -@@ -4508,7 +4604,7 @@ loongarch_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) - - static bool - loongarch_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, -- machine_mode mode) -+ machine_mode mode) - { - HOST_WIDE_INT plus_one; - -@@ -4551,11 +4647,11 @@ loongarch_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, - - static void - loongarch_emit_int_order_test (enum rtx_code code, bool *invert_ptr, -- rtx target, rtx cmp0, rtx cmp1) -+ rtx target, rtx cmp0, rtx cmp1) - { - machine_mode mode; - -- /* First see if there is a LARCH instruction that can do this operation. -+ /* First see if there is a LoongArch instruction that can do this operation. - If not, try doing the same for the inverse operation. If that also - fails, force CMP1 into a register and try again. */ - mode = GET_MODE (cmp0); -@@ -4574,7 +4670,7 @@ loongarch_emit_int_order_test (enum rtx_code code, bool *invert_ptr, - rtx inv_target; - - inv_target = loongarch_force_binary (GET_MODE (target), -- inv_code, cmp0, cmp1); -+ inv_code, cmp0, cmp1); - loongarch_emit_binary (XOR, target, inv_target, const1_rtx); - } - else -@@ -4595,43 +4691,14 @@ loongarch_zero_if_equal (rtx cmp0, rtx cmp1) - return cmp0; - - if (uns_arith_operand (cmp1, VOIDmode)) -- return expand_binop (GET_MODE (cmp0), xor_optab, -- cmp0, cmp1, 0, 0, OPTAB_DIRECT); -+ return expand_binop (GET_MODE (cmp0), xor_optab, cmp0, cmp1, 0, 0, -+ OPTAB_DIRECT); - -- return expand_binop (GET_MODE (cmp0), sub_optab, -- cmp0, cmp1, 0, 0, OPTAB_DIRECT); -+ return expand_binop (GET_MODE (cmp0), sub_optab, cmp0, cmp1, 0, 0, -+ OPTAB_DIRECT); - } - --/* Allocate a floating-point condition-code register of mode MODE. -- -- These condition code registers are used for certain kinds -- of compound operation, such as compare and branches, vconds, -- and built-in functions. At expand time, their use is entirely -- controlled by LARCH-specific code and is entirely internal -- to these compound operations. -- -- We could (and did in the past) expose condition-code values -- as pseudo registers and leave the register allocator to pick -- appropriate registers. The problem is that it is not practically -- possible for the rtl optimizers to guarantee that no spills will -- be needed, even when AVOID_CCMODE_COPIES is defined. We would -- therefore need spill and reload sequences to handle the worst case. -- -- Although such sequences do exist, they are very expensive and are -- not something we'd want to use. -- -- The main benefit of having more than one condition-code register -- is to allow the pipelining of operations, especially those involving -- comparisons and conditional moves. We don't really expect the -- registers to be live for long periods, and certainly never want -- them to be live across calls. -- -- Also, there should be no penalty attached to using all the available -- registers. They are simply bits in the same underlying FPU control -- register. -- -- We therefore expose the hardware registers from the outset and use -- a simple round-robin allocation scheme. */ -+/* Allocate a floating-point condition-code register of mode MODE. 
*/ - - static rtx - loongarch_allocate_fcc (machine_mode mode) -@@ -4646,15 +4713,14 @@ loongarch_allocate_fcc (machine_mode mode) - gcc_unreachable (); - - cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1); -- if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST) -+ if (cfun->machine->next_fcc > FCC_REG_LAST - FCC_REG_FIRST) - cfun->machine->next_fcc = 0; - -- regno = ST_REG_FIRST + cfun->machine->next_fcc; -+ regno = FCC_REG_FIRST + cfun->machine->next_fcc; - cfun->machine->next_fcc += count; - return gen_rtx_REG (mode, regno); - } - -- - /* Sign- or zero-extend OP0 and OP1 for integer comparisons. */ - - static void -@@ -4681,6 +4747,7 @@ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) - } - } - -+ - /* Convert a comparison into something that can be used in a branch. On - entry, *OP0 and *OP1 are the values being compared and *CODE is the code - used to compare them. Update them to describe the final comparison. */ -@@ -4688,6 +4755,9 @@ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) - static void - loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) - { -+ static const enum rtx_code -+ mag_comparisons[][2] = {{LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}}; -+ - if (splittable_const_int_operand (*op1, VOIDmode)) - { - HOST_WIDE_INT rhs = INTVAL (*op1); -@@ -4695,7 +4765,7 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) - if (*code == EQ || *code == NE) - { - /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */ -- if (SMALL_OPERAND (-rhs)) -+ if (IMM12_OPERAND (-rhs)) - { - *op0 = loongarch_force_binary (GET_MODE (*op0), PLUS, *op0, - GEN_INT (-rhs)); -@@ -4704,10 +4774,6 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) - } - else - { -- static const enum rtx_code mag_comparisons[][2] = { -- {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE} -- }; -- - /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */ - for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++) - { -@@ -4730,13 +4796,14 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) - } - } - -+ loongarch_extend_comparands (*code, op0, op1); - -- *op0 = force_reg (GET_MODE (*op0), *op0); -- if (*op1 != const0_rtx) -- *op1 = force_reg (GET_MODE (*op0), *op1); -+ *op0 = force_reg (word_mode, *op0); -+ if (*op1 != const0_rtx) -+ *op1 = force_reg (word_mode, *op1); - } - --/* Like riscv_emit_int_compare, but for floating-point comparisons. */ -+/* Like loongarch_emit_int_compare, but for floating-point comparisons. */ - - static void - loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) -@@ -4749,7 +4816,7 @@ loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) - then compare that register against zero. - - Set CMP_CODE to the code of the comparison instruction and -- *CODE to the code that the branch or move should use. */ -+ *CODE to the code that the branch or move should use. */ - enum rtx_code cmp_code = *code; - /* Three FP conditions cannot be implemented by reversing the - operands for FCMP.cond.fmt, instead a reversed condition code is -@@ -4760,7 +4827,7 @@ loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) - *op1 = const0_rtx; - loongarch_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1); - } -- -+ - /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2] - and OPERAND[3]. Store the result in OPERANDS[0]. 
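
loongarch_allocate_fcc above is a plain round-robin over the eight condition-code registers, with the alignment step handling modes that occupy several of them. Modelled on bare integers (N_FCC and the 0-based register numbers are local simplifications):

#include <stdio.h>

#define N_FCC 8

static unsigned next_fcc;   /* lives in cfun->machine in the real code */

static unsigned
allocate_fcc (unsigned count)
{
  next_fcc += -next_fcc & (count - 1);  /* round up to a COUNT boundary */
  if (next_fcc > N_FCC - 1)
    next_fcc = 0;                       /* wrap around */
  unsigned regno = next_fcc;
  next_fcc += count;
  return regno;
}

int main (void)
{
  for (int i = 0; i < 10; i++)
    printf ("$fcc%u ", allocate_fcc (1));  /* cycles through 0..7 */
  printf ("\n");
  return 0;
}
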
- -@@ -4775,14 +4842,15 @@ loongarch_expand_scc (rtx operands[]) - rtx op0 = operands[2]; - rtx op1 = operands[3]; - -+ loongarch_extend_comparands (code, &op0, &op1); -+ op0 = force_reg (word_mode, op0); -+ - gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT); - - if (code == EQ || code == NE) - { -- { -- rtx zie = loongarch_zero_if_equal (op0, op1); -- loongarch_emit_binary (code, target, zie, const0_rtx); -- } -+ rtx zie = loongarch_zero_if_equal (op0, op1); -+ loongarch_emit_binary (code, target, zie, const0_rtx); - } - else - loongarch_emit_int_order_test (code, 0, target, op0, op1); -@@ -4804,49 +4872,65 @@ loongarch_expand_conditional_branch (rtx *operands) - else - loongarch_emit_int_compare (&code, &op0, &op1); - -- condition = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); -+ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1); - emit_jump_insn (gen_condjump (condition, operands[3])); - } - - /* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] - if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */ -- --void --loongarch_expand_conditional_move (rtx *operands) -+bool -+loongarch_expand_conditional_move_la464 (rtx *operands) - { - enum rtx_code code = GET_CODE (operands[1]); - rtx op0 = XEXP (operands[1], 0); - rtx op1 = XEXP (operands[1], 1); -+ machine_mode cmp_mode = GET_MODE(op0); -+ machine_mode sel_mode = GET_MODE(operands[2]); - -+ /*ffii means Selecting a fixed point based on floating point comparison results */ - if (FLOAT_MODE_P (GET_MODE (op1))) - loongarch_emit_float_compare (&code, &op0, &op1); - else - { -- if (code == EQ || code == NE) /*see test-mask-1.c && test-mask-5.c*/ -+ loongarch_extend_comparands (code, &op0, &op1); -+ -+ op0 = force_reg (word_mode, op0); -+ -+ if (code == EQ || code == NE) -+ { -+ op0 = loongarch_zero_if_equal (op0, op1); -+ op1 = const0_rtx; -+ /*Be careful iiff*/ -+ if(FLOAT_MODE_P(sel_mode)){ -+ rtx target = gen_reg_rtx (GET_MODE (op0)); -+ bool invert = false; -+ loongarch_emit_int_order_test (LTU, NULL, op0, -+ force_reg (GET_MODE (op0), const0_rtx), -+ op0); -+ op1 = const0_rtx; -+ } -+ } -+ else - { -- op0 = loongarch_zero_if_equal(op0, op1); -+ /* The comparison needs a separate scc instruction. Store the -+ result of the scc in *OP0 and compare it against zero. */ -+ bool invert = false; -+ rtx target = gen_reg_rtx (GET_MODE (op0)); -+ loongarch_emit_int_order_test (code, &invert, target, op0, op1); -+ code = invert ? EQ : NE; -+ op0 = target; - op1 = const0_rtx; - } -- else /*see test-mask-2.c*/ -- { -- /* The comparison needs a separate scc instruction. Store the -- result of the scc in *OP0 and compare it against zero. */ -- bool invert = false; -- rtx target = gen_reg_rtx (GET_MODE (op0)); -- loongarch_emit_int_order_test (code, &invert, target, op0, op1); -- code = invert ? EQ: NE; -- op0 = target; -- op1 = const0_rtx; -- } - } - - rtx cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); - /* There is no direct support for general conditional GP move involving -- two registers using SEL. see test-mask-3.c */ -- if (INTEGRAL_MODE_P (GET_MODE (operands[2])) -+ two registers using SEL. 
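
Two immediate rewrites drive loongarch_emit_int_compare above: an equality against an out-of-range constant becomes an add of its negation compared against zero, and each magnitude pair trades a condition without a direct instruction for its neighbor with one. Both are plain integer identities, checked standalone below.

#include <assert.h>
#include <stdint.h>

int main (void)
{
  int64_t op0 = 5000;

  /* EQ: op0 == 2048 becomes (op0 + -2048) == 0, because -2048 is a
     valid ADDI immediate even though 2048 itself is not.  */
  assert ((op0 == 2048) == ((op0 + -2048) == 0));

  /* Magnitude pairs: op0 <= 0xFFF becomes op0 < 0x1000, trading LE
     (no direct instruction) for LT (SLTI).  */
  assert ((op0 <= 0xFFF) == (op0 < 0xFFF + 1));
  return 0;
}
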
*/ -+ if (INTEGRAL_MODE_P (cmp_mode) -+ &&(INTEGRAL_MODE_P (sel_mode)) - && register_operand (operands[2], VOIDmode) -- && register_operand (operands[3], VOIDmode)) -- { -+ && register_operand (operands[3], VOIDmode)) { -+ - machine_mode mode = GET_MODE (operands[0]); - rtx temp = gen_reg_rtx (mode); - rtx temp2 = gen_reg_rtx (mode); -@@ -4864,26 +4948,72 @@ loongarch_expand_conditional_move (rtx *operands) - - /* Merge the two results, at least one is guaranteed to be zero. */ - emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2))); -- } -- else -- emit_insn (gen_rtx_SET (operands[0], -- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond, -- operands[2], operands[3]))); --} -- -- --/* Initialize *CUM for a call to a function of type FNTYPE. */ - --void --loongarch_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype) --{ -- memset (cum, 0, sizeof (*cum)); -- cum->prototype = (fntype && prototype_p (fntype)); -- cum->gp_reg_found = (cum->prototype && stdarg_p (fntype)); -+ return true; -+ /*For ffii, iiff due to movgr2fr, movfr2gr overhead is relatively large, -+ * so we use some compromise*/ -+ } else if (INTEGRAL_MODE_P (cmp_mode) -+ &&(FLOAT_MODE_P (sel_mode)) -+ && register_operand (operands[2], VOIDmode) -+ && register_operand (operands[3], VOIDmode)) { -+ rtx temp = gen_reg_rtx(sel_mode); -+ rtx fcc_reg =loongarch_allocate_fcc (FCCmode); -+ rtx diop0 = convert_to_mode(E_DImode, op0, true); -+ /*stl t0 i i-> movgr2fr f0 t0 -> movfr2cf fcc0 f0 -> fsel f f*/ -+ if(sel_mode == E_DFmode){ -+ emit_insn(gen_movdgr2frdf(temp, diop0)); -+ emit_insn(gen_movfr2fccdf(fcc_reg, temp)); -+ }else if(sel_mode == E_SFmode){ -+ emit_insn(gen_movdgr2frsf(temp, diop0)); -+ emit_insn(gen_movfr2fccsf(fcc_reg, temp)); -+ } -+ cond = gen_rtx_fmt_ee (code, GET_MODE(fcc_reg), fcc_reg, const0_rtx); -+ -+ emit_insn (gen_rtx_SET (operands[0], -+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond, -+ operands[2], operands[3]))); -+ return true; -+ } else if (FLOAT_MODE_P (cmp_mode) -+ &&(INTEGRAL_MODE_P (sel_mode))) { -+ /*movgr2fr f0 i -> movgr2fr f1 i -> fcmp fcc0 f f -+ * -> fsel f3 f0 f1 -> movfr2gr t0 f3*/ -+ machine_mode dst_mode = GET_MODE (operands[0]); -+ rtx temp = gen_reg_rtx (E_DFmode); -+ rtx temp2 = gen_reg_rtx (E_DFmode); -+ rtx temp3 = gen_reg_rtx (E_DFmode); -+ -+ if(CONST_INT_P(operands[2])){ -+ operands[2] = copy_to_mode_reg(dst_mode, operands[2]); -+ } -+ if(CONST_INT_P(operands[3])){ -+ operands[3] = copy_to_mode_reg(dst_mode, operands[3]); -+ } -+ if(GET_MODE(operands[2]) != E_DImode) -+ operands[2] = convert_to_mode(E_DImode, operands[2], false); -+ if(GET_MODE(operands[3]) != E_DImode) -+ operands[3] = convert_to_mode(E_DImode, operands[3], false); -+ -+ emit_insn(gen_movdgr2frdf(temp2, operands[2])); -+ emit_insn(gen_movdgr2frdf(temp3, operands[3])); -+ -+ emit_insn (gen_rtx_SET (temp, -+ gen_rtx_IF_THEN_ELSE (E_DFmode, cond, -+ temp2, temp3))); -+ if(GET_MODE(operands[0]) == E_DImode) -+ emit_insn(gen_movdfr2grdi(operands[0], temp)); -+ else if(GET_MODE(operands[0]) == E_SImode) -+ emit_insn(gen_movdfr2grsi(operands[0], temp)); -+ return true; -+ } else if(FLOAT_MODE_P (cmp_mode) -+ &&FLOAT_MODE_P (sel_mode)){ -+ emit_insn (gen_rtx_SET (operands[0], -+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond, -+ operands[2], operands[3]))); -+ return true; -+ } -+ -+ return false; - } -- -- -- - /* Implement TARGET_EXPAND_BUILTIN_VA_START. 
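
The integer path above implements a branchless select: two masked copies, of which at least one is zero, merged with an OR (LoongArch's maskeqz/masknez pair does the masking in hardware). The C model below assumes cond is 0 or 1, as produced by the preceding scc or zero-if-equal step, and does not claim the exact instruction mapping.

#include <assert.h>
#include <stdint.h>

static int64_t
cond_move (int64_t cond, int64_t a, int64_t b)
{
  int64_t t1 = (cond != 0 ? -1 : 0) & a;   /* a if cond holds, else 0 */
  int64_t t2 = (cond != 0 ? 0 : -1) & b;   /* b if it does not, else 0 */
  return t1 | t2;                          /* one side is zero; OR merges */
}

int main (void)
{
  assert (cond_move (1, 10, 20) == 10);
  assert (cond_move (0, 10, 20) == 20);
  return 0;
}
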
*/ - - static void -@@ -4893,100 +5023,15 @@ loongarch_va_start (tree valist, rtx nextarg) - std_expand_builtin_va_start (valist, nextarg); - } - -- --/* Start a definition of function NAME. */ -- --static void --loongarch_start_function_definition (const char *name) --{ -- ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function"); -- -- /* Start the definition proper. */ -- assemble_name (asm_out_file, name); -- fputs (":\n", asm_out_file); --} -- --/* End a function definition started by loongarch_start_function_definition. */ -- --static void --loongarch_end_function_definition (const char *name) --{ --} -- - /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */ - - static bool --loongarch_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) -+loongarch_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED, -+ tree exp ATTRIBUTE_UNUSED) - { -- if (!TARGET_SIBCALLS) -- return false; -- -- /* Interrupt handlers need special epilogue code and therefore can't -- use sibcalls. */ -- if (loongarch_interrupt_type_p (TREE_TYPE (current_function_decl))) -- return false; -- -- /* Otherwise OK. */ -+ /* Always OK. */ - return true; - } -- --/* Implement a handler for STORE_BY_PIECES operations -- for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */ -- --bool --loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align) --{ -- /* Storing by pieces involves moving constants into registers -- of size MIN (ALIGN, BITS_PER_WORD), then storing them. -- We need to decide whether it is cheaper to load the address of -- constant data into a register and use a block move instead. */ -- -- /* If the data is only byte aligned, then: -- -- (a1) A block move of less than 4 bytes would involve three 3 LD.Bs and -- 3 ST.Bs. We might as well use 3 single-instruction LIs and 3 SD.Bs -- instead. -- -- (a2) A block move of 4 bytes from aligned source data can use an -- LD.W/ST.W sequence. This is often better than the 4 LIs and -- 4 SD.Bs that we would generate when storing by pieces. */ -- if (align <= BITS_PER_UNIT) -- return size < 4; -- -- /* If the data is 2-byte aligned, then: -- -- (b1) A block move of less than 4 bytes would use a combination of LD.Bs, -- LD.Hs, SD.Bs and SD.Hs. We get better code by using single-instruction -- LIs, SD.Bs and SD.Hs instead. -- -- (b2) A block move of 4 bytes from aligned source data would again use -- an LD.W/ST.W sequence. In most cases, loading the address of -- the source data would require at least one extra instruction. -- It is often more efficient to use 2 single-instruction LIs and -- 2 SHs instead. -- -- (b3) A block move of up to 3 additional bytes would be like (b1). -- -- (b4) A block move of 8 bytes from aligned source data can use two -- LD.W/ST.W sequences. Both sequences are better than the 4 LIs -- and 4 ST.Hs that we'd generate when storing by pieces. -- -- The reasoning for higher alignments is similar: -- -- (c1) A block move of less than 4 bytes would be the same as (b1). -- -- (c2) A block move of 4 bytes would use an LD.W/ST.W sequence. Again, -- loading the address of the source data would typically require -- at least one extra instruction. It is generally better to use -- LUI/ORI/SW instead. -- -- (c3) A block move of up to 3 additional bytes would be like (b1). -- -- (c4) A block move of 8 bytes can use two LD.W/ST.W sequences or a single -- LD.D/ST.D sequence, and in these cases we've traditionally preferred -- the memory copy over the more bulky constant moves. 
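The long case analysis in the removed comment boils down to two thresholds. A sketch under invented names, mirroring the shape of the removed loongarch_store_by_pieces_p hook:

    #include <assert.h>
    #include <stdbool.h>

    /* Byte-aligned stores are worth doing by pieces only below 4 bytes;
       with any better alignment the cutoff is a doubleword.  */
    static bool
    store_by_pieces_p (unsigned long size, unsigned int align_bits)
    {
      if (align_bits <= 8)
        return size < 4;
      return size < 8;
    }

    int
    main (void)
    {
      assert (store_by_pieces_p (3, 8) && !store_by_pieces_p (4, 8));
      assert (store_by_pieces_p (7, 32) && !store_by_pieces_p (8, 32));
      return 0;
    }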
*/ -- return size < 8; --} - - /* Emit straight-line code to move LENGTH bytes from SRC to DEST. - Assume that the areas do not overlap. */ -@@ -4999,20 +5044,13 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - int i; - machine_mode mode; - rtx *regs; -+ -+ if (STRICT_ALIGNMENT) -+ bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))); -+ else -+ bits = BITS_PER_WORD; - -- /* Work out how many bits to move at a time. If both operands have -- half-word alignment, it is usually better to move in half words. -- For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr -- and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr. -- Otherwise move word-sized chunks. -- -- For ISA_HAS_LWL_LWR we rely on the lwl/lwr & swl/swr load. Otherwise -- picking the minimum of alignment or BITS_PER_WORD gets us the -- desired size for bits. */ -- -- bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))); -- -- if (TARGET_LASX) -+ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) - { - bits = BITS_PER_WORD * 4; - mode = V4DImode; -@@ -5029,7 +5067,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - - /* Load as many BITS-sized chunks as possible. Use a normal load if - the source has enough alignment, otherwise use left/right pairs. */ -- if (TARGET_LASX) -+ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) - { - for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) - { -@@ -5047,7 +5085,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - } - - /* Copy the chunks to the destination. */ -- if (TARGET_LASX) -+ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) - { - - for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) -@@ -5065,9 +5103,9 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - /* Mop up any left-over bytes. 
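The mop-up that follows finishes the copy with at most one access per power-of-two width. The same cascade in plain C, with memcpy standing in for each emitted load/store pair (a sketch, not the expander itself; the 16-byte vector step is omitted):

    #include <string.h>
    #include <stddef.h>

    /* After the wide chunks, at most one 8-, 4-, 2- and 1-byte copy
       remains, in strictly decreasing width order.  */
    static void
    copy_tail (unsigned char *dest, const unsigned char *src,
               size_t offset, size_t length)
    {
      size_t w;
      for (w = 8; w >= 1; w /= 2)
        if (length - offset >= w)
          {
            memcpy (dest + offset, src + offset, w);
            offset += w;
          }
    }

    int
    main (void)
    {
      unsigned char s[15] = "abcdefghijklmn", d[15] = { 0 };
      copy_tail (d, s, 0, sizeof s);
      return memcmp (d, s, sizeof s) == 0 ? 0 : 1;
    }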
*/ - if (offset < length) - { -- if (TARGET_LASX) -+ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) - { -- if(length - offset >= 16) -+ if (length - offset >= 16) - { - rtx *regs_tmp = XALLOCAVEC (rtx, 1); - regs_tmp[0] = gen_reg_rtx (V2DImode); -@@ -5075,7 +5113,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - loongarch_emit_move (adjust_address (dest, V2DImode, offset), regs_tmp[0]); - offset += 16; - } -- if(length - offset >= 8) -+ if (length - offset >= 8) - { - rtx *regs_tmp = XALLOCAVEC (rtx, 1); - regs_tmp[0] = gen_reg_rtx (DImode); -@@ -5083,7 +5121,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - loongarch_emit_move (adjust_address (dest, DImode, offset), regs_tmp[0]); - offset += 8; - } -- if(length - offset >= 4) -+ if (length - offset >= 4) - { - rtx *regs_tmp = XALLOCAVEC (rtx, 1); - regs_tmp[0] = gen_reg_rtx (SImode); -@@ -5091,7 +5129,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - loongarch_emit_move (adjust_address (dest, SImode, offset), regs_tmp[0]); - offset += 4; - } -- if(length - offset >= 2) -+ if (length - offset >= 2) - { - rtx *regs_tmp = XALLOCAVEC (rtx, 1); - regs_tmp[0] = gen_reg_rtx (HImode); -@@ -5099,7 +5137,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - loongarch_emit_move (adjust_address (dest, HImode, offset), regs_tmp[0]); - offset += 2; - } -- if(length - offset >= 1) -+ if (length - offset >= 1) - { - rtx *regs_tmp = XALLOCAVEC (rtx, 1); - regs_tmp[0] = gen_reg_rtx (QImode); -@@ -5108,7 +5146,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - offset += 1; - } - -- if(length - offset != 0) -+ if (length - offset != 0) - gcc_unreachable (); - } - else -@@ -5131,8 +5169,8 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) - register. Store them in *LOOP_REG and *LOOP_MEM respectively. */ - - static void --loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, -- rtx *loop_reg, rtx *loop_mem) -+loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, rtx *loop_reg, -+ rtx *loop_mem) - { - *loop_reg = copy_addr_to_reg (XEXP (mem, 0)); - -@@ -5148,7 +5186,7 @@ loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, - - static void - loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, -- HOST_WIDE_INT bytes_per_iter) -+ HOST_WIDE_INT bytes_per_iter) - { - rtx_code_label *label; - rtx src_reg, dest_reg, final_src, test; -@@ -5163,8 +5201,8 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, - - /* Calculate the value that SRC_REG should have after the last iteration - of the loop. */ -- final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), -- 0, 0, OPTAB_WIDEN); -+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), 0, -+ 0, OPTAB_WIDEN); - - /* Emit the start of the loop. */ - label = gen_label_rtx (); -@@ -5174,8 +5212,10 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, - loongarch_block_move_straight (dest, src, bytes_per_iter); - - /* Move on to the next block. */ -- loongarch_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter)); -- loongarch_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter)); -+ loongarch_emit_move (src_reg, -+ plus_constant (Pmode, src_reg, bytes_per_iter)); -+ loongarch_emit_move (dest_reg, -+ plus_constant (Pmode, dest_reg, bytes_per_iter)); - - /* Emit the loop condition. 
*/ - test = gen_rtx_NE (VOIDmode, src_reg, final_src); -@@ -5198,12 +5238,12 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, - bool - loongarch_expand_block_move (rtx dest, rtx src, rtx length) - { -- -- int max_move_bytes = (TARGET_LASX ? \ -+ int max_move_bytes = (ISA_HAS_LASX ? \ - LARCH_MAX_MOVE_BYTES_STRAIGHT * 8 \ - : LARCH_MAX_MOVE_BYTES_STRAIGHT); - -- if (CONST_INT_P (length) && INTVAL (length) <= loongarch_max_inline_memcpy_size) -+ if (CONST_INT_P (length) -+ && INTVAL (length) <= loongarch_max_inline_memcpy_size) - { - if (INTVAL (length) <= max_move_bytes) - { -@@ -5213,13 +5253,12 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx length) - else if (optimize) - { - loongarch_block_move_loop (dest, src, INTVAL (length), -- LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); -+ LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); - return true; - } - } - return false; - } -- - - /* Expand a QI or HI mode atomic memory operation. - -@@ -5239,13 +5278,12 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx length) - - void - loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, -- rtx result, rtx mem, rtx oldval, -- rtx newval, rtx model) -+ rtx result, rtx mem, rtx oldval, rtx newval, -+ rtx model) - { - rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask; - rtx unshifted_mask_reg, mask, inverted_mask, si_op; - rtx res = NULL; -- rtx tmp = NULL; - machine_mode mode; - - mode = GET_MODE (mem); -@@ -5253,7 +5291,7 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, - /* Compute the address of the containing SImode value. */ - orig_addr = force_reg (Pmode, XEXP (mem, 0)); - memsi_addr = loongarch_force_binary (Pmode, AND, orig_addr, -- force_reg (Pmode, GEN_INT (-4))); -+ force_reg (Pmode, GEN_INT (-4))); - - /* Create a memory reference for it. */ - memsi = gen_rtx_MEM (SImode, memsi_addr); -@@ -5263,7 +5301,6 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, - /* Work out the byte offset of the QImode or HImode value, - counting from the least significant byte. */ - shift = loongarch_force_binary (Pmode, AND, orig_addr, GEN_INT (3)); -- - /* Multiply by eight to convert the shift value from bytes to bits. */ - loongarch_emit_binary (ASHIFT, shift, shift, GEN_INT (3)); - -@@ -5301,14 +5338,13 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, - res = gen_reg_rtx (SImode); - - if (newval) -- si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, model); -+ si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, -+ model); - else if (result) - si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, model); - else - si_op = generator.fn_5 (memsi, mask, inverted_mask, oldval, model); - -- //si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, model); -- - emit_insn (si_op); - - if (result) -@@ -5320,24 +5356,14 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, - } - } - --/* Return true if X is a MEM with the same size as MODE. */ -- --bool --loongarch_mem_fits_mode_p (machine_mode mode, rtx x) --{ -- return (MEM_P (x) -- && MEM_SIZE_KNOWN_P (x) -- && MEM_SIZE (x) == GET_MODE_SIZE (mode)); --} -- - /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the - source of an "ext" instruction or the destination of an "ins" - instruction. 
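The address arithmetic above (mask the address down to its containing word, turn the byte offset into a bit shift, build the field mask) is easier to see in executable form. A sketch with C11 atomics and invented names, assuming little-endian byte numbering and that the byte may legally be accessed through its containing 32-bit word:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Emulated atomic byte store via compare-exchange on the aligned
       containing word, mirroring the shift/mask scheme above.  */
    static void
    atomic_store_byte (uint8_t *p, uint8_t val)
    {
      uintptr_t addr = (uintptr_t) p;
      _Atomic uint32_t *word = (_Atomic uint32_t *) (addr & ~(uintptr_t) 3);
      unsigned shift = (unsigned) (addr & 3) * 8;   /* bytes -> bits */
      uint32_t mask = (uint32_t) 0xff << shift;
      uint32_t old = atomic_load (word), desired;
      do
        desired = (old & ~mask) | ((uint32_t) val << shift);
      while (!atomic_compare_exchange_weak (word, &old, desired));
    }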
OP must be a register operand and the following - conditions must hold: - -- 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op)) -- 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) -- 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) -+ 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op)) -+ 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) -+ 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) - - Also reject lengths equal to a word as they are better handled - by the move patterns. */ -@@ -5358,31 +5384,11 @@ loongarch_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos) - return true; - } - -- --/* Return true iff OP1 and OP2 are valid operands together for the -- *and3 patterns. For the cases to consider, -- see the table in the comment before the pattern. */ -- --bool --and_operands_ok (machine_mode mode, rtx op1, rtx op2) --{ -- -- if (memory_operand (op1, mode)) -- { -- return and_load_operand (op2, mode); -- } -- else -- return and_reg_operand (op2, mode); --} -- - /* Print the text for PRINT_OPERAND punctation character CH to FILE. - The punctuation characters are: - - '.' Print the name of the register with a hard-wired zero (zero or $r0). - '$' Print the name of the stack pointer register (sp or $r3). -- ':' Print "c" to use the compact version if the delay slot is a nop. -- '!' Print "s" to use the short version if the delay slot contains a -- 16-bit instruction. - - See also loongarch_init_print_operand_punct. */ - -@@ -5399,14 +5405,6 @@ loongarch_print_operand_punctuation (FILE *file, int ch) - fputs (reg_names[STACK_POINTER_REGNUM], file); - break; - -- case ':': -- /* When final_sequence is 0, the delay slot will be a nop. We can -- use the compact version where available. The %: formatter will -- only be present if a compact form of the branch is available. */ -- if (final_sequence == 0) -- putc ('c', file); -- break; -- - default: - gcc_unreachable (); - break; -@@ -5420,7 +5418,7 @@ loongarch_init_print_operand_punct (void) - { - const char *p; - -- for (p = ".$:"; *p; p++) -+ for (p = ".$"; *p; p++) - loongarch_print_operand_punct[(unsigned char) *p] = true; - } - -@@ -5429,7 +5427,8 @@ loongarch_init_print_operand_punct (void) - opcode to FILE. */ - - static void --loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter) -+loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, -+ int letter) - { - switch (code) - { -@@ -5443,7 +5442,7 @@ loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter - case GEU: - case LTU: - case LEU: -- /* Conveniently, the LARCH names for these conditions are the same -+ /* Conveniently, the LoongArch names for these conditions are the same - as their RTL equivalents. */ - fputs (GET_RTX_NAME (code), file); - break; -@@ -5457,7 +5456,8 @@ loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter - /* Likewise floating-point branches. */ - - static void --loongarch_print_float_branch_condition (FILE *file, enum rtx_code code, int letter) -+loongarch_print_float_branch_condition (FILE *file, enum rtx_code code, -+ int letter) - { - switch (code) - { -@@ -5487,20 +5487,22 @@ loongarch_print_operand_punct_valid_p (unsigned char code) - implement the release portion of memory model MODEL. 
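The fence predicate whose comment opens above is completed in the next hunk; its effect, shorn of the sync-model cases, fits in a few lines. Enum names here are invented for the sketch; the point is the LA664 special case, where plain acquire/release/consume no longer force the _db form because dedicated dbar hints are emitted instead (see the '%J' and '%K' codes further down):

    #include <assert.h>
    #include <stdbool.h>

    enum memmodel { MM_RELAXED, MM_CONSUME, MM_ACQUIRE, MM_RELEASE,
                    MM_ACQ_REL, MM_SEQ_CST };

    static bool
    needs_rel_acq_fence (enum memmodel model, bool is_la664)
    {
      switch (model)
        {
        case MM_ACQ_REL:
        case MM_SEQ_CST:
          return true;
        case MM_RELEASE:
        case MM_ACQUIRE:
        case MM_CONSUME:
          return !is_la664;       /* LA664 uses dbar hints instead */
        default:
          return false;
        }
    }

    int
    main (void)
    {
      assert (needs_rel_acq_fence (MM_SEQ_CST, true));
      assert (!needs_rel_acq_fence (MM_RELEASE, true));
      assert (needs_rel_acq_fence (MM_RELEASE, false));
      return 0;
    }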
*/ - - static bool --loongarch_memmodel_needs_rel_and_acq_fence (enum memmodel model) -+loongarch_memmodel_needs_rel_acq_fence (enum memmodel model) - { - switch (model) - { - case MEMMODEL_ACQ_REL: - case MEMMODEL_SEQ_CST: - case MEMMODEL_SYNC_SEQ_CST: -- case MEMMODEL_RELEASE: - case MEMMODEL_SYNC_RELEASE: -- case MEMMODEL_ACQUIRE: -- case MEMMODEL_CONSUME: - case MEMMODEL_SYNC_ACQUIRE: - return true; - -+ case MEMMODEL_RELEASE: -+ case MEMMODEL_ACQUIRE: -+ case MEMMODEL_CONSUME: -+ if (!TARGET_uARCH_LA664) -+ return true; - case MEMMODEL_RELAXED: - return false; - -@@ -5517,25 +5519,25 @@ loongarch_memmodel_needs_release_fence (enum memmodel model) - { - switch (model) - { -- case MEMMODEL_ACQ_REL: -- case MEMMODEL_SEQ_CST: -- case MEMMODEL_SYNC_SEQ_CST: -- case MEMMODEL_RELEASE: -- case MEMMODEL_SYNC_RELEASE: -- return true; -+ case MEMMODEL_ACQ_REL: -+ case MEMMODEL_SEQ_CST: -+ case MEMMODEL_SYNC_SEQ_CST: -+ case MEMMODEL_RELEASE: -+ case MEMMODEL_SYNC_RELEASE: -+ return true; - -- case MEMMODEL_ACQUIRE: -- case MEMMODEL_CONSUME: -- case MEMMODEL_SYNC_ACQUIRE: -- case MEMMODEL_RELAXED: -- return false; -+ case MEMMODEL_ACQUIRE: -+ case MEMMODEL_CONSUME: -+ case MEMMODEL_SYNC_ACQUIRE: -+ case MEMMODEL_RELAXED: -+ return false; - -- default: -- gcc_unreachable (); -+ default: -+ gcc_unreachable (); - } - } - --/* Implement TARGET_PRINT_OPERAND. The LARCH-specific operand codes are: -+/* Implement TARGET_PRINT_OPERAND. The LoongArch-specific operand codes are: - - 'E' Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal. - 'X' Print CONST_INT OP in hexadecimal format. -@@ -5674,7 +5676,8 @@ loongarch_print_operand (FILE *file, rtx op, int letter) - break; - - case 'N': -- loongarch_print_int_branch_condition (file, reverse_condition (code), letter); -+ loongarch_print_int_branch_condition (file, reverse_condition (code), -+ letter); - break; - - case 'F': -@@ -5683,19 +5686,20 @@ loongarch_print_operand (FILE *file, rtx op, int letter) - - case 'W': - loongarch_print_float_branch_condition (file, reverse_condition (code), -- letter); -+ letter); - break; - - case 'T': - case 't': - { - int truth = (code == NE) == (letter == 'T'); -- fputc ("zfnt"[truth * 2 + ST_REG_P (REGNO (XEXP (op, 0)))], file); -+ fputc ("zfnt"[truth * 2 + FCC_REG_P (REGNO (XEXP (op, 0)))], file); - } - break; - - case 'Y': -- if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (loongarch_fp_conditions)) -+ if (code == CONST_INT -+ && UINTVAL (op) < ARRAY_SIZE (loongarch_fp_conditions)) - fputs (loongarch_fp_conditions[UINTVAL (op)], file); - else - output_operand_lossage ("'%%%c' is not a valid operand prefix", -@@ -5750,18 +5754,36 @@ loongarch_print_operand (FILE *file, rtx op, int letter) - break; - - case 'A': -- if (loongarch_memmodel_needs_rel_and_acq_fence ((enum memmodel) INTVAL (op))) -+ if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op))) - fputs ("_db", file); - break; - - case 'G': - if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op))) -- fputs ("dbar\t0", file); -+ fputs ("dbar\t0x11", file); -+ break; -+ -+ case 'J': -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (op)); -+ if (is_mm_release (model)) -+ fputs ("dbar\t0x12", file); -+ } -+ break; -+ -+ case 'K': -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (op)); -+ if (is_mm_acquire (model)) -+ fputs ("dbar\t0x18", file); -+ } - break; - - case 'i': - if (code != REG) -- fputs ("i", file); -+ fputs ("i", file); - break; - - 
default: -@@ -5770,10 +5792,7 @@ loongarch_print_operand (FILE *file, rtx op, int letter) - case REG: - { - unsigned int regno = REGNO (op); -- if ((letter == 'M') -- || letter == 'D') -- regno++; -- else if (letter && letter != 'z' && letter != 'M' && letter != 'L') -+ if (letter && letter != 'z') - output_operand_lossage ("invalid use of '%%%c'", letter); - fprintf (file, "%s", reg_names[regno]); - } -@@ -5781,8 +5800,8 @@ loongarch_print_operand (FILE *file, rtx op, int letter) - - case MEM: - if (letter == 'D') -- output_address (GET_MODE (op), plus_constant (Pmode, -- XEXP (op, 0), 4)); -+ output_address (GET_MODE (op), -+ plus_constant (Pmode, XEXP (op, 0), 4)); - else if (letter == 'b') - { - gcc_assert (REG_P (XEXP (op, 0))); -@@ -5809,7 +5828,7 @@ loongarch_print_operand (FILE *file, rtx op, int letter) - /* Implement TARGET_PRINT_OPERAND_ADDRESS. */ - - static void --loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x) -+loongarch_print_operand_address (FILE *file, machine_mode /* mode */, rtx x) - { - struct loongarch_address_info addr; - -@@ -5821,6 +5840,11 @@ loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x) - loongarch_print_operand (file, addr.offset, 0); - return; - -+ case ADDRESS_REG_REG: -+ fprintf (file, "%s,%s", reg_names[REGNO (addr.reg)], -+ reg_names[REGNO (addr.offset)]); -+ return; -+ - case ADDRESS_CONST_INT: - fprintf (file, "%s,", reg_names[GP_REG_FIRST]); - output_addr_const (file, x); -@@ -5830,37 +5854,17 @@ loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x) - output_addr_const (file, loongarch_strip_unspec_address (x)); - return; - } -- if (GET_CODE (x) == CONST_INT) -+ if (CONST_INT_P (x)) - output_addr_const (file, x); - else - gcc_unreachable (); - } - -- --/* Implement TARGET_ENCODE_SECTION_INFO. */ -- --static void --loongarch_encode_section_info (tree decl, rtx rtl, int first) --{ -- default_encode_section_info (decl, rtl, first); -- -- if (TREE_CODE (decl) == FUNCTION_DECL) -- { -- rtx symbol = XEXP (rtl, 0); -- tree type = TREE_TYPE (decl); -- -- /* Encode whether the symbol is short or long. */ -- if ((TARGET_LONG_CALLS && !loongarch_near_type_p (type)) -- || loongarch_far_type_p (type)) -- SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL; -- } --} -- --/* Implement TARGET_SELECT_RTX_SECTION. */ -+/* Implement TARGET_ASM_SELECT_RTX_SECTION. */ - - static section * - loongarch_select_rtx_section (machine_mode mode, rtx x, -- unsigned HOST_WIDE_INT align) -+ unsigned HOST_WIDE_INT align) - { - /* ??? Consider using mergeable small data sections. */ - if (loongarch_rtx_constant_in_small_data_p (mode)) -@@ -5871,12 +5875,10 @@ loongarch_select_rtx_section (machine_mode mode, rtx x, - - /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION. - -- The complication here is that, with the combination -- !TARGET_ABSOLUTE_ABICALLS , jump tables will use -- absolute addresses, and should therefore not be included in the -- read-only part of a DSO. Handle such cases by selecting a normal -- data section instead of a read-only one. The logic apes that in -- default_function_rodata_section. */ -+ The complication here is that jump atbles will use absolute addresses, -+ and should therefore not be included in the read-only part of a DSO. -+ Handle such cases by selecting a normal data section instead of a -+ read-only one. The logic apes that in default_function_rodata_section. 
*/ - - static section * - loongarch_function_rodata_section (tree decl) -@@ -5889,17 +5891,11 @@ loongarch_function_rodata_section (tree decl) - static bool - loongarch_in_small_data_p (const_tree decl) - { -- unsigned HOST_WIDE_INT size; -+ int size; - - if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL) - return false; - -- /* We don't yet generate small-data references for -- VxWorks RTP code. See the related -G handling in -- loongarch_option_override. */ -- if (TARGET_VXWORKS_RTP) -- return false; -- - if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0) - { - const char *name; -@@ -5918,23 +5914,12 @@ loongarch_in_small_data_p (const_tree decl) - /* We have traditionally not treated zero-sized objects as small data, - so this is now effectively part of the ABI. */ - size = int_size_in_bytes (TREE_TYPE (decl)); -- return size > 0 && size <= loongarch_small_data_threshold; -+ return size > 0 && size <= g_switch_value; - } - --/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use -- anchors for small data: the GP register acts as an anchor in that -- case. We also don't want to use them for PC-relative accesses, -- where the PC acts as an anchor. */ -- --static bool --loongarch_use_anchors_for_symbol_p (const_rtx symbol) --{ -- return default_use_anchors_for_symbol_p (symbol); --} -- --/* The LARCH debug format wants all automatic variables and arguments -+/* The LoongArch debug format wants all automatic variables and arguments - to be in terms of the virtual frame pointer (stack pointer before -- any adjustment in the function), while the LARCH 3.0 linker wants -+ any adjustment in the function), while the LoongArch linker wants - the frame pointer to be the stack pointer after the initial - adjustment. So, we do the adjustment here. The arg pointer (which - is eliminated) points to the virtual frame pointer, while the frame -@@ -5961,7 +5946,7 @@ loongarch_debugger_offset (rtx addr, HOST_WIDE_INT offset) - - return offset; - } -- -+ - /* Implement ASM_OUTPUT_EXTERNAL. */ - - void -@@ -5971,7 +5956,7 @@ loongarch_output_external (FILE *file, tree decl, const char *name) - - /* We output the name if and only if TREE_SYMBOL_REFERENCED is - set in order to avoid putting out names that are never really -- used. */ -+ used. */ - if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) - { - if (loongarch_in_small_data_p (decl)) -@@ -6014,33 +5999,6 @@ loongarch_output_dwarf_dtprel (FILE *file, int size, rtx x) - fputs ("+0x8000", file); - } - --/* Implement TARGET_DWARF_REGISTER_SPAN. */ -- --static rtx --loongarch_dwarf_register_span (rtx reg) --{ -- rtx high, low; -- machine_mode mode; -- -- mode = GET_MODE (reg); -- -- return NULL_RTX; --} -- --/* Implement TARGET_DWARF_FRAME_REG_MODE. */ -- --static machine_mode --loongarch_dwarf_frame_reg_mode (int regno) --{ -- machine_mode mode = default_dwarf_frame_reg_mode (regno); -- -- if (FP_REG_P (regno) && loongarch_abi == ABILP32 && TARGET_FLOAT64) -- mode = SImode; -- -- return mode; --} -- -- - /* Implement ASM_OUTPUT_ASCII. */ - - void -@@ -6072,7 +6030,7 @@ loongarch_output_ascii (FILE *stream, const char *string, size_t len) - cur_pos += 4; - } - -- if (cur_pos > 72 && i+1 < len) -+ if (cur_pos > 72 && i + 1 < len) - { - cur_pos = 17; - fprintf (stream, "\"\n\t.ascii\t\""); -@@ -6081,194 +6039,6 @@ loongarch_output_ascii (FILE *stream, const char *string, size_t len) - fprintf (stream, "\"\n"); - } - --/* Emit either a label, .comm, or .lcomm directive. 
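The new small-data test above is now a pure -G threshold check. Isolated, with names invented for the sketch:

    #include <assert.h>
    #include <stdbool.h>

    /* Zero-sized objects are never small data (traditional behaviour,
       now effectively part of the ABI); anything else qualifies up to
       the -G threshold.  */
    static bool
    in_small_data_p (long size, long g_switch_value)
    {
      return size > 0 && size <= g_switch_value;
    }

    int
    main (void)
    {
      assert (!in_small_data_p (0, 8));
      assert (in_small_data_p (8, 8) && !in_small_data_p (9, 8));
      return 0;
    }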
When using assembler -- macros, mark the symbol as written so that loongarch_asm_output_external -- won't emit an .extern for it. STREAM is the output file, NAME is the -- name of the symbol, INIT_STRING is the string that should be written -- before the symbol and FINAL_STRING is the string that should be -- written after it. FINAL_STRING is a printf format that consumes the -- remaining arguments. */ -- --void --loongarch_declare_object (FILE *stream, const char *name, const char *init_string, -- const char *final_string, ...) --{ -- va_list ap; -- -- fputs (init_string, stream); -- assemble_name (stream, name); -- va_start (ap, final_string); -- vfprintf (stream, final_string, ap); -- va_end (ap); -- -- tree name_tree = get_identifier (name); -- TREE_ASM_WRITTEN (name_tree) = 1; --} -- --/* Declare a common object of SIZE bytes using asm directive INIT_STRING. -- NAME is the name of the object and ALIGN is the required alignment -- in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third -- alignment argument. */ -- --void --loongarch_declare_common_object (FILE *stream, const char *name, -- const char *init_string, -- unsigned HOST_WIDE_INT size, -- unsigned int align, bool takes_alignment_p) --{ -- if (!takes_alignment_p) -- { -- size += (align / BITS_PER_UNIT) - 1; -- size -= size % (align / BITS_PER_UNIT); -- loongarch_declare_object (stream, name, init_string, -- "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size); -- } -- else -- loongarch_declare_object (stream, name, init_string, -- "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", -- size, align / BITS_PER_UNIT); --} -- --/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the -- elfos.h version, but we also need to handle -muninit-const-in-rodata. */ -- --void --loongarch_output_aligned_decl_common (FILE *stream, tree decl, const char *name, -- unsigned HOST_WIDE_INT size, -- unsigned int align) --{ -- loongarch_declare_common_object (stream, name, "\n\t.comm\t", -- size, align, true); --} -- --#ifdef ASM_OUTPUT_SIZE_DIRECTIVE --extern int size_directive_output; -- --/* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF -- definitions except that it uses loongarch_declare_object to emit the label. */ -- --void --loongarch_declare_object_name (FILE *stream, const char *name, -- tree decl ATTRIBUTE_UNUSED) --{ --#ifdef ASM_OUTPUT_TYPE_DIRECTIVE --#ifdef USE_GNU_UNIQUE_OBJECT -- /* As in elfos.h. */ -- if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (decl) -- && (!DECL_ARTIFICIAL (decl) || !TREE_READONLY (decl))) -- ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "gnu_unique_object"); -- else --#endif -- ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); --#endif -- -- size_directive_output = 0; -- if (!flag_inhibit_size_directive && DECL_SIZE (decl)) -- { -- HOST_WIDE_INT size; -- -- size_directive_output = 1; -- size = int_size_in_bytes (TREE_TYPE (decl)); -- ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); -- } -- -- loongarch_declare_object (stream, name, "", ":\n"); --} -- --/* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. 
*/ -- --void --loongarch_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end) --{ -- const char *name; -- -- name = XSTR (XEXP (DECL_RTL (decl), 0), 0); -- if (!flag_inhibit_size_directive -- && DECL_SIZE (decl) != 0 -- && !at_end -- && top_level -- && DECL_INITIAL (decl) == error_mark_node -- && !size_directive_output) -- { -- HOST_WIDE_INT size; -- -- size_directive_output = 1; -- size = int_size_in_bytes (TREE_TYPE (decl)); -- ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); -- } --} --#endif -- --/* Mark text contents as code or data, mainly for the purpose of correct -- disassembly. Emit a local symbol and set its type appropriately for -- that purpose. Also emit `.insn' if marking contents as code so that -- the ISA mode is recorded and any padding that follows is disassembled -- as correct instructions. */ -- --void --loongarch_set_text_contents_type (FILE *file ATTRIBUTE_UNUSED, -- const char *prefix ATTRIBUTE_UNUSED, -- unsigned long num ATTRIBUTE_UNUSED, -- bool function_p ATTRIBUTE_UNUSED) --{ --#ifdef ASM_OUTPUT_TYPE_DIRECTIVE -- char buf[(sizeof (num) * 10) / 4 + 2]; -- const char *fnname; -- char *sname; -- rtx symbol; -- -- sprintf (buf, "%lu", num); -- symbol = XEXP (DECL_RTL (current_function_decl), 0); -- fnname = targetm.strip_name_encoding (XSTR (symbol, 0)); -- sname = ACONCAT ((prefix, fnname, "_", buf, NULL)); -- -- ASM_OUTPUT_TYPE_DIRECTIVE (file, sname, function_p ? "function" : "object"); -- assemble_name (file, sname); -- fputs (":\n", file); --// if (function_p) --// fputs ("\t.insn\n", file); --#endif --} -- -- --/* Implement TARGET_ASM_FILE_START. */ -- --static void --loongarch_file_start (void) --{ -- default_file_start (); -- -- /* Generate a special section to describe the ABI switches used to -- produce the resultant binary. */ --} -- -- --/* Return true if REGNO is a register that is ordinarily call-clobbered -- but must nevertheless be preserved by an interrupt handler. */ -- --static bool --loongarch_interrupt_extra_call_saved_reg_p (unsigned int regno) --{ -- if (GP_REG_P (regno) -- && cfun->machine->use_shadow_register_set == SHADOW_SET_NO) -- { -- /* $0 is hard-wired. */ -- if (regno == GP_REG_FIRST) -- return false; -- -- /* The function will return the stack pointer to its original value -- anyway. */ -- if (regno == STACK_POINTER_REGNUM) -- return false; -- -- /* Otherwise, return true for registers that aren't ordinarily -- call-clobbered. */ -- return call_used_regs[regno]; -- } -- -- return false; --} -- - /* Implement TARGET_FRAME_POINTER_REQUIRED. */ - - static bool -@@ -6282,17 +6052,20 @@ loongarch_frame_pointer_required (void) - return false; - } - --/* Make sure that we're not trying to eliminate to the wrong hard frame -- pointer. */ -+/* Implement TARGET_CAN_ELIMINATE. Make sure that we're not trying -+ to eliminate to the wrong hard frame pointer. */ - - static bool - loongarch_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) - { -+ if (stack_realign_fp) -+ return ((from == ARG_POINTER_REGNUM -+ && to == HARD_FRAME_POINTER_REGNUM) -+ || (from == FRAME_POINTER_REGNUM -+ && to == STACK_POINTER_REGNUM)); - return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM); - } - -- -- - /* Implement RETURN_ADDR_RTX. We do not support moving back to a - previous frame. 
*/ - -@@ -6315,73 +6088,21 @@ loongarch_set_return_address (rtx address, rtx scratch) - rtx slot_address; - - gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM)); -+ - if (frame_pointer_needed) - slot_address = loongarch_add_offset (scratch, hard_frame_pointer_rtx, -- -UNITS_PER_WORD); -+ -UNITS_PER_WORD); - else - slot_address = loongarch_add_offset (scratch, stack_pointer_rtx, -- cfun->machine->frame.gp_sp_offset); -- loongarch_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address); --} -- -- --/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the -- cprestore slot. LOAD_P is true if the caller wants to load from -- the cprestore slot; it is false if the caller wants to store to -- the slot. */ -- --static void --loongarch_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset, -- bool load_p) --{ -- const struct loongarch_frame_info *frame; -- -- frame = &cfun->machine->frame; -- /* .cprestore always uses the stack pointer instead of the frame pointer. -- We have a free choice for direct stores, -- Using the stack pointer would sometimes give more -- (early) scheduling freedom, but using the frame pointer would -- sometimes give more (late) scheduling freedom. It's hard to -- predict which applies to a given function, so let's keep things -- simple. -- -- Loads must always use the frame pointer in functions that call -- alloca, and there's little benefit to using the stack pointer -- otherwise. */ -- if (frame_pointer_needed) -- { -- *base = hard_frame_pointer_rtx; -- *offset = frame->args_size - frame->hard_frame_pointer_offset; -- } -- else -- { -- *base = stack_pointer_rtx; -- *offset = frame->args_size; -- } --} -+ cfun->machine->frame.gp_sp_offset); - --/* Return true if X is the load or store address of the cprestore slot; -- LOAD_P says which. */ -- --bool --loongarch_cprestore_address_p (rtx x, bool load_p) --{ -- rtx given_base, required_base; -- HOST_WIDE_INT given_offset, required_offset; -- -- loongarch_split_plus (x, &given_base, &given_offset); -- loongarch_get_cprestore_base_and_offset (&required_base, &required_offset, load_p); -- return given_base == required_base && given_offset == required_offset; -+ loongarch_emit_move (gen_frame_mem (GET_MODE (address), slot_address), -+ address); - } - -- --/* A function to save or store a register. The first argument is the -- register and the second is the stack slot. 
*/ --typedef void (*loongarch_save_restore_fn) (rtx, rtx); -- - /* LOONGSON LA464 Emit insn pattern for gssq and gslq*/ - void --loongarch_la464_emit_128bit_load(rtx operands[]) -+loongarch_la464_emit_128bit_load (rtx operands[]) - { - rtx op0; - rtx op1; -@@ -6389,9 +6110,9 @@ loongarch_la464_emit_128bit_load(rtx operands[]) - rtx op3; - - #if 0 /*for debug*/ -- printf("464po: emit 128 PO LOAD!\n"); -- printf("reg num of op0 is: %d\n",REGNO(operands[0])); -- printf("reg num of op2 is: %d\n",REGNO(operands[2])); -+ printf ("464po: emit 128 PO LOAD!\n"); -+ printf ("reg num of op0 is: %d\n",REGNO (operands[0])); -+ printf ("reg num of op2 is: %d\n",REGNO (operands[2])); - #endif - op0 = gen_rtx_REG (GET_MODE (operands[0]), REGNO (operands[0])); - op1 = operands[1]; -@@ -6403,8 +6124,8 @@ loongarch_la464_emit_128bit_load(rtx operands[]) - gen_rtx_SET (op2,op3)))); - } - --void --loongarch_la464_emit_128bit_store(rtx operands[]) -+void -+loongarch_la464_emit_128bit_store (rtx operands[]) - { - rtx op0; - rtx op1; -@@ -6412,10 +6133,10 @@ loongarch_la464_emit_128bit_store(rtx operands[]) - rtx op3; - - #if 0 /*for debug*/ -- printf("464po: emit 128 PO STORE!\n"); -- printf("reg num of op1 is: %d\n",REGNO(operands[1])); -- printf("reg num of op3 is: %d\n",REGNO(operands[3])); --#endif -+ printf ("464po: emit 128 PO STORE!\n"); -+ printf ("reg num of op1 is: %d\n",REGNO (operands[1])); -+ printf ("reg num of op3 is: %d\n",REGNO (operands[3])); -+#endif - op0 = operands[0]; - op1 = gen_rtx_REG (GET_MODE (operands[1]), REGNO (operands[1])); - op2 = operands[2]; -@@ -6427,405 +6148,109 @@ loongarch_la464_emit_128bit_store(rtx operands[]) - - } - -+/* Return true if register REGNO can store a value of mode MODE. -+ The result of this function is cached in loongarch_hard_regno_mode_ok. */ - -- -+static bool -+loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) -+{ -+ unsigned int size; -+ enum mode_class mclass; - --/* Implement ASM_DECLARE_FUNCTION_NAME. */ -+ if (mode == FCCmode) -+ return FCC_REG_P (regno); - --void loongarch_declare_function_name(FILE *stream ATTRIBUTE_UNUSED, -- const char *name, tree fndecl ATTRIBUTE_UNUSED) --{ -- loongarch_start_function_definition (name); --} -+ size = GET_MODE_SIZE (mode); -+ mclass = GET_MODE_CLASS (mode); - --/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */ -+ if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) && !LASX_SUPPORTED_MODE_P (mode)) -+ return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; - --static void --loongarch_output_function_prologue (FILE *file) --{ -+ /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ -+ if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) -+ return true; -+ -+ /* For LASX, allow TImode and 256-bit vector modes in all FPR. FIXME: */ -+ if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) -+ return true; -+ -+ if (FP_REG_P (regno)) -+ { -+ if (mclass == MODE_FLOAT -+ || mclass == MODE_COMPLEX_FLOAT -+ || mclass == MODE_VECTOR_FLOAT) -+ return size <= UNITS_PER_FPVALUE; -+ -+ /* Allow integer modes that fit into a single register. We need -+ to put integers into FPRs when using instructions like CVT -+ and TRUNC. There's no point allowing sizes smaller than a word, -+ because the FPU has no appropriate load/store instructions. */ -+ if (mclass == MODE_INT) -+ return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; -+ } -+ -+ return false; - } - --/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */ -+/* Implement TARGET_HARD_REGNO_MODE_OK. 
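The GP-register rule in the rewritten check above reads: any mode up to a word fits in any register, while larger modes must start at an even register so the pair is contiguous. Extracted as a predicate (names invented for the sketch):

    #include <assert.h>
    #include <stdbool.h>

    static bool
    gp_regno_mode_ok (unsigned int regno_offset, unsigned int size_bytes,
                      unsigned int units_per_word)
    {
      return (regno_offset & 1) == 0 || size_bytes <= units_per_word;
    }

    int
    main (void)
    {
      assert (gp_regno_mode_ok (1, 8, 8));    /* word-sized: any reg */
      assert (!gp_regno_mode_ok (1, 16, 8));  /* TImode: even regs only */
      assert (gp_regno_mode_ok (2, 16, 8));
      return 0;
    }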
*/ - --static void --loongarch_output_function_epilogue (FILE *) -+static bool -+loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) - { -- const char *fnname; -- -- /* Get the function name the same way that toplev.c does before calling -- assemble_start_function. This is needed so that the name used here -- exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */ -- fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); -- loongarch_end_function_definition (fnname); -+ return loongarch_hard_regno_mode_ok_p[mode][regno]; - } -- - --#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) -+static bool -+loongarch_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode) -+{ -+ if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) -+ return true; - --#if PROBE_INTERVAL > 16384 --#error Cannot use indexed addressing mode for stack probing --#endif -+ return false; -+} - --/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE, -- inclusive. These are offsets from the current stack pointer. */ -+/* Implement TARGET_HARD_REGNO_NREGS. */ - --static void --loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size) -+static unsigned int -+loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) - { -+ if (FCC_REG_P (regno)) -+ /* The size of FP status registers is always 4, because they only hold -+ FCCmode values, and FCCmode is always considered to be 4 bytes wide. */ -+ return (GET_MODE_SIZE (mode) + 3) / 4; - -- /* See if we have a constant small number of probes to generate. If so, -- that's the easy case. */ -- if ((TARGET_64BIT && (first + size <= 8 * PROBE_INTERVAL)) -- || (!TARGET_64BIT && (first + size <= 2048))) -+ if (FP_REG_P (regno)) - { -- HOST_WIDE_INT i; -+ if (LSX_SUPPORTED_MODE_P (mode)) -+ return 1; - -- /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until -- it exceeds SIZE. If only one probe is needed, this will not -- generate any code. Then probe at FIRST + SIZE. */ -- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL) -- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, -- -(first + i))); -+ if (LASX_SUPPORTED_MODE_P (mode)) -+ return 1; - -- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, -- -(first + size))); -+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; - } - -- /* Otherwise, do the same as above, but in a loop. Note that we must be -- extra careful with variables wrapping around because we might be at -- the very top (or the very bottom) of the address space and we have -- to be able to handle this case properly; in particular, we use an -- equality test for the loop condition. */ -- else -- { -- HOST_WIDE_INT rounded_size; -- rtx r13 = LARCH_PROLOGUE_TEMP (Pmode); -- rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); -- rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); -+ /* All other registers are word-sized. */ -+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; -+} - -- /* Sanity check for the addressing mode we're going to use. */ -- gcc_assert (first <= 16384); -+/* Implement CLASS_MAX_NREGS, taking the maximum of the cases -+ in loongarch_hard_regno_nregs. */ - -+int -+loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) -+{ -+ int size; -+ HARD_REG_SET left; - -- /* Step 1: round SIZE to the previous multiple of the interval. 
*/ -+ size = 0x8000; -+ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); -+ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FCC_REGS])) -+ { -+ if (loongarch_hard_regno_mode_ok (FCC_REG_FIRST, mode)) -+ size = MIN (size, 4); - -- rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); -- /* TEST_ADDR = SP + FIRST */ -- if (first != 0) -- { -- emit_move_insn (r14, GEN_INT (first)); -- emit_insn (gen_rtx_SET (r13, gen_rtx_MINUS (Pmode, stack_pointer_rtx, r14))); -- } -- else -- emit_move_insn (r13, stack_pointer_rtx); -- -- /* Step 2: compute initial and final value of the loop counter. */ -- -- emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); -- if (rounded_size == 0) -- emit_move_insn (r12, r13); -- /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ -- else -- { -- emit_move_insn (r12, GEN_INT (rounded_size)); -- emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r13, r12))); -- /* Step 3: the loop -- -- do -- { -- TEST_ADDR = TEST_ADDR + PROBE_INTERVAL -- probe at TEST_ADDR -- } -- while (TEST_ADDR != LAST_ADDR) -- -- probes at FIRST + N * PROBE_INTERVAL for values of N from 1 -- until it is equal to ROUNDED_SIZE. */ -- -- emit_insn (PMODE_INSN (gen_probe_stack_range, (r13, r13, r12, r14))); -- } -- -- /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time -- that SIZE is equal to ROUNDED_SIZE. */ -- -- if (size != rounded_size) -- { -- if (TARGET_64BIT) -- emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size)); -- else -- { -- HOST_WIDE_INT i; -- for (i = 2048; i < (size - rounded_size); i += 2048 ) -- { -- emit_stack_probe (plus_constant (Pmode, r12, -i)); -- emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, r12, -2048))); -- } -- emit_stack_probe (plus_constant (Pmode, r12, -(size - rounded_size - i + 2048))); -- } -- } -- } -- -- /* Make sure nothing is scheduled before we are done. */ -- emit_insn (gen_blockage ()); --} -- --/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are -- absolute addresses. */ -- --const char * --loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) --{ -- static int labelno = 0; -- char loop_lab[32], tmp[64]; -- rtx xops[3]; -- -- ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++); -- -- /* Loop. */ -- ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab); -- -- /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ -- xops[0] = reg1; -- xops[1] = GEN_INT (-PROBE_INTERVAL); -- xops[2] = reg3; -- if (TARGET_64BIT) -- output_asm_insn ("sub.d\t%0,%0,%2", xops); -- else -- output_asm_insn ("sub.w\t%0,%0,%2", xops); -- -- /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */ -- xops[1] = reg2; -- strcpy (tmp, "bne\t%0,%1,"); -- if (TARGET_64BIT) -- output_asm_insn ("st.d\t$r0,%0,0", xops); -- else -- output_asm_insn ("st.w\t$r0,%0,0", xops); -- output_asm_insn (strcat (tmp, &loop_lab[1]), xops); -- -- return ""; --} -- --/* Expand the "prologue" pattern. 
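For constant sizes below the loop threshold, the removed code probes at fixed strides and then once at the end. The offsets it would touch, as a sketch (the PROBE_INTERVAL value is assumed here for illustration):

    #include <stdio.h>

    #define PROBE_INTERVAL 4096

    /* One probe per interval strictly below SIZE, then a final probe
       at FIRST + SIZE, matching the straight-line path above.  */
    static void
    print_probe_offsets (long first, long size)
    {
      long i;
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        printf ("probe sp-%ld\n", first + i);
      printf ("probe sp-%ld\n", first + size);
    }

    int
    main (void)
    {
      print_probe_offsets (16, 3 * PROBE_INTERVAL);
      return 0;
    }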
*/ -- --void --loongarch_expand_prologue (void) --{ -- struct loongarch_frame_info *frame = &cfun->machine->frame; -- HOST_WIDE_INT size = frame->total_size; -- unsigned mask = frame->mask; -- rtx insn; -- -- if (flag_stack_usage_info) -- current_function_static_stack_size = size; -- -- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK -- || flag_stack_clash_protection) -- { -- if (crtl->is_leaf && !cfun->calls_alloca) -- { -- if (size > PROBE_INTERVAL && size > get_stack_check_protect ()) -- loongarch_emit_probe_stack_range (get_stack_check_protect (), -- size - get_stack_check_protect ()); -- } -- else if (size > 0) -- loongarch_emit_probe_stack_range (get_stack_check_protect (), size); -- } -- -- /* When optimizing for size, call a subroutine to save the registers. */ -- if (loongarch_use_save_libcall (frame)) -- { -- rtx dwarf = NULL_RTX; -- dwarf = loongarch_adjust_libcall_cfi_prologue (); -- -- frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ -- size -= frame->save_libcall_adjustment; -- insn = emit_insn (gen_gpr_save (GEN_INT (mask))); -- -- RTX_FRAME_RELATED_P (insn) = 1; -- REG_NOTES (insn) = dwarf; -- } -- -- /* Save the registers. */ -- if ((frame->mask | frame->fmask) != 0) -- { -- HOST_WIDE_INT step1 = MIN (size, loongarch_first_stack_step (frame)); -- -- insn = gen_add3_insn (stack_pointer_rtx, -- stack_pointer_rtx, -- GEN_INT (-step1)); -- RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -- size -= step1; -- loongarch_for_each_saved_reg (size, loongarch_save_reg); -- } -- -- frame->mask = mask; /* Undo the above fib. */ -- -- /* Set up the frame pointer, if we're using one. */ -- if (frame_pointer_needed) -- { -- insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, -- GEN_INT (frame->hard_frame_pointer_offset - size)); -- RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -- -- loongarch_emit_stack_tie (); -- } -- -- /* Allocate the rest of the frame. */ -- if (size > 0) -- { -- if (SMALL_OPERAND (-size)) -- { -- insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, -- GEN_INT (-size)); -- RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; -- } -- else -- { -- loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); -- emit_insn (gen_add3_insn (stack_pointer_rtx, -- stack_pointer_rtx, -- N_LARCH_PROLOGUE_TEMP (Pmode))); -- -- /* Describe the effect of the previous instructions. */ -- insn = plus_constant (Pmode, stack_pointer_rtx, -size); -- insn = gen_rtx_SET (stack_pointer_rtx, insn); -- loongarch_set_frame_expr (insn); -- } -- } --} -- -- --/* Return true if register REGNO can store a value of mode MODE. -- The result of this function is cached in loongarch_hard_regno_mode_ok. */ -- --static bool --loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) --{ -- unsigned int size; -- enum mode_class mclass; -- -- if (mode == FCCmode) -- return ST_REG_P (regno); -- -- size = GET_MODE_SIZE (mode); -- mclass = GET_MODE_CLASS (mode); -- -- if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) && !LASX_SUPPORTED_MODE_P (mode)) -- return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; -- -- /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ -- if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) -- return true; -- -- /* For LASX, allow TImode and 256-bit vector modes in all FPR. 
FIXME: */ -- if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) -- return true; -- -- if (FP_REG_P (regno) -- && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0 -- || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG))) -- { -- if (mclass == MODE_FLOAT -- || mclass == MODE_COMPLEX_FLOAT -- || mclass == MODE_VECTOR_FLOAT) -- return size <= UNITS_PER_FPVALUE; -- -- /* Allow integer modes that fit into a single register. We need -- to put integers into FPRs when using instructions like CVT -- and TRUNC. There's no point allowing sizes smaller than a word, -- because the FPU has no appropriate load/store instructions. */ -- if (mclass == MODE_INT) -- return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; -- } -- -- return false; --} -- --/* Implement TARGET_HARD_REGNO_MODE_OK. */ -- --static bool --loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) --{ -- return loongarch_hard_regno_mode_ok_p[mode][regno]; --} -- --/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */ -- --bool --loongarch_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED, -- unsigned int new_reg) --{ -- /* Interrupt functions can only use registers that have already been -- saved by the prologue, even if they would normally be call-clobbered. */ -- if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (new_reg)) -- return false; -- -- return true; --} -- --/* Return nonzero if register REGNO can be used as a scratch register -- in peephole2. */ -- --bool --loongarch_hard_regno_scratch_ok (unsigned int regno) --{ -- /* See loongarch_hard_regno_rename_ok. */ -- if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (regno)) -- return false; -- -- return true; --} -- --static bool --loongarch_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode) --{ -- if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) -- return true; -- -- return false; --} -- --/* Implement TARGET_HARD_REGNO_NREGS. */ -- --static unsigned int --loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) --{ -- if (ST_REG_P (regno)) -- /* The size of FP status registers is always 4, because they only hold -- FCCmode values, and FCCmode is always considered to be 4 bytes wide. */ -- return (GET_MODE_SIZE (mode) + 3) / 4; -- -- if (FP_REG_P (regno)) -- { -- if (LSX_SUPPORTED_MODE_P (mode)) -- return 1; -- -- if (LASX_SUPPORTED_MODE_P (mode)) -- return 1; -- -- return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; -- } -- -- /* All other registers are word-sized. */ -- return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; --} -- --/* Implement CLASS_MAX_NREGS, taking the maximum of the cases -- in loongarch_hard_regno_nregs. */ -- --int --loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) --{ -- int size; -- HARD_REG_SET left; -- -- size = 0x8000; -- COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); -- if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS])) -- { -- if (loongarch_hard_regno_mode_ok (ST_REG_FIRST, mode)) -- size = MIN (size, 4); -- -- AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]); -+ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FCC_REGS]); - } - if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS])) - { -@@ -6849,8 +6274,8 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) - /* Implement TARGET_CAN_CHANGE_MODE_CLASS. 
*/ - - static bool --loongarch_can_change_mode_class (machine_mode from, -- machine_mode to, reg_class_t rclass) -+loongarch_can_change_mode_class (machine_mode from, machine_mode to, -+ reg_class_t rclass) - { - /* Allow conversions between different Loongson integer vectors, - and between those vectors and DImode. */ -@@ -6866,42 +6291,10 @@ loongarch_can_change_mode_class (machine_mode from, - if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) - return true; - -- /* Otherwise, there are several problems with changing the modes of -- values in floating-point registers: -- -- - When a multi-word value is stored in paired floating-point -- registers, the first register always holds the low word. We -- therefore can't allow FPRs to change between single-word and -- multi-word modes on big-endian targets. -- -- - GCC assumes that each word of a multiword register can be -- accessed individually using SUBREGs. This is not true for -- floating-point registers if they are bigger than a word. -- -- - Loading a 32-bit value into a 64-bit floating-point register -- will not sign-extend the value, despite what LOAD_EXTEND_OP -- says. We can't allow FPRs to change from SImode to a wider -- mode on 64-bit targets. -- -- - If the FPU has already interpreted a value in one format, we -- must not ask it to treat the value as having a different -- format. -- -- We therefore disallow all mode changes involving FPRs. */ -- - return !reg_classes_intersect_p (FP_REGS, rclass); - } - --/* Implement target hook small_register_classes_for_mode_p. */ -- --static bool --loongarch_small_register_classes_for_mode_p (machine_mode mode -- ATTRIBUTE_UNUSED) --{ -- return 0; --} -- --/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction, -+/* Return true if moves in mode MODE can use the FPU's fmov.fmt instruction, - or use the LSX's move.v instruction. */ - - static bool -@@ -6909,6 +6302,7 @@ loongarch_mode_ok_for_mov_fmt_p (machine_mode mode) - { - switch (mode) - { -+ case E_FCCmode: - case E_SFmode: - return TARGET_HARD_FLOAT; - -@@ -6976,7 +6370,7 @@ loongarch_move_to_gpr_cost (reg_class_t from) - return 2; - - case FP_REGS: -- /* MFC1, etc. */ -+ /* MOVFR2GR, etc. */ - return 4; - - default: -@@ -6998,7 +6392,7 @@ loongarch_move_from_gpr_cost (reg_class_t to) - return 2; - - case FP_REGS: -- /* MTC1, etc. */ -+ /* MOVGR2FR, etc. */ - return 4; - - default: -@@ -7011,8 +6405,8 @@ loongarch_move_from_gpr_cost (reg_class_t to) - the maximum for us. */ - - static int --loongarch_register_move_cost (machine_mode mode, -- reg_class_t from, reg_class_t to) -+loongarch_register_move_cost (machine_mode mode, reg_class_t from, -+ reg_class_t to) - { - reg_class_t dregs; - int cost1, cost2; -@@ -7024,7 +6418,7 @@ loongarch_register_move_cost (machine_mode mode, - if (from == FP_REGS) - { - if (to == FP_REGS && loongarch_mode_ok_for_mov_fmt_p (mode)) -- /* MOV.FMT. */ -+ /* FMOV.FMT. */ - return 4; - } - -@@ -7054,28 +6448,6 @@ loongarch_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in) - { - return (loongarch_cost->memory_latency - + memory_move_secondary_cost (mode, rclass, in)); --} -- --/* Implement TARGET_SECONDARY_MEMORY_NEEDED. -- -- When targeting the o32 FPXX ABI, all moves with a length of doubleword -- or greater must be performed by FR-mode-aware instructions. -- This can be achieved using MOVFRH2GR.S/MOVGR2FRH.W when these instructions are -- available but otherwise moves must go via memory. 
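Flattening the cost hooks above into one table: GR-to-GR moves are cheapest, anything crossing the FP/GP boundary pays the MOVGR2FR/MOVFR2GR latency, and FP-to-FP uses FMOV.FMT. A sketch only; the real hook also accounts for two-leg moves routed through GR_REGS:

    #include <assert.h>

    static int
    register_move_cost (int from_is_fp, int to_is_fp)
    {
      if (!from_is_fp && !to_is_fp)
        return 2;                   /* or/move between GPRs */
      if (from_is_fp && to_is_fp)
        return 4;                   /* fmov.fmt */
      return 4;                     /* movgr2fr / movfr2gr */
    }

    int
    main (void)
    {
      assert (register_move_cost (0, 0) < register_move_cost (0, 1));
      return 0;
    }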
-- Using MOVGR2FR/MOVFR2GR to access the lower-half of these registers would require -- a forbidden single-precision access. We require all double-word moves to use -- memory because adding even and odd floating-point registers classes -- would have a significant impact on the backend. */ -- --static bool --loongarch_secondary_memory_needed (machine_mode mode, reg_class_t class1, -- reg_class_t class2) --{ -- /* Ignore spilled pseudos. */ -- if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS)) -- return false; -- -- return false; - } - - /* Return the register class required for a secondary register when -@@ -7084,9 +6456,10 @@ loongarch_secondary_memory_needed (machine_mode mode, reg_class_t class1, - is the destination. Return NO_REGS if no secondary register is - needed. */ - --enum reg_class --loongarch_secondary_reload_class (enum reg_class rclass, -- machine_mode mode, rtx x, bool) -+static reg_class_t -+loongarch_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x, -+ reg_class_t rclass, machine_mode mode, -+ secondary_reload_info *sri ATTRIBUTE_UNUSED) - { - int regno; - -@@ -7094,15 +6467,12 @@ loongarch_secondary_reload_class (enum reg_class rclass, - - /* Copying from accumulator registers to anywhere other than a general - register requires a temporary general register. */ --// if (reg_class_subset_p (rclass, ACC_REGS)) ?????? --// return GP_REG_P (regno) ? NO_REGS : GR_REGS; - if (reg_class_subset_p (rclass, FP_REGS)) - { - if (regno < 0 - || (MEM_P (x) - && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))) -- /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use -- pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */ -+ /* In this case we can use fld.s, fst.s, fld.d or fst.d. */ - return NO_REGS; - - if (MEM_P (x) && LSX_SUPPORTED_MODE_P (mode)) -@@ -7110,17 +6480,18 @@ loongarch_secondary_reload_class (enum reg_class rclass, - return NO_REGS; - - if (GP_REG_P (regno) || x == CONST0_RTX (mode)) -- /* In this case we can use movgr2fr.s, movfr2gr.s, movgr2fr.d or movfr2gr.d. */ -+ /* In this case we can use movgr2fr.s, movfr2gr.s, movgr2fr.d or -+ * movfr2gr.d. */ - return NO_REGS; - - if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x)) -- /* We can force the constant to memory and use lwc1 -- and ldc1. As above, we will use pairs of lwc1s if -+ /* We can force the constant to memory and use fld.s -+ and fld.d. As above, we will use pairs of lwc1s if - ldc1 is not supported. */ - return NO_REGS; - - if (FP_REG_P (regno) && loongarch_mode_ok_for_mov_fmt_p (mode)) -- /* In this case we can use mov.fmt. */ -+ /* In this case we can use fmov.{s/d}. */ - return NO_REGS; - - /* Otherwise, we need to reload through an integer register. */ -@@ -7132,7 +6503,19 @@ loongarch_secondary_reload_class (enum reg_class rclass, - return NO_REGS; - } - -- -+/* Implement TARGET_MODE_REP_EXTENDED */ -+ -+static int -+loongarch_mode_rep_extended (scalar_int_mode mode, scalar_int_mode mode_rep) -+{ -+ /* On 64-bit targets, SImode register values are sign-extended to DImode. */ -+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode) -+ return SIGN_EXTEND; -+ -+ return UNKNOWN; -+} -+ -+ - /* Implement TARGET_VALID_POINTER_MODE. */ - - static bool -@@ -7160,7 +6543,7 @@ loongarch_scalar_mode_supported_p (scalar_mode mode) - - return default_scalar_mode_supported_p (mode); - } -- -+ - /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. 
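TARGET_MODE_REP_EXTENDED above promises the optimizers that an SImode value occupies its 64-bit register in sign-extended form, so redundant sign extensions can be deleted. The invariant, demonstrated at the C level:

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      int32_t w = INT32_MIN;
      int64_t d = w;        /* widening reproduces the register image */
      assert (d == INT32_MIN && d < 0);
      return 0;
    }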
*/ - - static machine_mode -@@ -7233,17 +6616,15 @@ loongarch_adjust_insn_length (rtx_insn *insn, int length) - length += 4; - - /* See how many nops might be needed to avoid hardware hazards. */ -- if (!cfun->machine->ignore_hazard_length_p -- && INSN_P (insn) -+ if (INSN_P (insn) - && INSN_CODE (insn) >= 0) - switch (get_attr_hazard (insn)) - { - case HAZARD_NONE: - break; - -- case HAZARD_DELAY: - case HAZARD_FORBIDDEN_SLOT: -- length += NOP_INSN_LENGTH; -+ length += 4; - break; - } - -@@ -7258,8 +6639,8 @@ loongarch_adjust_insn_length (rtx_insn *insn, int length) - - const char * - loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, -- const char *branch_if_true, -- const char *branch_if_false) -+ const char *branch_if_true, -+ const char *branch_if_false) - { - unsigned int length; - rtx taken; -@@ -7272,8 +6653,7 @@ loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, - return branch_if_true; - } - -- /* Generate a reversed branch around a direct jump. This fallback does -- not use branch-likely instructions. */ -+ /* Generate a reversed branch around a direct jump. */ - rtx_code_label *not_taken = gen_label_rtx (); - taken = operands[0]; - -@@ -7281,37 +6661,7 @@ loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, - operands[0] = not_taken; - output_asm_insn (branch_if_false, operands); - -- /* If INSN has a delay slot, we must provide delay slots for both the -- branch to NOT_TAKEN and the conditional jump. We must also ensure -- that INSN's delay slot is executed in the appropriate cases. */ -- if (final_sequence) -- { -- /* This first delay slot will always be executed, so use INSN's -- delay slot if is not annulled. */ -- if (!INSN_ANNULLED_BRANCH_P (insn)) -- { -- final_scan_insn (final_sequence->insn (1), -- asm_out_file, optimize, 1, NULL); -- final_sequence->insn (1)->set_deleted (); -- } -- fprintf (asm_out_file, "\n"); -- } -- -- output_asm_insn (LARCH_ABSOLUTE_JUMP ("b\t%0"), &taken); -- -- /* Now deal with its delay slot; see above. */ -- if (final_sequence) -- { -- /* This delay slot will only be executed if the branch is taken. -- Use INSN's delay slot if is annulled. */ -- if (INSN_ANNULLED_BRANCH_P (insn)) -- { -- final_scan_insn (final_sequence->insn (1), -- asm_out_file, optimize, 1, NULL); -- final_sequence->insn (1)->set_deleted (); -- } -- fprintf (asm_out_file, "\n"); -- } -+ output_asm_insn ("b\t%0", &taken); - - /* Output NOT_TAKEN. */ - targetm.asm_out.internal_label (asm_out_file, "L", -@@ -7326,21 +6676,23 @@ loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, - OPERANDS[3] is the second operand and may be zero or a register. 
*/ - - const char * --loongarch_output_equal_conditional_branch (rtx_insn* insn, rtx *operands, -- bool inverted_p) -+loongarch_output_equal_conditional_branch (rtx_insn *insn, rtx *operands, -+ bool inverted_p) - { - const char *branch[2]; - if (operands[3] == const0_rtx) - { - branch[!inverted_p] = LARCH_BRANCH ("b%C1z", "%2,%0"); - branch[inverted_p] = LARCH_BRANCH ("b%N1z", "%2,%0"); -- } else -+ } -+ else - { - branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%z3,%0"); - branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%z3,%0"); - } - -- return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); -+ return loongarch_output_conditional_branch (insn, operands, branch[1], -+ branch[0]); - } - - /* Return the assembly code for INSN, which branches to OPERANDS[0] -@@ -7351,7 +6703,7 @@ loongarch_output_equal_conditional_branch (rtx_insn* insn, rtx *operands, - - const char * - loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, -- bool inverted_p) -+ bool inverted_p) - { - const char *branch[2]; - -@@ -7377,7 +6729,7 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, - branch[!inverted_p] = LARCH_BRANCH ("b", "%0"); - branch[inverted_p] = "\t# branch never"; - break; -- default: -+ default: - gcc_unreachable (); - } - } -@@ -7385,31 +6737,19 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, - { - switch (GET_CODE (operands[1])) - { -- case LE: -- branch[!inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); -- branch[inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); -- break; -- case LEU: -- branch[!inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); -- branch[inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); -- break; -- case GT: -- branch[!inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); -- branch[inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); -- break; -- case GTU: -- branch[!inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); -- branch[inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); -- break; -- case LT: -- case LTU: -- case GE: -- case GEU: -- branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%3,%0"); -- branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%3,%0"); -- break; -- default: -- gcc_unreachable (); -+ case LE: -+ case LEU: -+ case GT: -+ case GTU: -+ case LT: -+ case LTU: -+ case GE: -+ case GEU: -+ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%3,%0"); -+ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%3,%0"); -+ break; -+ default: -+ gcc_unreachable (); - } - } - } -@@ -7419,30 +6759,11 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, - { - /* These cases are equivalent to comparisons against zero. */ - case LEU: -- inverted_p = !inverted_p; -- /* Fall through. */ - case GTU: -- branch[!inverted_p] = LARCH_BRANCH ("bne", "%2,%.,%0"); -- branch[inverted_p] = LARCH_BRANCH ("beq", "%2,%.,%0"); -- break; -- -- /* These cases are always true or always false. */ - case LTU: -- inverted_p = !inverted_p; -- /* Fall through. 
*/ - case GEU: -- branch[!inverted_p] = LARCH_BRANCH ("beq", "%.,%.,%0"); -- branch[inverted_p] = LARCH_BRANCH ("bne", "%.,%.,%0"); -- break; -- -- case LE: -- branch[!inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); -- branch[inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); -- break; -+ case LE: - case GT: -- branch[!inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); -- branch[inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); -- break; - case LT: - case GE: - branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,$r0,%0"); -@@ -7451,98 +6772,14 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, - default: - gcc_unreachable (); - } -- } -- return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); -+ } -+ return loongarch_output_conditional_branch (insn, operands, branch[1], -+ branch[0]); - } -- --/* Return the assembly code for DIV or DDIV instruction DIVISION, which has -- the operands given by OPERANDS. Add in a divide-by-zero check if needed. - -- When working around R4000 and R4400 errata, we need to make sure that -- the division is not immediately followed by a shift[1][2]. We also -- need to stop the division from being put into a branch delay slot[3]. -- The easiest way to avoid both problems is to add a nop after the -- division. When a divide-by-zero check is needed, this nop can be -- used to fill the branch delay slot. -- -- [1] If a double-word or a variable shift executes immediately -- after starting an integer division, the shift may give an -- incorrect result. See quotations of errata #16 and #28 from -- "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" -- in loongarch.md for details. -- -- [2] A similar bug to [1] exists for all revisions of the -- R4000 and the R4400 when run in an MC configuration. -- From "LARCH R4000MC Errata, Processor Revision 2.2 and 3.0": -- -- "19. In this following sequence: -- -- ddiv (or ddivu or div or divu) -- dsll32 (or dsrl32, dsra32) -- -- if an MPT stall occurs, while the divide is slipping the cpu -- pipeline, then the following double shift would end up with an -- incorrect result. -- -- Workaround: The compiler needs to avoid generating any -- sequence with divide followed by extended double shift." -- -- This erratum is also present in "LARCH R4400MC Errata, Processor -- Revision 1.0" and "LARCH R4400MC Errata, Processor Revision 2.0 -- & 3.0" as errata #10 and #4, respectively. -- -- [3] From "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" -- (also valid for LARCH R4000MC processors): -- -- "52. R4000SC: This bug does not apply for the R4000PC. -- -- There are two flavors of this bug: -- -- 1) If the instruction just after divide takes an RF exception -- (tlb-refill, tlb-invalid) and gets an instruction cache -- miss (both primary and secondary) and the line which is -- currently in secondary cache at this index had the first -- data word, where the bits 5..2 are set, then R4000 would -- get a wrong result for the div. -- -- ##1 -- nop -- div r8, r9 -- ------------------- # end-of page. -tlb-refill -- nop -- ##2 -- nop -- div r8, r9 -- ------------------- # end-of page. -tlb-invalid -- nop -- -- 2) If the divide is in the taken branch delay slot, where the -- target takes RF exception and gets an I-cache miss for the -- exception vector or where I-cache miss occurs for the -- target address, under the above mentioned scenarios, the -- div would get wrong results. 
-- -- ##1 -- j r2 # to next page mapped or unmapped -- div r8,r9 # this bug would be there as long -- # as there is an ICache miss and -- nop # the "data pattern" is present -- -- ##2 -- beq r0, r0, NextPage # to Next page -- div r8,r9 -- nop -- -- This bug is present for div, divu, ddiv, and ddivu -- instructions. -- -- Workaround: For item 1), OS could make sure that the next page -- after the divide instruction is also mapped. For item 2), the -- compiler could make sure that the divide instruction is not in -- the branch delay slot." -- -- These processors have PRId values of 0x00004220 and 0x00004300 for -- the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */ -+/* Return the assembly code for DIV.{W/D} instruction DIVISION, which has -+ the operands given by OPERANDS. Add in a divide-by-zero check if needed. -+ */ - - const char * - loongarch_output_division (const char *division, rtx *operands) -@@ -7571,13 +6808,13 @@ loongarch_lsx_output_division (const char *division, rtx *operands) - s = division; - if (TARGET_CHECK_ZERO_DIV) - { -- if(ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) -+ if (ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) - { - output_asm_insn ("xvsetallnez.%v0\t$fcc7,%u2",operands); - output_asm_insn (s, operands); - output_asm_insn ("bcnez\t$fcc7,1f", operands); - } -- else if(ISA_HAS_LSX) -+ else if (ISA_HAS_LSX) - { - output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands); - output_asm_insn (s, operands); -@@ -7587,80 +6824,13 @@ loongarch_lsx_output_division (const char *division, rtx *operands) - } - return s; - } -- --/* Return true if destination of IN_INSN is used as add source in -- OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example: -- madd.s dst, x, y, z -- madd.s a, dst, b, c */ -- --bool --loongarch_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn) --{ -- int dst_reg, src_reg; -- -- gcc_assert (get_attr_type (in_insn) == TYPE_FMADD); -- gcc_assert (get_attr_type (out_insn) == TYPE_FMADD); -- -- extract_insn (in_insn); -- dst_reg = REG_P (recog_data.operand[0]); -- -- extract_insn (out_insn); -- src_reg = REG_P (recog_data.operand[1]); -- -- if (dst_reg == src_reg) -- return true; -- -- return false; --} -- --/* Return true if IN_INSN is a multiply-add or multiply-subtract -- instruction and if OUT_INSN assigns to the accumulator operand. */ -- --bool --loongarch_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn) --{ -- enum attr_accum_in accum_in; -- int accum_in_opnum; -- rtx accum_in_op; -- -- if (recog_memoized (in_insn) < 0) -- return false; -- -- accum_in = get_attr_accum_in (in_insn); -- if (accum_in == ACCUM_IN_NONE) -- return false; -- -- accum_in_opnum = accum_in - ACCUM_IN_0; -- -- extract_insn (in_insn); -- gcc_assert (accum_in_opnum < recog_data.n_operands); -- accum_in_op = recog_data.operand[accum_in_opnum]; -- -- return reg_set_p (accum_in_op, out_insn); --} -- --/* True if the dependency between OUT_INSN and IN_INSN is on the store -- data rather than the address. We need this because the cprestore -- pattern is type "store", but is defined using an UNSPEC_VOLATILE, -- which causes the default routine to abort. We just return false -- for that case. */ -- --bool --loongarch_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn) --{ -- if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE) -- return false; -- -- return store_data_bypass_p (out_insn, in_insn); --} -- - - /* Implement TARGET_SCHED_ADJUST_COST. 
We assume that anti and output -- dependencies have no cost, except on the 20Kc where output-dependence -- is treated like input-dependence. */ -+ dependencies have no cost. */ - - static int --loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned int) -+loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, -+ unsigned int) - { - if (dep_type != 0 && (dep_type != REG_DEP_OUTPUT)) - return 0; -@@ -7672,15 +6842,10 @@ loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned - static int - loongarch_issue_rate (void) - { -- switch (loongarch_tune) -- { -- case PROCESSOR_LOONGARCH64: -- case PROCESSOR_LA464: -- return 4; -- -- default: -- return 1; -- } -+ if ((unsigned long) la_target.cpu_tune < N_TUNE_TYPES) -+ return loongarch_cpu_issue_rate[la_target.cpu_tune]; -+ else -+ return 1; - } - - /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should -@@ -7689,24 +6854,20 @@ loongarch_issue_rate (void) - static int - loongarch_multipass_dfa_lookahead (void) - { -- if (TUNE_LOONGARCH64 || TUNE_LA464) -- return 4; -- -- return 0; --} -- -- --static void --loongarch_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -- int max_ready ATTRIBUTE_UNUSED) --{ -+ if ((unsigned long) la_target.cpu_tune < N_ARCH_TYPES) -+ return loongarch_cpu_multipass_dfa_lookahead[la_target.cpu_tune]; -+ else -+ return 0; - } - - /* Implement TARGET_SCHED_REORDER. */ - - static int --loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -- rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED) -+loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, -+ int verbose ATTRIBUTE_UNUSED, -+ rtx_insn **ready ATTRIBUTE_UNUSED, -+ int *nreadyp ATTRIBUTE_UNUSED, -+ int cycle ATTRIBUTE_UNUSED) - { - return loongarch_issue_rate (); - } -@@ -7714,17 +6875,29 @@ loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUS - /* Implement TARGET_SCHED_REORDER2. */ - - static int --loongarch_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -- rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED) -+loongarch_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, -+ int verbose ATTRIBUTE_UNUSED, -+ rtx_insn **ready ATTRIBUTE_UNUSED, -+ int *nreadyp ATTRIBUTE_UNUSED, -+ int cycle ATTRIBUTE_UNUSED) - { - return cached_can_issue_more; - } - -+/* Implement TARGET_SCHED_INIT. */ -+ -+static void -+loongarch_sched_init (FILE *file ATTRIBUTE_UNUSED, -+ int verbose ATTRIBUTE_UNUSED, -+ int max_ready ATTRIBUTE_UNUSED) -+{} -+ - /* Implement TARGET_SCHED_VARIABLE_ISSUE. */ - - static int --loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, -- rtx_insn *insn, int more) -+loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED, -+ int verbose ATTRIBUTE_UNUSED, rtx_insn *insn, -+ int more) - { - /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */ - if (USEFUL_INSN_P (insn)) -@@ -7742,1243 +6915,2339 @@ loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNU - cached_can_issue_more = more; - return more; - } -- --/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY), -- return the first operand of the associated PREF or PREFX insn. */ -- --rtx --loongarch_prefetch_cookie (rtx write, rtx locality) --{ -- /* store_streamed / load_streamed. */ -- if (INTVAL (locality) <= 0) -- return GEN_INT (INTVAL (write) + 4); -- -- /* store / load. 
*/ -- if (INTVAL (locality) <= 2) -- return write; -- -- /* store_retained / load_retained. */ -- return GEN_INT (INTVAL (write) + 6); --} -- -- --/* Return whether CFG is used in loongarch_reorg. */ - --static bool --loongarch_cfg_in_reorg (void) --{ -- return (TARGET_RELAX_PIC_CALLS); --} -- --/* If INSN is a call, return the underlying CALL expr. Return NULL_RTX -- otherwise. If INSN has two call rtx, then store the second one in -- SECOND_CALL. */ -+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text -+ in order to avoid duplicating too much logic from elsewhere. */ - --static rtx --loongarch_call_expr_from_insn (rtx_insn *insn, rtx *second_call) -+static void -+loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, -+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, -+ tree function) - { -- rtx x; -- rtx x2; -- -- if (!CALL_P (insn)) -- return NULL_RTX; -- -- x = PATTERN (insn); -- if (GET_CODE (x) == PARALLEL) -- { -- /* Calls returning complex values have two CALL rtx. Look for the second -- one here, and return it via the SECOND_CALL arg. */ -- x2 = XVECEXP (x, 0, 1); -- if (GET_CODE (x2) == SET) -- x2 = XEXP (x2, 1); -- if (GET_CODE (x2) == CALL) -- *second_call = x2; -- -- x = XVECEXP (x, 0, 0); -- } -- if (GET_CODE (x) == SET) -- x = XEXP (x, 1); -- gcc_assert (GET_CODE (x) == CALL); -+ rtx this_rtx, temp1, temp2, fnaddr; -+ rtx_insn *insn; -+ bool use_sibcall_p; - -- return x; --} -+ /* Pretend to be a post-reload pass while generating rtl. */ -+ reload_completed = 1; - --/* REG is set in DEF. See if the definition is one of the ways we load a -- register with a symbol address for a loongarch_use_pic_fn_addr_reg_p call. -- If it is, return the symbol reference of the function, otherwise return -- NULL_RTX. -+ /* Mark the end of the (empty) prologue. */ -+ emit_note (NOTE_INSN_PROLOGUE_END); - -- If RECURSE_P is true, use loongarch_find_pic_call_symbol to interpret -- the values of source registers, otherwise treat such registers as -- having an unknown value. */ -+ /* Determine if we can use a sibcall to call FUNCTION directly. */ -+ fnaddr = XEXP (DECL_RTL (function), 0); -+ use_sibcall_p = const_call_insn_operand (fnaddr, Pmode); - --static rtx --loongarch_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p) --{ -- rtx_insn *def_insn; -- rtx set; -+ /* We need two temporary registers in some cases. */ -+ temp1 = gen_rtx_REG (Pmode, 12); -+ temp2 = gen_rtx_REG (Pmode, 13); - -- if (DF_REF_IS_ARTIFICIAL (def)) -- return NULL_RTX; -+ /* Find out which register contains the "this" pointer. */ -+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) -+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); -+ else -+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); - -- def_insn = DF_REF_INSN (def); -- set = single_set (def_insn); -- if (set && rtx_equal_p (SET_DEST (set), reg)) -+ /* Add DELTA to THIS_RTX. */ -+ if (delta != 0) - { -- rtx note, src, symbol; -- -- /* First see whether the source is a plain symbol. This is used -- when calling symbols that are not lazily bound. */ -- src = SET_SRC (set); -- if (GET_CODE (src) == SYMBOL_REF) -- return src; -- -- /* Handle %call16 references. 
*/ -- symbol = loongarch_strip_unspec_call (src); -- if (symbol) -+ rtx offset = GEN_INT (delta); -+ if (!IMM12_OPERAND (delta)) - { -- gcc_assert (GET_CODE (symbol) == SYMBOL_REF); -- return symbol; -+ loongarch_emit_move (temp1, offset); -+ offset = temp1; - } -- -- /* If we have something more complicated, look for a -- REG_EQUAL or REG_EQUIV note. */ -- note = find_reg_equal_equiv_note (def_insn); -- if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF) -- return XEXP (note, 0); -- -- /* Follow at most one simple register copy. Such copies are -- interesting in cases like: -- -- for (...) -- { -- locally_binding_fn (...); -- } -- -- and: -- -- locally_binding_fn (...); -- ... -- locally_binding_fn (...); -- -- where the load of locally_binding_fn can legitimately be -- hoisted or shared. However, we do not expect to see complex -- chains of copies, so a full worklist solution to the problem -- would probably be overkill. */ -- if (recurse_p && REG_P (src)) -- return loongarch_find_pic_call_symbol (def_insn, src, false); -+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); - } - -- return NULL_RTX; --} -+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */ -+ if (vcall_offset != 0) -+ { -+ rtx addr; - --/* Find the definition of the use of REG in INSN. See if the definition -- is one of the ways we load a register with a symbol address for a -- loongarch_use_pic_fn_addr_reg_p call. If it is return the symbol reference -- of the function, otherwise return NULL_RTX. RECURSE_P is as for -- loongarch_pic_call_symbol_from_set. */ -+ /* Set TEMP1 to *THIS_RTX. */ -+ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); - --static rtx --loongarch_find_pic_call_symbol (rtx_insn *insn, rtx reg, bool recurse_p) --{ -- df_ref use; -- struct df_link *defs; -- rtx symbol; -+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */ -+ addr = loongarch_add_offset (temp2, temp1, vcall_offset); - -- use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]); -- if (!use) -- return NULL_RTX; -- defs = DF_REF_CHAIN (use); -- if (!defs) -- return NULL_RTX; -- symbol = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); -- if (!symbol) -- return NULL_RTX; -+ /* Load the offset and add it to THIS_RTX. */ -+ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); -+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); -+ } - -- /* If we have more than one definition, they need to be identical. */ -- for (defs = defs->next; defs; defs = defs->next) -+ /* Jump to the target function. Use a sibcall if direct jumps are -+ allowed, otherwise load the address into a register first. */ -+ if (use_sibcall_p) - { -- rtx other; -- -- other = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); -- if (!rtx_equal_p (symbol, other)) -- return NULL_RTX; -+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); -+ SIBLING_CALL_P (insn) = 1; -+ } -+ else -+ { -+ loongarch_emit_move (temp1, fnaddr); -+ emit_jump_insn (gen_indirect_jump (temp1)); - } - -- return symbol; --} -- --/* Replace the args_size operand of the call expression CALL with the -- call-attribute UNSPEC and fill in SYMBOL as the function symbol. */ -- --static void --loongarch_annotate_pic_call_expr (rtx call, rtx symbol) --{ -- rtx args_size; -+ /* Run just enough of rest_of_compilation. This sequence was -+ "borrowed" from alpha.c. 
*/ -+ insn = get_insns (); -+ split_all_insns_noflow (); -+ shorten_branches (insn); -+ final_start_function (insn, file, 1); -+ final (insn, file, 1); -+ final_end_function (); - -- args_size = XEXP (call, 1); -- XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size), -- gen_rtvec (2, args_size, symbol), -- UNSPEC_CALL_ATTR); -+ /* Stop pretending to be a post-reload pass. */ -+ reload_completed = 0; - } - --/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See -- if instead of the arg_size argument it contains the call attributes. If -- yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function -- symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is -- -1. */ -+/* Allocate a chunk of memory for per-function machine-dependent data. */ - --bool --loongarch_get_pic_call_symbol (rtx *operands, int args_size_opno) -+static struct machine_function * -+loongarch_init_machine_status (void) - { -- rtx args_size, symbol; -- -- if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1) -- return false; -- -- args_size = operands[args_size_opno]; -- if (GET_CODE (args_size) != UNSPEC) -- return false; -- gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR); -- -- symbol = XVECEXP (args_size, 0, 1); -- gcc_assert (GET_CODE (symbol) == SYMBOL_REF); -- -- operands[args_size_opno] = symbol; -- return true; -+ return ggc_cleared_alloc (); - } - --/* Use DF to annotate PIC indirect calls with the function symbol they -- dispatch to. */ -- - static void --loongarch_annotate_pic_calls (void) -+loongarch_cpu_option_override (struct loongarch_target *target, -+ struct gcc_options *opts, -+ struct gcc_options *opts_set) - { -- basic_block bb; -- rtx_insn *insn; -- -- FOR_EACH_BB_FN (bb, cfun) -- FOR_BB_INSNS (bb, insn) -+ /* strict alignment */ -+ switch (target->cpu_arch) - { -- rtx call, reg, symbol, second_call; -+ case CPU_LA264: -+ /* Using -mstrict-align is recommended for la264 cores. */ -+ if (!opts_set->x_TARGET_STRICT_ALIGN) -+ { -+ opts->x_TARGET_STRICT_ALIGN = 1; -+ opts_set->x_TARGET_STRICT_ALIGN = 1; -+ } -+ break; -+ } - -- second_call = 0; -- call = loongarch_call_expr_from_insn (insn, &second_call); -- if (!call) -- continue; -- gcc_assert (MEM_P (XEXP (call, 0))); -- reg = XEXP (XEXP (call, 0), 0); -- if (!REG_P (reg)) -- continue; -+ /* software prefetching parameters (-fprefetch-loop-arrays) */ -+ maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, -+ loongarch_cpu_cache[target->cpu_tune].simultaneous_prefetches, -+ opts->x_param_values, opts_set->x_param_values); - -- symbol = loongarch_find_pic_call_symbol (insn, reg, true); -- if (symbol) -- { -- loongarch_annotate_pic_call_expr (call, symbol); -- if (second_call) -- loongarch_annotate_pic_call_expr (second_call, symbol); -- } -- } --} -- -+ maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, -+ loongarch_cpu_cache[target->cpu_tune].l1d_line_size, -+ opts->x_param_values, opts_set->x_param_values); - --/* A structure representing the state of the processor pipeline. -- Used by the loongarch_sim_* family of functions. */ --struct loongarch_sim { -- /* The maximum number of instructions that can be issued in a cycle. -- (Caches loongarch_issue_rate.) */ -- unsigned int issue_rate; -- -- /* The current simulation time. */ -- unsigned int time; -- -- /* How many more instructions can be issued in the current cycle. */ -- unsigned int insns_left; -- -- /* LAST_SET[X].INSN is the last instruction to set register X. -- LAST_SET[X].TIME is the time at which that instruction was issued. 
-- INSN is null if no instruction has yet set register X. */ -- struct { -- rtx_insn *insn; -- unsigned int time; -- } last_set[FIRST_PSEUDO_REGISTER]; -- -- /* The pipeline's current DFA state. */ -- state_t dfa_state; --}; -+ maybe_set_param_value (PARAM_L1_CACHE_SIZE, -+ loongarch_cpu_cache[target->cpu_tune].l1d_size, -+ opts->x_param_values, opts_set->x_param_values); - --/* Reset STATE to the initial simulation state. */ -+ maybe_set_param_value (PARAM_L2_CACHE_SIZE, -+ loongarch_cpu_cache[target->cpu_tune].l2d_size, -+ opts->x_param_values, opts_set->x_param_values); -+} - - static void --loongarch_sim_reset (struct loongarch_sim *state) -+loongarch_option_override_internal (struct gcc_options *opts, -+ struct gcc_options *opts_set) - { -- curr_state = state->dfa_state; -+ int i, regno, mode; - -- state->time = 0; -- state->insns_left = state->issue_rate; -- memset (&state->last_set, 0, sizeof (state->last_set)); -- state_reset (curr_state); -+ if (flag_pic) -+ g_switch_value = 0; - -- targetm.sched.init (0, false, 0); -- advance_state (curr_state); --} -+ loongarch_init_target (&la_target, -+ la_opt_cpu_arch, la_opt_cpu_tune, la_opt_fpu, -+ la_opt_simd, la_opt_abi_base, la_opt_abi_ext, -+ la_opt_cmodel); - --/* Initialize STATE before its first use. DFA_STATE points to an -- allocated but uninitialized DFA state. */ -+ /* Handle target-specific options: compute defaults/conflicts etc. */ -+ loongarch_config_target (&la_target, NULL, 0); - --static void --loongarch_sim_init (struct loongarch_sim *state, state_t dfa_state) --{ -- if (targetm.sched.init_dfa_pre_cycle_insn) -- targetm.sched.init_dfa_pre_cycle_insn (); -+ loongarch_update_gcc_opt_status (&la_target, opts, opts_set); -+ loongarch_cpu_option_override (&la_target, opts, opts_set); - -- if (targetm.sched.init_dfa_post_cycle_insn) -- targetm.sched.init_dfa_post_cycle_insn (); -+ if (TARGET_ABI_LP64) -+ flag_pcc_struct_return = 0; - -- state->issue_rate = loongarch_issue_rate (); -- state->dfa_state = dfa_state; -- loongarch_sim_reset (state); --} -+ /* Decide which rtx_costs structure to use. */ -+ if (optimize_size) -+ loongarch_cost = &loongarch_rtx_cost_optimize_size; -+ else -+ loongarch_cost = &loongarch_cpu_rtx_cost_data[la_target.cpu_tune]; - -- -+ /* If the user hasn't specified a branch cost, use the processor's -+ default. */ -+ if (loongarch_branch_cost == 0) -+ loongarch_branch_cost = loongarch_cost->branch_cost; - --/* Set up costs based on the current architecture and tuning settings. */ -+ if (loongarch_vector_access_cost == 0) -+ loongarch_vector_access_cost = 5; - --static void --loongarch_set_tuning_info (void) --{ - -- loongarch_tuning_info.arch = loongarch_arch; -- loongarch_tuning_info.tune = loongarch_tune; -- loongarch_tuning_info.initialized_p = true; -+ /* Enable sw prefetching at -O3 and higher. */ -+ if (opts->x_flag_prefetch_loop_arrays < 0 -+ && (opts->x_optimize >= 3 || opts->x_flag_profile_use) -+ && !opts->x_optimize_size) -+ opts->x_flag_prefetch_loop_arrays = 1; - -- dfa_start (); -+ switch (la_target.cmodel) -+ { -+ case CMODEL_TINY_STATIC: -+ case CMODEL_EXTREME: -+ if (opts->x_flag_plt) -+ error ("code model %qs and %qs not support %s mode", -+ "tiny-static", "extreme", "plt"); -+ break; - -- struct loongarch_sim state; -- loongarch_sim_init (&state, alloca (state_size ())); -+ case CMODEL_NORMAL: -+ case CMODEL_TINY: -+ case CMODEL_LARGE: -+ break; - -- dfa_finish (); --} -+ default: -+ gcc_unreachable (); -+ } - --/* Implement TARGET_EXPAND_TO_RTL_HOOK. 
*/ -+ loongarch_init_print_operand_punct (); - --static void --loongarch_expand_to_rtl_hook (void) --{ -- /* We need to call this at a point where we can safely create sequences -- of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also -- need to call it at a point where the DFA infrastructure is not -- already in use, so we can't just call it lazily on demand. -- -- At present, loongarch_tuning_info is only needed during post-expand -- RTL passes such as split_insns, so this hook should be early enough. -- We may need to move the call elsewhere if loongarch_tuning_info starts -- to be used for other things (such as rtx_costs, or expanders that -- could be called during gimple optimization). */ -- loongarch_set_tuning_info (); --} -- --/* This structure records that the current function has a LO_SUM -- involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is -- the largest offset applied to BASE by all such LO_SUMs. */ --struct loongarch_lo_sum_offset { -- rtx base; -- HOST_WIDE_INT offset; --}; -+ /* Set up array to map GCC register number to debug register number. -+ Ignore the special purpose register numbers. */ - --/* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */ -+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) -+ { -+ if (GP_REG_P (i) || FP_REG_P (i)) -+ loongarch_dwarf_regno[i] = i; -+ else -+ loongarch_dwarf_regno[i] = INVALID_REGNUM; -+ } - --static hashval_t --loongarch_hash_base (rtx base) --{ -- int do_not_record_p; -+ /* Set up loongarch_hard_regno_mode_ok. */ -+ for (mode = 0; mode < MAX_MACHINE_MODE; mode++) -+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) -+ loongarch_hard_regno_mode_ok_p[mode][regno] -+ = loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode); - -- return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false); --} -+ /* Function to allocate machine-dependent function status. */ -+ init_machine_status = &loongarch_init_machine_status; - --/* Hashtable helpers. */ -+ /* If not optimizing for size, set the default -+ alignment to what the target wants. */ -+ if (!opts->x_optimize_size) -+ { -+ if (opts->x_align_loops <= 0) -+ opts->x_align_loops = 16; -+ if (opts->x_align_jumps <= 0) -+ opts->x_align_jumps = 16; -+ if (opts->x_align_functions <= 0) -+ opts->x_align_functions = 16; -+ } - --struct loongarch_lo_sum_offset_hasher : free_ptr_hash --{ -- typedef rtx_def *compare_type; -- static inline hashval_t hash (const loongarch_lo_sum_offset *); -- static inline bool equal (const loongarch_lo_sum_offset *, const rtx_def *); --}; -+ if (loongarch_veclibabi_name -+ && strcmp (loongarch_veclibabi_name, "sleef") != 0) -+ { -+ error ("unknown vectorization library ABI type (%qs) for " -+ "%qs", loongarch_veclibabi_name, "-mveclibabi="); -+ inform (input_location, -+ "valid arguments to %<-mveclibabi=%> are: %s", "sleef"); -+ } -+ if (!ISA_HAS_LASX) -+ loongarch_stack_realign = 0; - --/* Hash-table callbacks for loongarch_lo_sum_offsets. */ -+ /* -mrecip options. 
*/ -+ static struct -+ { -+ const char *string; /* option name */ -+ unsigned int mask; /* mask bits to set */ -+ } -+ const recip_options[] = -+ { -+ { "all", RECIP_MASK_ALL }, -+ { "none", RECIP_MASK_NONE }, -+ { "div", RECIP_MASK_DIV }, -+ { "sqrt", RECIP_MASK_SQRT }, -+ { "rsqrt", RECIP_MASK_RSQRT }, -+ { "vec-div", RECIP_MASK_VEC_DIV }, -+ { "vec-sqrt", RECIP_MASK_VEC_SQRT }, -+ { "vec-rsqrt", RECIP_MASK_VEC_RSQRT }, -+ }; - --inline hashval_t --loongarch_lo_sum_offset_hasher::hash (const loongarch_lo_sum_offset *entry) --{ -- return loongarch_hash_base (entry->base); --} -+ if (loongarch_recip_name) -+ { -+ char *p = ASTRDUP (loongarch_recip_name); -+ char *q; -+ unsigned int mask, i; -+ bool invert; -+ -+ while ((q = strtok (p, ",")) != NULL) -+ { -+ p = NULL; -+ if (*q == '!') -+ { -+ invert = true; -+ q++; -+ } -+ else -+ invert = false; -+ -+ if (!strcmp (q, "default")) -+ mask = RECIP_MASK_ALL; -+ else -+ { -+ for (i = 0; i < ARRAY_SIZE (recip_options); i++) -+ if (!strcmp (q, recip_options[i].string)) -+ { -+ mask = recip_options[i].mask; -+ break; -+ } -+ -+ if (i == ARRAY_SIZE (recip_options)) -+ { -+ error ("unknown option for -mrecip=%s", q); -+ invert = false; -+ mask = RECIP_MASK_NONE; -+ } -+ } -+ -+ if (invert) -+ recip_mask &= ~mask; -+ else -+ recip_mask |= mask; -+ } -+ } -+ if (loongarch_recip) -+ recip_mask |= RECIP_MASK_ALL; - --inline bool --loongarch_lo_sum_offset_hasher::equal (const loongarch_lo_sum_offset *entry, -- const rtx_def *value) --{ -- return rtx_equal_p (entry->base, value); - } - --typedef hash_table loongarch_offset_table; -- - --/* Subroutine of loongarch_reorg to manage passes that require DF. */ -+/* Implement TARGET_OPTION_OVERRIDE. */ - - static void --loongarch_df_reorg (void) -+loongarch_option_override (void) - { -- /* Create def-use chains. */ -- df_set_flags (DF_EQ_NOTES); -- df_chain_add_problem (DF_UD_CHAIN); -- df_analyze (); -- -- if (TARGET_RELAX_PIC_CALLS) -- loongarch_annotate_pic_calls (); -- -- df_finish_pass (false); -+ loongarch_option_override_internal (&global_options, &global_options_set); - } - -- --/* Implement TARGET_MACHINE_DEPENDENT_REORG. */ -+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ - - static void --loongarch_reorg (void) -+loongarch_conditional_register_usage (void) - { -- /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.DF insn info is only kept up -- to date if the CFG is available. */ -- if (loongarch_cfg_in_reorg ()) -- compute_bb_for_insn (); -- if (loongarch_cfg_in_reorg ()) -+ if (!TARGET_HARD_FLOAT) - { -- loongarch_df_reorg (); -- free_bb_for_insn (); -+ AND_COMPL_HARD_REG_SET (accessible_reg_set, -+ reg_class_contents[(int) FP_REGS]); -+ AND_COMPL_HARD_REG_SET (accessible_reg_set, -+ reg_class_contents[(int) FCC_REGS]); - } -+ - } - --/* We use a machine specific pass to do a second machine dependent reorg -- pass after delay branch scheduling. */ -+/* Implement EH_USES. */ - --static unsigned int --loongarch_machine_reorg2 (void) -+bool -+loongarch_eh_uses (unsigned int regno ATTRIBUTE_UNUSED) - { --// loongarch_insert_insn_pseudos (); -- return 0; -+ return false; - } - --namespace { -- --const pass_data pass_data_loongarch_machine_reorg2 = --{ -- RTL_PASS, /* type */ -- "mach2", /* name */ -- OPTGROUP_NONE, /* optinfo_flags */ -- TV_MACH_DEP, /* tv_id */ -- 0, /* properties_required */ -- 0, /* properties_provided */ -- 0, /* properties_destroyed */ -- 0, /* todo_flags_start */ -- 0, /* todo_flags_finish */ --}; -+/* Implement EPILOGUE_USES. 
*/ - --class pass_loongarch_machine_reorg2 : public rtl_opt_pass -+bool -+loongarch_epilogue_uses (unsigned int regno) - { --public: -- pass_loongarch_machine_reorg2(gcc::context *ctxt) -- : rtl_opt_pass(pass_data_loongarch_machine_reorg2, ctxt) -- {} -- -- /* opt_pass methods: */ -- virtual unsigned int execute (function *) { return loongarch_machine_reorg2 (); } -- --}; // class pass_loongarch_machine_reorg2 -- --} // anon namespace -+ /* Say that the epilogue uses the return address register. Note that -+ in the case of sibcalls, the values "used by the epilogue" are -+ considered live at the start of the called function. */ -+ if (regno == RETURN_ADDR_REGNUM) -+ return true; - --rtl_opt_pass * --make_pass_loongarch_machine_reorg2 (gcc::context *ctxt) --{ -- return new pass_loongarch_machine_reorg2 (ctxt); -+ return false; - } - -- --/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text -- in order to avoid duplicating too much logic from elsewhere. */ -- --static void --loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, -- HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, -- tree function) -+bool -+loongarch_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p) - { -- rtx this_rtx, temp1, temp2, fnaddr; -- rtx_insn *insn; -- bool use_sibcall_p; -- -- /* Pretend to be a post-reload pass while generating rtl. */ -- reload_completed = 1; -- -- /* Mark the end of the (empty) prologue. */ -- emit_note (NOTE_INSN_PROLOGUE_END); -- -- /* Determine if we can use a sibcall to call FUNCTION directly. */ -- fnaddr = XEXP (DECL_RTL (function), 0); -- use_sibcall_p = (loongarch_function_ok_for_sibcall (function, NULL) -- && const_call_insn_operand (fnaddr, Pmode)); -- --// /* Determine if we need to load FNADDR from the GOT. */ --// if (!use_sibcall_p --// && (loongarch_got_symbol_type_p --// (loongarch_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA)))) --// { --// /* Pick a global pointer. Use a call-clobbered register if --// TARGET_CALL_SAVED_GP. */ --// cfun->machine->global_pointer --// = GLOBAL_POINTER_REGNUM; --// cfun->machine->must_initialize_gp_p = true; --// SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer); --// --// /* Set up the global pointer for n32 or n64 abicalls. */ --// loongarch_emit_loadgp (); --// } -- -- /* We need two temporary registers in some cases. */ -- temp1 = gen_rtx_REG (Pmode, 12); -- temp2 = gen_rtx_REG (Pmode, 13); -- -- /* Find out which register contains the "this" pointer. */ -- if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) -- this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); -- else -- this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); -- -- /* Add DELTA to THIS_RTX. */ -- if (delta != 0) -- { -- rtx offset = GEN_INT (delta); -- if (!SMALL_OPERAND (delta)) -- { -- loongarch_emit_move (temp1, offset); -- offset = temp1; -- } -- emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); -- } -- -- /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */ -- if (vcall_offset != 0) -- { -- rtx addr; -- -- /* Set TEMP1 to *THIS_RTX. */ -- loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); -- -- /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */ -- addr = loongarch_add_offset (temp2, temp1, vcall_offset); -- -- /* Load the offset and add it to THIS_RTX. 
*/ -- loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); -- emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); -- } -+ rtx reg1, reg2, mem1, mem2, base1, base2; -+ enum reg_class rc1, rc2; -+ HOST_WIDE_INT offset1, offset2; - -- /* Jump to the target function. Use a sibcall if direct jumps are -- allowed, otherwise load the address into a register first. */ -- if (use_sibcall_p) -+ if (load_p) - { -- insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); -- SIBLING_CALL_P (insn) = 1; -+ reg1 = operands[0]; -+ reg2 = operands[2]; -+ mem1 = operands[1]; -+ mem2 = operands[3]; - } - else - { -- loongarch_emit_move (temp1, fnaddr); -- emit_jump_insn (gen_indirect_jump (temp1)); -+ reg1 = operands[1]; -+ reg2 = operands[3]; -+ mem1 = operands[0]; -+ mem2 = operands[2]; - } - -- /* Run just enough of rest_of_compilation. This sequence was -- "borrowed" from alpha.c. */ -- insn = get_insns (); -- split_all_insns_noflow (); -- shorten_branches (insn); -- final_start_function (insn, file, 1); -- final (insn, file, 1); -- final_end_function (); -+ if (loongarch_address_insns (XEXP (mem1, 0), mode, false) == 0 -+ || loongarch_address_insns (XEXP (mem2, 0), mode, false) == 0) -+ return false; - -- /* Clean up the vars set above. Note that final_end_function resets -- the global pointer for us. */ -- reload_completed = 0; --} -- -+ loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); -+ loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); - --/* Allocate a chunk of memory for per-function machine-dependent data. */ -+ /* Base regs do not match. */ -+ if (!REG_P (base1) || !rtx_equal_p (base1, base2)) -+ return false; - --static struct machine_function * --loongarch_init_machine_status (void) --{ -- return ggc_cleared_alloc (); --} -+ /* Either of the loads is clobbering base register. It is legitimate to bond -+ loads if second load clobbers base register. However, hardware does not -+ support such bonding. */ -+ if (load_p -+ && (REGNO (reg1) == REGNO (base1) || (REGNO (reg2) == REGNO (base1)))) -+ return false; - --/* Return the processor associated with the given ISA level, or null -- if the ISA isn't valid. */ -+ /* Loading in same registers. */ -+ if (load_p && REGNO (reg1) == REGNO (reg2)) -+ return false; - --static const struct loongarch_cpu_info * --loongarch_cpu_info_from_isa (int isa) --{ -- unsigned int i; -+ /* The loads/stores are not of same type. */ -+ rc1 = REGNO_REG_CLASS (REGNO (reg1)); -+ rc2 = REGNO_REG_CLASS (REGNO (reg2)); -+ if (rc1 != rc2 && !reg_class_subset_p (rc1, rc2) -+ && !reg_class_subset_p (rc2, rc1)) -+ return false; - -- for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) -- if (loongarch_cpu_info_table[i].isa == isa) -- return loongarch_cpu_info_table + i; -+ if (abs (offset1 - offset2) != GET_MODE_SIZE (mode)) -+ return false; - -- return NULL; -+ return true; - } - --/* Return a loongarch_cpu_info entry determined by an option valued -- OPT. */ -+/* Implement TARGET_TRAMPOLINE_INIT. 
*/ - --static const struct loongarch_cpu_info * --loongarch_cpu_info_from_opt (int opt) -+static void -+loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) - { -- switch (opt) -- { -- case LARCH_ARCH_OPTION_NATIVE: -- gcc_unreachable (); -- -- default: -- return &loongarch_cpu_info_table[opt]; -- } --} -+ rtx addr, end_addr, mem; -+ rtx trampoline[8]; -+ unsigned int i, j; -+ HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset; - --/* Return a default loongarch_cpu_info entry, given that no -march= option -- was explicitly specified. */ -+ /* Work out the offsets of the pointers from the start of the -+ trampoline code. */ -+ end_addr_offset = TRAMPOLINE_CODE_SIZE; -+ static_chain_offset = end_addr_offset; -+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); - --static const struct loongarch_cpu_info * --loongarch_default_arch (void) --{ --#if defined (LARCH_CPU_STRING_DEFAULT) -- unsigned int i; -- for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) -- if (strcmp (loongarch_cpu_info_table[i].name, LARCH_CPU_STRING_DEFAULT) == 0) -- return loongarch_cpu_info_table + i; -- gcc_unreachable (); --#elif defined (LARCH_ISA_DEFAULT) -- return loongarch_cpu_info_from_isa (LARCH_ISA_DEFAULT); --#else -- gcc_unreachable (); --#endif --} -+ /* Get pointers to the beginning and end of the code block. */ -+ addr = force_reg (Pmode, XEXP (m_tramp, 0)); -+ end_addr -+ = loongarch_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset)); - --/* Set up globals to generate code for the ISA or processor -- described by INFO. */ -+#define OP(X) gen_int_mode (X, SImode) - --static void --loongarch_set_architecture (const struct loongarch_cpu_info *info) --{ -- if (info != 0) -- { -- loongarch_arch_info = info; -- loongarch_arch = info->cpu; -- loongarch_isa = info->isa; -- if (loongarch_isa < 32) -- loongarch_isa_rev = 0; -- else -- loongarch_isa_rev = (loongarch_isa & 31) + 1; -- } --} -+ /* Build up the code in TRAMPOLINE. */ -+ i = 0; -+ /*pcaddi $static_chain,0 -+ ld.[dw] $tmp,$static_chain,target_function_offset -+ ld.[dw] $static_chain,$static_chain,static_chain_offset -+ jirl $r0,$tmp,0 */ -+ trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST)); -+ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) -+ | 19 /* $t7 */ -+ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) -+ | ((target_function_offset & 0xfff) << 10)); -+ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) -+ | (STATIC_CHAIN_REGNUM - GP_REG_FIRST) -+ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) -+ | ((static_chain_offset & 0xfff) << 10)); -+ trampoline[i++] = OP (0x4c000000 | (19 << 5)); -+#undef OP - --/* Likewise for tuning. */ -+ for (j = 0; j < i; j++) -+ { -+ mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); -+ loongarch_emit_move (mem, trampoline[j]); -+ } - --static void --loongarch_set_tune (const struct loongarch_cpu_info *info) --{ -- if (info != 0) -- { -- loongarch_tune_info = info; -- loongarch_tune = info->cpu; -- } --} -+ /* Set up the static chain pointer field. */ -+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); -+ loongarch_emit_move (mem, chain_value); - --/* Implement TARGET_OPTION_OVERRIDE. */ -+ /* Set up the target function field. 
*/ -+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset); -+ loongarch_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); - --static void --loongarch_option_override (void) --{ -- int i, start, regno, mode; -+ /* Flush the code part of the trampoline. */ -+ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); -+ emit_insn (gen_clear_cache (addr, end_addr)); -+} - --#ifdef SUBTARGET_OVERRIDE_OPTIONS -- SUBTARGET_OVERRIDE_OPTIONS; --#endif -+/* Generate or test for an insn that supports a constant permutation. */ - -+#define MAX_VECT_LEN 32 - -- /* -mno-float overrides -mhard-float and -msoft-float. */ -- if (TARGET_NO_FLOAT) -- { -- target_flags |= MASK_SOFT_FLOAT_ABI; -- target_flags_explicit |= MASK_SOFT_FLOAT_ABI; -- } -- -- -- /* Set the small data limit. */ -- loongarch_small_data_threshold = (global_options_set.x_g_switch_value -- ? g_switch_value -- : LARCH_DEFAULT_GVALUE); -- -- /* The following code determines the architecture and register size. -- Similar code was added to GAS 2.14 (see tc-loongarch.c:md_after_parse_args()). -- The GAS and GCC code should be kept in sync as much as possible. */ -- -- if (global_options_set.x_loongarch_arch_option) -- loongarch_set_architecture (loongarch_cpu_info_from_opt (loongarch_arch_option)); -+struct expand_vec_perm_d -+{ -+ rtx target, op0, op1; -+ unsigned char perm[MAX_VECT_LEN]; -+ machine_mode vmode; -+ unsigned char nelt; -+ bool one_vector_p; -+ bool testing_p; -+}; - -- if (loongarch_arch_info == 0) -- loongarch_set_architecture (loongarch_default_arch ()); -+/* Construct (set target (vec_select op0 (parallel perm))) and -+ return true if that's a valid instruction in the active ISA. */ - -- /* Optimize for loongarch_arch, unless -mtune selects a different processor. */ -- if (global_options_set.x_loongarch_tune_option) -- loongarch_set_tune (loongarch_cpu_info_from_opt (loongarch_tune_option)); -+static bool -+loongarch_expand_vselect (rtx target, rtx op0, -+ const unsigned char *perm, unsigned nelt) -+{ -+ rtx rperm[MAX_VECT_LEN], x; -+ rtx_insn *insn; -+ unsigned i; - -- if (loongarch_tune_info == 0) -- loongarch_set_tune (loongarch_arch_info); -+ for (i = 0; i < nelt; ++i) -+ rperm[i] = GEN_INT (perm[i]); - -- if ((target_flags_explicit & MASK_64BIT) == 0) -- { -- /* Infer the integer register size from the ABI and processor. -- Restrict ourselves to 32-bit registers if that's all the -- processor has, or if the ABI cannot handle 64-bit registers. */ -- if (loongarch_abi == ABILP32) -- target_flags &= ~MASK_64BIT; -- else -- target_flags |= MASK_64BIT; -- } -+ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm)); -+ x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x); -+ x = gen_rtx_SET (target, x); - -- if ((target_flags_explicit & MASK_FLOAT64) != 0) -- { -- if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64) -- error ("unsupported combination: %s", "-mfp64 -msingle-float"); -- } -- else -+ insn = emit_insn (x); -+ if (recog_memoized (insn) < 0) - { -- /* -msingle-float selects 32-bit float registers. On r6 and later, -- -mdouble-float selects 64-bit float registers, since the old paired -- register model is not supported. In other cases the float registers -- should be the same size as the integer ones. 
*/ -- if (TARGET_64BIT && TARGET_DOUBLE_FLOAT) -- target_flags |= MASK_FLOAT64; -- else if (loongarch_abi == ABILP32 && ISA_HAS_LSX) -- target_flags |= MASK_FLOAT64; -- else -- target_flags &= ~MASK_FLOAT64; -+ remove_insn (insn); -+ return false; - } -+ return true; -+} - -- /* End of code shared with GAS. */ -- -- if (!TARGET_OLDABI) -- flag_pcc_struct_return = 0; -+/* Similar, but generate a vec_concat from op0 and op1 as well. */ - -- /* Decide which rtx_costs structure to use. */ -- if (optimize_size) -- loongarch_cost = &loongarch_rtx_cost_optimize_size; -- else -- loongarch_cost = &loongarch_rtx_cost_data[loongarch_tune]; -+static bool -+loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1, -+ const unsigned char *perm, unsigned nelt) -+{ -+ machine_mode v2mode; -+ rtx x; - -- /* If the user hasn't specified a branch cost, use the processor's -- default. */ -- if (loongarch_branch_cost == 0) -- loongarch_branch_cost = loongarch_cost->branch_cost; -+ if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode)) -+ return false; -+ x = gen_rtx_VEC_CONCAT (v2mode, op0, op1); -+ return loongarch_expand_vselect (target, x, perm, nelt); -+} - -- /* Prefer a call to memcpy over inline code when optimizing for size, -- though see MOVE_RATIO in loongarch.h. */ -- if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0) -- target_flags |= MASK_MEMCPY; -+/* Construct (set target (vec_select op0 (parallel selector))) and -+ return true if that's a valid instruction in the active ISA. */ - -- /* If we have a nonzero small-data limit, check that the -mgpopt -- setting is consistent with the other target flags. */ -- if (loongarch_small_data_threshold > 0) -- { -- if (TARGET_VXWORKS_RTP) -- warning (0, "cannot use small-data accesses for %qs", "-mrtp"); -- } -+static bool -+loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) -+{ -+ rtx x, elts[MAX_VECT_LEN]; -+ rtvec v; -+ rtx_insn *insn; -+ unsigned i; - -- /* Make sure that when ISA_HAS_LSX is true, TARGET_FLOAT64 and -- TARGET_HARD_FLOAT_ABI and both true. */ -- if (ISA_HAS_LSX && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI)) -- error ("%<-mlsx%> must be used with %<-mfp64%> and %<-mhard-float%>"); -+ if (!ISA_HAS_LSX && !ISA_HAS_LASX) -+ return false; - -- /* If TARGET_LASX, enable TARGET_LSX. */ -- if (TARGET_LASX) -- target_flags |= MASK_LSX; -+ for (i = 0; i < d->nelt; i++) -+ elts[i] = GEN_INT (d->perm[i]); - -- /* .cfi_* directives generate a read-only section, so fall back on -- manual .eh_frame creation if we need the section to be writable. */ -- if (TARGET_WRITABLE_EH_FRAME) -- flag_dwarf2_cfi_asm = 0; -+ v = gen_rtvec_v (d->nelt, elts); -+ x = gen_rtx_PARALLEL (VOIDmode, v); - -- loongarch_init_print_operand_punct (); -+ if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) -+ return false; - -- /* Set up array to map GCC register number to debug register number. -- Ignore the special purpose register numbers. 
*/ -+ x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); -+ x = gen_rtx_SET (d->target, x); - -- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) -+ insn = emit_insn (x); -+ if (recog_memoized (insn) < 0) - { -- loongarch_dbx_regno[i] = IGNORED_DWARF_REGNUM; -- if (GP_REG_P (i) || FP_REG_P (i)) -- loongarch_dwarf_regno[i] = i; -- else -- loongarch_dwarf_regno[i] = INVALID_REGNUM; -+ remove_insn (insn); -+ return false; - } -+ return true; -+} - -- start = GP_DBX_FIRST - GP_REG_FIRST; -- for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++) -- loongarch_dbx_regno[i] = i + start; -- -- start = FP_DBX_FIRST - FP_REG_FIRST; -- for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++) -- loongarch_dbx_regno[i] = i + start; -- -- /* Set up loongarch_hard_regno_mode_ok. */ -- for (mode = 0; mode < MAX_MACHINE_MODE; mode++) -- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) -- loongarch_hard_regno_mode_ok_p[mode][regno] -- = loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode); -+/* Try to simplify a two vector permutation using 2 intra-lane interleave -+ insns and cross-lane shuffle for 32-byte vectors. */ - -- /* Function to allocate machine-dependent function status. */ -- init_machine_status = &loongarch_init_machine_status; -- target_flags &= ~MASK_RELAX_PIC_CALLS; -- -- /* We register a second machine specific reorg pass after delay slot -- filling. Registering the pass must be done at start up. It's -- convenient to do it here. */ -- opt_pass *new_pass = make_pass_loongarch_machine_reorg2 (g); -- struct register_pass_info insert_pass_loongarch_machine_reorg2 = -- { -- new_pass, /* pass */ -- "dbr", /* reference_pass_name */ -- 1, /* ref_pass_instance_number */ -- PASS_POS_INSERT_AFTER /* po_op */ -- }; -- register_pass (&insert_pass_loongarch_machine_reorg2); -+static bool -+loongarch_expand_vec_perm_interleave (struct expand_vec_perm_d *d) -+{ -+ unsigned i, nelt; -+ rtx t1,t2,t3; -+ rtx (*gen_high) (rtx, rtx, rtx); -+ rtx (*gen_low) (rtx, rtx, rtx); -+ machine_mode mode = GET_MODE (d->target); - -- loongarch_register_frame_header_opt (); --} -+ if (d->one_vector_p) -+ return false; -+ if (ISA_HAS_LASX && GET_MODE_SIZE (d->vmode) == 32) -+ ; -+ else -+ return false; - -+ nelt = d->nelt; -+ if (d->perm[0] != 0 && d->perm[0] != nelt / 2) -+ return false; -+ for (i = 0; i < nelt; i += 2) -+ if (d->perm[i] != d->perm[0] + i / 2 -+ || d->perm[i + 1] != d->perm[0] + i / 2 + nelt) -+ return false; - --/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ -+ if (d->testing_p) -+ return true; - --static void --loongarch_conditional_register_usage (void) --{ -- if (!TARGET_HARD_FLOAT) -+ switch (d->vmode) - { -- AND_COMPL_HARD_REG_SET (accessible_reg_set, -- reg_class_contents[(int) FP_REGS]); -- AND_COMPL_HARD_REG_SET (accessible_reg_set, -- reg_class_contents[(int) ST_REGS]); -+ case E_V32QImode: -+ gen_high = gen_lasx_xvilvh_b; -+ gen_low = gen_lasx_xvilvl_b; -+ break; -+ case E_V16HImode: -+ gen_high = gen_lasx_xvilvh_h; -+ gen_low = gen_lasx_xvilvl_h; -+ break; -+ case E_V8SImode: -+ gen_high = gen_lasx_xvilvh_w; -+ gen_low = gen_lasx_xvilvl_w; -+ break; -+ case E_V4DImode: -+ gen_high = gen_lasx_xvilvh_d; -+ gen_low = gen_lasx_xvilvl_d; -+ break; -+ case E_V8SFmode: -+ gen_high = gen_lasx_xvilvh_w_f; -+ gen_low = gen_lasx_xvilvl_w_f; -+ break; -+ case E_V4DFmode: -+ gen_high = gen_lasx_xvilvh_d_f; -+ gen_low = gen_lasx_xvilvl_d_f; -+ break; -+ default: -+ gcc_unreachable (); - } --} - --/* Implement EH_USES. 
*/ -- --bool --loongarch_eh_uses (unsigned int regno) --{ -- return false; -+ t1 = gen_reg_rtx (mode); -+ t2 = gen_reg_rtx (mode); -+ emit_insn (gen_high (t1, d->op0, d->op1)); -+ emit_insn (gen_low (t2, d->op0, d->op1)); -+ if(mode == V4DFmode || mode == V8SFmode) -+ { -+ t3 = gen_reg_rtx (V4DFmode); -+ if (d->perm[0]) -+ emit_insn(gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1), -+ gen_lowpart (V4DFmode, t2),GEN_INT(0x31))); -+ else -+ emit_insn(gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1), -+ gen_lowpart (V4DFmode, t2),GEN_INT(0x20))); -+ } -+ else -+ { -+ t3 = gen_reg_rtx (V4DImode); -+ if (d->perm[0]) -+ emit_insn(gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1), -+ gen_lowpart (V4DImode, t2),GEN_INT(0x31))); -+ else -+ emit_insn(gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1), -+ gen_lowpart (V4DImode, t2),GEN_INT(0x20))); -+ } -+ emit_move_insn (d->target, gen_lowpart (mode, t3)); -+ return true; - } - --/* Implement EPILOGUE_USES. */ -+/* Implement extract-even and extract-odd permutations.*/ - --bool --loongarch_epilogue_uses (unsigned int regno) -+static bool -+loongarch_expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd) - { -- /* Say that the epilogue uses the return address register. Note that -- in the case of sibcalls, the values "used by the epilogue" are -- considered live at the start of the called function. */ -- if (regno == RETURN_ADDR_REGNUM) -- return true; -+ rtx t1; -+ machine_mode mode = GET_MODE (d->target); -+ t1 = gen_reg_rtx (mode); - -- /* An interrupt handler must preserve some registers that are -- ordinarily call-clobbered. */ -- if (cfun->machine->interrupt_handler_p -- && loongarch_interrupt_extra_call_saved_reg_p (regno)) -+ if (d->testing_p) - return true; - -- return false; --} -+ switch (d->vmode) -+ { -+ case E_V4DFmode: -+ /* Shuffle the lanes around into { 0 4 2 6 } and { 1 5 3 7 }. */ -+ if (odd) -+ emit_insn (gen_lasx_xvilvh_d_f (t1, d->op0, d->op1)); -+ else -+ emit_insn (gen_lasx_xvilvl_d_f (t1, d->op0, d->op1)); - --/* Return true if MEM1 and MEM2 use the same base register, and the -- offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the -- register into (from) which the contents of MEM1 will be loaded -- (stored), depending on the value of LOAD_P. -- SWAP_P is true when the 1st and 2nd instructions are swapped. */ -+ /* Shuffle within the 256-bit lanes to produce the result required. -+ { 0 2 4 6 } | { 1 3 5 7 }. */ -+ emit_insn (gen_lasx_xvpermi_d_v4df (d->target, t1, GEN_INT (0xd8))); -+ break; - --static bool --loongarch_load_store_pair_p_1 (bool load_p, bool swap_p, -- rtx first_reg, rtx mem1, rtx mem2) --{ -- rtx base1, base2; -- HOST_WIDE_INT offset1, offset2; -+ case E_V4DImode: -+ if (odd) -+ emit_insn (gen_lasx_xvilvh_d (t1, d->op0, d->op1)); -+ else -+ emit_insn (gen_lasx_xvilvl_d (t1, d->op0, d->op1)); - -- if (!MEM_P (mem1) || !MEM_P (mem2)) -- return false; -+ emit_insn (gen_lasx_xvpermi_d_v4di (d->target, t1, GEN_INT (0xd8))); -+ break; - -- loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); -- loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); -+ case E_V8SFmode: -+ /* Shuffle the lanes around into: -+ { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */ -+ if (odd) -+ emit_insn (gen_lasx_xvpickod_w_f (t1, d->op0, d->op1)); -+ else -+ emit_insn (gen_lasx_xvpickev_w_f (t1, d->op0, d->op1)); - -- if (!REG_P (base1) || !rtx_equal_p (base1, base2)) -- return false; -+ /* Shuffle within the 256-bit lanes to produce the result required. 
-+ { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */ -+ emit_insn (gen_lasx_xvpermi_d_v8sf (d->target, t1, GEN_INT (0xd8))); -+ break; - -- /* Avoid invalid load pair instructions. */ -- if (load_p && REGNO (first_reg) == REGNO (base1)) -- return false; -+ case E_V8SImode: -+ if (odd) -+ emit_insn (gen_lasx_xvpickod_w (t1, d->op0, d->op1)); -+ else -+ emit_insn (gen_lasx_xvpickev_w (t1, d->op0, d->op1)); - -- /* We must avoid this case for anti-dependence. -- Ex: lw $3, 4($3) -- lw $2, 0($3) -- first_reg is $2, but the base is $3. */ -- if (load_p -- && swap_p -- && REGNO (first_reg) + 1 == REGNO (base1)) -- return false; -+ emit_insn (gen_lasx_xvpermi_d_v8si (d->target, t1, GEN_INT (0xd8))); -+ break; - -- if (offset2 != offset1 + 4) -- return false; -+ case E_V16HImode: -+ if (odd) -+ emit_insn (gen_lasx_xvpickod_h (t1, d->op0, d->op1)); -+ else -+ emit_insn (gen_lasx_xvpickev_h (t1, d->op0, d->op1)); - -- if (!ULARCH_12BIT_OFFSET_P (offset1)) -- return false; -+ emit_insn (gen_lasx_xvpermi_d_v16hi (d->target, t1, GEN_INT (0xd8))); -+ break; - -- return true; --} -+ case E_V32QImode: -+ if (odd) -+ emit_insn (gen_lasx_xvpickod_b (t1, d->op0, d->op1)); -+ else -+ emit_insn (gen_lasx_xvpickev_b (t1, d->op0, d->op1)); - --bool --loongarch_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p) --{ -- rtx reg1, reg2, mem1, mem2, base1, base2; -- enum reg_class rc1, rc2; -- HOST_WIDE_INT offset1, offset2; -+ emit_insn (gen_lasx_xvpermi_d_v32qi (d->target, t1, GEN_INT (0xd8))); -+ break; - -- if (load_p) -- { -- reg1 = operands[0]; -- reg2 = operands[2]; -- mem1 = operands[1]; -- mem2 = operands[3]; -- } -- else -- { -- reg1 = operands[1]; -- reg2 = operands[3]; -- mem1 = operands[0]; -- mem2 = operands[2]; -+ default: -+ gcc_unreachable (); - } - -- if (loongarch_address_insns (XEXP (mem1, 0), mode, false) == 0 -- || loongarch_address_insns (XEXP (mem2, 0), mode, false) == 0) -- return false; -- -- loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); -- loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); -- -- /* Base regs do not match. */ -- if (!REG_P (base1) || !rtx_equal_p (base1, base2)) -- return false; -+ return true; -+} - -- /* Either of the loads is clobbering base register. It is legitimate to bond -- loads if second load clobbers base register. However, hardware does not -- support such bonding. */ -- if (load_p -- && (REGNO (reg1) == REGNO (base1) -- || (REGNO (reg2) == REGNO (base1)))) -- return false; -+/* Pattern match extract-even and extract-odd permutations. */ - -- /* Loading in same registers. */ -- if (load_p -- && REGNO (reg1) == REGNO (reg2)) -+static bool -+loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d) -+{ -+ unsigned i, odd, nelt = d->nelt; -+ if(!ISA_HAS_LASX) - return false; - -- /* The loads/stores are not of same type. */ -- rc1 = REGNO_REG_CLASS (REGNO (reg1)); -- rc2 = REGNO_REG_CLASS (REGNO (reg2)); -- if (rc1 != rc2 -- && !reg_class_subset_p (rc1, rc2) -- && !reg_class_subset_p (rc2, rc1)) -+ odd = d->perm[0]; -+ if (odd != 0 && odd != 1) - return false; - -- if (abs (offset1 - offset2) != GET_MODE_SIZE (mode)) -- return false; -+ for (i = 1; i < nelt; ++i) -+ if (d->perm[i] != 2 * i + odd) -+ return false; - -- return true; -+ return loongarch_expand_vec_perm_even_odd_1 (d, odd); - } - --/* OPERANDS describes the operands to a pair of SETs, in the order -- dest1, src1, dest2, src2. Return true if the operands can be used -- in an LWP or SWP instruction; LOAD_P says which. 
*/ -+/* Expand a variable vector permutation for LASX. */ - --bool --loongarch_load_store_pair_p (bool load_p, rtx *operands) -+void -+loongarch_expand_vec_perm_1 (rtx operands[]) - { -- rtx reg1, reg2, mem1, mem2; -+ rtx target = operands[0]; -+ rtx op0 = operands[1]; -+ rtx op1 = operands[2]; -+ rtx mask = operands[3]; -+ bool one_operand_shuffle = rtx_equal_p (op0, op1); -+ rtx t1, t2, t3, t4, t5, t6, vt, vec[32]; -+ machine_mode mode = GET_MODE (op0); -+ machine_mode maskmode = GET_MODE (mask); -+ int w, i; -+ -+ /* Number of elements in the vector. */ -+ w = GET_MODE_NUNITS (mode); -+ -+ if (mode == V4DImode || mode == V4DFmode) -+ { -+ maskmode = mode = V8SImode; -+ w = 8; -+ t1 = gen_reg_rtx (maskmode); -+ -+ /* Replicate the low bits of the V4DImode mask into V8SImode: -+ mask = { A B C D } -+ t1 = { A A B B C C D D }. */ -+ for (i = 0; i < w / 2; ++i) -+ vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2); -+ vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec)); -+ vt = force_reg (maskmode, vt); -+ mask = gen_lowpart (maskmode, mask); -+ emit_insn (gen_lasx_xvperm_w (t1, mask, vt)); -+ -+ /* Multiply the shuffle indicies by two. */ -+ t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1, -+ OPTAB_DIRECT); -+ -+ /* Add one to the odd shuffle indicies: -+ t1 = { A*2, A*2+1, B*2, B*2+1, ... }. */ -+ for (i = 0; i < w / 2; ++i) -+ { -+ vec[i * 2] = const0_rtx; -+ vec[i * 2 + 1] = const1_rtx; -+ } -+ vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec)); -+ vt = validize_mem (force_const_mem (maskmode, vt)); -+ t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1, -+ OPTAB_DIRECT); - -- if (load_p) -- { -- reg1 = operands[0]; -- reg2 = operands[2]; -- mem1 = operands[1]; -- mem2 = operands[3]; -+ /* Continue as if V8SImode (resp. V32QImode) was used initially. 
*/ -+ operands[3] = mask = t1; -+ target = gen_reg_rtx (mode); -+ op0 = gen_lowpart (mode, op0); -+ op1 = gen_lowpart (mode, op1); - } -- else -+ switch (mode) - { -- reg1 = operands[1]; -- reg2 = operands[3]; -- mem1 = operands[0]; -- mem2 = operands[2]; -+ case E_V8SImode: -+ if (one_operand_shuffle) -+ { -+ emit_insn (gen_lasx_xvperm_w (target, op0, mask)); -+ if (target != operands[0]) -+ emit_move_insn (operands[0], -+ gen_lowpart (GET_MODE (operands[0]), target)); -+ } -+ else -+ { -+ t1 = gen_reg_rtx (V8SImode); -+ t2 = gen_reg_rtx (V8SImode); -+ emit_insn (gen_lasx_xvperm_w (t1, op0, mask)); -+ emit_insn (gen_lasx_xvperm_w (t2, op1, mask)); -+ goto merge_two; -+ } -+ return; -+ -+ case E_V8SFmode: -+ mask = gen_lowpart (V8SImode, mask); -+ if (one_operand_shuffle) -+ emit_insn (gen_lasx_xvperm_w_f (target, op0, mask)); -+ else -+ { -+ t1 = gen_reg_rtx (V8SFmode); -+ t2 = gen_reg_rtx (V8SFmode); -+ emit_insn (gen_lasx_xvperm_w_f (t1, op0, mask)); -+ emit_insn (gen_lasx_xvperm_w_f (t2, op1, mask)); -+ goto merge_two; -+ } -+ return; -+ -+ case E_V16HImode: -+ if (one_operand_shuffle) -+ { -+ t1 = gen_reg_rtx (V16HImode); -+ t2 = gen_reg_rtx (V16HImode); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (t1, op0, GEN_INT(0x44))); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (t2, op0, GEN_INT(0xee))); -+ emit_insn (gen_lasx_xvshuf_h (target, mask, t2, t1)); -+ } -+ else -+ { -+ t1 = gen_reg_rtx (V16HImode); -+ t2 = gen_reg_rtx (V16HImode); -+ t3 = gen_reg_rtx (V16HImode); -+ t4 = gen_reg_rtx (V16HImode); -+ t5 = gen_reg_rtx (V16HImode); -+ t6 = gen_reg_rtx (V16HImode); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (t3, op0, GEN_INT(0x44))); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (t4, op0, GEN_INT(0xee))); -+ emit_insn (gen_lasx_xvshuf_h (t1, mask, t4, t3)); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (t5, op1, GEN_INT(0x44))); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (t6, op1, GEN_INT(0xee))); -+ emit_insn (gen_lasx_xvshuf_h (t2, mask, t6, t5)); -+ goto merge_two; -+ } -+ return; -+ -+ case E_V32QImode: -+ if (one_operand_shuffle) -+ { -+ t1 = gen_reg_rtx (V32QImode); -+ t2 = gen_reg_rtx (V32QImode); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (t1, op0, GEN_INT(0x44))); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (t2, op0, GEN_INT(0xee))); -+ emit_insn (gen_lasx_xvshuf_b (target, t2, t1, mask)); -+ } -+ else -+ { -+ t1 = gen_reg_rtx (V32QImode); -+ t2 = gen_reg_rtx (V32QImode); -+ t3 = gen_reg_rtx (V32QImode); -+ t4 = gen_reg_rtx (V32QImode); -+ t5 = gen_reg_rtx (V32QImode); -+ t6 = gen_reg_rtx (V32QImode); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (t3, op0, GEN_INT(0x44))); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (t4, op0, GEN_INT(0xee))); -+ emit_insn (gen_lasx_xvshuf_b (t1, t4, t3, mask)); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (t5, op1, GEN_INT(0x44))); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (t6, op1, GEN_INT(0xee))); -+ emit_insn (gen_lasx_xvshuf_b (t2, t6, t5, mask)); -+ goto merge_two; -+ } -+ return; -+ -+ default: -+ gcc_assert (GET_MODE_SIZE (mode) == 32); -+ break; - } - -- if (REGNO (reg2) == REGNO (reg1) + 1) -- return loongarch_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2); -+merge_two: -+ /* Then merge them together. The key is whether any given control -+ element contained a bit set that indicates the second word. 
*/ -+ rtx xops[6]; -+ mask = operands[3]; -+ vt = GEN_INT (w); -+ vt = gen_const_vec_duplicate (maskmode, vt); -+ vt = force_reg (maskmode, vt); -+ if (GET_MODE (target) != mode) -+ target = gen_reg_rtx (mode); -+ xops[0] = target; -+ xops[1] = gen_lowpart (mode, t2); -+ xops[2] = gen_lowpart (mode, t1); -+ xops[3] = gen_rtx_GE (maskmode, mask, vt); -+ xops[4] = mask; -+ xops[5] = vt; -+ -+ loongarch_expand_vec_cond_expr (mode, maskmode, xops); -+ if (target != operands[0]) -+ emit_move_insn (operands[0], -+ gen_lowpart (GET_MODE (operands[0]), target)); -+} - -- if (REGNO (reg1) == REGNO (reg2) + 1) -- return loongarch_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1); -+void -+loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) -+{ -+ machine_mode vmode = GET_MODE (target); - -- return false; -+ gcc_checking_assert (vmode == E_V16QImode -+ || vmode == E_V2DImode || vmode == E_V2DFmode -+ || vmode == E_V4SImode || vmode == E_V4SFmode -+ || vmode == E_V8HImode); -+ gcc_checking_assert (GET_MODE (op0) == vmode); -+ gcc_checking_assert (GET_MODE (op1) == vmode); -+ gcc_checking_assert (GET_MODE (sel) == vmode); -+ gcc_checking_assert (ISA_HAS_LSX); -+ -+ switch (vmode) -+ { -+ case E_V16QImode: -+ emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel)); -+ break; -+ case E_V2DFmode: -+ emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0)); -+ break; -+ case E_V2DImode: -+ emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0)); -+ break; -+ case E_V4SFmode: -+ emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0)); -+ break; -+ case E_V4SImode: -+ emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0)); -+ break; -+ case E_V8HImode: -+ emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0)); -+ break; -+ default: -+ break; -+ } - } - --/* Return true if REG1 and REG2 match the criteria for a movep insn. */ -+static bool -+loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d) -+{ -+ int i; -+ rtx target, op0, op1, sel, tmp; -+ rtx rperm[MAX_VECT_LEN]; - --bool --loongarch_movep_target_p (rtx reg1, rtx reg2) --{ -- int regno1, regno2, pair; -- unsigned int i; -- static const int match[8] = { -- 0x00000060, /* 5, 6 */ -- 0x000000a0, /* 5, 7 */ -- 0x000000c0, /* 6, 7 */ -- 0x00200010, /* 4, 21 */ -- 0x00400010, /* 4, 22 */ -- 0x00000030, /* 4, 5 */ -- 0x00000050, /* 4, 6 */ -- 0x00000090 /* 4, 7 */ -- }; -- -- if (!REG_P (reg1) || !REG_P (reg2)) -- return false; -+ if (d->vmode == E_V2DImode || d->vmode == E_V2DFmode -+ || d->vmode == E_V4SImode || d->vmode == E_V4SFmode -+ || d->vmode == E_V8HImode || d->vmode == E_V16QImode) -+ { -+ target = d->target; -+ op0 = d->op0; -+ op1 = d->one_vector_p ? 
d->op0 : d->op1; - -- regno1 = REGNO (reg1); -- regno2 = REGNO (reg2); -+ if (GET_MODE (op0) != GET_MODE (op1) -+ || GET_MODE (op0) != GET_MODE (target)) -+ return false; - -- if (!GP_REG_P (regno1) || !GP_REG_P (regno2)) -- return false; -+ if (d->testing_p) -+ return true; - -- pair = (1 << regno1) | (1 << regno2); -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ rperm[i] = GEN_INT (d->perm[i]); -+ } - -- for (i = 0; i < ARRAY_SIZE (match); i++) -- if (pair == match[i]) -- return true; -+ if (d->vmode == E_V2DFmode) -+ { -+ sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm)); -+ tmp = gen_rtx_SUBREG (E_V2DImode, d->target, 0); -+ emit_move_insn (tmp, sel); -+ } -+ else if (d->vmode == E_V4SFmode) -+ { -+ sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm)); -+ tmp = gen_rtx_SUBREG (E_V4SImode, d->target, 0); -+ emit_move_insn (tmp, sel); -+ } -+ else -+ { -+ sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm)); -+ emit_move_insn (d->target, sel); -+ } -+ -+ switch (d->vmode) -+ { -+ case E_V2DFmode: -+ emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0)); -+ break; -+ case E_V2DImode: -+ emit_insn (gen_lsx_vshuf_d (target, target, op1, op0)); -+ break; -+ case E_V4SFmode: -+ emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0)); -+ break; -+ case E_V4SImode: -+ emit_insn (gen_lsx_vshuf_w (target, target, op1, op0)); -+ break; -+ case E_V8HImode: -+ emit_insn (gen_lsx_vshuf_h (target, target, op1, op0)); -+ break; -+ case E_V16QImode: -+ emit_insn (gen_lsx_vshuf_b (target, op1, op0, target)); -+ break; -+ default: -+ break; -+ } - -+ return true; -+ } - return false; - } -- --/* Return the size in bytes of the trampoline code, padded to -- TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target -- function address immediately follow. */ - --int --loongarch_trampoline_code_size (void) -+static bool -+loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) - { -- return 4 * 4; --} -+ unsigned int i, nelt = d->nelt; -+ unsigned char perm2[MAX_VECT_LEN]; - --/* Implement TARGET_TRAMPOLINE_INIT. */ -+ if (d->one_vector_p) -+ { -+ /* Try interleave with alternating operands. */ -+ memcpy (perm2, d->perm, sizeof(perm2)); -+ for (i = 1; i < nelt; i += 2) -+ perm2[i] += nelt; -+ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt)) -+ return true; -+ } -+ else -+ { -+ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, -+ d->perm, nelt)) -+ return true; - --static void --loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) --{ -- rtx addr, end_addr, high, low, opcode, mem; -- rtx trampoline[8]; -- unsigned int i, j; -- HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset; -+ /* Try again with swapped operands. */ -+ for (i = 0; i < nelt; ++i) -+ perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); -+ if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt)) -+ return true; -+ } - -- /* Work out the offsets of the pointers from the start of the -- trampoline code. */ -- end_addr_offset = loongarch_trampoline_code_size (); -- static_chain_offset = end_addr_offset; -- target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); -+ if (loongarch_expand_lsx_shuffle (d)) -+ return true; -+ if (loongarch_expand_vec_perm_even_odd(d)) -+ return true; -+ if (loongarch_expand_vec_perm_interleave(d)) -+ return true; -+ return false; -+} - -- /* Get pointers to the beginning and end of the code block. 
*/ -- addr = force_reg (Pmode, XEXP (m_tramp, 0)); -- end_addr = loongarch_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset)); -+// Following are the assist function for const vector permutation support. -+static bool -+loongarch_is_quad_duplicate (struct expand_vec_perm_d *d) -+{ -+ if (d->perm[0] >= d->nelt / 2) -+ return false; - --#define OP(X) gen_int_mode (X, SImode) -+ bool result = true; -+ unsigned char lhs = d->perm[0]; -+ unsigned char rhs = d->perm[d->nelt / 2]; - -- /* Build up the code in TRAMPOLINE. */ -- i = 0; -- /* -- pcaddi $static_chain,0 -- ld.[dw] $tmp,$static_chain,target_function_offset -- ld.[dw] $static_chain,$static_chain,static_chain_offset -- jirl $r0,$tmp,0 -- */ -- trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST)); -- trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) -- | 19 /* $t7 */ -- | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) -- | ((target_function_offset & 0xfff) << 10)); -- trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) -- | (STATIC_CHAIN_REGNUM - GP_REG_FIRST) -- | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) -- | ((static_chain_offset & 0xfff) << 10)); -- trampoline[i++] = OP (0x4c000000 | (19 << 5)); --#undef OP -+ if ((rhs - lhs) != d->nelt / 2) -+ return false; - -- for (j = 0; j < i; j++) -+ for (int i = 1; i < d->nelt; i += 1) - { -- mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); -- loongarch_emit_move (mem, trampoline[j]); -+ if ((i < d->nelt / 2) && (d->perm[i] != lhs)) -+ { -+ result = false; -+ break; -+ } -+ if ((i > d->nelt / 2) && (d->perm[i] != rhs)) -+ { -+ result = false; -+ break; -+ } - } - -- /* Set up the static chain pointer field. */ -- mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); -- loongarch_emit_move (mem, chain_value); -- -- /* Set up the target function field. */ -- mem = adjust_address (m_tramp, ptr_mode, target_function_offset); -- loongarch_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); -- -- /* Flush the code part of the trampoline. */ -- emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); -- emit_insn (gen_clear_cache (addr, end_addr)); -+ return result; - } - -- --/* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default -- behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even -- when TARGET_LOONGSON_MMI is true. */ -- --static unsigned HOST_WIDE_INT --loongarch_shift_truncation_mask (machine_mode mode) -+static bool -+loongarch_is_double_duplicate (struct expand_vec_perm_d *d) - { -- return GET_MODE_BITSIZE (mode) - 1; --} -+ if (!d->one_vector_p) -+ return false; - -- --/* Generate or test for an insn that supports a constant permutation. */ -+ if (d->nelt < 8) -+ return false; - --#define MAX_VECT_LEN 32 -+ bool result = true; -+ unsigned char buf = d->perm[0]; - --struct expand_vec_perm_d --{ -- rtx target, op0, op1; -- unsigned char perm[MAX_VECT_LEN]; -- machine_mode vmode; -- unsigned char nelt; -- bool one_vector_p; -- bool testing_p; --}; -+ for (int i = 1; i < d->nelt; i += 2) -+ { -+ if (d->perm[i] != buf) -+ { -+ result = false; -+ break; -+ } -+ if (d->perm[i - 1] != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += d->nelt / 4; -+ } - --/* Construct (set target (vec_select op0 (parallel perm))) and -- return true if that's a valid instruction in the active ISA. 
*/ -+ return result; -+} - - static bool --loongarch_expand_vselect (rtx target, rtx op0, -- const unsigned char *perm, unsigned nelt) -+loongarch_is_odd_extraction (struct expand_vec_perm_d *d) - { -- rtx rperm[MAX_VECT_LEN], x; -- rtx_insn *insn; -- unsigned i; -+ bool result = true; -+ unsigned char buf = 1; - -- for (i = 0; i < nelt; ++i) -- rperm[i] = GEN_INT (perm[i]); -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 2; -+ } - -- x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm)); -- x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x); -- x = gen_rtx_SET (target, x); -+ return result; -+} - -- insn = emit_insn (x); -- if (recog_memoized (insn) < 0) -+static bool -+loongarch_is_even_extraction (struct expand_vec_perm_d *d) -+{ -+ bool result = true; -+ unsigned char buf = 0; -+ -+ for (int i = 0; i < d->nelt; i += 1) - { -- remove_insn (insn); -- return false; -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 2; - } -- return true; --} - --/* Similar, but generate a vec_concat from op0 and op1 as well. */ -+ return result; -+} - - static bool --loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1, -- const unsigned char *perm, unsigned nelt) -+loongarch_is_extraction_permutation (struct expand_vec_perm_d *d) - { -- machine_mode v2mode; -- rtx x; -+ bool result = true; -+ unsigned char buf = d->perm[0]; - -- if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode)) -+ if (buf != 0 || buf != d->nelt) - return false; -- x = gen_rtx_VEC_CONCAT (v2mode, op0, op1); -- return loongarch_expand_vselect (target, x, perm, nelt); --} - --/* Construct (set target (vec_select op0 (parallel selector))) and -- return true if that's a valid instruction in the active ISA. 
*/ -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ -+ return result; -+} - - static bool --loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) -+loongarch_is_center_extraction (struct expand_vec_perm_d *d) - { -- rtx x, elts[MAX_VECT_LEN]; -- rtvec v; -- rtx_insn *insn; -- unsigned i; -+ bool result = true; -+ unsigned buf = d->nelt / 2; - -- if (!ISA_HAS_LSX && !ISA_HAS_LASX) -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_reversing_permutation (struct expand_vec_perm_d *d) -+{ -+ if (!d->one_vector_p) - return false; - -- for (i = 0; i < d->nelt; i++) -- elts[i] = GEN_INT (d->perm[i]); -+ bool result = true; -+ unsigned char buf = d->nelt - 1; - -- v = gen_rtvec_v (d->nelt, elts); -- x = gen_rtx_PARALLEL (VOIDmode, v); -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (d->perm[i] != buf) -+ { -+ result = false; -+ break; -+ } - -- if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) -+ buf -= 1; -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_di_misalign_extract (struct expand_vec_perm_d *d) -+{ -+ if (d->nelt != 4 && d->nelt != 8) - return false; - -- x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); -- x = gen_rtx_SET (d->target, x); -+ bool result = true; -+ unsigned char buf; - -- insn = emit_insn (x); -- if (recog_memoized (insn) < 0) -+ if (d->nelt == 4) - { -- remove_insn (insn); -- return false; -+ buf = 1; -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ -+ buf += 1; -+ } -+ } -+ else if (d->nelt == 8) -+ { -+ buf = 2; -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ -+ buf += 1; -+ } -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_si_misalign_extract (struct expand_vec_perm_d *d) -+{ -+ if (d->vmode != E_V8SImode && d->vmode != E_V8SFmode) -+ return false; -+ bool result = true; -+ unsigned char buf = 1; -+ -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_lasx_lowpart_interleave (struct expand_vec_perm_d *d) -+{ -+ bool result = true; -+ unsigned char buf = 0; -+ -+ for (int i = 0;i < d->nelt; i += 2) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ -+ if (result) -+ { -+ buf = d->nelt; -+ for (int i = 1; i < d->nelt; i += 2) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_lasx_lowpart_interleave_2 (struct expand_vec_perm_d *d) -+{ -+ if (d->vmode != E_V32QImode) -+ return false; -+ bool result = true; -+ unsigned char buf = 0; -+ -+#define COMPARE_SELECTOR(INIT, BEGIN, END) \ -+ buf = INIT; \ -+ for (int i = BEGIN; i < END && result; i += 1) \ -+ { \ -+ if (buf != d->perm[i]) \ -+ { \ -+ result = false; \ -+ break; \ -+ } \ -+ buf += 1; \ -+ } -+ -+ COMPARE_SELECTOR (0, 0, 8); -+ COMPARE_SELECTOR (32, 8, 16); -+ COMPARE_SELECTOR (8, 16, 24); -+ COMPARE_SELECTOR (40, 24, 32); -+ -+#undef COMPARE_SELECTOR -+ return result; -+} -+ -+static bool -+loongarch_is_lasx_lowpart_extract (struct expand_vec_perm_d *d) -+{ -+ bool result = true; -+ unsigned char buf = 0; -+ -+ for (int i = 0; i < d->nelt / 2; i += 1) -+ { -+ if (buf != 
d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ -+ if (result) -+ { -+ buf = d->nelt; -+ for (int i = d->nelt / 2; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_lasx_highpart_interleave (expand_vec_perm_d *d) -+{ -+ bool result = true; -+ unsigned char buf = d->nelt / 2; -+ -+ for (int i = 0; i < d->nelt; i += 2) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ -+ if (result) -+ { -+ buf = d->nelt + d->nelt / 2; -+ for (int i = 1; i < d->nelt;i += 2) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ buf += 1; -+ } -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_lasx_highpart_interleave_2 (struct expand_vec_perm_d *d) -+{ -+ if (d->vmode != E_V32QImode) -+ return false; -+ -+ bool result = true; -+ unsigned char buf = 0; -+ -+#define COMPARE_SELECTOR(INIT, BEGIN, END) \ -+ buf = INIT; \ -+ for (int i = BEGIN; i < END && result; i += 1) \ -+ { \ -+ if (buf != d->perm[i]) \ -+ { \ -+ result = false; \ -+ break; \ -+ } \ -+ buf += 1; \ -+ } -+ -+ COMPARE_SELECTOR (16, 0, 8); -+ COMPARE_SELECTOR (48, 8, 16); -+ COMPARE_SELECTOR (24, 16, 24); -+ COMPARE_SELECTOR (56, 24, 32); -+ -+#undef COMPARE_SELECTOR -+ return result; -+} -+ -+static bool -+loongarch_is_elem_duplicate (struct expand_vec_perm_d *d) -+{ -+ bool result = true; -+ unsigned char buf = d->perm[0]; -+ -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (buf != d->perm[i]) -+ { -+ result = false; -+ break; -+ } -+ } -+ -+ return result; -+} -+ -+inline bool -+loongarch_is_op_reverse_perm (struct expand_vec_perm_d *d) -+{ -+ return (d->vmode == E_V4DFmode) -+ && d->perm[0] == 2 && d->perm[1] == 3 -+ && d->perm[2] == 0 && d->perm[3] == 1; -+} -+ -+static bool -+loongarch_is_single_op_perm (struct expand_vec_perm_d *d) -+{ -+ bool result = true; -+ -+ for (int i = 0; i < d->nelt; i += 1) -+ { -+ if (d->perm[i] >= d->nelt) -+ { -+ result = false; -+ break; -+ } -+ } -+ -+ return result; -+} -+ -+static bool -+loongarch_is_divisible_perm (struct expand_vec_perm_d *d) -+{ -+ bool result = true; -+ -+ for (int i = 0; i < d->nelt / 2; i += 1) -+ { -+ if (d->perm[i] >= d->nelt) -+ { -+ result = false; -+ break; -+ } -+ } -+ -+ if (result) -+ { -+ for (int i = d->nelt / 2; i < d->nelt; i += 1) -+ { -+ if (d->perm[i] < d->nelt) -+ { -+ result = false; -+ break; -+ } -+ } -+ } -+ -+ return result; -+} -+ -+inline bool -+loongarch_is_triple_stride_extract (struct expand_vec_perm_d *d) -+{ -+ return (d->vmode == E_V4DImode || d->vmode == E_V4DFmode) -+ && d->perm[0] == 1 && d->perm[1] == 4 -+ && d->perm[2] == 7 && d->perm[3] == 0; -+} -+ -+/* In LASX, xvshuf.* insn does not have the behavior that gcc expects when -+ * compiler wants to emit a vector permutation. -+ * -+ * 1. What GCC provides via vectorize_vec_perm_const()'s paramater: -+ * When GCC wants to performs a vector permutation, it provides two op -+ * reigster, one target register, and a selector. -+ * In const vector permutation case, GCC provides selector as a char array -+ * that contains original value; in variable vector permuatation -+ * (performs via vec_perm insn template), it provides a vector register. -+ * We assume that nelt is the elements numbers inside single vector in current -+ * 256bit vector mode. -+ * -+ * 2. 
What GCC expects to perform:
-+ * Two op registers (op0, op1) are "combined" into a 512-bit temp vector
-+ * storage that holds 2*nelt elements; the low 256 bits are op0 and the
-+ * high 256 bits are op1, with the elements indexed as below:
-+ * 0 ~ nelt - 1 nelt ~ 2 * nelt - 1
-+ * |-------------------------|-------------------------|
-+ * Low 256bit (op0) High 256bit (op1)
-+ * For example, the second element of op1 (V8SImode) is indexed with 9.
-+ * The selector is a vector with the same mode and number of elements as
-+ * op0, op1 and target; it looks like this:
-+ * 0 ~ nelt - 1
-+ * |-------------------------|
-+ * 256bit (selector)
-+ * It describes which element of the 512-bit temp vector storage goes into
-+ * each element slot of the target.
-+ * GCC expects that every element of the selector may be ANY index into the
-+ * 512-bit vector storage (the selector can pick literally any element from
-+ * op0 and op1 and place it anywhere in the target register). The LSX
-+ * 128-bit vshuf.* instructions behave the same way, so we can handle a
-+ * 128-bit vector permutation with a single instruction easily.
-+ *
-+ * 3. What the xvshuf.* instructions do:
-+ * In short, they perform TWO independent 128-bit vector permutations, which
-+ * is the reason all of this work is needed. We will explain it.
-+ * op0, op1, target and selector are each split into a high 128-bit and a
-+ * low 128-bit part, and the permutation proceeds as described below:
-+ *
-+ * a) op0's low 128 bits and op1's low 128 bits are "combined" into a
-+ * 256-bit temp vector storage (TVS1), with elements indexed as below:
-+ * 0 ~ nelt / 2 - 1 nelt / 2 ~ nelt - 1
-+ * |---------------------|---------------------| TVS1
-+ * op0's low 128bit op1's low 128bit
-+ * op0's high 128 bits and op1's high 128 bits are "combined" into TVS2 in
-+ * the same way.
-+ * 0 ~ nelt / 2 - 1 nelt / 2 ~ nelt - 1
-+ * |---------------------|---------------------| TVS2
-+ * op0's high 128bit op1's high 128bit
-+ * b) The selector's low 128 bits describe which elements from TVS1 go into
-+ * the target vector's low 128 bits. No TVS2 elements are allowed.
-+ * c) The selector's high 128 bits describe which elements from TVS2 go into
-+ * the target vector's high 128 bits. No TVS1 elements are allowed.
-+ *
-+ * As we can see, to handle a vector permutation correctly we can proceed in
-+ * three ways:
-+ * a) Modify the selector's elements, so that every element delivers the
-+ * correct value into the target vector.
-+ * b) Generate extra instructions before/after the xvshuf.* instruction to
-+ * adjust the op or target vectors, so that the target vector's value is
-+ * what GCC expects.
-+ * c) Use other instructions to process the ops and put the correct result
-+ * into the target.
-+ */
-+
-+/* Implementation of constant vector permutation. This function identifies
-+ * recognized patterns in the permutation selector argument and uses one or
-+ * more instructions to finish the permutation job correctly. For
-+ * unsupported patterns, it returns false. */
-+
-+static bool
-+loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
-+{
-+ // Although we have the LSX vec_perm template, some 128-bit vector
-+ // permutation operations are still sent to vectorize_vec_perm_const.
-+ // In this case we simply wrap them in a single vshuf.* instruction,
-+ // because the LSX vshuf.* instructions have exactly the behavior that
-+ // GCC expects.
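To make the lane-split rule above concrete, here is a minimal host-side C sketch (an illustration added alongside the patch text, not code from the patch itself; the modulo-nelt treatment of selector indices is an assumption read off the comment above, not quoted from the ISA manual). It contrasts the flat two-operand indexing GCC specifies with the per-128-bit-lane indexing of xvshuf.*, on a V4DImode-shaped example:

#include <stdio.h>

#define NELT 4  /* V4DImode: 4 x 64-bit elements, 2 per 128-bit lane */

/* GCC's model: one flat 2*NELT pool; low half is op0, high half op1.  */
static long
flat_perm (const long *op0, const long *op1, int idx)
{
  return idx < NELT ? op0[idx] : op1[idx - NELT];
}

/* xvshuf-style model: target slot I reads only from its lane-local pool
   (low lanes of op0/op1 for the low half, high lanes for the high half);
   the index is assumed to wrap modulo the pool size.  */
static long
xvshuf_perm (const long *op0, const long *op1, int i, int idx)
{
  long tvs[NELT];
  int lo = i < NELT / 2;
  for (int k = 0; k < NELT / 2; k++)
    {
      tvs[k] = lo ? op0[k] : op0[k + NELT / 2];
      tvs[k + NELT / 2] = lo ? op1[k] : op1[k + NELT / 2];
    }
  return tvs[idx % NELT];
}

int
main (void)
{
  long op0[NELT] = { 0, 1, 2, 3 }, op1[NELT] = { 4, 5, 6, 7 };
  int sel[NELT] = { 1, 4, 3, 6 };
  for (int i = 0; i < NELT; i++)
    printf ("slot %d: flat=%ld xvshuf=%ld\n", i,
            flat_perm (op0, op1, sel[i]), xvshuf_perm (op0, op1, i, sel[i]));
  return 0;
}

Slots 1 and 2 print different values under the two models; that mismatch is exactly what the selector remapping in the function below has to compensate for.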
-+ if (d->vmode != E_V32QImode && d->vmode != E_V16HImode -+ && d->vmode != E_V4DImode && d->vmode != E_V4DFmode -+ && d->vmode != E_V8SImode && d->vmode != E_V8SFmode) -+ return loongarch_try_expand_lsx_vshuf_const (d); -+ -+ bool ok = false, reverse_hi_lo = false, extract_ev_od = false, -+ use_alt_op = false; -+ unsigned char idx; -+ int i; -+ rtx target, op0, op1, sel, tmp; -+ rtx op0_alt = NULL_RTX, op1_alt = NULL_RTX; -+ rtx rperm[MAX_VECT_LEN]; -+ unsigned char remapped[MAX_VECT_LEN]; -+ -+ // Try to figure out whether is a recognized permutation selector pattern, if -+ // yes, we will reassign some elements with new value in selector argument, -+ // and in some cases we will generate some assist insn to complete the -+ // permutation. (Even in some cases, we use other insn to impl permutation -+ // instead of xvshuf!) -+ -+ // Make sure to check d->testing_p is false everytime if you want to emit new -+ // insn, unless you want to crash into ICE directly. -+ if (loongarch_is_quad_duplicate (d)) -+ { -+ // Selector example: E_V8SImode, { 0, 0, 0, 0, 4, 4, 4, 4 } -+ // copy first elem from original selector to all elem in new selector. -+ idx = d->perm[0]; -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ remapped[i] = idx; -+ } -+ // Selector after: { 0, 0, 0, 0, 0, 0, 0, 0 } -+ } -+ else if (loongarch_is_double_duplicate (d)) -+ { -+ // Selector example: E_V8SImode, { 1, 1, 3, 3, 5, 5, 7, 7 } -+ // one_vector_p == true -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ idx = d->perm[i]; -+ remapped[i] = idx; -+ remapped[i + d->nelt / 2] = idx; -+ } -+ // Selector after: { 1, 1, 3, 3, 1, 1, 3, 3 } -+ } -+ else if (loongarch_is_odd_extraction (d) -+ || loongarch_is_even_extraction (d)) -+ { -+ // Odd extraction selector sample: E_V4DImode, { 1, 3, 5, 7 } -+ // Selector after: { 1, 3, 1, 3 } -+ // Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 } -+ // Selector after: { 0, 2, 0, 2 } -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ idx = d->perm[i]; -+ remapped[i] = idx; -+ remapped[i + d->nelt / 2] = idx; -+ } -+ // Additional insn is required for correct result. See codes below. -+ extract_ev_od = true; -+ } -+ else if (loongarch_is_extraction_permutation (d)) -+ { -+ // Selector sample: E_V8SImode, { 0, 1, 2, 3, 4, 5, 6, 7 } -+ if (d->perm[0] == 0) -+ { -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ remapped[i] = i; -+ remapped[i + d->nelt / 2] = i; -+ } -+ } -+ else -+ { -+ // { 8, 9, 10, 11, 12, 13, 14, 15 } -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ idx = i + d->nelt / 2; -+ remapped[i] = idx; -+ remapped[i + d->nelt / 2] = idx; -+ } -+ } -+ // Selector after: { 0, 1, 2, 3, 0, 1, 2, 3 } -+ // { 8, 9, 10, 11, 8, 9, 10, 11 } -+ } -+ else if (loongarch_is_center_extraction (d)) -+ { -+ // sample: E_V4DImode, { 2, 3, 4, 5 } -+ // In this condition, we can just copy high 128bit of op0 and low 128bit -+ // of op1 to the target register by using xvpermi.q insn. 
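As a sanity check on this xvpermi.q use, here is a host-side sketch (not from the patch; the immediate decoding, where bits [1:0] pick the result's low 128-bit lane and bits [5:4] the high lane out of { 0: xj.lo, 1: xj.hi, 2: xd.lo, 3: xd.hi }, is inferred from the comments in this patch). With the target preloaded from op1 and immediate 0x21 it reproduces the center-extraction selector sample { 2, 3, 4, 5 }:

#include <stdio.h>

/* Assumed xvpermi.q rule: result.lo is bank (imm & 3) and result.hi is
   bank ((imm >> 4) & 3), banks being { xj.lo, xj.hi, xd.lo, xd.hi }.
   V4DImode view, 2 x 64-bit elements per 128-bit lane.  */
static void
xvpermi_q (long xd[4], const long xj[4], unsigned imm)
{
  const long *bank[4] = { &xj[0], &xj[2], &xd[0], &xd[2] };
  long lo0 = bank[imm & 3][0], lo1 = bank[imm & 3][1];
  long hi0 = bank[(imm >> 4) & 3][0], hi1 = bank[(imm >> 4) & 3][1];
  xd[0] = lo0; xd[1] = lo1; xd[2] = hi0; xd[3] = hi1;
}

int
main (void)
{
  long op0[4] = { 0, 1, 2, 3 };
  long t[4] = { 4, 5, 6, 7 };        /* target preloaded with op1 */
  xvpermi_q (t, op0, 0x21);          /* lo <- op0.hi, hi <- old t.lo */
  for (int i = 0; i < 4; i++)
    printf ("%ld ", t[i]);           /* prints: 2 3 4 5 */
  printf ("\n");
  return 0;
}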
-+ if (!d->testing_p) -+ { -+ emit_move_insn (d->target, d->op1); -+ switch (d->vmode) -+ { -+ case E_V4DImode: -+ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0, GEN_INT (0x21))); -+ break; -+ case E_V4DFmode: -+ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0, GEN_INT (0x21))); -+ break; -+ case E_V8SImode: -+ emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target, d->op0, GEN_INT (0x21))); -+ break; -+ case E_V8SFmode: -+ emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target, d->op0, GEN_INT (0x21))); -+ break; -+ case E_V16HImode: -+ emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target, d->op0, GEN_INT (0x21))); -+ break; -+ case E_V32QImode: -+ emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target, d->op0, GEN_INT (0x21))); -+ break; -+ default: -+ break; -+ } -+ } -+ ok = true; -+ // Finish the funtion directly. -+ goto expand_perm_const_2_end; -+ } -+ else if (loongarch_is_reversing_permutation (d)) -+ { -+ // Selector sample: E_V8SImode, { 7, 6, 5, 4, 3, 2, 1, 0 } -+ // one_vector_p == true -+ idx = d->nelt / 2 - 1; -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ remapped[i] = idx; -+ remapped[i + d->nelt / 2] = idx; -+ idx -= 1; -+ } -+ // Selector after: { 3, 2, 1, 0, 3, 2, 1, 0 } -+ // Additional insn will be generated to swap hi and lo 128bit of target -+ // register. -+ reverse_hi_lo = true; -+ } -+ else if (loongarch_is_di_misalign_extract (d) -+ || loongarch_is_si_misalign_extract (d)) -+ { -+ // Selector Sample: -+ // DI misalign: E_V4DImode, { 1, 2, 3, 4 } -+ // SI misalign: E_V8SImode, { 1, 2, 3, 4, 5, 6, 7, 8 } -+ if (!d->testing_p) -+ { -+ // Copy original op0/op1 value to new temp register. -+ // In some cases, operand register may be used in multiple place, so -+ // we need new regiter instead modify original one, to avoid runtime -+ // crashing or wrong value after execution. -+ use_alt_op = true; -+ op1_alt = gen_reg_rtx (d->vmode); -+ emit_move_insn (op1_alt, d->op1); -+ -+ // Adjust op1 for selecting correct value in high 128bit of target -+ // register. -+ // op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 } -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, -+ conv_op0, GEN_INT (0x21))); -+ -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ remapped[i] = d->perm[i]; -+ remapped[i + d->nelt / 2] = d->perm[i]; -+ } -+ // Selector after: -+ // DI misalign: { 1, 2, 1, 2 } -+ // SI misalign: { 1, 2, 3, 4, 1, 2, 3, 4 } -+ } -+ } -+ else if (loongarch_is_lasx_lowpart_interleave (d)) -+ { -+ // Elements from op0's low 18bit and op1's 128bit are inserted into -+ // target register alternately. -+ //sample: E_V4DImode, { 0, 4, 1, 5 } -+ if (!d->testing_p) -+ { -+ // Prepare temp register instead of modify original op. -+ use_alt_op = true; -+ op1_alt = gen_reg_rtx (d->vmode); -+ op0_alt = gen_reg_rtx (d->vmode); -+ emit_move_insn (op1_alt, d->op1); -+ emit_move_insn (op0_alt, d->op0); -+ -+ // Generate subreg for fitting into insn gen function. -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); -+ -+ // Adjust op value in temp register. 
-+ // op0 = {0,1,2,3}, op1 = {4,5,0,1} -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, -+ conv_op0, GEN_INT (0x02))); -+ // op0 = {0,1,4,5}, op1 = {4,5,0,1} -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, -+ conv_op1, GEN_INT (0x01))); -+ -+ // Remap indices in selector based on the location of index inside -+ // selector, and vector element numbers in current vector mode. -+ -+ // Filling low 128bit of new selector. -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ // value in odd-indexed slot of low 128bit part of selector -+ // vector. -+ remapped[i] = i % 2 != 0 ? d->perm[i] - d->nelt / 2 : d->perm[i]; -+ } -+ // Then filling the high 128bit. -+ for (i = d->nelt / 2; i < d->nelt; i += 1) -+ { -+ // value in even-indexed slot of high 128bit part of -+ // selector vector. -+ remapped[i] = i % 2 == 0 ? d->perm[i] + (d->nelt / 2) * 3 : d->perm[i]; -+ } -+ } -+ } -+ else if (loongarch_is_lasx_lowpart_interleave_2 (d)) -+ { -+ // Special lowpart interleave case in V32QI vector mode. It does the same -+ // thing as we can see in if branch that above this line. -+ // Selector sample: E_V32QImode, -+ // {0, 1, 2, 3, 4, 5, 6, 7, 32, 33, 34, 35, 36, 37, 38, 39, 8, 9, 10, -+ // 11, 12, 13, 14, 15, 40, 41, 42, 43, 44, 45, 46, 47} -+ if (!d->testing_p) -+ { -+ // Solution for this case in very simple - covert op into V4DI mode, -+ // and do same thing as previous if branch. -+ op1_alt = gen_reg_rtx (d->vmode); -+ op0_alt = gen_reg_rtx (d->vmode); -+ emit_move_insn (op1_alt, d->op1); -+ emit_move_insn (op0_alt, d->op0); -+ -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); -+ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); -+ -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, -+ conv_op0, GEN_INT (0x02))); -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, -+ conv_op1, GEN_INT (0x01))); -+ remapped[0] = 0; -+ remapped[1] = 4; -+ remapped[2] = 1; -+ remapped[3] = 5; -+ -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ rperm[i] = GEN_INT (remapped[i]); -+ } -+ -+ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v(4, rperm)); -+ sel = force_reg (E_V4DImode, sel); -+ emit_insn (gen_lasx_xvshuf_d (conv_target, sel, -+ conv_op1, conv_op0)); -+ } -+ -+ ok = true; -+ goto expand_perm_const_2_end; -+ } -+ else if (loongarch_is_lasx_lowpart_extract (d)) -+ { -+ // Copy op0's low 128bit to target's low 128bit, and copy op1's low -+ // 128bit to target's high 128bit. -+ // Selector sample: E_V4DImode, { 0, 1, 4 ,5 } -+ if (!d->testing_p) -+ { -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); -+ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); -+ -+ // We can achieve the expectation by using sinple xvpermi.q insn. -+ emit_move_insn (conv_target, conv_op1); -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_target, conv_target, -+ conv_op0, GEN_INT(0x20))); -+ } -+ -+ ok = true; -+ goto expand_perm_const_2_end; -+ } -+ else if (loongarch_is_lasx_highpart_interleave (d)) -+ { -+ // Similar to lowpart interleave, elements from op0's high 128bit and -+ // op1's high 128bit are inserted into target regiter alternately. -+ // Selector sample: E_V8SImode, { 4, 12, 5, 13, 6, 14, 7, 15 } -+ if (!d->testing_p) -+ { -+ // Prepare temp op register. 
-+ use_alt_op = true; -+ op1_alt = gen_reg_rtx (d->vmode); -+ op0_alt = gen_reg_rtx (d->vmode); -+ emit_move_insn (op1_alt, d->op1); -+ emit_move_insn (op0_alt, d->op0); -+ -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); -+ // Adjust op value in temp regiter. -+ // op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 } -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, -+ conv_op0, GEN_INT (0x13))); -+ // op0 = { 2, 3, 6, 7 }, op1 = { 6, 7, 2, 3 } -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, -+ conv_op1, GEN_INT (0x01))); -+ // Remap indices in selector based on the location of index inside -+ // selector, and vector element numbers in current vector mode. -+ -+ // Filling low 128bit of new selector. -+ for (i = 0; i < d->nelt / 2; i += 1) -+ { -+ // value in even-indexed slot of low 128bit part of selector -+ // vector. -+ remapped[i] = i % 2 == 0 ? d->perm[i] - d->nelt / 2 : d->perm[i]; -+ } -+ // Then filling the high 128bit. -+ for (i = d->nelt / 2; i < d->nelt; i += 1) -+ { -+ // value in odd-indexed slot of high 128bit part of selector -+ // vector. -+ remapped[i] = i % 2 != 0 ? d->perm[i] - (d->nelt / 2) * 3 : d->perm[i]; -+ } -+ } -+ } -+ else if (loongarch_is_lasx_highpart_interleave_2 (d)) -+ { -+ // Special highpart interleave case in V32QI vector mode. It does the -+ // same thing as the normal version above. -+ // Selector sample: E_V32QImode, -+ // {16, 17, 18, 19, 20, 21, 22, 23, 48, 49, 50, 51, 52, 53, 54, 55, 24, -+ // 25, 26, 27, 28, 29, 30, 31, 56, 57, 58, 59, 60, 61, 62, 63} -+ if (!d->testing_p) -+ { -+ // Convert op into V4DImode and do the things. -+ op1_alt = gen_reg_rtx (d->vmode); -+ op0_alt = gen_reg_rtx (d->vmode); -+ emit_move_insn (op1_alt, d->op1); -+ emit_move_insn (op0_alt, d->op0); -+ -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); -+ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); -+ -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, -+ conv_op0, GEN_INT (0x13))); -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, -+ conv_op1, GEN_INT (0x01))); -+ remapped[0] = 2; -+ remapped[1] = 6; -+ remapped[2] = 3; -+ remapped[3] = 7; -+ -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ rperm[i] = GEN_INT (remapped[i]); -+ } -+ -+ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v(4, rperm)); -+ sel = force_reg (E_V4DImode, sel); -+ emit_insn (gen_lasx_xvshuf_d (conv_target, sel, -+ conv_op1, conv_op0)); -+ } -+ -+ ok = true; -+ goto expand_perm_const_2_end; -+ } -+ else if (loongarch_is_elem_duplicate (d)) -+ { -+ // Brocast single element (from op0 or op1) to all slot of target -+ // register. -+ // Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 } -+ if (!d->testing_p) -+ { -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); -+ rtx temp_reg = gen_reg_rtx (d->vmode); -+ rtx conv_temp = gen_rtx_SUBREG (E_V4DImode, temp_reg, 0); -+ -+ emit_move_insn (temp_reg, d->op0); -+ -+ idx = d->perm[0]; -+ // We will use xvrepl128vei.* insn to achieve the result, but we need -+ // to make the high/low 128bit has the same contents that contain the -+ // value that we need to broardcast, because xvrepl128vei does the -+ // broardcast job from every 128bit of source register to -+ // corresponded part of target register! (A deep sigh.) 
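A host-side model of that xvrepl128vei behavior (illustration only, following the per-lane broadcast semantics the comment above describes) makes the problem visible: without the xvpermi.q fixup, the two lanes would broadcast different elements.

#include <stdio.h>

/* Model of xvrepl128vei.w as described above: EACH 128-bit lane broadcasts
   its own element IDX into the corresponding lane of the result.  */
static void
xvrepl128vei_w (int dst[8], const int src[8], int idx)
{
  for (int lane = 0; lane < 2; lane++)
    for (int i = 0; i < 4; i++)
      dst[lane * 4 + i] = src[lane * 4 + idx];
}

int
main (void)
{
  int v[8] = { 10, 11, 12, 13, 14, 15, 16, 17 }, r[8];
  xvrepl128vei_w (r, v, 2);
  /* Prints 12 12 12 12 16 16 16 16: the lanes disagree unless both
     128-bit halves were first made identical with xvpermi.q.  */
  for (int i = 0; i < 8; i++)
    printf ("%d ", r[i]);
  printf ("\n");
  return 0;
}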
-+ if (/*idx >= 0 &&*/ idx < d->nelt / 2) -+ { -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, -+ conv_op0, GEN_INT (0x0))); -+ } -+ else if (idx >= d->nelt / 2 && idx < d->nelt) -+ { -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, -+ conv_op0, GEN_INT (0x11))); -+ idx -= d->nelt / 2; -+ } -+ else if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) -+ { -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, -+ conv_op1, GEN_INT (0x0))); -+ } -+ else if (idx >= (d->nelt + d->nelt / 2) && idx < d->nelt * 2) -+ { -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, -+ conv_op1, GEN_INT (0x11))); -+ idx -= d->nelt / 2; -+ } -+ -+ // Then we can finally generate this insn. -+ switch (d->vmode) -+ { -+ case E_V4DImode: -+ emit_insn (gen_lasx_xvrepl128vei_d (d->target, temp_reg, GEN_INT (idx))); -+ break; -+ case E_V4DFmode: -+ emit_insn (gen_lasx_xvrepl128vei_d_f (d->target, temp_reg, GEN_INT (idx))); -+ break; -+ case E_V8SImode: -+ emit_insn (gen_lasx_xvrepl128vei_w (d->target, temp_reg, GEN_INT (idx))); -+ break; -+ case E_V8SFmode: -+ emit_insn (gen_lasx_xvrepl128vei_w_f (d->target, temp_reg, GEN_INT (idx))); -+ break; -+ case E_V16HImode: -+ emit_insn (gen_lasx_xvrepl128vei_h (d->target, temp_reg, GEN_INT (idx))); -+ break; -+ case E_V32QImode: -+ emit_insn (gen_lasx_xvrepl128vei_b (d->target, temp_reg, GEN_INT(idx))); -+ break; -+ default: -+ gcc_unreachable (); -+ break; -+ } -+ -+ // finish func directly. -+ ok = true; -+ goto expand_perm_const_2_end; -+ } -+ } -+ else if (loongarch_is_op_reverse_perm (d)) -+ { -+ // reverse high 128bit and low 128bit in op0. -+ // Selector sample: E_V4DFmode, { 2, 3, 0, 1 } -+ // Use xvpermi.q for doing this job. -+ if (!d->testing_p) -+ { -+ if (d->vmode == E_V4DImode) -+ { -+ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0, -+ GEN_INT (0x01))); -+ } -+ else if (d->vmode == E_V4DFmode) -+ { -+ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0, -+ GEN_INT (0x01))); -+ } -+ else -+ { -+ gcc_unreachable (); -+ } -+ } -+ -+ ok = true; -+ goto expand_perm_const_2_end; -+ } -+ else if (loongarch_is_single_op_perm (d)) -+ { -+ //Permutation that only select elements from op0. -+ if (!d->testing_p) -+ { -+ // Prepare temp register instead of modify original op. -+ use_alt_op = true; -+ op0_alt = gen_reg_rtx (d->vmode); -+ op1_alt = gen_reg_rtx (d->vmode); -+ -+ emit_move_insn (op0_alt, d->op0); -+ emit_move_insn (op1_alt, d->op1); -+ -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); -+ rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); -+ rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ -+ // Duplicate op0's low 128bit in op0, then duplicate high 128bit -+ // in op1. After this, xvshuf.* insn's selector argument can -+ // access all elements we need for correct permutation result. -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op0, -+ GEN_INT (0x00))); -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0, -+ GEN_INT (0x11))); -+ -+ // In this case, there's no need to remap selector's indices. -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ remapped[i] = d->perm[i]; -+ } -+ } -+ } -+ else if (loongarch_is_divisible_perm (d)) -+ { -+ // Divisible perm: -+ // Low 128bit of selector only selects elements of op0, -+ // and high 128bit of selector only selects elements of op1. -+ -+ if (!d->testing_p) -+ { -+ // Prepare temp register instead of modify original op. 
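To see what the two xvpermi.q calls just below (immediates 0x00 and 0x11) buy us, here is a small host-side sketch (not from the patch; it applies the TVS1/TVS2 pooling rule described earlier to the duplicated temporaries). Both lane-local pools end up holding all of op0 in flat order, which is why this branch can keep the selector indices unchanged:

#include <stdio.h>

int
main (void)
{
  int op0[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };   /* V8SImode view */
  int t0[8], t1[8];

  /* xvpermi.q ..., 0x00: both lanes of t0 become op0's low lane;
     xvpermi.q ..., 0x11: both lanes of t1 become op0's high lane.  */
  for (int i = 0; i < 4; i++)
    {
      t0[i] = t0[i + 4] = op0[i];
      t1[i] = t1[i + 4] = op0[i + 4];
    }

  /* Lane-local pool for target lane L is t0's lane L followed by t1's
     lane L (the TVS1/TVS2 rule); both pools are all of op0 in order.  */
  for (int lane = 0; lane < 2; lane++)
    {
      printf ("lane %d pool:", lane);
      for (int k = 0; k < 4; k++)
        printf (" %d", t0[lane * 4 + k]);
      for (int k = 0; k < 4; k++)
        printf (" %d", t1[lane * 4 + k]);
      printf ("\n");   /* both lanes print: 0 1 2 3 4 5 6 7 */
    }
  return 0;
}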
-+ use_alt_op = true; -+ op0_alt = gen_reg_rtx (d->vmode); -+ op1_alt = gen_reg_rtx (d->vmode); -+ -+ emit_move_insn (op0_alt, d->op0); -+ emit_move_insn (op1_alt, d->op1); -+ -+ rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); -+ rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); -+ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); -+ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); -+ -+ // Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure -+ //that selector's low 128bit can access all op0's elements, and -+ //selector's high 128bit can access all op1's elements. -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op1, -+ GEN_INT (0x02))); -+ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0, -+ GEN_INT (0x31))); -+ -+ // No need to modify indices. -+ for (i = 0; i < d->nelt;i += 1) -+ { -+ remapped[i] = d->perm[i]; -+ } -+ } -+ } -+ else if (loongarch_is_triple_stride_extract (d)) -+ { -+ // Selector sample: E_V4DFmode, { 1, 4, 7, 0 } -+ if (!d->testing_p) -+ { -+ // Resolve it with brute force modification. -+ remapped[0] = 1; -+ remapped[1] = 2; -+ remapped[2] = 3; -+ remapped[3] = 0; -+ } -+ } -+ else -+ { -+ // When all of the detections above are failed, we will try last -+ // strategy. -+ // The for loop tries to detect following rules based on indices' value -+ // , its position inside of selector vector ,and strange behavior of xvshuf.* insn; -+ // Then we take corresponding action. (Replace with new value, or give up -+ // whole permutation expansion.) -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ idx = d->perm[i]/* % (2 * d->nelt)*/; -+ -+ // if index is located in low 128bit of selector vector -+ if (i < d->nelt / 2) -+ { -+ // Fail case 1: index tries to reach element that located in op0's -+ // high 128bit. -+ if (idx >= d->nelt / 2 && idx < d->nelt) -+ { -+ goto expand_perm_const_2_end; -+ } -+ // Fail case 2: index tries to reach element that located in -+ // op1's high 128bit. -+ if (idx >= (d->nelt + d->nelt / 2)) -+ { -+ goto expand_perm_const_2_end; -+ } -+ -+ // Success case: index tries to reach elements that located in -+ // op1's low 128bit. Apply - (nelt / 2) offset to original value. -+ if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) -+ { -+ idx -= d->nelt / 2; -+ } -+ } -+ // if index is located in high 128bit of selector vector -+ else -+ { -+ // Fail case 1: index tries to reach element that located in -+ // op1's low 128bit. -+ if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) -+ { -+ goto expand_perm_const_2_end; -+ } -+ // Fail case 2: index tries to reach element that located in -+ // op0's low 128bit. -+ if (idx < (d->nelt / 2)) -+ { -+ goto expand_perm_const_2_end; -+ } -+ // Success case: index tries to reach element that located in -+ // op0's high 128bit. -+ if (idx >= d->nelt / 2 && idx < d->nelt) -+ { -+ idx -= d->nelt / 2; -+ } -+ } -+ // No need to process other case that we did not mentioned. -+ -+ // Assign with original or processed value. -+ remapped[i] = idx; -+ } -+ } -+ -+ ok = true; -+ // If testing_p is true, compiler is trying to figure out that backend can -+ // handle this permutation, but doesn't want to generate actual insn. So if -+ // true, exit directly. -+ if (d->testing_p) -+ { -+ goto expand_perm_const_2_end; -+ } -+ -+ // Convert remapped selector array to RTL array. -+ for (i = 0; i < d->nelt; i += 1) -+ { -+ rperm[i] = GEN_INT (remapped[i]); -+ } -+ -+ // Copy selector vector from memory to vector regiter for later insn gen -+ // function. 
-+ // if vector's element in floating point value, we cannot fit selector -+ // argument into insn gen function directly, because of the insn template -+ // definition. As a solution, generate a integral mode subreg of target, -+ // then copy selector vector(that is in integral mode) to this subreg. -+ switch (d->vmode) -+ { -+ case E_V4DFmode: -+ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm)); -+ tmp = gen_rtx_SUBREG (E_V4DImode, d->target, 0); -+ emit_move_insn (tmp, sel); -+ break; -+ case E_V8SFmode: -+ sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm)); -+ tmp = gen_rtx_SUBREG (E_V8SImode, d->target, 0); -+ emit_move_insn (tmp, sel); -+ break; -+ default: -+ sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm)); -+ emit_move_insn (d->target, sel); -+ break; - } -- return true; --} -- --static bool --loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) --{ -- unsigned int i, nelt = d->nelt; -- unsigned char perm2[MAX_VECT_LEN]; - -- if (d->one_vector_p) -+ target = d->target; -+ // If temp op registers are requested in previous if branch, then use temp -+ // register intead of original one. -+ if (use_alt_op) - { -- /* Try interleave with alternating operands. */ -- memcpy (perm2, d->perm, sizeof(perm2)); -- for (i = 1; i < nelt; i += 2) -- perm2[i] += nelt; -- if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt)) -- return true; -+ op0 = op0_alt != NULL_RTX ? op0_alt : d->op0; -+ op1 = op1_alt != NULL_RTX ? op1_alt : d->op1; - } - else - { -- if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, -- d->perm, nelt)) -- return true; -+ op0 = d->op0; -+ op1 = d->one_vector_p ? d->op0 : d->op1; -+ } - -- /* Try again with swapped operands. */ -- for (i = 0; i < nelt; ++i) -- perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); -- if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt)) -- return true; -+ // We FINALLY can generate xvshuf.* insn. -+ switch (d->vmode) -+ { -+ case E_V4DFmode: -+ emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0)); -+ break; -+ case E_V4DImode: -+ emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0)); -+ break; -+ case E_V8SFmode: -+ emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0)); -+ break; -+ case E_V8SImode: -+ emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0)); -+ break; -+ case E_V16HImode: -+ emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0)); -+ break; -+ case E_V32QImode: -+ emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target)); -+ break; -+ default: -+ gcc_unreachable (); -+ break; - } - -- if (loongarch_expand_lsx_shuffle (d)) -- return true; -- return false; -+ // extra insn for swapping the hi/lo 128bit of target vector register. 
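Both fixups emitted below are easy to model on the host. In this added sketch (not from the patch) the xvpermi.q immediate follows the same inferred decoding as earlier, and the xvpermi.d immediate is read as four 2-bit source indices, least-significant field first; 0xd8 then decodes to { 0, 2, 1, 3 }, i.e. it swaps the middle two doublewords exactly as the even/odd-extraction comment below says.

#include <stdio.h>

int
main (void)
{
  long t[4] = { 0, 1, 2, 3 }, r[4];
  unsigned imm = 0xd8;

  /* xvpermi.q target, target, 0x1: low lane <- old high lane,
     high lane <- old low lane (the reverse_hi_lo fixup).  */
  r[0] = t[2]; r[1] = t[3]; r[2] = t[0]; r[3] = t[1];
  printf ("xvpermi.q 0x1 : %ld %ld %ld %ld\n", r[0], r[1], r[2], r[3]);

  /* xvpermi.d target, target, 0xd8: slot i <- t[(imm >> 2*i) & 3],
     i.e. the doubleword permutation { 0, 2, 1, 3 }.  */
  for (int i = 0; i < 4; i++)
    r[i] = t[(imm >> 2 * i) & 3];
  printf ("xvpermi.d 0xd8: %ld %ld %ld %ld\n", r[0], r[1], r[2], r[3]);
  return 0;
}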
-+ if (reverse_hi_lo) -+ { -+ switch (d->vmode) -+ { -+ case E_V4DFmode: -+ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->target, GEN_INT (0x1))); -+ break; -+ case E_V4DImode: -+ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->target, GEN_INT (0x1))); -+ break; -+ case E_V8SFmode: -+ emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target, d->target, GEN_INT (0x1))); -+ break; -+ case E_V8SImode: -+ emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target, d->target, GEN_INT (0x1))); -+ break; -+ case E_V16HImode: -+ emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target, d->target, GEN_INT (0x1))); -+ break; -+ case E_V32QImode: -+ emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target, d->target, GEN_INT (0x1))); -+ break; -+ default: -+ break; -+ } -+ } -+ // extra insn required by odd/even extraction. Swapping the second and third -+ // 64bit in target vector register. -+ else if (extract_ev_od) -+ { -+ rtx converted = gen_rtx_SUBREG (E_V4DImode, d->target, 0); -+ emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted, GEN_INT (0xD8))); -+ } -+ -+expand_perm_const_2_end: -+ return ok; - } - - /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */ -@@ -9043,13 +9312,19 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, - if (!d.one_vector_p) - d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); - -+ ok = loongarch_expand_vec_perm_const_2 (&d); -+ if (ok) -+ return ok; -+ - start_sequence (); - ok = loongarch_expand_vec_perm_const_1 (&d); - end_sequence (); - return ok; - } - -- ok = loongarch_expand_vec_perm_const_1 (&d); -+ ok = loongarch_expand_vec_perm_const_2 (&d); -+ if (!ok) -+ ok = loongarch_expand_vec_perm_const_1 (&d); - - /* If we were given a two-vector permutation which just happened to - have both input vectors equal, we folded this into a one-vector -@@ -9070,16 +9345,18 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, - return ok; - } - --/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. */ -- - static int --loongarch_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED, -- machine_mode mode) -+loongarch_cpu_sched_reassociation_width (struct loongarch_target *target, -+ unsigned int opc, machine_mode mode) - { -- switch (loongarch_tune) -+ /* unreferenced argument */ -+ (void) opc; -+ -+ switch (target->cpu_tune) - { -- case PROCESSOR_LOONGARCH64: -- case PROCESSOR_LA464: -+ case CPU_LOONGARCH64: -+ case CPU_LA464: -+ case CPU_LA664: - /* Vector part. */ - if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)) - { -@@ -9094,10 +9371,164 @@ loongarch_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED, - else if (FLOAT_MODE_P (mode)) - return 4; - break; -+ } -+ -+ /* default is 1 */ -+ return 1; -+} -+ -+/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. 
*/ -+ -+static int -+loongarch_sched_reassociation_width (unsigned int opc, machine_mode mode) -+{ -+ return loongarch_cpu_sched_reassociation_width (&la_target, opc, mode); -+} -+ -+/* Implement extract a scalar element from vecotr register */ -+ -+void -+loongarch_expand_vector_extract (rtx target, rtx vec, int elt) -+{ -+ machine_mode mode = GET_MODE (vec); -+ machine_mode inner_mode = GET_MODE_INNER (mode); -+ rtx tmp; -+ -+ switch (mode) -+ { -+ case E_V8HImode: -+ case E_V16QImode: -+ break; -+ -+ case E_V32QImode: -+ if (ISA_HAS_LASX) -+ { -+ if (elt >= 16) -+ { -+ tmp = gen_reg_rtx (V32QImode); -+ emit_insn (gen_lasx_xvpermi_d_v32qi (tmp, vec, GEN_INT (0xe))); -+ loongarch_expand_vector_extract (target, gen_lowpart (V16QImode, tmp), elt & 15); -+ } -+ else -+ loongarch_expand_vector_extract (target, gen_lowpart (V16QImode, vec), elt & 15); -+ return; -+ } -+ break; -+ -+ case E_V16HImode: -+ if (ISA_HAS_LASX) -+ { -+ if (elt >= 8) -+ { -+ tmp = gen_reg_rtx (V16HImode); -+ emit_insn (gen_lasx_xvpermi_d_v16hi (tmp, vec, GEN_INT (0xe))); -+ loongarch_expand_vector_extract (target, gen_lowpart (V8HImode, tmp), elt & 7); -+ } -+ else -+ loongarch_expand_vector_extract (target, gen_lowpart (V8HImode, vec), elt & 7); -+ return; -+ } -+ break; -+ - default: - break; - } -- return 1; -+ -+ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt))); -+ tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp); -+ -+ /* Let the rtl optimizers know about the zero extension performed. */ -+ if (inner_mode == QImode || inner_mode == HImode) -+ { -+ tmp = gen_rtx_ZERO_EXTEND (SImode, tmp); -+ target = gen_lowpart (SImode, target); -+ } -+ if (inner_mode == SImode || inner_mode == DImode) -+ { -+ tmp = gen_rtx_SIGN_EXTEND (inner_mode, tmp); -+ } -+ -+ emit_insn (gen_rtx_SET (target, tmp)); -+} -+ -+/* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC -+ to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode. -+ The upper bits of DEST are undefined, though they shouldn't cause -+ exceptions (some bits from src or all zeros are ok). */ -+ -+static void -+emit_reduc_half (rtx dest, rtx src, int i) -+{ -+ rtx tem, d = dest; -+ switch (GET_MODE (src)) -+ { -+ case E_V4SFmode: -+ tem = gen_lsx_vbsrl_w_f (dest, src, GEN_INT (i == 128 ? 8 : 4)); -+ break; -+ case E_V2DFmode: -+ tem = gen_lsx_vbsrl_d_f (dest, src, GEN_INT (8)); -+ break; -+ case E_V8SFmode: -+ if (i == 256) -+ tem = gen_lasx_xvpermi_d_v8sf (dest, src, GEN_INT (0xe)); -+ else -+ tem = gen_lasx_xvshuf4i_w_f (dest, src, -+ GEN_INT (i == 128 ? 2 + (3 << 2) : 1)); -+ break; -+ case E_V4DFmode: -+ if (i == 256) -+ tem = gen_lasx_xvpermi_d_v4df (dest, src, GEN_INT (0xe)); -+ else -+ tem = gen_lasx_xvpermi_d_v4df (dest, src, const1_rtx); -+ break; -+ case E_V32QImode: -+ case E_V16HImode: -+ case E_V8SImode: -+ case E_V4DImode: -+ d = gen_reg_rtx (V4DImode); -+ if (i == 256) -+ tem = gen_lasx_xvpermi_d_v4di (d, gen_lowpart (V4DImode, src), GEN_INT (0xe)); -+ else -+ tem = gen_lasx_xvbsrl_d (d, gen_lowpart (V4DImode, src), GEN_INT (i/16)); -+ break; -+ case E_V16QImode: -+ case E_V8HImode: -+ case E_V4SImode: -+ case E_V2DImode: -+ d = gen_reg_rtx (V2DImode); -+ tem = gen_lsx_vbsrl_d (d, gen_lowpart (V2DImode, src), GEN_INT (i/16)); -+ break; -+ default: -+ gcc_unreachable (); -+ } -+ emit_insn (tem); -+ if (d != dest) -+ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), d)); -+} -+ -+/* Expand a vector reduction. FN is the binary pattern to reduce; -+ DEST is the destination; IN is the input vector. 
*/ -+ -+void -+loongarch_expand_vector_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in) -+{ -+ rtx half, dst, vec = in; -+ machine_mode mode = GET_MODE (in); -+ int i; -+ -+ for (i = GET_MODE_BITSIZE (mode); -+ i > GET_MODE_UNIT_BITSIZE (mode); -+ i >>= 1) -+ { -+ half = gen_reg_rtx (mode); -+ emit_reduc_half (half, vec, i); -+ if (i == GET_MODE_UNIT_BITSIZE (mode) * 2) -+ dst = dest; -+ else -+ dst = gen_reg_rtx (mode); -+ emit_insn (fn (dst, half, vec)); -+ vec = dst; -+ } - } - - /* Expand an integral vector unpack operation. */ -@@ -9110,14 +9541,14 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) - rtx (*extend) (rtx, rtx); - rtx (*cmpFunc) (rtx, rtx, rtx); - rtx (*swap_hi_lo) (rtx, rtx, rtx, rtx); -- rtx tmp, dest, zero; -- machine_mode halfmode = BLKmode; -+ rtx tmp, dest /*, zero */; -+ /* machine_mode halfmode = BLKmode; */ - - if (ISA_HAS_LASX && GET_MODE_SIZE (imode) == 32) - { - switch (imode) - { -- -+ - case E_V8SImode: - if (unsigned_p) - extend = gen_lasx_vext2xv_du_wu; -@@ -9125,7 +9556,7 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) - extend = gen_lasx_vext2xv_d_w; - swap_hi_lo = gen_lasx_xvpermi_q_v8si; - break; -- -+ - case E_V16HImode: - if (unsigned_p) - extend = gen_lasx_vext2xv_wu_hu; -@@ -9133,7 +9564,7 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) - extend = gen_lasx_vext2xv_w_h; - swap_hi_lo = gen_lasx_xvpermi_q_v16hi; - break; -- -+ - case E_V32QImode: - if (unsigned_p) - extend = gen_lasx_vext2xv_hu_bu; -@@ -9141,7 +9572,7 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) - extend = gen_lasx_vext2xv_h_b; - swap_hi_lo = gen_lasx_xvpermi_q_v32qi; - break; -- -+ - default: - gcc_unreachable (); - break; -@@ -9268,7 +9699,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) - machine_mode vmode = GET_MODE (target); - machine_mode imode = GET_MODE_INNER (vmode); - unsigned i, nelt = GET_MODE_NUNITS (vmode); -- unsigned nvar = 0, one_var = -1u; -+ unsigned nvar = 0 /*, one_var = -1u*/ ; - bool all_same = true; - rtx x; - -@@ -9276,7 +9707,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) - { - x = XVECEXP (vals, 0, i); - if (!loongarch_constant_elt_p (x)) -- nvar++, one_var = i; -+ nvar++ /*, one_var = i */ ; - if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) - all_same = false; - } -@@ -9311,7 +9742,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) - temp2 = same; - else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) - { -- if(GET_CODE (same) == MEM) -+ if (GET_CODE (same) == MEM) - { - rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); - loongarch_emit_move (reg_tmp, same); -@@ -9322,7 +9753,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) - } - else - { -- if(GET_CODE (same) == MEM) -+ if (GET_CODE (same) == MEM) - { - rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); - loongarch_emit_move (reg_tmp, same); -@@ -9505,7 +9936,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) - temp2 = same; - else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) - { -- if(GET_CODE (same) == MEM) -+ if (GET_CODE (same) == MEM) - { - rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); - loongarch_emit_move (reg_tmp, same); -@@ -9516,7 +9947,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) - } - else - { -- if(GET_CODE (same) == MEM) -+ if (GET_CODE (same) == MEM) - { - rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); - loongarch_emit_move (reg_tmp, same); -@@ -9614,9 +10045,8 @@ loongarch_expand_vector_init (rtx target, rtx vals) - /* 
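[Editor's note: emit_reduc_half above implements one halving step (a byte shift or cross-lane permute that brings the upper half of the live data down), and loongarch_expand_vector_reduc applies the binary pattern once per step, so lane 0 holds the final result after log2(nelts) iterations. The net effect in scalar form, assuming a power-of-two lane count; only lane 0 of the real instruction sequence is meaningful at the end:]

#include <stdio.h>

static int
reduc_add_model (const int *lanes, int nelts)   /* nelts: power of two <= 64 */
{
  int v[64];
  for (int i = 0; i < nelts; i++)
    v[i] = lanes[i];
  /* Each pass combines the vector with its shifted-down upper half,
     i.e. fn (dst, half, vec) with fn = add in the expander.  */
  for (int half = nelts / 2; half >= 1; half /= 2)
    for (int i = 0; i < half; i++)
      v[i] = v[i] + v[i + half];
  return v[0];
}

int
main (void)
{
  int x[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  printf ("%d\n", reduc_add_model (x, 8));   /* 36 */
  return 0;
}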
Implement HARD_REGNO_CALLER_SAVE_MODE. */ - - machine_mode --loongarch_hard_regno_caller_save_mode (unsigned int regno, -- unsigned int nregs, -- machine_mode mode) -+loongarch_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs, -+ machine_mode mode) - { - /* For performance, avoid saving/restoring upper parts of a register - by returning MODE as save mode when the mode is known. */ -@@ -9785,7 +10215,8 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, - if (mode != vimode) - { - xop1 = gen_reg_rtx (vimode); -- emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); -+ emit_move_insn (xop1, simplify_gen_subreg (vimode, operands[1], -+ GET_MODE (operands[1]), 0)); - } - emit_move_insn (src1, xop1); - } -@@ -9802,7 +10233,8 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, - if (mode != vimode) - { - xop2 = gen_reg_rtx (vimode); -- emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); -+ emit_move_insn (xop2, simplify_gen_subreg (vimode, operands[2], -+ GET_MODE (operands[2]), 0)); - } - emit_move_insn (src2, xop2); - } -@@ -9821,13 +10253,14 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, - gen_rtx_AND (vimode, mask, src1)); - /* The result is placed back to a register with the mask. */ - emit_insn (gen_rtx_SET (mask, bsel)); -- emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); -+ emit_move_insn (operands[0], simplify_gen_subreg (mode, mask, -+ GET_MODE (mask), 0)); - } - } - - /* Expand integer vector comparison */ - bool --loongarch_expand_int_vec_cmp(rtx operands[]) -+loongarch_expand_int_vec_cmp (rtx operands[]) - { - - rtx_code code = GET_CODE (operands[1]); -@@ -9837,7 +10270,7 @@ loongarch_expand_int_vec_cmp(rtx operands[]) - - /* Expand integer vector comparison */ - bool --loongarch_expand_fp_vec_cmp(rtx operands[]) -+loongarch_expand_fp_vec_cmp (rtx operands[]) - { - rtx_code code = GET_CODE (operands[1]); - loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); -@@ -9845,61 +10278,16 @@ loongarch_expand_fp_vec_cmp(rtx operands[]) - } - - --/* Implement TARGET_CASE_VALUES_THRESHOLD. */ -- --unsigned int --loongarch_case_values_threshold (void) --{ -- return default_case_values_threshold (); --} -- -- - /* Implement TARGET_SPILL_CLASS. */ - - static reg_class_t - loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED, -- machine_mode mode ATTRIBUTE_UNUSED) -+ machine_mode mode ATTRIBUTE_UNUSED) - { - return NO_REGS; - } - --/* Implement TARGET_LRA_P. */ -- --static bool --loongarch_lra_p (void) --{ -- return loongarch_lra_flag; --} -- --/* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS. */ -- --static reg_class_t --loongarch_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class, -- reg_class_t best_class ATTRIBUTE_UNUSED) --{ -- /* LRA will allocate an FPR for an integer mode pseudo instead of spilling -- to memory if an FPR is present in the allocno class. It is rare that -- we actually need to place an integer mode value in an FPR so where -- possible limit the allocation to GR_REGS. This will slightly pessimize -- code that involves integer to/from float conversions as these will have -- to reload into FPRs in LRA. Such reloads are sometimes eliminated and -- sometimes only partially eliminated. We choose to take this penalty -- in order to eliminate usage of FPRs in code that does not use floating -- point data. 
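[Editor's note: the tail of loongarch_expand_vec_cond_expr above computes the select purely with bit operations, relying on the comparison producing all-ones or all-zeros lanes. A one-line scalar model of that bsel combination, illustrative rather than the expander itself:]

#include <stdint.h>
#include <stdio.h>

/* (mask & if_true) | (~mask & if_false), applied per lane.  */
static uint64_t
bsel_model (uint64_t mask, uint64_t if_true, uint64_t if_false)
{
  return (mask & if_true) | (~mask & if_false);
}

int
main (void)
{
  /* An all-ones mask selects if_true, an all-zeros mask if_false.  */
  printf ("%llx %llx\n",
          (unsigned long long) bsel_model (~0ULL, 0x1111, 0x2222),
          (unsigned long long) bsel_model (0, 0x1111, 0x2222));
  return 0;
}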
-- -- This change has a similar effect to increasing the cost of FPR->GPR -- register moves for integer modes so that they are higher than the cost -- of memory but changing the allocno class is more reliable. -- -- This is also similar to forbidding integer mode values in FPRs entirely -- but this would lead to an inconsistency in the integer to/from float -- instructions that say integer mode values must be placed in FPRs. */ -- if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno)) && allocno_class == ALL_REGS) -- return GR_REGS; -- return allocno_class; --} -- --/* Implement TARGET_PROMOTE_FUNCTION_MODE */ -+/* Implement TARGET_PROMOTE_FUNCTION_MODE. */ - - /* This function is equivalent to default_promote_function_mode_always_promote - except that it returns a promoted mode even if type is NULL_TREE. This is -@@ -9909,10 +10297,10 @@ loongarch_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class, - - static machine_mode - loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, -- machine_mode mode, -- int *punsignedp ATTRIBUTE_UNUSED, -- const_tree fntype ATTRIBUTE_UNUSED, -- int for_return ATTRIBUTE_UNUSED) -+ machine_mode mode, -+ int *punsignedp ATTRIBUTE_UNUSED, -+ const_tree fntype ATTRIBUTE_UNUSED, -+ int for_return ATTRIBUTE_UNUSED) - { - int unsignedp; - -@@ -9933,16 +10321,6 @@ loongarch_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec) - return !TARGET_64BIT || inprec <= 32 || outprec > 32; - } - --/* Implement TARGET_CONSTANT_ALIGNMENT. */ -- --static HOST_WIDE_INT --loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align) --{ -- if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR) -- return MAX (align, BITS_PER_WORD); -- return align; --} -- - /* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info - for details about the frame layout. */ - -@@ -9963,8 +10341,10 @@ loongarch_la464_128_store_p (rtx operands[]) - int offset1; - rtx dst0 = operands[0]; - rtx dst1 = operands[2]; -+ /* - rtx src0 = operands[1]; - rtx src1 = operands[3]; -+ */ - int base_reg0; - int base_reg1; - -@@ -10030,13 +10410,15 @@ loongarch_la464_128_load_p (rtx operands[]) - int offset0; - int offset1; - rtx dst0 = operands[0]; -+ /* - rtx dst1 = operands[2]; -+ */ - rtx src0 = operands[1]; - rtx src1 = operands[3]; - int base_reg0; - int base_reg1; - int dst_reg0; -- -+ - dst_reg0 = REGNO (dst0); - - if (GET_CODE (XEXP (src0, 0)) == PLUS) -@@ -10209,6 +10591,138 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - return force_reg (vec_mode, v); - } - -+/* Use rsqrte instruction and Newton-Rhapson to compute the approximation of -+ a single precision floating point [reciprocal] square root. */ -+ -+void loongarch_emit_swrsqrtsf (rtx res, rtx a, machine_mode mode, bool recip) -+{ -+ rtx x0, e0, e1, e2, mhalf, monehalf; -+ REAL_VALUE_TYPE r; -+ machine_mode imode; -+ int unspec; -+ -+ x0 = gen_reg_rtx (mode); -+ e0 = gen_reg_rtx (mode); -+ e1 = gen_reg_rtx (mode); -+ e2 = gen_reg_rtx (mode); -+ -+ real_arithmetic (&r, ABS_EXPR, &dconsthalf, NULL); -+ mhalf = const_double_from_real_value (r, SFmode); -+ -+ real_arithmetic (&r, PLUS_EXPR, &dconsthalf, &dconst1); -+ monehalf = const_double_from_real_value (r, SFmode); -+ unspec = UNSPEC_RSQRTE; -+ -+ if (VECTOR_MODE_P (mode)) -+ { -+ mhalf = loongarch_build_const_vector (mode, true, mhalf); -+ monehalf = loongarch_build_const_vector (mode, true, monehalf); -+ if (GET_MODE_SIZE (mode) == 32) -+ imode = mode == V4DFmode ? 
V4DImode : V8SImode; -+ if (GET_MODE_SIZE (mode) == 16) -+ imode = mode == V2DFmode ? V2DImode : V4SImode; -+ } -+ -+ /* rsqrt(a) = rsqrte(a) * (1.5 - 0.5 * a * rsqrte(a) * rsqrte(a)) -+ sqrt(a) = a * rsqrte(a) * (1.5 - 0.5 * a * rsqrte(a) * rsqrte(a))*/ -+ -+ a = force_reg (mode, a); -+ -+ /* x0 = rsqrt(a) estimate */ -+ emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, a), -+ unspec))); -+ -+ /* If (a == 0.0) Filter out infinity to prevent NaN for sqrt(0.0). */ -+ if (!recip) -+ { -+ rtx zero = force_reg (mode, CONST0_RTX(mode)); -+ -+ if (VECTOR_MODE_P (mode)) -+ { -+ rtx mask = gen_reg_rtx (imode); -+ emit_insn (gen_rtx_SET (mask, gen_rtx_NE (imode, a, zero))); -+ emit_insn (gen_rtx_SET (x0, gen_rtx_AND (mode, x0, gen_lowpart(mode, mask)))); -+ } -+ else -+ { -+ rtx target = emit_conditional_move (x0, GT, a, zero, mode, -+ x0, zero, mode, 0); -+ if (target != x0) -+ emit_move_insn (x0, target); -+ } -+ } -+ -+ /* e0 = x0 * a */ -+ emit_insn (gen_rtx_SET (e0, gen_rtx_MULT (mode, x0, a))); -+ /* e1 = e0 * x0 */ -+ emit_insn (gen_rtx_SET (e1, gen_rtx_MULT (mode, e0, x0))); -+ -+ /* e2 = 1.5 - e1 * 0.5 */ -+ mhalf = force_reg (mode, mhalf); -+ monehalf = force_reg (mode, monehalf); -+ emit_insn (gen_rtx_SET (e2, gen_rtx_FMA (mode, gen_rtx_NEG(mode, e1), mhalf, monehalf))); -+ -+ if (recip) -+ /* res = e2 * x0 */ -+ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, x0, e2))); -+ else -+ /* res = e2 * e0 */ -+ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e2, e0))); -+} -+ -+/* Use recipe instruction and Newton-Rhapson to compute the approximation of -+ a single precision floating point divide. */ -+ -+void loongarch_emit_swdivsf (rtx res, rtx a, rtx b, machine_mode mode) -+{ -+ rtx x0, x1, e0, mtwo; -+ REAL_VALUE_TYPE r; -+ x0 = gen_reg_rtx (mode); -+ e0 = gen_reg_rtx (mode); -+ x1 = gen_reg_rtx (mode); -+ -+ real_arithmetic (&r, ABS_EXPR, &dconst2, NULL); -+ mtwo = const_double_from_real_value (r, SFmode); -+ -+ if (VECTOR_MODE_P (mode)) -+ mtwo = loongarch_build_const_vector (mode, true, mtwo); -+ -+ mtwo = force_reg (mode, mtwo); -+ -+ /* a / b = a * recipe(b) * (2.0 - b * recipe(b)) */ -+ -+ /* x0 = 1./b estimate */ -+ emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, b), -+ UNSPEC_RECIPE))); -+ /* 2.0 - b * x0; */ -+ emit_insn (gen_rtx_SET (e0, gen_rtx_FMA (mode,gen_rtx_NEG(mode, b), x0, mtwo))); -+ -+ /* x1 = x0 * e0 */ -+ emit_insn (gen_rtx_SET (x1, gen_rtx_MULT (mode, x0, e0))); -+ -+ /* res = a * x1 */ -+ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, a, x1))); -+} -+ -+/* LoongArch only implements preld hint=0 (prefetch for load) and hint=8 -+ (prefetch for store), other hint just scale to hint = 0 and hint = 1. */ -+ -+rtx -+loongarch_prefetch_cookie (rtx write, rtx locality) -+{ -+ if (INTVAL (locality) == 1 && INTVAL (write) == 0) -+ return GEN_INT (INTVAL (write) + 2); -+ -+ /* store. */ -+ if (INTVAL (write) == 1) -+ return GEN_INT (INTVAL (write) + 7); -+ -+ /* load. */ -+ if (INTVAL (write) == 0) -+ return GEN_INT (INTVAL (write)); -+ -+ gcc_unreachable (); -+} - - - /* Initialize the GCC target structure. 
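[Editor's note: loongarch_emit_swrsqrtsf and loongarch_emit_swdivsf above share one shape: take the hardware estimate (the UNSPEC_RSQRTE/UNSPEC_RECIPE patterns), then apply a single Newton-Raphson correction built from an FMA. The scalar sketch below reproduces only that arithmetic; the bit-level seed is a stand-in for the hardware estimate instruction and is not what the expander emits.]

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* rsqrt(a) ~= x0 * (1.5 - 0.5 * a * x0 * x0), one refinement step.  */
static float
rsqrt_refined (float a)
{
  uint32_t bits;
  float x0;

  memcpy (&bits, &a, sizeof bits);
  bits = 0x5f3759df - (bits >> 1);           /* crude stand-in estimate */
  memcpy (&x0, &bits, sizeof x0);

  return x0 * (1.5f - 0.5f * a * x0 * x0);   /* e2 * x0 in the expander */
}

/* a / b ~= a * x0 * (2.0 - b * x0), given a rough estimate x0 ~= 1/b.  */
static float
div_refined (float a, float b, float x0)
{
  return a * (x0 * (2.0f - b * x0));         /* a * x1 in the expander */
}

int
main (void)
{
  printf ("%f (exact 0.5)\n", rsqrt_refined (4.0f));
  printf ("%f (exact 0.333333)\n", div_refined (1.0f, 3.0f, 0.3f));
  return 0;
}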
*/ -@@ -10225,10 +10739,6 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_LEGITIMIZE_ADDRESS - #define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address - --#undef TARGET_ASM_FUNCTION_PROLOGUE --#define TARGET_ASM_FUNCTION_PROLOGUE loongarch_output_function_prologue --#undef TARGET_ASM_FUNCTION_EPILOGUE --#define TARGET_ASM_FUNCTION_EPILOGUE loongarch_output_function_epilogue - #undef TARGET_ASM_SELECT_RTX_SECTION - #define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section - #undef TARGET_ASM_FUNCTION_RODATA_SECTION -@@ -10249,19 +10759,12 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD - #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ - loongarch_multipass_dfa_lookahead --#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P --#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \ -- loongarch_small_register_classes_for_mode_p - - #undef TARGET_FUNCTION_OK_FOR_SIBCALL - #define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall - --#undef TARGET_INSERT_ATTRIBUTES --#define TARGET_INSERT_ATTRIBUTES loongarch_insert_attributes --#undef TARGET_MERGE_DECL_ATTRIBUTES --#define TARGET_MERGE_DECL_ATTRIBUTES loongarch_merge_decl_attributes --#undef TARGET_CAN_INLINE_P --#define TARGET_CAN_INLINE_P loongarch_can_inline_p -+#undef TARGET_GET_DRAP_RTX -+#define TARGET_GET_DRAP_RTX loongarch_get_drap_rtx - - #undef TARGET_VALID_POINTER_MODE - #define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode -@@ -10276,43 +10779,49 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST - #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ - loongarch_builtin_vectorization_cost -+#undef TARGET_VECTORIZE_ADD_STMT_COST -+#define TARGET_VECTORIZE_ADD_STMT_COST loongarch_add_stmt_cost - -+#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT -+#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT loongarch_builtin_support_vector_misalignment -+#undef TARGET_MODE_REP_EXTENDED -+#define TARGET_MODE_REP_EXTENDED loongarch_mode_rep_extended - - #undef TARGET_IN_SMALL_DATA_P - #define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p - --#undef TARGET_MACHINE_DEPENDENT_REORG --#define TARGET_MACHINE_DEPENDENT_REORG loongarch_reorg -- --#undef TARGET_PREFERRED_RELOAD_CLASS -+#undef TARGET_PREFERRED_RELOAD_CLASS - #define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class - --#undef TARGET_EXPAND_TO_RTL_HOOK --#define TARGET_EXPAND_TO_RTL_HOOK loongarch_expand_to_rtl_hook --#undef TARGET_ASM_FILE_START --#define TARGET_ASM_FILE_START loongarch_file_start - #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE - #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true - - #undef TARGET_EXPAND_BUILTIN_VA_START - #define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start - --#undef TARGET_PROMOTE_FUNCTION_MODE -+#undef TARGET_PROMOTE_FUNCTION_MODE - #define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode - #undef TARGET_RETURN_IN_MEMORY - #define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory - -+#undef TARGET_FUNCTION_VALUE -+#define TARGET_FUNCTION_VALUE loongarch_function_value -+#undef TARGET_LIBCALL_VALUE -+#define TARGET_LIBCALL_VALUE loongarch_libcall_value -+ - #undef TARGET_ASM_OUTPUT_MI_THUNK - #define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk - #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK --#define TARGET_ASM_CAN_OUTPUT_MI_THUNK 
hook_bool_const_tree_hwi_hwi_const_tree_true -+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ -+ hook_bool_const_tree_hwi_hwi_const_tree_true - - #undef TARGET_PRINT_OPERAND - #define TARGET_PRINT_OPERAND loongarch_print_operand - #undef TARGET_PRINT_OPERAND_ADDRESS - #define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address - #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P --#define TARGET_PRINT_OPERAND_PUNCT_VALID_P loongarch_print_operand_punct_valid_p -+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P \ -+ loongarch_print_operand_punct_valid_p - - #undef TARGET_SETUP_INCOMING_VARARGS - #define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs -@@ -10344,6 +10853,10 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \ - loongarch_autovectorize_vector_sizes - -+#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION -+#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \ -+ loongarch_builtin_vectorized_function -+ - #undef TARGET_INIT_BUILTINS - #define TARGET_INIT_BUILTINS loongarch_init_builtins - #undef TARGET_BUILTIN_DECL -@@ -10351,8 +10864,11 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_EXPAND_BUILTIN - #define TARGET_EXPAND_BUILTIN loongarch_expand_builtin - -+/* The generic ELF target does not always have TLS support. */ -+#ifdef HAVE_AS_TLS - #undef TARGET_HAVE_TLS - #define TARGET_HAVE_TLS HAVE_AS_TLS -+#endif - - #undef TARGET_CANNOT_FORCE_CONST_MEM - #define TARGET_CANNOT_FORCE_CONST_MEM loongarch_cannot_force_const_mem -@@ -10360,35 +10876,24 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_LEGITIMATE_CONSTANT_P - #define TARGET_LEGITIMATE_CONSTANT_P loongarch_legitimate_constant_p - --#undef TARGET_ENCODE_SECTION_INFO --#define TARGET_ENCODE_SECTION_INFO loongarch_encode_section_info -- --#undef TARGET_ATTRIBUTE_TABLE --#define TARGET_ATTRIBUTE_TABLE loongarch_attribute_table - /* All our function attributes are related to how out-of-line copies should - be compiled or called. They don't in themselves prevent inlining. 
*/ - #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P - #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true - - #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P --#define TARGET_USE_BLOCKS_FOR_CONSTANT_P loongarch_use_blocks_for_constant_p --#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P --#define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p -- --#undef TARGET_COMP_TYPE_ATTRIBUTES --#define TARGET_COMP_TYPE_ATTRIBUTES loongarch_comp_type_attributes -+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true - - #ifdef HAVE_AS_DTPRELWORD - #undef TARGET_ASM_OUTPUT_DWARF_DTPREL - #define TARGET_ASM_OUTPUT_DWARF_DTPREL loongarch_output_dwarf_dtprel - #endif --#undef TARGET_DWARF_REGISTER_SPAN --#define TARGET_DWARF_REGISTER_SPAN loongarch_dwarf_register_span --#undef TARGET_DWARF_FRAME_REG_MODE --#define TARGET_DWARF_FRAME_REG_MODE loongarch_dwarf_frame_reg_mode - - #undef TARGET_LEGITIMATE_ADDRESS_P --#define TARGET_LEGITIMATE_ADDRESS_P loongarch_legitimate_address_p -+#define TARGET_LEGITIMATE_ADDRESS_P loongarch_legitimate_address_p -+ -+#undef TARGET_COMPUTE_FRAME_LAYOUT -+#define TARGET_COMPUTE_FRAME_LAYOUT loongarch_compute_frame_info - - #undef TARGET_FRAME_POINTER_REQUIRED - #define TARGET_FRAME_POINTER_REQUIRED loongarch_frame_pointer_required -@@ -10402,18 +10907,12 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_TRAMPOLINE_INIT - #define TARGET_TRAMPOLINE_INIT loongarch_trampoline_init - --#undef TARGET_SHIFT_TRUNCATION_MASK --#define TARGET_SHIFT_TRUNCATION_MASK loongarch_shift_truncation_mask -- - #undef TARGET_VECTORIZE_VEC_PERM_CONST - #define TARGET_VECTORIZE_VEC_PERM_CONST loongarch_vectorize_vec_perm_const - - #undef TARGET_SCHED_REASSOCIATION_WIDTH - #define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width - --#undef TARGET_CASE_VALUES_THRESHOLD --#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold -- - #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV - #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv - -@@ -10422,13 +10921,6 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - - #undef TARGET_SPILL_CLASS - #define TARGET_SPILL_CLASS loongarch_spill_class --#undef TARGET_LRA_P --#define TARGET_LRA_P loongarch_lra_p --#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS --#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS loongarch_ira_change_pseudo_allocno_class -- --#undef TARGET_HARD_REGNO_SCRATCH_OK --#define TARGET_HARD_REGNO_SCRATCH_OK loongarch_hard_regno_scratch_ok - - #undef TARGET_HARD_REGNO_NREGS - #define TARGET_HARD_REGNO_NREGS loongarch_hard_regno_nregs -@@ -10445,9 +10937,6 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS - #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2 - --#undef TARGET_SECONDARY_MEMORY_NEEDED --#define TARGET_SECONDARY_MEMORY_NEEDED loongarch_secondary_memory_needed -- - #undef TARGET_CAN_CHANGE_MODE_CLASS - #define TARGET_CAN_CHANGE_MODE_CLASS loongarch_can_change_mode_class - -@@ -10460,6 +10949,9 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) - #undef TARGET_STARTING_FRAME_OFFSET - #define TARGET_STARTING_FRAME_OFFSET loongarch_starting_frame_offset - -+#undef TARGET_SECONDARY_RELOAD -+#define TARGET_SECONDARY_RELOAD loongarch_secondary_reload -+ - struct gcc_target targetm = TARGET_INITIALIZER; -- -+ - #include "gt-loongarch.h" -diff --git a/gcc/config/loongarch/loongarch.h 
b/gcc/config/loongarch/loongarch.h -index 18d17afb8..1b26230cb 100644 ---- a/gcc/config/loongarch/loongarch.h -+++ b/gcc/config/loongarch/loongarch.h -@@ -1,9 +1,7 @@ --/* Definitions of target machine for GNU compiler. LARCH version. -- Copyright (C) 1989-2018 Free Software Foundation, Inc. -- Contributed by A. Lichnewsky (lich@inria.inria.fr). -- Changed by Michael Meissner (meissner@osf.org). -- 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and -- Brendan Eich (brendan@microunity.com). -+/* Definitions of target machine for GNU compiler. LoongArch version. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Technology Co. Ltd. -+ Based on MIPS and RISC-V target for GNU compiler. - - This file is part of GCC. - -@@ -21,318 +19,36 @@ You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING3. If not see - . */ - -- --#include "config/vxworks-dummy.h" -- --#ifdef GENERATOR_FILE --/* This is used in some insn conditions, so needs to be declared, but -- does not need to be defined. */ --extern int target_flags_explicit; --#endif -- --/* LARCH external variables defined in loongarch.c. */ -- --/* Which ABI to use. ABILP32 (original 32, or o32), ABILPX32 (n32), -- ABILP64 (n64) are all defined by SGI. */ -- --#define ABILP32 0 --#define ABILPX32 1 --#define ABILP64 2 -- --/* Information about one recognized processor. Defined here for the -- benefit of TARGET_CPU_CPP_BUILTINS. */ --struct loongarch_cpu_info { -- /* The 'canonical' name of the processor as far as GCC is concerned. -- It's typically a manufacturer's prefix followed by a numerical -- designation. It should be lowercase. */ -- const char *name; -- -- /* The internal processor number that most closely matches this -- entry. Several processors can have the same value, if there's no -- difference between them from GCC's point of view. */ -- enum processor cpu; -- -- /* The ISA level that the processor implements. */ -- int isa; -- -- /* A mask of PTF_* values. */ -- unsigned int tune_flags; --}; -+/* LoongArch external variables defined in loongarch.c. */ - - #include "config/loongarch/loongarch-opts.h" - - /* Macros to silence warnings about numbers being signed in traditional - C and unsigned in ISO C when compiled on 32-bit hosts. */ - --#define BITMASK_HIGH (((unsigned long)1) << 31) /* 0x80000000 */ --#define BITMASK_UPPER16 ((unsigned long)0xffff << 16) /* 0xffff0000 */ --#define BITMASK_LOWER16 ((unsigned long)0xffff) /* 0x0000ffff */ -+#define BITMASK_HIGH (((unsigned long) 1) << 31) /* 0x80000000 */ - -- - /* Run-time compilation parameters selecting different hardware subsets. */ - --/* True if we are generating position-independent VxWorks RTP code. */ --#define TARGET_RTP_PIC (TARGET_VXWORKS_RTP && flag_pic) -- --/* True if we can optimize sibling calls. For simplicity, we only -- handle cases in which call_insn_operand will reject invalid -- sibcall addresses. There are two cases in which this isn't true: -- -- - TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS. call_insn_operand -- accepts global constants, but all sibcalls must be indirect. */ --#define TARGET_SIBCALLS (1) -- --/* True if we can use the J and JAL instructions. */ --#define TARGET_ABSOLUTE_JUMPS (!flag_pic) -- --/* True if the output must have a writable .eh_frame. -- See ASM_PREFERRED_EH_DATA_FORMAT for details. 
*/ --#ifdef HAVE_LD_PERSONALITY_RELAXATION --#define TARGET_WRITABLE_EH_FRAME 0 --#else --#define TARGET_WRITABLE_EH_FRAME (flag_pic && TARGET_SHARED) --#endif -- -- --/* ISA has LSA available. */ --#define ISA_HAS_LSA (1) -- --/* ISA has DLSA available. */ --#define ISA_HAS_DLSA (TARGET_64BIT) -- --/* Architecture target defines. */ --#define TARGET_LOONGARCH64 (loongarch_arch == PROCESSOR_LOONGARCH64) --#define TUNE_LOONGARCH64 (loongarch_tune == PROCESSOR_LOONGARCH64) --#define TARGET_LA464 (loongarch_arch == PROCESSOR_LA464) --#define TUNE_LA464 (loongarch_tune == PROCESSOR_LA464) --/* True if the pre-reload scheduler should try to create chains of -- multiply-add or multiply-subtract instructions. For example, -- suppose we have: -- -- t1 = a * b -- t2 = t1 + c * d -- t3 = e * f -- t4 = t3 - g * h -- -- t1 will have a higher priority than t2 and t3 will have a higher -- priority than t4. However, before reload, there is no dependence -- between t1 and t3, and they can often have similar priorities. -- The scheduler will then tend to prefer: -- -- t1 = a * b -- t3 = e * f -- t2 = t1 + c * d -- t4 = t3 - g * h -- -- which stops us from making full use of macc/madd-style instructions. -- This sort of situation occurs frequently in Fourier transforms and -- in unrolled loops. -- -- To counter this, the TUNE_MACC_CHAINS code will reorder the ready -- queue so that chained multiply-add and multiply-subtract instructions -- appear ahead of any other instruction that is likely to clobber lo. -- In the example above, if t2 and t3 become ready at the same time, -- the code ensures that t2 is scheduled first. -- -- Multiply-accumulate instructions are a bigger win for some targets -- than others, so this macro is defined on an opt-in basis. */ --#define TUNE_MACC_CHAINS 0 -- --#define TARGET_OLDABI (loongarch_abi == ABILP32) --#define TARGET_NEWABI (loongarch_abi == ABILPX32 || loongarch_abi == ABILP64) -- --/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is -- directly accessible, while the command-line options select -- TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI -- in use. */ --#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_ABI) --#define TARGET_SOFT_FLOAT (TARGET_SOFT_FLOAT_ABI) -- --/* False if SC acts as a memory barrier with respect to itself, -- otherwise a SYNC will be emitted after SC for atomic operations -- that require ordering between the SC and following loads and -- stores. It does not tell anything about ordering of loads and -- stores prior to and following the SC, only about the SC itself and -- those loads and stores follow it. */ --#define TARGET_SYNC_AFTER_SC (1) -- --/* Define preprocessor macros for the -march and -mtune options. -- PREFIX is either _LARCH_ARCH or _LARCH_TUNE, INFO is the selected -- processor. If INFO's canonical name is "foo", define PREFIX to -- be "foo", and define an additional macro PREFIX_FOO. */ --#define LARCH_CPP_SET_PROCESSOR(PREFIX, INFO) \ -- do \ -- { \ -- char *macro, *p; \ -- \ -- macro = concat ((PREFIX), "_", (INFO)->name, NULL); \ -- for (p = macro; *p != 0; p++) \ -- if (*p == '+') \ -- *p = 'P'; \ -- else \ -- *p = TOUPPER (*p); \ -- \ -- builtin_define (macro); \ -- builtin_define_with_value ((PREFIX), (INFO)->name, 1); \ -- free (macro); \ -- } \ -- while (0) -- - /* Target CPU builtins. */ --#define TARGET_CPU_CPP_BUILTINS() loongarch_cpu_cpp_builtins (pfile) -- --/* Target CPU versions for D. 
*/ --#define TARGET_D_CPU_VERSIONS loongarch_d_target_versions -+#define TARGET_CPU_CPP_BUILTINS() loongarch_cpu_cpp_builtins (pfile) - --/* Default target_flags if no switches are specified */ -- --#ifndef TARGET_DEFAULT --#define TARGET_DEFAULT 0 --#endif -- --#ifndef TARGET_CPU_DEFAULT --#define TARGET_CPU_DEFAULT 0 --#endif -+/* Default target_flags if no switches are specified. */ - - #ifdef IN_LIBGCC2 - #undef TARGET_64BIT --/* Make this compile time constant for libgcc2 */ -+/* Make this compile time constant for libgcc2. */ - #ifdef __loongarch64 --#define TARGET_64BIT 1 -+#define TARGET_64BIT 1 - #else --#define TARGET_64BIT 0 -+#define TARGET_64BIT 0 - #endif --#endif /* IN_LIBGCC2 */ -+#endif /* IN_LIBGCC2 */ - - #define TARGET_LIBGCC_SDATA_SECTION ".sdata" - --#ifndef MULTILIB_ISA_DEFAULT --#if LARCH_ISA_DEFAULT == 0 --#define MULTILIB_ISA_DEFAULT "loongarch64" --#endif --#endif -- --#ifndef LARCH_ABI_DEFAULT --#define LARCH_ABI_DEFAULT ABILP32 --#endif -- --/* Use the most portable ABI flag for the ASM specs. */ -- --#if LARCH_ABI_DEFAULT == ABILP32 --#define MULTILIB_ABI_DEFAULT "mabi=lp32" --#elif LARCH_ABI_DEFAULT == ABILP64 --#define MULTILIB_ABI_DEFAULT "mabi=lp64" --#endif -- --#ifndef MULTILIB_DEFAULTS --#define MULTILIB_DEFAULTS \ -- {MULTILIB_ISA_DEFAULT, MULTILIB_ABI_DEFAULT } --#endif -- --/* A spec condition that matches all -loongarch arguments. */ -- --#define LARCH_ISA_LEVEL_OPTION_SPEC \ -- "loongarch" -- --/* A spec condition that matches all architecture arguments. */ -- --#define LARCH_ARCH_OPTION_SPEC \ -- LARCH_ISA_LEVEL_OPTION_SPEC "|march=*" -- --/* A spec that infers a -loongarch argument from an -march argument. */ -- --#define LARCH_ISA_LEVEL_SPEC \ -- "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;:}" -- --/* A spec that injects the default multilib ISA if no architecture is -- specified. */ -- --#define LARCH_DEFAULT_ISA_LEVEL_SPEC \ -- "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;: \ -- %{!march=*: -" MULTILIB_ISA_DEFAULT "}}" -- --/* A spec that infers a -mhard-float or -msoft-float setting from an -- -march argument. Note that soft-float and hard-float code are not -- link-compatible. */ -- --#define LARCH_ARCH_FLOAT_SPEC \ -- "%{mhard-float|msoft-float|mno-float|march=loongarch*:; \ -- march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \ -- |march=34kc|march=34kn|march=74kc|march=1004kc|march=5kc \ -- |march=m14k*|march=m5101|march=octeon|march=xlr: -msoft-float; \ -- march=*: -mhard-float}" -- --/* A spec condition that matches 32-bit options. It only works if -- LARCH_ISA_LEVEL_SPEC has been applied. */ -- --#define LARCH_32BIT_OPTION_SPEC \ -- "loongarch1|loongarch2|loongarch32*|mgp32" -- --#if (LARCH_ABI_DEFAULT == ABILPX32 \ -- || LARCH_ABI_DEFAULT == ABILP64) --#define OPT_ARCH64 "mabi=32|mgp32:;" --#define OPT_ARCH32 "mabi=32|mgp32" --#else --#define OPT_ARCH64 "mabi=o64|mabi=n32|mabi=64|mgp64" --#define OPT_ARCH32 "mabi=o64|mabi=n32|mabi=64|mgp64:;" --#endif -- --/* Support for a compile-time default CPU, et cetera. The rules are: -- --with-arch is ignored if -march is specified or a -loongarch is specified -- ; likewise --with-arch-32 and --with-arch-64. -- --with-tune is ignored if -mtune is specified; likewise -- --with-tune-32 and --with-tune-64. -- --with-abi is ignored if -mabi is specified. -- --with-float is ignored if -mhard-float or -msoft-float are -- specified. -- --with-fpu is ignored if -msoft-float, -msingle-float or -mdouble-float are -- specified. 
-- --with-fp-32 is ignored if -msoft-float, -msingle-float, -mlsx or -mfp are -- specified. -- --with-divide is ignored if -mdivide-traps or -mdivide-breaks are -- specified. */ --#define OPTION_DEFAULT_SPECS \ -- {"arch", "%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \ -- {"arch_32", "%{" OPT_ARCH32 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ -- {"arch_64", "%{" OPT_ARCH64 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ -- {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ -- {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ -- {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ -- {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \ -- {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \ -- {"fpu", "%{!msoft-float:%{!msingle-float:%{!mdouble-float:-m%(VALUE)-float}}}" }, \ -- {"fp_32", "%{" OPT_ARCH32 \ -- ":%{!msoft-float:%{!msingle-float:%{!mfp*:%{!mlsx:%{!mloongson-asx:-mfp%(VALUE)}}}}}" }, \ -- {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" } -- --/* A spec that infers the: -- -mlsx setting from a -march=la464 argument. -- -mlasx setting from a -march=la464 argument. */ --#define BASE_DRIVER_SELF_SPECS \ -- LARCH_ASE_LSX_SPEC \ -- LARCH_ASE_LASX_SPEC -- --#define LARCH_ASE_LSX_SPEC \ -- "%{!mno-lsx: \ -- %{march=la464: -mlsx}}" -- --#define LARCH_ASE_LASX_SPEC \ -- "%{!mno-lasx: \ -- %{march=la464: -mlasx}}" -- --#define DRIVER_SELF_SPECS \ -- BASE_DRIVER_SELF_SPECS -- --/* from N_LARCH */ --#define ABI_SPEC \ -- "%{mabi=lp32:32}" \ -- "%{mabi=lp64:64}" \ -- --#define STARTFILE_PREFIX_SPEC \ -- "/lib" ABI_SPEC "/ " \ -- "/usr/lib" ABI_SPEC "/ " \ -- "/lib/ " \ -- "/usr/lib/ " -+/* Driver native functions for SPEC processing in the GCC driver. */ -+#include "loongarch-driver.h" - - /* This definition replaces the formerly used 'm' constraint with a - different constraint letter in order to avoid changing semantics of -@@ -341,71 +57,11 @@ struct loongarch_cpu_info { - must not be used in insn definitions or inline assemblies. */ - #define TARGET_MEM_CONSTRAINT 'w' - --/* True if the file format uses 64-bit symbols. At present, this is -- only true for n64, which uses 64-bit ELF. */ --#define FILE_HAS_64BIT_SYMBOLS (loongarch_abi == ABILP64) -- --/* True if symbols are 64 bits wide. This is usually determined by -- the ABI's file format, but it can be overridden by -msym32. Note that -- overriding the size with -msym32 changes the ABI of relocatable objects, -- although it doesn't change the ABI of a fully-linked object. */ --#define ABI_HAS_64BIT_SYMBOLS (FILE_HAS_64BIT_SYMBOLS \ -- && Pmode == DImode) -- --/* ISA supports instructions DMUL, DMULU, DMUH, DMUHU. */ --#define ISA_HAS_DMUL (TARGET_64BIT) -- --/* ISA has floating-point RECIP.fmt and RSQRT.fmt instructions. The -- LARCH64 rev. 1 ISA says that RECIP.D and RSQRT.D are unpredictable when -- doubles are stored in pairs of FPRs, so for safety's sake, we apply -- this restriction to the LARCH IV ISA too. */ --#define ISA_HAS_FP_RECIP_RSQRT(MODE) \ -- ((MODE) == SFmode \ -- || (TARGET_FLOAT64 \ -- && (MODE) == DFmode)) -- --/* The LSX ASE is available. */ --#define ISA_HAS_LSX (TARGET_LSX) -- --/* The LASX ASE is available. */ --#define ISA_HAS_LASX (TARGET_LASX) -- - /* Tell collect what flags to pass to nm. */ - #ifndef NM_FLAGS - #define NM_FLAGS "-Bn" - #endif - -- --/* SUBTARGET_ASM_DEBUGGING_SPEC handles passing debugging options to -- the assembler. It may be overridden by subtargets. 
-- -- Beginning with gas 2.13, -mdebug must be passed to correctly handle -- COFF debugging info. */ -- --#ifndef SUBTARGET_ASM_DEBUGGING_SPEC --#define SUBTARGET_ASM_DEBUGGING_SPEC "\ --%{g} %{g0} %{g1} %{g2} %{g3} \ --%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \ --%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \ --%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3}" --#endif -- --/* FP_ASM_SPEC represents the floating-point options that must be passed -- to the assembler when FPXX support exists. Prior to that point the -- assembler could accept the options but were not required for -- correctness. We only add the options when absolutely necessary -- because passing -msoft-float to the assembler will cause it to reject -- all hard-float instructions which may require some user code to be -- updated. */ -- --#ifdef HAVE_AS_DOT_MODULE --#define FP_ASM_SPEC "\ --%{mhard-float} %{msoft-float} \ --%{msingle-float} %{mdouble-float}" --#else --#define FP_ASM_SPEC --#endif -- - /* SUBTARGET_ASM_SPEC is always passed to the assembler. It may be - overridden by subtargets. */ - -@@ -414,29 +70,21 @@ struct loongarch_cpu_info { - #endif - - #undef ASM_SPEC --#define ASM_SPEC "\ --%{mabi=*} %{!mabi=*: %(asm_abi_default_spec)} \ --" -+#define ASM_SPEC "%{mabi=lp64d:-mabi=lp64} %{subtarget_asm_spec}" -+ - /* Extra switches sometimes passed to the linker. */ - - #ifndef LINK_SPEC - #define LINK_SPEC "" --#endif /* LINK_SPEC defined */ -- -+#endif /* LINK_SPEC defined */ - --/* Specs for the compiler proper */ -- --/* SUBTARGET_CC1_SPEC is passed to the compiler proper. It may be -- overridden by subtargets. */ --#ifndef SUBTARGET_CC1_SPEC --#define SUBTARGET_CC1_SPEC "" --#endif -+/* Specs for the compiler proper. */ - - /* CC1_SPEC is the set of arguments to pass to the compiler proper. */ - - #undef CC1_SPEC - #define CC1_SPEC "\ --%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}} \ -+%{G*} \ - %(subtarget_cc1_spec)" - - /* Preprocessor specs. */ -@@ -459,63 +107,38 @@ struct loongarch_cpu_info { - - Do not define this macro if it does not need to do anything. */ - --#define EXTRA_SPECS \ -- { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC }, \ -- { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \ -- { "subtarget_asm_debugging_spec", SUBTARGET_ASM_DEBUGGING_SPEC }, \ -- { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \ -- { "asm_abi_default_spec", "-" MULTILIB_ABI_DEFAULT }, \ -- SUBTARGET_EXTRA_SPECS -- --#ifndef SUBTARGET_EXTRA_SPECS --#define SUBTARGET_EXTRA_SPECS --#endif -- --#define DBX_DEBUGGING_INFO 1 /* generate stabs (OSF/rose) */ --#define DWARF2_DEBUGGING_INFO 1 /* dwarf2 debugging info */ -- --#ifndef PREFERRED_DEBUGGING_TYPE --#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG --#endif -- --/* The size of DWARF addresses should be the same as the size of symbols -- in the target file format. They shouldn't depend on things like -msym32, -- because many DWARF consumers do not allow the mixture of address sizes -- that one would then get from linking -msym32 code with -msym64 code. --*/ --#define DWARF2_ADDR_SIZE (FILE_HAS_64BIT_SYMBOLS ? 8 : 4) -- --/* By default, turn on GDB extensions. */ --#define DEFAULT_GDB_EXTENSIONS 1 -+#define EXTRA_SPECS \ -+ {"subtarget_cc1_spec", SUBTARGET_CC1_SPEC}, \ -+ {"subtarget_cpp_spec", SUBTARGET_CPP_SPEC}, \ -+ {"subtarget_asm_spec", SUBTARGET_ASM_SPEC}, - - /* Registers may have a prefix which can be ignored when matching - user asm and register definitions. 
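[Editor's note: the new ASM_SPEC and CC1_SPEC above are compact. Decoded per GCC's documented spec-string rules, to the best of my reading:]

/* %{mabi=lp64d:-mabi=lp64}   if -mabi=lp64d appears on the command line,
                              substitute -mabi=lp64, the name the
                              assembler expects for the ABI;
   %{G*}                      forward any -G<value> option unchanged;
   %(subtarget_cc1_spec)      splice in the spec string registered under
                              that name via EXTRA_SPECS.
   So a driver invocation such as "gcc -mabi=lp64d foo.c" would reach
   the assembler as "-mabi=lp64" under this spec.  */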
*/ - #ifndef REGISTER_PREFIX --#define REGISTER_PREFIX "$" -+#define REGISTER_PREFIX "$" - #endif - - /* Local compiler-generated symbols must have a prefix that the assembler -- understands. By default, this is $, although some targets (e.g., -- NetBSD-ELF) need to override this. */ -+ understands. */ - --#ifndef LOCAL_LABEL_PREFIX --#define LOCAL_LABEL_PREFIX "$" --#endif -+#define LOCAL_LABEL_PREFIX "." - - /* By default on the loongarch, external symbols do not have an underscore -- prepended, but some targets (e.g., NetBSD) require this. */ -+ prepended. */ - --#ifndef USER_LABEL_PREFIX --#define USER_LABEL_PREFIX "" -+#define USER_LABEL_PREFIX "" -+ -+#ifndef PREFERRED_DEBUGGING_TYPE -+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG - #endif - --/* On Sun 4, this limit is 2048. We use 1500 to be safe, -- since the length can run past this up to a continuation point. */ --#undef DBX_CONTIN_LENGTH --#define DBX_CONTIN_LENGTH 1500 -+/* The size of DWARF addresses should be the same as the size of symbols -+ in the target file format. */ -+#define DWARF2_ADDR_SIZE (TARGET_64BIT ? 8 : 4) - --/* How to renumber registers for dbx and gdb. */ --#define DBX_REGISTER_NUMBER(REGNO) loongarch_dbx_regno[REGNO] -+/* By default, produce dwarf version 2 format debugging output in response -+ to the ‘-g’ option. */ -+#define DWARF2_DEBUGGING_INFO 1 - - /* The mapping from gcc register number to DWARF 2 CFA column number. */ - #define DWARF_FRAME_REGNUM(REGNO) loongarch_dwarf_regno[REGNO] -@@ -530,7 +153,7 @@ struct loongarch_cpu_info { - #define EH_RETURN_DATA_REGNO(N) \ - ((N) < (4) ? (N) + GP_ARG_FIRST : INVALID_REGNUM) - --#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) -+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) - - #define EH_USES(N) loongarch_eh_uses (N) - -@@ -539,19 +162,7 @@ struct loongarch_cpu_info { - SFmode register saves. */ - #define DWARF_CIE_DATA_ALIGNMENT -4 - --/* Correct the offset of automatic variables and arguments. Note that -- the LARCH debug format wants all automatic variables and arguments -- to be in terms of the virtual frame pointer (stack pointer before -- any adjustment in the function), while the LARCH 3.0 linker wants -- the frame pointer to be the stack pointer after the initial -- adjustment. */ -- --#define DEBUGGER_AUTO_OFFSET(X) \ -- loongarch_debugger_offset (X, (HOST_WIDE_INT) 0) --#define DEBUGGER_ARG_OFFSET(OFFSET, X) \ -- loongarch_debugger_offset (X, (HOST_WIDE_INT) OFFSET) -- --/* Target machine storage layout */ -+/* Target machine storage layout. */ - - #define BITS_BIG_ENDIAN 0 - #define BYTES_BIG_ENDIAN 0 -@@ -576,27 +187,19 @@ struct loongarch_cpu_info { - #define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT) - - /* For LARCH, width of a floating point register. */ --#define UNITS_PER_FPREG (TARGET_FLOAT64 ? 8 : 4) -- --/* The number of consecutive floating-point registers needed to store the -- largest format supported by the FPU. */ --#define MAX_FPRS_PER_FMT (TARGET_FLOAT64 || TARGET_SINGLE_FLOAT ? 1 : 2) -- --/* The number of consecutive floating-point registers needed to store the -- smallest format supported by the FPU. */ --#define MIN_FPRS_PER_FMT 1 -+#define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4) - - /* The largest size of value that can be held in floating-point - registers and moved with a single instruction. */ - #define UNITS_PER_HWFPVALUE \ -- (TARGET_SOFT_FLOAT_ABI ? 0 : MAX_FPRS_PER_FMT * UNITS_PER_FPREG) -+ (TARGET_SOFT_FLOAT ? 
0 : UNITS_PER_FPREG) - - /* The largest size of value that can be held in floating-point - registers. */ --#define UNITS_PER_FPVALUE \ -- (TARGET_SOFT_FLOAT_ABI ? 0 \ -- : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \ -- : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) -+#define UNITS_PER_FPVALUE \ -+ (TARGET_SOFT_FLOAT ? 0 \ -+ : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \ -+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) - - /* The number of bytes in a double. */ - #define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT) -@@ -609,7 +212,7 @@ struct loongarch_cpu_info { - - #define FLOAT_TYPE_SIZE 32 - #define DOUBLE_TYPE_SIZE 64 --#define LONG_DOUBLE_TYPE_SIZE (TARGET_NEWABI ? 128 : 64) -+#define LONG_DOUBLE_TYPE_SIZE (TARGET_64BIT ? 128 : 64) - - /* Define the sizes of fixed-point types. */ - #define SHORT_FRACT_TYPE_SIZE 8 -@@ -620,8 +223,6 @@ struct loongarch_cpu_info { - #define SHORT_ACCUM_TYPE_SIZE 16 - #define ACCUM_TYPE_SIZE 32 - #define LONG_ACCUM_TYPE_SIZE 64 --/* FIXME. LONG_LONG_ACCUM_TYPE_SIZE should be 128 bits, but GCC -- doesn't support 128-bit integers for LARCH32 currently. */ - #define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64) - - /* long double is not a fixed mode, but the idea is that, if we -@@ -630,7 +231,7 @@ struct loongarch_cpu_info { - - /* Width in bits of a pointer. */ - #ifndef POINTER_SIZE --#define POINTER_SIZE ((TARGET_64BIT) ? 64 : 32) -+#define POINTER_SIZE (TARGET_64BIT ? 64 : 32) - #endif - - /* Allocation boundary (in *bits*) for storing arguments in argument list. */ -@@ -642,8 +243,8 @@ struct loongarch_cpu_info { - /* Alignment of field after `int : 0' in a structure. */ - #define EMPTY_FIELD_BOUNDARY 32 - --/* Every structure's size must be a multiple of this. */ --/* 8 is observed right on a DECstation and on riscos 4.02. */ -+/* Number of bits which any structure or union's size must be a multiple of. -+ Each structure or union's size is rounded up to a multiple of this. */ - #define STRUCTURE_SIZE_BOUNDARY 8 - - /* There is no point aligning anything to a rounder boundary than -@@ -655,6 +256,9 @@ struct loongarch_cpu_info { - /* All accesses must be aligned. */ - #define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN) - -+/* Glibc align malloc to 128 from glibc/sysdeps/generic/malloc-alignment.h. */ -+#define MALLOC_ABI_ALIGNMENT 128 -+ - /* Define this if you wish to imitate the way many other C compilers - handle alignment of bitfields and the structures that contain - them. -@@ -699,22 +303,17 @@ struct loongarch_cpu_info { - /* We need this for the same reason as DATA_ALIGNMENT, namely to cause - character arrays to be word-aligned so that `strcpy' calls that copy - constants to character arrays can be done inline, and 'strcmp' can be -- optimised to use word loads. */ --#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ -- DATA_ALIGNMENT (TYPE, ALIGN) -- --#define PAD_VARARGS_DOWN \ -- (targetm.calls.function_arg_padding (TYPE_MODE (type), type) == PAD_DOWNWARD) -+ optimised to use word loads. */ -+#define LOCAL_ALIGNMENT(TYPE, ALIGN) DATA_ALIGNMENT (TYPE, ALIGN) - - /* Define if operations between registers always perform the operation - on the full register even if a narrower mode is specified. */ - #define WORD_REGISTER_OPERATIONS 1 - --/* When in 64-bit mode, move insns will sign extend SImode and CCmode -+/* When in 64-bit mode, move insns will sign extend SImode and FCCmode - moves. All other references are zero extended. */ - #define LOAD_EXTEND_OP(MODE) \ -- (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \ -- ? 
SIGN_EXTEND : ZERO_EXTEND) -+ ((TARGET_64BIT && (MODE) == SImode) ? SIGN_EXTEND : UNKNOWN) - - /* Define this macro if it is advisable to hold scalars in registers - in a wider mode than that declared by the program. In such cases, -@@ -722,13 +321,13 @@ struct loongarch_cpu_info { - type, but kept valid in the wider mode. The signedness of the - extension may differ from that of the type. */ - --#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ -- if (GET_MODE_CLASS (MODE) == MODE_INT \ -+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ -+ if (GET_MODE_CLASS (MODE) == MODE_INT \ - && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ -- { \ -- if ((MODE) == SImode) \ -- (UNSIGNEDP) = 0; \ -- (MODE) = Pmode; \ -+ { \ -+ if ((MODE) == SImode) \ -+ (UNSIGNEDP) = 0; \ -+ (MODE) = Pmode; \ - } - - /* Pmode is always the same as ptr_mode, but not always the same as word_mode. -@@ -738,11 +337,11 @@ struct loongarch_cpu_info { - /* Define if loading short immediate values into registers sign extends. */ - #define SHORT_IMMEDIATES_SIGN_EXTEND 1 - --/* The [d]clz instructions have the natural values at 0. */ -+/* The clz.{w/d} instructions have the natural values at 0. */ - - #define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ - ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2) -- -+ - /* Standard register usage. */ - - /* Number of hardware registers. We have: -@@ -757,57 +356,39 @@ struct loongarch_cpu_info { - - #define FIRST_PSEUDO_REGISTER 74 - --/* By default, fix the kernel registers ($26 and $27), the global -- pointer ($28) and the stack pointer ($29). This can change -- depending on the command-line options. -- -- Regarding coprocessor registers: without evidence to the contrary, -- it's best to assume that each coprocessor register has a unique -- use. This can be overridden, in, e.g., loongarch_option_override or -- TARGET_CONDITIONAL_REGISTER_USAGE should the assumption be -- inappropriate for a particular target. */ -- -+/* zero, tp, sp and x are fixed. */ - #define FIXED_REGISTERS \ --{ \ -+{ /* General-purpose registers. */ \ - 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ /* Floating-point registers. */ \ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ /* Others. */ \ - 0, 0, 0, 0, 0, 0, 0, 1, 1, 1} - -- --/* Set up this array for o32 by default. -- -- Note that we don't mark $31 as a call-clobbered register. The idea is -- that it's really the call instructions themselves which clobber $31. -- We don't care what the called function does with it afterwards. -- -- This approach makes it easier to implement sibcalls. Unlike normal -- calls, sibcalls don't clobber $31, so the register reaches the -- called function in tact. EPILOGUE_USES says that $31 is useful -- to the called function. */ -- -+/* The call RTLs themselves clobber ra. */ - #define CALL_USED_REGISTERS \ --{ \ -+{ /* General registers. */ \ - 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ - 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ /* Floating-point registers. */ \ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ - 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, \ -+ /* Others. */ \ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} - - /* Internal macros to classify a register number as to whether it's a -- general purpose register, a floating point register, a -- multiply/divide register, or a status register. */ -+ general purpose register, a floating point register, or a status -+ register. 
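[Editor's note: LOAD_EXTEND_OP and PROMOTE_MODE above encode the LP64 convention that 32-bit quantities live sign-extended in 64-bit registers, matching ld.w semantics. In C terms, the register image of a loaded word is:]

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t word = 0x80000000u;              /* in-memory SImode value */
  int64_t reg = (int64_t) (int32_t) word;   /* ld.w: sign-extended */
  printf ("%lld\n", (long long) reg);       /* -2147483648, not 2^31 */
  return 0;
}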
*/ - - #define GP_REG_FIRST 0 --#define GP_REG_LAST 31 --#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1) --#define GP_DBX_FIRST 0 -+#define GP_REG_LAST 31 -+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1) - - #define FP_REG_FIRST 32 --#define FP_REG_LAST 63 --#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) --#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32) -+#define FP_REG_LAST 63 -+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) - - #define LSX_REG_FIRST FP_REG_FIRST - #define LSX_REG_LAST FP_REG_LAST -@@ -823,20 +404,16 @@ struct loongarch_cpu_info { - would need to be handled by the DWARF unwinder. */ - #define DWARF_ALT_FRAME_RETURN_COLUMN 72 - --#define ST_REG_FIRST 64 --#define ST_REG_LAST 71 --#define ST_REG_NUM (ST_REG_LAST - ST_REG_FIRST + 1) -+#define FCC_REG_FIRST 64 -+#define FCC_REG_LAST 71 -+#define FCC_REG_NUM (FCC_REG_LAST - FCC_REG_FIRST + 1) - --#define GP_REG_P(REGNO) \ -+#define GP_REG_P(REGNO) \ - ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM) --#define M16_REG_P(REGNO) \ -- (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 16 || (REGNO) == 17) --#define M16STORE_REG_P(REGNO) \ -- (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 0 || (REGNO) == 17) --#define FP_REG_P(REGNO) \ -+#define FP_REG_P(REGNO) \ - ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM) --#define ST_REG_P(REGNO) \ -- ((unsigned int) ((int) (REGNO) - ST_REG_FIRST) < ST_REG_NUM) -+#define FCC_REG_P(REGNO) \ -+ ((unsigned int) ((int) (REGNO) - FCC_REG_FIRST) < FCC_REG_NUM) - #define LSX_REG_P(REGNO) \ - ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM) - #define LASX_REG_P(REGNO) \ -@@ -846,10 +423,6 @@ struct loongarch_cpu_info { - #define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X))) - #define LASX_REG_RTX_P(X) (REG_P (X) && LASX_REG_P (REGNO (X))) - -- --#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \ -- loongarch_hard_regno_rename_ok (OLD_REG, NEW_REG) -- - /* Select a register mode required for caller save of hard regno REGNO. */ - #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ - loongarch_hard_regno_caller_save_mode (REGNO, NREGS, MODE) -@@ -862,35 +435,34 @@ struct loongarch_cpu_info { - #define ARG_POINTER_REGNUM 72 - #define FRAME_POINTER_REGNUM 73 - --#define HARD_FRAME_POINTER_REGNUM \ -- (GP_REG_FIRST + 22) -- --/* FIXME: */ --/* #define HARD_FRAME_POINTER_IS_FRAME_POINTER (HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM) */ --/* #define HARD_FRAME_POINTER_IS_ARG_POINTER (HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM) */ -+#define HARD_FRAME_POINTER_REGNUM (GP_REG_FIRST + 22) - - #define HARD_FRAME_POINTER_IS_FRAME_POINTER 0 - #define HARD_FRAME_POINTER_IS_ARG_POINTER 0 - --/* FIXME: */ - /* Register in which static-chain is passed to a function. */ --#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 20) /* $t8 */ -- --#define LARCH_PROLOGUE_TEMP_REGNUM \ -- (GP_REG_FIRST + 13) --#define LARCH_PROLOGUE_TEMP2_REGNUM \ -- (GP_REG_FIRST + 12) --#define LARCH_PROLOGUE_TEMP3_REGNUM \ -- (GP_REG_FIRST + 14) --#define LARCH_EPILOGUE_TEMP_REGNUM \ -- (GP_REG_FIRST + (12)) -- --#define LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP_REGNUM) -+#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 20) /* $t8 */ -+ -+/* DRAP register if static-chain register is unavailable. 
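[Editor's note: GP_REG_P, FP_REG_P and FCC_REG_P above all use the single-comparison range test: subtracting the first register number and casting to unsigned folds both bounds into one compare. A minimal check of the idiom:]

#include <stdio.h>

#define FP_REG_FIRST 32
#define FP_REG_NUM 32
#define FP_REG_P(R) \
  ((unsigned int) ((int) (R) - FP_REG_FIRST) < FP_REG_NUM)

int
main (void)
{
  /* Values below FP_REG_FIRST wrap to huge unsigned numbers.  */
  printf ("%d %d %d\n", FP_REG_P (31), FP_REG_P (32), FP_REG_P (63));
  return 0;   /* prints 0 1 1 */
}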
*/ -+#define DRAP_REGNUM (GP_REG_FIRST + 15) /* $t3 */ -+ -+#define GP_TEMP_FIRST (GP_REG_FIRST + 12) -+#define LARCH_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1) -+#define LARCH_PROLOGUE_TEMP2_REGNUM (GP_TEMP_FIRST) -+#define LARCH_PROLOGUE_TEMP3_REGNUM (GP_TEMP_FIRST + 2) -+#define LARCH_EPILOGUE_TEMP_REGNUM (GP_TEMP_FIRST) -+ -+#define CALLEE_SAVED_REG_NUMBER(REGNO) \ -+ ((REGNO) >= 22 && (REGNO) <= 31 ? (REGNO) - 22 : -1) -+ -+#define LARCH_PROLOGUE_TEMP(MODE) \ -+ gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP_REGNUM) - #define LARCH_PROLOGUE_TEMP2(MODE) \ - gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP2_REGNUM) - #define LARCH_PROLOGUE_TEMP3(MODE) \ - gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP3_REGNUM) --#define LARCH_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_EPILOGUE_TEMP_REGNUM) -+#define LARCH_EPILOGUE_TEMP(MODE) \ -+ gen_rtx_REG (MODE, LARCH_EPILOGUE_TEMP_REGNUM) - - /* Define this macro if it is as good or better to call a constant - function address than to call an address kept in a register. */ -@@ -898,7 +470,6 @@ struct loongarch_cpu_info { - - #define THREAD_POINTER_REGNUM (GP_REG_FIRST + 2) - -- - /* Define the classes of registers for register constraints in the - machine description. Also define ranges of constants. - -@@ -908,7 +479,7 @@ struct loongarch_cpu_info { - - The name GENERAL_REGS must be the name of a class (or an alias for - another name such as ALL_REGS). This is the class of registers -- that is allowed by "g" or "r" in a register constraint. -+ that is allowed by "r" in a register constraint. - Also, registers outside this class are allocated only when - instructions express preferences for them. - -@@ -921,16 +492,16 @@ struct loongarch_cpu_info { - - enum reg_class - { -- NO_REGS, /* no registers in set */ -- SIBCALL_REGS, /* SIBCALL_REGS */ -- JALR_REGS, /* JALR_REGS */ -- GR_REGS, /* integer registers */ -- CSR_REGS, /* integer registers except for $r0 and $r1 for csr. */ -- FP_REGS, /* floating point registers */ -- ST_REGS, /* status registers (fp status) */ -- FRAME_REGS, /* arg pointer and frame pointer */ -- ALL_REGS, /* all registers */ -- LIM_REG_CLASSES /* max value + 1 */ -+ NO_REGS, /* no registers in set */ -+ SIBCALL_REGS, /* registers used by indirect sibcalls */ -+ JIRL_REGS, /* registers used by indirect calls */ -+ CSR_REGS, /* integer registers except for $r0 and $r1 for lcsr. 
*/ -+ GR_REGS, /* integer registers */ -+ FP_REGS, /* floating point registers */ -+ FCC_REGS, /* status registers (fp status) */ -+ FRAME_REGS, /* arg pointer and frame pointer */ -+ ALL_REGS, /* all registers */ -+ LIM_REG_CLASSES /* max value + 1 */ - }; - - #define N_REG_CLASSES (int) LIM_REG_CLASSES -@@ -945,11 +516,11 @@ enum reg_class - { \ - "NO_REGS", \ - "SIBCALL_REGS", \ -- "JALR_REGS", \ -- "GR_REGS", \ -+ "JIRL_REGS", \ - "CSR_REGS", \ -+ "GR_REGS", \ - "FP_REGS", \ -- "ST_REGS", \ -+ "FCC_REGS", \ - "FRAME_REGS", \ - "ALL_REGS" \ - } -@@ -968,29 +539,28 @@ enum reg_class - #define REG_CLASS_CONTENTS \ - { \ - { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \ -- { 0x001ff000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \ -- { 0xff9ffff0, 0x00000000, 0x00000000 }, /* JALR_REGS */ \ -- { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \ -+ { 0x001fd000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \ -+ { 0xff9ffff0, 0x00000000, 0x00000000 }, /* JIRL_REGS */ \ - { 0xfffffffc, 0x00000000, 0x00000000 }, /* CSR_REGS */ \ -+ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \ - { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \ -- { 0x00000000, 0x00000000, 0x000000ff }, /* ST_REGS */ \ -+ { 0x00000000, 0x00000000, 0x000000ff }, /* FCC_REGS */ \ - { 0x00000000, 0x00000000, 0x00000300 }, /* FRAME_REGS */ \ - { 0xffffffff, 0xffffffff, 0x000003ff } /* ALL_REGS */ \ - } - -- - /* A C expression whose value is a register class containing hard - register REGNO. In general there is more that one such class; - choose a class which is "minimal", meaning that no smaller class - also contains the register. */ - --#define REGNO_REG_CLASS(REGNO) loongarch_regno_to_class[ (REGNO) ] -+#define REGNO_REG_CLASS(REGNO) loongarch_regno_to_class[(REGNO)] - - /* A macro whose definition is the name of the class to which a - valid base register must belong. A base register is one used in - an address which is the register value plus a displacement. */ - --#define BASE_REG_CLASS (GR_REGS) -+#define BASE_REG_CLASS (GR_REGS) - - /* A macro whose definition is the name of the class to which a - valid index register must belong. An index register is one used -@@ -998,7 +568,7 @@ enum reg_class - factor or added to another register (as well as added to a - displacement). */ - --#define INDEX_REG_CLASS NO_REGS -+#define INDEX_REG_CLASS GR_REGS - - /* We generally want to put call-clobbered registers ahead of - call-saved ones. (IRA expects this.) */ -@@ -1006,10 +576,6 @@ enum reg_class - #define REG_ALLOC_ORDER \ - { /* Call-clobbered GPRs. */ \ - 12, 13, 14, 15, 16, 17, 18, 19, 20, 4, 5, 6, 7, 8, 9, 10, 11, 1, \ -- /* The global pointer. This is call-clobbered for o32 and o64 \ -- abicalls, call-saved for n32 and n64 abicalls, and a program \ -- invariant otherwise. Putting it between the call-clobbered \ -- and call-saved registers should cope with all eventualities. */ \ - /* Call-saved GPRs. */ \ - 23, 24, 25, 26, 27, 28, 29, 30, 31, \ - /* GPRs that can never be exposed to the register allocator. */ \ -@@ -1017,31 +583,27 @@ enum reg_class - /* Call-clobbered FPRs. */ \ - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \ - 48, 49, 50, 51,52, 53, 54, 55, \ -- /* FPRs that are usually call-saved. The odd ones are actually \ -- call-clobbered for n32, but listing them ahead of the even \ -- registers might encourage the register allocator to fragment \ -- the available FPR pairs. 
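[Editor's note: each REG_CLASS_CONTENTS entry above is a bitmap over hard register numbers, 32 per word. Decoding the first word of SIBCALL_REGS shows what changed against the old 0x001ff000 mask: $r13 is dropped, consistent with its new role as LARCH_PROLOGUE_TEMP_REGNUM in the hunk above. A quick decode, illustrative only:]

#include <stdio.h>

int
main (void)
{
  unsigned mask = 0x001fd000;   /* SIBCALL_REGS, word 0 */
  for (int regno = 0; regno < 32; regno++)
    if (mask & (1u << regno))
      printf ("$r%d ", regno);
  printf ("\n");                /* $r12 $r14 $r15 $r16 ... $r20 */
  return 0;
}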
We need paired FPRs to store long \ -- doubles, so it isn't clear that using a different order \ -- for n32 would be a win. */ \ - 56, 57, 58, 59, 60, 61, 62, 63, \ - /* None of the remaining classes have defined call-saved \ - registers. */ \ - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73} - -+#define IMM_BITS 12 -+#define IMM_REACH (HOST_WIDE_INT_1 << IMM_BITS) -+#define HWIT_1U HOST_WIDE_INT_1U -+ - /* True if VALUE is an unsigned 6-bit number. */ - --#define UIMM6_OPERAND(VALUE) \ -- (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0) -+#define UIMM6_OPERAND(VALUE) (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0) - - /* True if VALUE is a signed 10-bit number. */ - --#define IMM10_OPERAND(VALUE) \ -- ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400) -+#define IMM10_OPERAND(VALUE) ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400) - - /* True if VALUE is a signed 12-bit number. */ - - #define IMM12_OPERAND(VALUE) \ -- ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000) -+ ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH / 2 < IMM_REACH) - - /* True if VALUE is a signed 13-bit number. */ - -@@ -1053,67 +615,51 @@ enum reg_class - #define IMM16_OPERAND(VALUE) \ - ((unsigned HOST_WIDE_INT) (VALUE) + 0x8000 < 0x10000) - -- --/* True if VALUE is a signed 12-bit number. */ -- --#define SMALL_OPERAND(VALUE) \ -- ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000) -- - /* True if VALUE is an unsigned 12-bit number. */ - --#define SMALL_OPERAND_UNSIGNED(VALUE) \ -- (((VALUE) & ~(unsigned HOST_WIDE_INT) 0xfff) == 0) -+#define IMM12_OPERAND_UNSIGNED(VALUE) \ -+ (((VALUE) & ~(unsigned HOST_WIDE_INT) (IMM_REACH - 1)) == 0) - --/* True if VALUE can be loaded into a register using LUI. */ -+/* True if VALUE can be loaded into a register using LU12I. */ - --#define LUI_OPERAND(VALUE) \ -- (((VALUE) | 0x7ffff000) == 0x7ffff000 \ -- || ((VALUE) | 0x7ffff000) + 0x1000 == 0) -+#define LU12I_OPERAND(VALUE) \ -+ (((VALUE) | ((HWIT_1U << 31) - IMM_REACH)) == ((HWIT_1U << 31) - IMM_REACH) \ -+ || ((VALUE) | ((HWIT_1U << 31) - IMM_REACH)) + IMM_REACH == 0) - --/* True if VALUE can be loaded into a register using LUI. */ -+/* True if VALUE can be loaded into a register using LU32I. */ - --#define LU32I_OPERAND(VALUE) \ -- ((((VALUE) | 0x7ffff00000000) == 0x7ffff00000000) \ -- || ((VALUE) | 0x7ffff00000000) + 0x100000000 == 0) -+#define LU32I_OPERAND(VALUE) \ -+ (((VALUE) | (((HWIT_1U << 19) - 1) << 32)) == (((HWIT_1U << 19) - 1) << 32) \ -+ || ((VALUE) | (((HWIT_1U << 19) - 1) << 32)) + (HWIT_1U << 32) == 0) - --/* True if VALUE can be loaded into a register using LUI. */ -+/* True if VALUE can be loaded into a register using LU52I. */ - --#define LU52I_OPERAND(VALUE) \ -- ((((VALUE) | 0xfff0000000000000) == 0xfff0000000000000)) -+#define HWIT_UC_0xFFF HOST_WIDE_INT_UC(0xfff) -+#define LU52I_OPERAND(VALUE) \ -+ (((VALUE) | (HWIT_UC_0xFFF << 52)) == (HWIT_UC_0xFFF << 52)) - - /* Return a value X with the low 12 bits clear, and such that - VALUE - X is a signed 12-bit value. 
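The IMM10/IMM12/IMM16 macros above all rely on the same unsigned-wraparound trick: adding half the range to a value and comparing against the full range classifies it as an N-bit signed immediate with no signed overflow. A minimal stand-alone sketch of the idea (plain C, not part of the patch; imm12_operand_p is a made-up name):

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

/* Same test as IMM12_OPERAND: true iff V fits in a signed 12-bit field.  */
static bool
imm12_operand_p (int64_t v)
{
  /* Shift the signed range [-0x800, 0x7ff] up to [0, 0xfff]; anything
     outside wraps far above 0x1000 in unsigned arithmetic.  */
  return (uint64_t) v + 0x800 < 0x1000;
}

int
main (void)
{
  assert (imm12_operand_p (-2048) && imm12_operand_p (2047));
  assert (!imm12_operand_p (-2049) && !imm12_operand_p (2048));
  return 0;
}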
*/ - --#define CONST_HIGH_PART(VALUE) \ -- (((VALUE) + 0x800) & ~(unsigned HOST_WIDE_INT) 0xfff) -+#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH / 2)) & ~(IMM_REACH - 1)) - --#define CONST_LOW_PART(VALUE) \ -- ((VALUE) - CONST_HIGH_PART (VALUE)) -+#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE)) - --#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X)) --#define SMALL_INT_UNSIGNED(X) SMALL_OPERAND_UNSIGNED (INTVAL (X)) --#define LUI_INT(X) LUI_OPERAND (INTVAL (X)) -+#define IMM12_INT(X) IMM12_OPERAND (INTVAL (X)) -+#define IMM12_INT_UNSIGNED(X) IMM12_OPERAND_UNSIGNED (INTVAL (X)) -+#define LU12I_INT(X) LU12I_OPERAND (INTVAL (X)) - #define LU32I_INT(X) LU32I_OPERAND (INTVAL (X)) - #define LU52I_INT(X) LU52I_OPERAND (INTVAL (X)) --#define ULARCH_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) -+#define LARCH_U12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) - #define LARCH_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255)) --#define LISA_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) --#define LISA_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) -- --/* The HI and LO registers can only be reloaded via the general -- registers. Condition code registers can only be loaded to the -- general registers, and from the floating point registers. */ -- --#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \ -- loongarch_secondary_reload_class (CLASS, MODE, X, true) --#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \ -- loongarch_secondary_reload_class (CLASS, MODE, X, false) -+#define LARCH_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) -+#define LARCH_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) - - /* Return the maximum number of consecutive registers - needed to represent mode MODE in a register of class CLASS. */ - - #define CLASS_MAX_NREGS(CLASS, MODE) loongarch_class_max_nregs (CLASS, MODE) -- -+ - /* Stack layout; function entry, exit and calling. */ - - #define STACK_GROWS_DOWNWARD 1 -@@ -1127,11 +673,13 @@ enum reg_class - - #define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta - --#define ELIMINABLE_REGS \ --{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ -- { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ -- { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ -- { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM},} -+#define ELIMINABLE_REGS \ -+ { \ -+ {ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ -+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ -+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ -+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ -+ } - - #define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ - (OFFSET) = loongarch_initial_elimination_offset ((FROM), (TO)) -@@ -1142,11 +690,7 @@ enum reg_class - /* The argument pointer always points to the first argument. */ - #define FIRST_PARM_OFFSET(FNDECL) 0 - --/* o32 and o64 reserve stack space for all argument registers. */ --#define REG_PARM_STACK_SPACE(FNDECL) \ -- (TARGET_OLDABI \ -- ? (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD) \ -- : 0) -+#define REG_PARM_STACK_SPACE(FNDECL) 0 - - /* Define this if it is the responsibility of the caller to - allocate the area reserved for arguments passed in registers. -@@ -1155,22 +699,25 @@ enum reg_class - `crtl->outgoing_args_size'. */ - #define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1 - --#define STACK_BOUNDARY (TARGET_NEWABI ? 128 : 64) -- -+#define STACK_BOUNDARY (TARGET_ABI_LP64 ? 128 : 64) -+ -+/* Maximum stack alignment. */ -+#define MAX_STACK_ALIGNMENT (loongarch_stack_realign ? 
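CONST_HIGH_PART/CONST_LOW_PART split an offset so the low part is a valid signed 12-bit displacement and the high part is 4 KiB aligned, and the two always sum back to the original value. A quick check of that invariant (stand-alone C; the patch computes in unsigned HOST_WIDE_INT, the signed int64_t here is a simplification):

#include <stdint.h>
#include <assert.h>

#define IMM_REACH 0x1000	/* 1 << IMM_BITS, as in the patch.  */

static int64_t
const_high_part (int64_t v)
{
  return (v + IMM_REACH / 2) & ~(int64_t) (IMM_REACH - 1);
}

int
main (void)
{
  for (int64_t v = -70000; v <= 70000; v += 997)
    {
      int64_t hi = const_high_part (v);
      int64_t lo = v - hi;		/* CONST_LOW_PART.  */
      assert (hi + lo == v);
      assert (lo >= -2048 && lo <= 2047);	/* fits IMM12_OPERAND.  */
      assert ((hi & 0xfff) == 0);		/* 4 KiB aligned.  */
    }
  return 0;
}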
MAX_OFILE_ALIGNMENT : STACK_BOUNDARY) -+ - /* Symbolic macros for the registers used to return integer and floating - point values. */ - - #define GP_RETURN (GP_REG_FIRST + 4) - #define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : (FP_REG_FIRST + 0)) - --#define MAX_ARGS_IN_REGISTERS (TARGET_OLDABI ? 4 : 8) -+#define MAX_ARGS_IN_REGISTERS 8 - - /* Symbolic macros for the first/last argument registers. */ - - #define GP_ARG_FIRST (GP_REG_FIRST + 4) --#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) -+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) - #define FP_ARG_FIRST (FP_REG_FIRST + 0) --#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) -+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) - - /* True if MODE is vector and supported in a LSX vector register. */ - #define LSX_SUPPORTED_MODE_P(MODE) \ -@@ -1188,60 +735,39 @@ enum reg_class - && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ - || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) - -+#define RECIP_MASK_NONE 0x00 -+#define RECIP_MASK_DIV 0x01 -+#define RECIP_MASK_SQRT 0x02 -+#define RECIP_MASK_RSQRT 0x04 -+#define RECIP_MASK_VEC_DIV 0x08 -+#define RECIP_MASK_VEC_SQRT 0x10 -+#define RECIP_MASK_VEC_RSQRT 0x20 -+#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \ -+ | RECIP_MASK_RSQRT | RECIP_MASK_VEC_SQRT \ -+ | RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_RSQRT) -+ -+#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0 || TARGET_uARCH_LA664) -+#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0 || TARGET_uARCH_LA664) -+#define TARGET_RECIP_RSQRT ((recip_mask & RECIP_MASK_RSQRT) != 0 || TARGET_uARCH_LA664) -+#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0 || TARGET_uARCH_LA664) -+#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0 || TARGET_uARCH_LA664) -+#define TARGET_RECIP_VEC_RSQRT ((recip_mask & RECIP_MASK_VEC_RSQRT) != 0 || TARGET_uARCH_LA664) -+ - /* 1 if N is a possible register number for function argument passing. - We have no FP argument registers when soft-float. */ - - /* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI. */ --#define FUNCTION_ARG_REGNO_P(N) \ -- (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \ -+#define FUNCTION_ARG_REGNO_P(N) \ -+ (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \ - || (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST))) - -- --/* This structure has to cope with two different argument allocation -- schemes. Most LARCH ABIs view the arguments as a structure, of which -- the first N words go in registers and the rest go on the stack. If I -- < N, the Ith word might go in Ith integer argument register or in a -- floating-point register. For these ABIs, we only need to remember -- the offset of the current argument into the structure. -- -- So for the standard ABIs, the first N words are allocated to integer -- registers, and loongarch_function_arg decides on an argument-by-argument -- basis whether that argument should really go in an integer register, -- or in a floating-point one. */ -- --typedef struct loongarch_args { -- /* Always true for varargs functions. Otherwise true if at least -- one argument has been passed in an integer register. */ -- int gp_reg_found; -- -- /* The number of arguments seen so far. */ -- unsigned int arg_number; -- -- /* The number of integer registers used so far. This is the number -- of words that have been added to the argument structure, limited -- to MAX_ARGS_IN_REGISTERS. 
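With GP_ARG_FIRST == 4 and MAX_ARGS_IN_REGISTERS == 8, integer arguments land in $r4-$r11 (a0-a7) and floating-point arguments in $f0-$f7. A toy version of that mapping (stand-alone C; FP_REG_FIRST == 32 is assumed from the register layout, and the ABI's struct/vararg subtleties are ignored):

#include <assert.h>

enum { GP_REG_FIRST = 0, FP_REG_FIRST = 32, MAX_ARGS_IN_REGISTERS = 8 };
enum { GP_ARG_FIRST = GP_REG_FIRST + 4, FP_ARG_FIRST = FP_REG_FIRST + 0 };

/* Register number holding integer argument N, or -1 if it goes on
   the stack.  */
static int
gp_arg_regno (int n)
{
  return n < MAX_ARGS_IN_REGISTERS ? GP_ARG_FIRST + n : -1;
}

int
main (void)
{
  assert (gp_arg_regno (0) == 4);	/* $r4 == a0.  */
  assert (gp_arg_regno (7) == 11);	/* $r11 == a7.  */
  assert (gp_arg_regno (8) == -1);	/* ninth argument: stack.  */
  return 0;
}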
*/ -+typedef struct { -+ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */ - unsigned int num_gprs; - -+ /* Number of floating-point registers used so far, likewise. */ - unsigned int num_fprs; - -- /* The number of words passed on the stack. */ -- unsigned int stack_words; -- -- /* On the loongarch16, we need to keep track of which floating point -- arguments were passed in general registers, but would have been -- passed in the FP regs if this were a 32-bit function, so that we -- can move them to the FP regs if we wind up calling a 32-bit -- function. We record this information in fp_code, encoded in base -- four. A zero digit means no floating point argument, a one digit -- means an SFmode argument, and a two digit means a DFmode argument, -- and a three digit is not used. The low order digit is the first -- argument. Thus 6 == 1 * 4 + 2 means a DFmode argument followed by -- an SFmode argument. ??? A more sophisticated approach will be -- needed if LARCH_ABI != ABILP32. */ -- int fp_code; -- -- /* True if the function has a prototype. */ -- int prototype; - } CUMULATIVE_ARGS; - - /* Initialize a variable CUM of type CUMULATIVE_ARGS -@@ -1251,48 +777,37 @@ typedef struct loongarch_args { - #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ - memset (&(CUM), 0, sizeof (CUM)) - -- --#define EPILOGUE_USES(REGNO) loongarch_epilogue_uses (REGNO) -+#define EPILOGUE_USES(REGNO) loongarch_epilogue_uses (REGNO) - -+#define STACK_ALIGN_SIZE_INTERNAL \ -+ (crtl->stack_realign_needed) \ -+? (crtl->stack_alignment_needed / BITS_PER_UNIT) \ -+: (TARGET_ABI_LP64 ? 16 : 8) - /* Treat LOC as a byte offset from the stack pointer and round it up - to the next fully-aligned offset. */ - #define LARCH_STACK_ALIGN(LOC) \ -- (TARGET_NEWABI ? ROUND_UP ((LOC), 16) : ROUND_UP ((LOC), 8)) -+ ROUND_UP ((LOC), TARGET_ABI_LP64 ? 16 : 8) - -- --/* Output assembler code to FILE to increment profiler label # LABELNO -- for profiling a function entry. */ -+#define LARCH_STACK_ALIGN2(LOC) \ -+ ROUND_UP ((LOC), STACK_ALIGN_SIZE_INTERNAL) - - #define MCOUNT_NAME "_mcount" - - /* Emit rtl for profiling. Output assembler code to FILE - to call "_mcount" for profiling a function entry. */ --#define PROFILE_HOOK(LABEL) \ -- { \ -- rtx fun, ra; \ -- ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \ -- fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ -- emit_library_call (fun, LCT_NORMAL, VOIDmode, ra, Pmode); \ -+#define PROFILE_HOOK(LABEL) \ -+ { \ -+ rtx fun, ra; \ -+ ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \ -+ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ -+ emit_library_call (fun, LCT_NORMAL, VOIDmode, ra, Pmode); \ - } - - /* All the work done in PROFILE_HOOK, but still required. */ - #define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0) - -- --/* The profiler preserves all interesting registers, including $31. */ --#define LARCH_SAVE_REG_FOR_PROFILING_P(REGNO) false -- --/* No loongarch port has ever used the profiler counter word, so don't emit it -- or the label for it. */ -- - #define NO_PROFILE_COUNTERS 1 - --/* Define this macro if the code for function profiling should come -- before the function prologue. Normally, the profiling code comes -- after. */ -- --/* #define PROFILE_BEFORE_PROLOGUE */ -- - /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, - the stack pointer does not matter. The value is tested only in - functions that have frame pointers. 
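LARCH_STACK_ALIGN simply rounds a byte offset up to the ABI stack-slot alignment (16 bytes for LP64, 8 otherwise). A stand-alone illustration, assuming the usual power-of-two ROUND_UP definition from GCC's system headers:

#include <assert.h>

/* Round X up to the next multiple of the power-of-two ALIGN.  */
#define ROUND_UP(X, ALIGN) (((X) + (ALIGN) - 1) & ~((ALIGN) - 1))

int
main (void)
{
  /* LP64: every frame chunk is padded to a 16-byte boundary.  */
  assert (ROUND_UP (0, 16) == 0);
  assert (ROUND_UP (1, 16) == 16);
  assert (ROUND_UP (24, 16) == 32);
  /* 32-bit ABIs only need 8-byte alignment.  */
  assert (ROUND_UP (24, 8) == 24);
  return 0;
}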
-@@ -1300,16 +815,13 @@ typedef struct loongarch_args { - - #define EXIT_IGNORE_STACK 1 - -- - /* Trampolines are a block of code followed by two pointers. */ - -+#define TRAMPOLINE_CODE_SIZE 16 - #define TRAMPOLINE_SIZE \ -- (loongarch_trampoline_code_size () + GET_MODE_SIZE (ptr_mode) * 2) -- --/* Forcing a 64-bit alignment for 32-bit targets allows us to load two -- pointers from a single LUI base. */ -- --#define TRAMPOLINE_ALIGNMENT 64 -+ ((Pmode == SImode) ? TRAMPOLINE_CODE_SIZE \ -+ : (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)) -+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE - - /* loongarch_trampoline_init calls this library function to flush - program and data caches. */ -@@ -1318,96 +830,64 @@ typedef struct loongarch_args { - #define CACHE_FLUSH_FUNC "_flush_cache" - #endif - --#define LARCH_ICACHE_SYNC(ADDR, SIZE) \ -- /* Flush both caches. We need to flush the data cache in case \ -- the system has a write-back cache. */ \ -- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, loongarch_cache_flush_func), \ -- LCT_NORMAL, VOIDmode, ADDR, Pmode, SIZE, Pmode, \ -- GEN_INT (3), TYPE_MODE (integer_type_node)) -- -- - /* Addressing modes, and classification of registers for them. */ - --#define REGNO_OK_FOR_INDEX_P(REGNO) 0 -+#define REGNO_OK_FOR_INDEX_P(REGNO) \ -+ loongarch_regno_mode_ok_for_base_p (REGNO, VOIDmode, 1) -+ - #define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ - loongarch_regno_mode_ok_for_base_p (REGNO, MODE, 1) -- -+ - /* Maximum number of registers that can appear in a valid memory address. */ - --#define MAX_REGS_PER_ADDRESS 1 -+#define MAX_REGS_PER_ADDRESS 2 - - /* Check for constness inline but use loongarch_legitimate_address_p - to check whether a constant really is an address. */ - --#define CONSTANT_ADDRESS_P(X) \ -- (CONSTANT_P (X) && memory_address_p (SImode, X)) -+#define CONSTANT_ADDRESS_P(X) (CONSTANT_P (X) && memory_address_p (SImode, X)) - - /* This handles the magic '..CURRENT_FUNCTION' symbol, which means - 'the start of the function that this code is output in'. */ - --#define ASM_OUTPUT_LABELREF(FILE,NAME) \ -- do { \ -- if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \ -- asm_fprintf ((FILE), "%U%s", \ -- XSTR (XEXP (DECL_RTL (current_function_decl), \ -- 0), 0)); \ -- else \ -- asm_fprintf ((FILE), "%U%s", (NAME)); \ -- } while (0) -- --/* Flag to mark a function decl symbol that requires a long call. */ --#define SYMBOL_FLAG_LONG_CALL (SYMBOL_FLAG_MACH_DEP << 0) --#define SYMBOL_REF_LONG_CALL_P(X) \ -- ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_LONG_CALL) != 0) -- --/* This flag marks functions that cannot be lazily bound. */ --#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1) --#define SYMBOL_REF_BIND_NOW_P(RTX) \ -- ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0) -- --/* True if we're generating a form of LARCH16 code in which jump tables -- are stored in the text section and encoded as 16-bit PC-relative -- offsets. This is only possible when general text loads are allowed, -- since the table access itself will be an "lh" instruction. If the -- PC-relative offsets grow too large, 32-bit offsets are used instead. */ -- -- --#define CASE_VECTOR_MODE (ptr_mode) -+#define ASM_OUTPUT_LABELREF(FILE, NAME) \ -+ do \ -+ { \ -+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \ -+ asm_fprintf ((FILE), "%U%s", \ -+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \ -+ else \ -+ asm_fprintf ((FILE), "%U%s", (NAME)); \ -+ } \ -+ while (0) - --/* Only use short offsets if their range will not overflow. 
*/ --#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) \ -- (ptr_mode ? HImode : SImode) -+#define CASE_VECTOR_MODE Pmode - -+#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) Pmode - - /* Define this as 1 if `char' should by default be signed; else as 0. */ - #ifndef DEFAULT_SIGNED_CHAR - #define DEFAULT_SIGNED_CHAR 1 - #endif - --/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets, -- we generally don't want to use them for copying arbitrary data. -- A single N-word move is usually the same cost as N single-word moves. */ -+/* The SPARC port says: -+ The maximum number of bytes that a single instruction -+ can move quickly between memory and registers or between -+ two memory locations. */ - #define MOVE_MAX UNITS_PER_WORD - /* We don't modify it for LSX as it is only used by the classic reload. */ - #define MAX_MOVE_MAX 8 - --/* Define this macro as a C expression which is nonzero if -- accessing less than a word of memory (i.e. a `char' or a -- `short') is no faster than accessing a word of memory, i.e., if -- such access require more than one instruction or if there is no -- difference in cost between byte and (aligned) word loads. -- -- On RISC machines, it tends to generate better code to define -- this as 1, since it avoids making a QI or HI mode register. -- --*/ --#define SLOW_BYTE_ACCESS (1) -- --/* Standard LARCH integer shifts truncate the shift amount to the -- width of the shifted operand. However, Loongson MMI shifts -- do not truncate the shift amount at all. */ --#define SHIFT_COUNT_TRUNCATED (1) -+/* The SPARC port says: -+ Nonzero if access to memory by bytes is slow and undesirable. -+ For RISC chips, it means that access to memory by bytes is no -+ better than access by words when possible, so grab a whole word -+ and maybe make use of that. */ -+#define SLOW_BYTE_ACCESS 1 - -+/* Standard LoongArch integer shifts truncate the shift amount to the -+ width of the shifted operand. */ -+#define SHIFT_COUNT_TRUNCATED 1 - - /* Specify the machine mode that pointers have. - After generation of rtl, the compiler makes no further distinction -@@ -1422,7 +902,6 @@ typedef struct loongarch_args { - - #define FUNCTION_MODE SImode - -- - /* We allocate $fcc registers by hand and can't cope with moves of - CCmode registers to and from pseudos (or memory). */ - #define AVOID_CCMODE_COPIES -@@ -1433,14 +912,6 @@ typedef struct loongarch_args { - #define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost - #define LOGICAL_OP_NON_SHORT_CIRCUIT 0 - --/* The LARCH port has several functions that return an instruction count. -- Multiplying the count by this value gives the number of bytes that -- the instructions occupy. */ --#define BASE_INSN_LENGTH (4) -- --/* The length of a NOP in bytes. */ --#define NOP_INSN_LENGTH (4) -- - /* If defined, modifies the length assigned to instruction INSN as a - function of the context in which it is used. LENGTH is an lvalue - that contains the initially computed length of the insn and should -@@ -1451,17 +922,8 @@ typedef struct loongarch_args { - /* Return the asm template for a conditional branch instruction. - OPCODE is the opcode's mnemonic and OPERANDS is the asm template for - its operands. */ --#define LARCH_BRANCH(OPCODE, OPERANDS) \ -- OPCODE "\t" OPERANDS -+#define LARCH_BRANCH(OPCODE, OPERANDS) OPCODE "\t" OPERANDS - --#define LARCH_BRANCH_C(OPCODE, OPERANDS) \ -- OPCODE "%:\t" OPERANDS -- --/* Return an asm string that forces INSN to be treated as an absolute -- J or JAL instruction instead of an assembler macro. 
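SHIFT_COUNT_TRUNCATED == 1 tells the middle end that the hardware only looks at the low log2(width) bits of a variable shift amount, so GCC may drop an explicit "& 31" / "& 63" mask feeding a shift. A small model of that behaviour (plain C; the mask is written out because C itself leaves oversized shift counts undefined):

#include <stdint.h>
#include <assert.h>

/* Model of sll.w: a 32-bit shift uses only the low 5 bits of the count.  */
static uint32_t
sll_w (uint32_t x, uint32_t n)
{
  return x << (n & 31);
}

int
main (void)
{
  /* A count of 33 behaves like a count of 1 on such hardware, so
     "x << (n & 31)" and "x << n" can generate the same single insn.  */
  assert (sll_w (1, 33) == sll_w (1, 1));
  return 0;
}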
*/ --#define LARCH_ABSOLUTE_JUMP(INSN) INSN -- -- - /* Control the assembler format that we output. */ - - /* Output to assembler file text saying following lines -@@ -1478,20 +940,19 @@ typedef struct loongarch_args { - #define ASM_APP_OFF " #NO_APP\n" - #endif - --#define REGISTER_NAMES \ --{ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", \ -- "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", \ -- "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", \ -- "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", \ -- "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \ -- "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ -- "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \ -- "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \ -- "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4","$fcc5","$fcc6","$fcc7", \ -+#define REGISTER_NAMES \ -+{ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", \ -+ "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", \ -+ "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", \ -+ "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", \ -+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \ -+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ -+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \ -+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \ -+ "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4","$fcc5","$fcc6","$fcc7", \ - "$arg", "$frame"} - --/* List the "software" names for each register. Also list the numerical -- names for $fp and $sp. */ -+/* This macro defines additional names for hard registers. */ - - #define ADDITIONAL_REGISTER_NAMES \ - { \ -@@ -1595,61 +1056,17 @@ typedef struct loongarch_args { - { "xr31", 31 + FP_REG_FIRST } \ - } - --#define DBR_OUTPUT_SEQEND(STREAM) \ --do \ -- { \ -- /* Emit a blank line after the delay slot for emphasis. */ \ -- fputs ("\n", STREAM); \ -- } \ --while (0) -- --/* The LARCH implementation uses some labels for its own purpose. The -- following lists what labels are created, and are all formed by the -- pattern $L[a-z].*. The machine independent portion of GCC creates -- labels matching: $L[A-Z][0-9]+ and $L[0-9]+. -- -- LM[0-9]+ Silicon Graphics/ECOFF stabs label before each stmt. -- $Lb[0-9]+ Begin blocks for LARCH debug support -- $Lc[0-9]+ Label for use in s operation. -- $Le[0-9]+ End blocks for LARCH debug support */ -- --#undef ASM_DECLARE_OBJECT_NAME --#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ -- loongarch_declare_object (STREAM, NAME, "", ":\n") -- - /* Globalizing directive for a label. */ - #define GLOBAL_ASM_OP "\t.globl\t" - --/* This says how to define a global common symbol. */ -- --#define ASM_OUTPUT_ALIGNED_DECL_COMMON loongarch_output_aligned_decl_common -- --/* This says how to define a local common symbol (i.e., not visible to -- linker). */ -- --#ifndef ASM_OUTPUT_ALIGNED_LOCAL --#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \ -- loongarch_declare_common_object (STREAM, NAME, "\n\t.lcomm\t", SIZE, ALIGN, false) --#endif -- - /* This says how to output an external. It would be possible not to -- output anything and let undefined symbol become external. However -+ output anything and let undefined symbol become external. However - the assembler uses length information on externals to allocate in - data/sdata bss/sbss, thereby saving exec time. 
*/ - - #undef ASM_OUTPUT_EXTERNAL --#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \ -- loongarch_output_external(STREAM,DECL,NAME) -- --/* This is how to declare a function name. The actual work of -- emitting the label is moved to function_prologue, so that we can -- get the line number correctly emitted before the .ent directive, -- and after any .file directives. Define as empty so that the function -- is not declared before the .ent directive elsewhere. */ -- --#undef ASM_DECLARE_FUNCTION_NAME --#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \ -- loongarch_declare_function_name(STREAM,NAME,DECL) -+#define ASM_OUTPUT_EXTERNAL(STREAM, DECL, NAME) \ -+ loongarch_output_external (STREAM, DECL, NAME) - - /* This is how to store into the string LABEL - the symbol_ref name of an internal numbered label where -@@ -1657,8 +1074,8 @@ while (0) - This is suitable for output with `assemble_name'. */ - - #undef ASM_GENERATE_INTERNAL_LABEL --#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \ -- sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM)) -+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \ -+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long) (NUM)) - - /* Print debug labels as "foo = ." rather than "foo:" because they should - represent a byte pointer rather than an ISA-encoded address. This is -@@ -1677,159 +1094,108 @@ while (0) - At the time of writing, this hook is not used for the function end - label: - -- $LFExxx: -+ $LFExxx: - .end foo - - */ - --#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ -+#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ - fprintf (FILE, "%s%s%d = .\n", LOCAL_LABEL_PREFIX, PREFIX, NUM) - - /* This is how to output an element of a case-vector that is absolute. */ - --#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ -- fprintf (STREAM, "\t%s\t%sL%d\n", \ -- ptr_mode == DImode ? ".dword" : ".word", \ -- LOCAL_LABEL_PREFIX, \ -- VALUE) -- --/* This is how to output an element of a case-vector. We can make the -- entries GP-relative when .gp(d)word is supported. */ -- --#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ --do { \ -- if (TARGET_RTP_PIC) \ -- { \ -- /* Make the entry relative to the start of the function. */ \ -- rtx fnsym = XEXP (DECL_RTL (current_function_decl), 0); \ -- fprintf (STREAM, "\t%s\t%sL%d-", \ -- Pmode == DImode ? ".dword" : ".word", \ -- LOCAL_LABEL_PREFIX, VALUE); \ -- assemble_name (STREAM, XSTR (fnsym, 0)); \ -- fprintf (STREAM, "\n"); \ -- } \ -- else \ -- fprintf (STREAM, "\t%s\t%sL%d-%sL%d\n", \ -- ptr_mode == DImode ? ".dword" : ".word", \ -- LOCAL_LABEL_PREFIX, VALUE, \ -- LOCAL_LABEL_PREFIX, REL); \ --} while (0) -- --/* Mark inline jump tables as data for the purpose of disassembly. For -- simplicity embed the jump table's label number in the local symbol -- produced so that multiple jump tables within a single function end -- up marked with unique symbols. Retain the alignment setting from -- `elfos.h' as we are replacing the definition from there. */ -- --#undef ASM_OUTPUT_BEFORE_CASE_LABEL --#define ASM_OUTPUT_BEFORE_CASE_LABEL(STREAM, PREFIX, NUM, TABLE) \ -- do \ -- { \ -- ASM_OUTPUT_ALIGN ((STREAM), 2); \ -- if (JUMP_TABLES_IN_TEXT_SECTION) \ -- loongarch_set_text_contents_type (STREAM, "__jump_", NUM, FALSE); \ -- } \ -- while (0) -+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ -+ fprintf (STREAM, "\t%s\t%sL%d\n", ptr_mode == DImode ? 
".dword" : ".word", \ -+ LOCAL_LABEL_PREFIX, VALUE) - --/* Reset text marking to code after an inline jump table. Like with -- the beginning of a jump table use the label number to keep symbols -- unique. */ -+/* This is how to output an element of a case-vector. */ - --#define ASM_OUTPUT_CASE_END(STREAM, NUM, TABLE) \ -- do \ -- if (JUMP_TABLES_IN_TEXT_SECTION) \ -- loongarch_set_text_contents_type (STREAM, "__jend_", NUM, TRUE); \ -+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ -+ do \ -+ { \ -+ fprintf (STREAM, "\t%s\t%sL%d-%sL%d\n", \ -+ ptr_mode == DImode ? ".dword" : ".word", LOCAL_LABEL_PREFIX, \ -+ VALUE, LOCAL_LABEL_PREFIX, REL); \ -+ } \ - while (0) - -+#define JUMP_TABLES_IN_TEXT_SECTION 0 -+ - /* This is how to output an assembler line - that says to advance the location counter - to a multiple of 2**LOG bytes. */ - --#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ -- fprintf (STREAM, "\t.align\t%d\n", (LOG)) -+#define ASM_OUTPUT_ALIGN(STREAM, LOG) fprintf (STREAM, "\t.align\t%d\n", (LOG)) - --#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM,LOG) \ -+/* "nop" instruction 54525952 (andi $r0,$r0,0) is -+ used for padding. */ -+#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, LOG) \ - fprintf (STREAM, "\t.align\t%d,54525952,4\n", (LOG)) - -- - /* This is how to output an assembler line to advance the location - counter by SIZE bytes. */ - - #undef ASM_OUTPUT_SKIP --#define ASM_OUTPUT_SKIP(STREAM,SIZE) \ -- fprintf (STREAM, "\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)) -+#define ASM_OUTPUT_SKIP(STREAM, SIZE) \ -+ fprintf (STREAM, "\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n", (SIZE)) - - /* This is how to output a string. */ - #undef ASM_OUTPUT_ASCII - #define ASM_OUTPUT_ASCII loongarch_output_ascii - -- --/* Default to -G 8 */ --#ifndef LARCH_DEFAULT_GVALUE --#define LARCH_DEFAULT_GVALUE 8 --#endif -- - /* Define the strings to put out for each section in the object file. */ --#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */ --#define DATA_SECTION_ASM_OP "\t.data" /* large data */ -+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */ -+#define DATA_SECTION_ASM_OP "\t.data" /* large data */ - - #undef READONLY_DATA_SECTION_ASM_OP --#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* read-only data */ -- --#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \ --do \ -- { \ -- fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \ -- TARGET_64BIT ? "daddiu" : "addiu", \ -- reg_names[STACK_POINTER_REGNUM], \ -- reg_names[STACK_POINTER_REGNUM], \ -- TARGET_64BIT ? "sd" : "sw", \ -- reg_names[REGNO], \ -- reg_names[STACK_POINTER_REGNUM]); \ -- } \ --while (0) -- --#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \ --do \ -- { \ -- loongarch_push_asm_switch (&loongarch_noreorder); \ -- fprintf (STREAM, "\t%s\t%s,0(%s)\n\t%s\t%s,%s,8\n", \ -- TARGET_64BIT ? "ld" : "lw", \ -- reg_names[REGNO], \ -- reg_names[STACK_POINTER_REGNUM], \ -- TARGET_64BIT ? "daddu" : "addu", \ -- reg_names[STACK_POINTER_REGNUM], \ -- reg_names[STACK_POINTER_REGNUM]); \ -- loongarch_pop_asm_switch (&loongarch_noreorder); \ -- } \ --while (0) -+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* read-only data */ -+ -+#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \ -+ do \ -+ { \ -+ fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,%s,0\n", \ -+ TARGET_64BIT ? "addi.d" : "addi.w", \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ TARGET_64BIT ? 
"st.d" : "st.w", reg_names[REGNO], \ -+ reg_names[STACK_POINTER_REGNUM]); \ -+ } \ -+ while (0) -+ -+#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \ -+ do \ -+ { \ -+ fprintf (STREAM, "\t%s\t%s,%s,0\n\t%s\t%s,%s,8\n", \ -+ TARGET_64BIT ? "ld.d" : "ld.w", reg_names[REGNO], \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ TARGET_64BIT ? "addi.d" : "addi.w", \ -+ reg_names[STACK_POINTER_REGNUM], \ -+ reg_names[STACK_POINTER_REGNUM]); \ -+ } \ -+ while (0) - - /* How to start an assembler comment. -- The leading space is important (the loongarch native assembler requires it). */ -+ The leading space is important (the loongarch native assembler requires it). -+ */ - #ifndef ASM_COMMENT_START - #define ASM_COMMENT_START " #" - #endif -- -+ - #undef SIZE_TYPE - #define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int") - - #undef PTRDIFF_TYPE - #define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int") - --/* The minimum alignment of any expanded block move. */ --#define LARCH_MIN_MOVE_MEM_ALIGN 16 -- - /* The maximum number of bytes that can be copied by one iteration of - a movmemsi loop; see loongarch_block_move_loop. */ --#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER \ -- (UNITS_PER_WORD * 4) -+#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4) - - /* The maximum number of bytes that can be copied by a straight-line - implementation of movmemsi; see loongarch_block_move_straight. We want - to make sure that any loop-based implementation will iterate at - least twice. */ --#define LARCH_MAX_MOVE_BYTES_STRAIGHT \ -- (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) -+#define LARCH_MAX_MOVE_BYTES_STRAIGHT (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) - - /* The base cost of a memcpy call, for MOVE_RATIO and friends. These - values were determined experimentally by benchmarking with CSiBE. -@@ -1847,73 +1213,29 @@ while (0) - we'll have to generate a load/store pair for each, halve the - value of LARCH_CALL_RATIO to take that into account. */ - --#define MOVE_RATIO(speed) \ -- (HAVE_movmemsi \ -+#define MOVE_RATIO(speed) \ -+ (HAVE_movmemsi \ - ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \ - : CLEAR_RATIO (speed) / 2) - - /* For CLEAR_RATIO, when optimizing for size, give a better estimate - of the length of a memset call, but use the default otherwise. */ - --#define CLEAR_RATIO(speed)\ -- ((speed) ? 15 : LARCH_CALL_RATIO) -+#define CLEAR_RATIO(speed) ((speed) ? 15 : LARCH_CALL_RATIO) - - /* This is similar to CLEAR_RATIO, but for a non-zero constant, so when - optimizing for size adjust the ratio to account for the overhead of - loading the constant and replicating it across the word. */ - --#define SET_RATIO(speed) \ -- ((speed) ? 15 : LARCH_CALL_RATIO - 2) -- --/* Since the bits of the _init and _fini function is spread across -- many object files, each potentially with its own GP, we must assume -- we need to load our GP. We don't preserve $gp or $ra, since each -- init/fini chunk is supposed to initialize $gp, and crti/crtn -- already take care of preserving $ra and, when appropriate, $gp. */ --#if (defined _ABI64 && _LARCH_SIM == _ABI64) --#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ -- asm (SECTION_OP "\n\ -- .set push\n\ -- la $r20, " USER_LABEL_PREFIX #FUNC "\n\ -- jirl $r1, $r20, 0\n\ -- .set pop\n\ -- " TEXT_SECTION_ASM_OP); --#endif --#ifndef HAVE_AS_TLS --#define HAVE_AS_TLS 0 --#endif -- --#ifndef HAVE_AS_NAN --#define HAVE_AS_NAN 0 --#endif -+#define SET_RATIO(speed) ((speed) ? 
15 : LARCH_CALL_RATIO - 2) - - #ifndef USED_FOR_TARGET --/* Information about ".set noFOO; ...; .set FOO" blocks. */ --struct loongarch_asm_switch { -- /* The FOO in the description above. */ -- const char *name; -- -- /* The current block nesting level, or 0 if we aren't in a block. */ -- int nesting_level; --}; -- - extern const enum reg_class loongarch_regno_to_class[]; --extern const char *current_function_file; /* filename current function is in */ --extern int num_source_filenames; /* current .file # */ --extern int loongarch_dbx_regno[]; - extern int loongarch_dwarf_regno[]; --extern bool loongarch_split_p[]; --extern bool loongarch_use_pcrel_pool_p[]; --extern enum processor loongarch_arch; /* which cpu to codegen for */ --extern enum processor loongarch_tune; /* which cpu to schedule for */ --extern int loongarch_isa; /* architectural level */ --extern int loongarch_isa_rev; --extern const struct loongarch_cpu_info *loongarch_arch_info; --extern const struct loongarch_cpu_info *loongarch_tune_info; --extern unsigned int loongarch_base_compression_flags; - - /* Information about a function's frame layout. */ --struct GTY(()) loongarch_frame_info { -+struct GTY (()) loongarch_frame_info -+{ - /* The size of the frame in bytes. */ - HOST_WIDE_INT total_size; - -@@ -1930,216 +1252,67 @@ struct GTY(()) loongarch_frame_info { - /* Bit X is set if the function saves or restores GPR X. */ - unsigned int mask; - -+ unsigned int gpr_saved_num; -+ - /* Likewise FPR X. */ - unsigned int fmask; - -- /* Likewise doubleword accumulator X ($acX). */ -- unsigned int acc_mask; -- -- /* The number of GPRs, FPRs, doubleword accumulators and COP0 -- registers saved. */ -- unsigned int num_gp; -- unsigned int num_fp; -- unsigned int num_acc; -- unsigned int num_cop0_regs; -- -- /* The offset of the topmost GPR, FPR, accumulator and COP0-register -- save slots from the top of the frame, or zero if no such slots are -- needed. */ -- HOST_WIDE_INT gp_save_offset; -- HOST_WIDE_INT fp_save_offset; -- HOST_WIDE_INT acc_save_offset; -- HOST_WIDE_INT cop0_save_offset; -- -- /* Likewise, but giving offsets from the bottom of the frame. */ -+ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */ -+ unsigned save_libcall_adjustment; -+ -+ /* Offsets of fixed-point and floating-point save areas from frame -+ bottom. */ - HOST_WIDE_INT gp_sp_offset; - HOST_WIDE_INT fp_sp_offset; -- HOST_WIDE_INT acc_sp_offset; -- HOST_WIDE_INT cop0_sp_offset; - -- /* Similar, but the value passed to _mcount. */ -- HOST_WIDE_INT ra_fp_offset; -- -- /* The offset of arg_pointer_rtx from the bottom of the frame. */ -- HOST_WIDE_INT arg_pointer_offset; -+ /* Offset of virtual frame pointer from stack pointer/frame bottom. */ -+ HOST_WIDE_INT frame_pointer_offset; - -- /* The offset of hard_frame_pointer_rtx from the bottom of the frame. */ -+ /* Offset of hard frame pointer from stack pointer/frame bottom. */ - HOST_WIDE_INT hard_frame_pointer_offset; - -- /* How much the GPR save/restore routines adjust sp (or 0 if unused). */ -- unsigned save_libcall_adjustment; -- -- /* Offset of virtual frame pointer from stack pointer/frame bottom */ -- HOST_WIDE_INT frame_pointer_offset; --}; -- --/* Enumeration for masked vectored (VI) and non-masked (EIC) interrupts. 
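With UNITS_PER_WORD == 8, the block-move limits above work out to 32 bytes per loop iteration and 64 bytes for a straight-line copy, so a loop-based movmemsi always runs at least twice before it is preferred. The arithmetic, spelled out (stand-alone C; constant names mirror the patch, LP64 assumed):

#include <assert.h>

enum { UNITS_PER_WORD = 8 };
enum { LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER = UNITS_PER_WORD * 4 };
enum { LARCH_MAX_MOVE_BYTES_STRAIGHT = LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2 };

int
main (void)
{
  assert (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER == 32);
  assert (LARCH_MAX_MOVE_BYTES_STRAIGHT == 64);
  /* MOVE_RATIO when movmemsi is available: 32 / 8 == 4 words.  */
  assert (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD == 4);
  return 0;
}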
*/ --enum loongarch_int_mask --{ -- INT_MASK_EIC = -1, -- INT_MASK_SW0 = 0, -- INT_MASK_SW1 = 1, -- INT_MASK_HW0 = 2, -- INT_MASK_HW1 = 3, -- INT_MASK_HW2 = 4, -- INT_MASK_HW3 = 5, -- INT_MASK_HW4 = 6, -- INT_MASK_HW5 = 7 -+ /* The offset of arg_pointer_rtx from the bottom of the frame. */ -+ HOST_WIDE_INT arg_pointer_offset; - }; - --/* Enumeration to mark the existence of the shadow register set. -- SHADOW_SET_INTSTACK indicates a shadow register set with a valid stack -- pointer. */ --enum loongarch_shadow_set -+struct GTY (()) machine_function - { -- SHADOW_SET_NO, -- SHADOW_SET_YES, -- SHADOW_SET_INTSTACK --}; -- --struct GTY(()) machine_function { - /* The next floating-point condition-code register to allocate -- for 8CC targets, relative to ST_REG_FIRST. */ -+ for 8CC targets, relative to FCC_REG_FIRST. */ - unsigned int next_fcc; - - /* The number of extra stack bytes taken up by register varargs. - This area is allocated by the callee at the very top of the frame. */ - int varargs_size; - -- /* The current frame information, calculated by loongarch_compute_frame_info. */ -+ /* The current frame information, calculated by loongarch_compute_frame_info. -+ */ - struct loongarch_frame_info frame; -- -- /* How many instructions it takes to load a label into $AT, or 0 if -- this property hasn't yet been calculated. */ -- unsigned int load_label_num_insns; -- -- /* True if loongarch_adjust_insn_length should ignore an instruction's -- hazard attribute. */ -- bool ignore_hazard_length_p; -- -- /* True if the whole function is suitable for .set noreorder and -- .set nomacro. */ -- bool all_noreorder_p; -- -- /* True if the function has "inflexible" and "flexible" references -- to the global pointer. See loongarch_cfun_has_inflexible_gp_ref_p -- and loongarch_cfun_has_flexible_gp_ref_p for details. */ -- bool has_inflexible_gp_insn_p; -- bool has_flexible_gp_insn_p; -- -- /* True if the function's prologue must load the global pointer -- value into pic_offset_table_rtx and store the same value in -- the function's cprestore slot (if any). Even if this value -- is currently false, we may decide to set it to true later; -- see loongarch_must_initialize_gp_p () for details. */ -- bool must_initialize_gp_p; -- -- /* True if the current function must restore $gp after any potential -- clobber. This value is only meaningful during the first post-epilogue -- split_insns pass; see loongarch_must_initialize_gp_p () for details. */ -- bool must_restore_gp_when_clobbered_p; -- -- /* True if this is an interrupt handler. */ -- bool interrupt_handler_p; -- -- /* Records the way in which interrupts should be masked. Only used if -- interrupts are not kept masked. */ -- enum loongarch_int_mask int_mask; -- -- /* Records if this is an interrupt handler that uses shadow registers. */ -- enum loongarch_shadow_set use_shadow_register_set; -- -- /* True if this is an interrupt handler that should keep interrupts -- masked. */ -- bool keep_interrupts_masked_p; -- -- /* True if this is an interrupt handler that should use DERET -- instead of ERET. */ -- bool use_debug_exception_return_p; -- -- /* True if at least one of the formal parameters to a function must be -- written to the frame header (probably so its address can be taken). */ -- bool does_not_use_frame_header; -- -- /* True if none of the functions that are called by this function need -- stack space allocated for their arguments. 
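The frame's "mask" field records callee-saved GPRs one bit per register, matching the CALLEE_SAVED_REG_NUMBER mapping near the top of the header ($r22-$r31 map to save slots 0-9). A sketch of filling and counting it (stand-alone C; this is not the patch's real loongarch_compute_frame_info):

#include <assert.h>

/* $r22..$r31 are the callee-saved GPRs; map them to save slots 0..9.  */
static int
callee_saved_reg_number (int regno)
{
  return regno >= 22 && regno <= 31 ? regno - 22 : -1;
}

int
main (void)
{
  unsigned mask = 0, gpr_saved_num = 0;
  int used[] = { 22, 23, 31 };	/* pretend these registers are clobbered */
  for (int i = 0; i < 3; i++)
    {
      mask |= 1u << callee_saved_reg_number (used[i]);
      gpr_saved_num++;
    }
  assert (mask == ((1u << 0) | (1u << 1) | (1u << 9)));
  assert (gpr_saved_num == 3);
  return 0;
}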
*/ -- bool optimize_call_stack; -- -- /* True if one of the functions calling this function may not allocate -- a frame header. */ -- bool callers_may_not_allocate_frame; -- -- /* True if GCC stored callee saved registers in the frame header. */ -- bool use_frame_header_for_callee_saved_regs; - }; - #endif - --/* Enable querying of DFA units. */ --#define CPU_UNITS_QUERY 0 -- --/* As on most targets, we want the .eh_frame section to be read-only where -- possible. And as on most targets, this means two things: -- -- (a) Non-locally-binding pointers must have an indirect encoding, -- so that the addresses in the .eh_frame section itself become -- locally-binding. -- -- (b) A shared library's .eh_frame section must encode locally-binding -- pointers in a relative (relocation-free) form. -- -- However, LARCH has traditionally not allowed directives like: -- -- .long x-. -- -- in cases where "x" is in a different section, or is not defined in the -- same assembly file. We are therefore unable to emit the PC-relative -- form required by (b) at assembly time. -- -- Fortunately, the linker is able to convert absolute addresses into -- PC-relative addresses on our behalf. Unfortunately, only certain -- versions of the linker know how to do this for indirect pointers, -- and for personality data. We must fall back on using writable -- .eh_frame sections for shared libraries if the linker does not -- support this feature. */ --#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ -+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ - (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr) - --#define SWITCHABLE_TARGET 1 -- --/* Several named LARCH patterns depend on Pmode. These patterns have the -- form _si for Pmode == SImode and _di for Pmode == DImode. -+/* Several named LoongArch patterns depend on Pmode. These patterns have the -+ form si for Pmode == SImode and di for Pmode == DImode. - Add the appropriate suffix to generator function NAME and invoke it - with arguments ARGS. */ - #define PMODE_INSN(NAME, ARGS) \ -- (Pmode == SImode ? NAME ## _si ARGS : NAME ## _di ARGS) -+ (Pmode == SImode ? NAME##si ARGS : NAME##di ARGS) -+ -+/* Do emit .note.GNU-stack by default. */ -+#ifndef NEED_INDICATE_EXEC_STACK -+#define NEED_INDICATE_EXEC_STACK 1 -+#endif - --/***********************/ --/* N_LARCH-PORT */ --/***********************/ - /* The `Q' extension is not yet supported. */ --/* TODO: according to march */ -+/* TODO: according to march. */ - #define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4) - - /* The largest type that can be passed in floating-point registers. */ --/* TODO: according to mabi */ --#define UNITS_PER_FP_ARG (TARGET_HARD_FLOAT ? (TARGET_64BIT ? 8 : 4) : 0) -- --/* Internal macros to classify an ISA register's type. */ -- --#define GP_TEMP_FIRST (GP_REG_FIRST + 12) -- --#define CALLEE_SAVED_REG_NUMBER(REGNO) \ -- ((REGNO) >= 22 && (REGNO) <= 31 ? (REGNO) - 22 : -1) -- --#define N_LARCH_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1) --#define N_LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, N_LARCH_PROLOGUE_TEMP_REGNUM) -- --#define LIBCALL_VALUE(MODE) \ -- loongarch_function_value (NULL_TREE, NULL_TREE, MODE) -- --#define FUNCTION_VALUE(VALTYPE, FUNC) \ -- loongarch_function_value (VALTYPE, FUNC, VOIDmode) -- --#define FRAME_GROWS_DOWNWARD 1 -+/* TODO: according to mabi. */ -+#define UNITS_PER_FP_ARG \ -+ (TARGET_HARD_FLOAT ? (TARGET_DOUBLE_FLOAT ? 
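PMODE_INSN pastes the pointer-mode suffix onto a generator-function name at compile time, so one call site serves both 32-bit and 64-bit pointer configurations. The token-pasting pattern in isolation (stand-alone C; the gen_probe* functions are stand-ins for what genemit would produce):

#include <assert.h>
#include <string.h>

static const char *gen_probesi (void) { return "si"; }
static const char *gen_probedi (void) { return "di"; }

/* Same shape as PMODE_INSN: append "si" or "di" to NAME and call it.  */
#define PMODE_INSN(NAME, PMODE_IS_SI) \
  ((PMODE_IS_SI) ? NAME##si () : NAME##di ())

int
main (void)
{
  assert (strcmp (PMODE_INSN (gen_probe, 0), "di") == 0);
  return 0;
}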
8 : 4) : 0) - - #define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN) -diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md -index be950c9e4..097c9f4db 100644 ---- a/gcc/config/loongarch/loongarch.md -+++ b/gcc/config/loongarch/loongarch.md -@@ -1,7 +1,7 @@ --;; Loongarch.md Machine Description for LARCH based processors --;; Copyright (C) 1989-2018 Free Software Foundation, Inc. --;; Contributed by A. Lichnewsky, lich@inria.inria.fr --;; Changes by Michael Meissner, meissner@osf.org -+;; Machine Description for LoongArch for GNU compiler. -+;; Copyright (C) 2020-2022 Free Software Foundation, Inc. -+;; Contributed by Loongson Ltd. -+;; Based on MIPS target for GNU compiler. - - ;; This file is part of GCC. - -@@ -19,118 +19,96 @@ - ;; along with GCC; see the file COPYING3. If not see - ;; . - --(define_enum "processor" [ -- loongarch -- loongarch64 -- la464 --]) -- - (define_c_enum "unspec" [ - ;; Integer operations that are too cumbersome to describe directly. -- UNSPEC_WSBH -- UNSPEC_DSBH -- UNSPEC_DSHD -+ UNSPEC_REVB_2H -+ UNSPEC_REVB_4H -+ UNSPEC_REVH_D - - ;; Floating-point moves. - UNSPEC_LOAD_LOW - UNSPEC_LOAD_HIGH - UNSPEC_STORE_WORD - UNSPEC_MOVGR2FRH -+ UNSPEC_MOVGR2FR - UNSPEC_MOVFRH2GR -+ UNSPEC_MOVFR2GR -+ UNSPEC_MOVFCC2GR -+ UNSPEC_MOVGR2FCC -+ UNSPEC_MOVFR2FCC - -- ;; Floating-point environment. -- UNSPEC_MOVFCSR2GR -- UNSPEC_MOVGR2FCSR -+ ;; Floating point unspecs. -+ UNSPEC_FRINT -+ UNSPEC_FCLASS -+ UNSPEC_FCOPYSIGN - -- ;; GP manipulation. -+ ;; Override return address for exception handling. - UNSPEC_EH_RETURN - -- ;; -- UNSPEC_FRINT -- UNSPEC_FCLASS -+ ;; Bit operation - UNSPEC_BYTEPICK_W - UNSPEC_BYTEPICK_D - UNSPEC_BITREV_4B - UNSPEC_BITREV_8B - -- ;; Symbolic accesses. -- UNSPEC_LOAD_CALL -- -- ;; Blockage and synchronisation. -- UNSPEC_BLOCKAGE -- UNSPEC_DBAR -- UNSPEC_IBAR -- -- ;; CPUCFG -- UNSPEC_CPUCFG -- UNSPEC_ASRTLE_D -- UNSPEC_ASRTGT_D -- -- UNSPEC_CSRRD -- UNSPEC_CSRWR -- UNSPEC_CSRXCHG -- UNSPEC_IOCSRRD -- UNSPEC_IOCSRWR -- -- ;; cacop -- UNSPEC_CACOP -- -- ;; pte -- UNSPEC_LDDIR -- UNSPEC_LDPTE -- -- ;; Cache manipulation. -- UNSPEC_LARCH_CACHE -- -- ;; Interrupt handling. -- UNSPEC_ERTN -- UNSPEC_DI -- UNSPEC_EHB -- UNSPEC_RDPGPR -- -- ;; Used in a call expression in place of args_size. It's present for PIC -- ;; indirect calls where it contains args_size and the function symbol. -- UNSPEC_CALL_ATTR -- -- -- ;; Stack checking. -- UNSPEC_PROBE_STACK_RANGE -- -- ;; The `.insn' pseudo-op. -- UNSPEC_INSN_PSEUDO -- - ;; TLS - UNSPEC_TLS_GD - UNSPEC_TLS_LD - UNSPEC_TLS_LE - UNSPEC_TLS_IE - -- UNSPEC_LU52I_D -- -+ ;; Stack tie - UNSPEC_TIE - - ;; CRC - UNSPEC_CRC - UNSPEC_CRCC -- UNSPEC_ADDRESS_FIRST --]) - --(define_c_enum "unspecv" [ -- ;; Register save and restore. -- UNSPECV_GPR_SAVE -- UNSPECV_GPR_RESTORE -+ ;; RSQRT -+ UNSPEC_RSQRT -+ UNSPEC_RSQRTE - -- UNSPECV_MOVE_EXTREME -+ ;; RECIP -+ UNSPEC_RECIPE - ]) - -+(define_c_enum "unspecv" [ -+ ;; Blockage and synchronisation. 
-+ UNSPECV_BLOCKAGE -+ UNSPECV_DBAR -+ UNSPECV_IBAR -+ -+ ;; Privileged instructions -+ UNSPECV_CSRRD -+ UNSPECV_CSRWR -+ UNSPECV_CSRXCHG -+ UNSPECV_IOCSRRD -+ UNSPECV_IOCSRWR -+ UNSPECV_CACOP -+ UNSPECV_LDDIR -+ UNSPECV_LDPTE -+ UNSPECV_ERTN -+ -+ ;; Stack checking -+ UNSPECV_PROBE_STACK_RANGE -+ -+ ;; Floating-point environment -+ UNSPECV_MOVFCSR2GR -+ UNSPECV_MOVGR2FCSR -+ -+ ;; Others -+ UNSPECV_CPUCFG -+ UNSPECV_ASRTLE_D -+ UNSPECV_ASRTGT_D -+ UNSPECV_SYSCALL -+ UNSPECV_BREAK -+]) - - (define_constants - [(RETURN_ADDR_REGNUM 1) - (T0_REGNUM 12) - (T1_REGNUM 13) - (S0_REGNUM 23) -- (S1_REGNUM 24) -- (S2_REGNUM 25) - - ;; PIC long branch sequences are never longer than 100 bytes. - (MAX_PIC_BRANCH_LENGTH 100) -@@ -148,9 +126,9 @@ - (define_attr "got" "unset,load" - (const_string "unset")) - --;; For jal instructions, this attribute is DIRECT when the target address -+;; For jirl instructions, this attribute is DIRECT when the target address - ;; is symbolic and INDIRECT when it is a register. --(define_attr "jal" "unset,direct,indirect" -+(define_attr "jirl" "unset,direct,indirect" - (const_string "unset")) - - -@@ -158,7 +136,7 @@ - ;; are as for "type" (see below) but there are also the following - ;; move-specific values: - ;; --;; sll0 "sll DEST,SRC,0", which on 64-bit targets is guaranteed -+;; sll0 "slli.w DEST,SRC,0", which on 64-bit targets is guaranteed - ;; to produce a sign-extended DEST, even if SRC is not - ;; properly sign-extended - ;; pick_ins BSTRPICK.W, BSTRPICK.D, BSTRINS.W or BSTRINS.D instruction -@@ -207,59 +185,6 @@ - (const_string "yes")] - (const_string "no"))) - --;; Attributes describing a sync loop. These loops have the form: --;; --;; if (RELEASE_BARRIER == YES) sync --;; 1: OLDVAL = *MEM --;; if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2 --;; CMP = 0 [delay slot] --;; $TMP1 = OLDVAL & EXCLUSIVE_MASK --;; $TMP2 = INSN1 (OLDVAL, INSN1_OP2) --;; $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK) --;; $AT |= $TMP1 | $TMP3 --;; if (!commit (*MEM = $AT)) goto 1. --;; if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot] --;; CMP = 1 --;; if (ACQUIRE_BARRIER == YES) sync --;; 2: --;; --;; where "$" values are temporaries and where the other values are --;; specified by the attributes below. Values are specified as operand --;; numbers and insns are specified as enums. If no operand number is --;; specified, the following values are used instead: --;; --;; - OLDVAL: $AT --;; - CMP: NONE --;; - NEWVAL: $AT --;; - INCLUSIVE_MASK: -1 --;; - REQUIRED_OLDVAL: OLDVAL & INCLUSIVE_MASK --;; - EXCLUSIVE_MASK: 0 --;; --;; MEM and INSN1_OP2 are required. --;; --;; Ideally, the operand attributes would be integers, with -1 meaning "none", --;; but the gen* programs don't yet support that. --(define_attr "sync_mem" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_oldval" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_cmp" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_newval" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_inclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_exclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_required_oldval" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_insn1_op2" "none,0,1,2,3,4,5" (const_string "none")) --(define_attr "sync_insn1" "move,li,addu,addiu,subu,and,andi,or,ori,xor,xori" -- (const_string "move")) --(define_attr "sync_insn2" "nop,and,xor,not" -- (const_string "nop")) --;; Memory model specifier. 
--;; "0"-"9" values specify the operand that stores the memory model value. --;; "10" specifies MEMMODEL_ACQ_REL, --;; "11" specifies MEMMODEL_ACQUIRE. --(define_attr "sync_memmodel" "" (const_int 10)) -- --;; Accumulator operand for madd patterns. --(define_attr "accum_in" "none,0,1,2,3,4,5" (const_string "none")) -- - ;; Classification of each insn. - ;; branch conditional branch - ;; jump unconditional jump -@@ -273,8 +198,8 @@ - ;; prefetch memory prefetch (register + offset) - ;; prefetchx memory indexed prefetch (register + register) - ;; condmove conditional moves --;; mgtf move generate register to float register --;; mftg move float register to generate register -+;; mgtf move general-purpose register to floating point register -+;; mftg move floating point register to general-purpose register - ;; const load constant - ;; arith integer arithmetic instructions - ;; logical integer logical instructions -@@ -283,10 +208,9 @@ - ;; signext sign extend instructions - ;; clz the clz and clo instructions - ;; trap trap if instructions --;; imul integer multiply 2 operands --;; imul3 integer multiply 3 operands --;; idiv3 integer divide 3 operands --;; move integer register move ({,D}ADD{,U} with rt = 0) -+;; imul integer multiply -+;; idiv integer divide -+;; move integer move - ;; fmove floating point register move - ;; fadd floating point add/subtract - ;; fmul floating point multiply -@@ -296,9 +220,11 @@ - ;; fabs floating point absolute value - ;; fneg floating point negation - ;; fcmp floating point compare -+;; fcopysign floating point copysign - ;; fcvt floating point convert - ;; fsqrt floating point square root - ;; frsqrt floating point reciprocal square root -+;; frsqrte float point reciprocal square root approximate - ;; multi multiword sequence (or user asm statements) - ;; atomic atomic memory update instruction - ;; syncloop memory atomic operation implemented as a sync loop -@@ -307,16 +233,15 @@ - (define_attr "type" - "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, - prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, -- shift,slt,signext,clz,trap,imul,imul3,idiv3,move, -- fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcvt,fsqrt, -- frsqrt,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat, -- multi,atomic,syncloop,nop,ghost, -+ shift,slt,signext,clz,trap,imul,idiv,move, -+ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fsqrt, -+ frsqrt,frsqrte,accext,accmod,multi,atomic,syncloop,nop,ghost, - simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd, - simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp, - simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill, - simd_permute,simd_shf,simd_sat,simd_pcnt,simd_copy,simd_branch,simd_clsx, - simd_fminmax,simd_logic,simd_move,simd_load,simd_store" -- (cond [(eq_attr "jal" "!unset") (const_string "call") -+ (cond [(eq_attr "jirl" "!unset") (const_string "call") - (eq_attr "got" "load") (const_string "load") - - (eq_attr "alu_type" "add,sub") (const_string "arith") -@@ -362,35 +287,22 @@ - (eq_attr "dword_mode" "yes")) - (const_string "multi") - (eq_attr "move_type" "move") (const_string "move") -- (eq_attr "move_type" "const") (const_string "const") -- (eq_attr "sync_mem" "!none") (const_string "syncloop")] -+ (eq_attr "move_type" "const") (const_string "const")] - (const_string "unknown"))) - --(define_attr "compact_form" "always,maybe,never" -- (cond [(eq_attr "jal" "direct") -- (const_string "always") -- (eq_attr "jal" 
"indirect") -- (const_string "maybe") -- (eq_attr "type" "jump") -- (const_string "maybe")] -- (const_string "never"))) -- - ;; Mode for conversion types (fcvt) --;; I2S integer to float single (SI/DI to SF) --;; I2D integer to float double (SI/DI to DF) --;; S2I float to integer (SF to SI/DI) --;; D2I float to integer (DF to SI/DI) --;; D2S double to float single --;; S2D float single to double -- --(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" -+;; I2S integer to float single (SI/DI to SF) -+;; I2D integer to float double (SI/DI to DF) -+;; S2I float to integer (SF to SI/DI) -+;; D2I float to integer (DF to SI/DI) -+;; D2S double to float single -+;; S2D float single to double -+;; C2D fcc to DI -+ -+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" - (const_string "unknown")) - --(define_attr "compression" "none,all" -- (const_string "none")) -- --;; The number of individual instructions that a non-branch pattern generates, --;; using units of BASE_INSN_LENGTH. -+;; The number of individual instructions that a non-branch pattern generates - (define_attr "insn_count" "" - (cond [;; "Ghost" instructions occupy no space. - (eq_attr "type" "ghost") -@@ -425,84 +337,30 @@ - (eq_attr "move_type" "store,fpstore") - (symbol_ref "loongarch_load_store_insns (operands[0], insn)") - -- (eq_attr "type" "idiv3") -+ (eq_attr "type" "idiv") - (symbol_ref "loongarch_idiv_insns (GET_MODE (PATTERN (insn)))")] - (const_int 1))) - --;; Length of instruction in bytes. The default is derived from "insn_count", --;; but there are special cases for branches (which must be handled here) --;; and for compressed single instructions. -- -- -- -+;; Length of instruction in bytes. - (define_attr "length" "" - (cond [ -- ;; Branch instructions have a range of [-0x20000,0x1fffc]. -- ;; If a branch is outside this range, we have a choice of two -- ;; sequences. -- ;; -- ;; For PIC, an out-of-range branch like: -- ;; -- ;; bne r1,r2,target -- ;; -- ;; becomes the equivalent of: -- ;; -- ;; beq r1,r2,1f -- ;; la rd,target -- ;; jr rd -- ;; 1: -- ;; -- ;; The non-PIC case is similar except that we use a direct -- ;; jump instead of an la/jr pair. Since the target of this -- ;; jump is an absolute 28-bit bit address (the other bits -- ;; coming from the address of the delay slot) this form cannot -- ;; cross a 256MB boundary. We could provide the option of -- ;; using la/jr in this case too, but we do not do so at -- ;; present. -- ;; -- ;; from the shorten_branches reference address. -- (eq_attr "type" "branch") -- (cond [;; Any variant can handle the 17-bit range. -- (and (le (minus (match_dup 0) (pc)) (const_int 65532)) -- (le (minus (pc) (match_dup 0)) (const_int 65534))) -- (const_int 4) -- -- ;; The non-PIC case: branch, and J. -- (match_test "TARGET_ABSOLUTE_JUMPS") -- (const_int 8)] -- -- ;; Use MAX_PIC_BRANCH_LENGTH as a (gross) overestimate. -- ;; loongarch_adjust_insn_length substitutes the correct length. -- ;; -- ;; Note that we can't simply use (symbol_ref ...) here -- ;; because genattrtab needs to know the maximum length -- ;; of an insn. -- (const_int MAX_PIC_BRANCH_LENGTH)) -- ] -- (symbol_ref "get_attr_insn_count (insn) * BASE_INSN_LENGTH"))) -- --;; Attribute describing the processor. --(define_enum_attr "cpu" "processor" -- (const (symbol_ref "loongarch_tune"))) -+ ;; Branching further than +/- 128 KiB requires two instructions. 
-+ (eq_attr "type" "branch") -+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 131064)) -+ (le (minus (pc) (match_dup 0)) (const_int 131068))) -+ (const_int 4) -+ (const_int 8))] -+ (symbol_ref "get_attr_insn_count (insn) * 4"))) - - ;; The type of hardware hazard associated with this instruction. - ;; DELAY means that the next instruction cannot read the result - ;; of this one. --(define_attr "hazard" "none,delay,forbidden_slot" -+(define_attr "hazard" "none,forbidden_slot" - (const_string "none")) - --;; Can the instruction be put into a delay slot? --(define_attr "can_delay" "no,yes" -- (if_then_else (and (eq_attr "type" "!branch,call,jump") -- (eq_attr "hazard" "none") -- (match_test "get_attr_insn_count (insn) == 1")) -- (const_string "yes") -- (const_string "no"))) -- - ;; Describe a user's asm statement. - (define_asm_attributes -- [(set_attr "type" "multi") -- (set_attr "can_delay" "no")]) -+ [(set_attr "type" "multi")]) - - ;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated - ;; from the same template. -@@ -512,141 +370,99 @@ - ;; modes. - (define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")]) - --;; Likewise, but for XLEN-sized quantities. --(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")]) -- --(define_mode_iterator MOVEP1 [SI SF]) --(define_mode_iterator MOVEP2 [SI SF]) -+;; This mode iterator allows 16-bit and 32-bit GPR patterns and 32-bit 64-bit -+;; FPR patterns to be generated from the same template. - (define_mode_iterator JOIN_MODE [HI - SI - (SF "TARGET_HARD_FLOAT") -- (DF "TARGET_HARD_FLOAT -- && TARGET_DOUBLE_FLOAT")]) -+ (DF "TARGET_DOUBLE_FLOAT")]) - - ;; This mode iterator allows :P to be used for patterns that operate on - ;; pointer-sized quantities. Exactly one of the two alternatives will match. - (define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")]) - --;; 32-bit integer moves for which we provide move patterns. --(define_mode_iterator IMOVE32 -- [SI]) -+;; Likewise, but for XLEN-sized quantities. -+(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")]) - - ;; 64-bit modes for which we provide move patterns. --(define_mode_iterator MOVE64 -- [DI DF]) -+(define_mode_iterator MOVE64 [DI DF]) - - ;; 128-bit modes for which we provide move patterns on 64-bit targets. - (define_mode_iterator MOVE128 [TI TF]) - --;; This mode iterator allows the QI and HI extension patterns to be --;; defined from the same template. -+;; Iterator for sub-32-bit integer modes. - (define_mode_iterator SHORT [QI HI]) - - ;; Likewise the 64-bit truncate-and-shift patterns. - (define_mode_iterator SUBDI [QI HI SI]) - --;; This mode iterator allows the QI HI SI and DI extension patterns to be -+;; Iterator for scalar fixed-point modes. - (define_mode_iterator QHWD [QI HI SI (DI "TARGET_64BIT")]) - -- --;; This mode iterator allows :ANYF to be used wherever a scalar or vector --;; floating-point mode is allowed. -+;; Iterator for hardware-supported floating-point modes. - (define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT") -- (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")]) -- --;; Like ANYF, but only applies to scalar modes. --(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT") -- (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")]) -+ (DF "TARGET_DOUBLE_FLOAT")]) - - ;; A floating-point mode for which moves involving FPRs may need to be split. 
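A note on the simplified "length" attribute above: the 131064/131068 bounds come from a conditional branch encoding a signed 16-bit offset in 4-byte instruction units, about +/-128 KiB of reach, less a little slack for the branch's own length. A minimal C sketch of the same test (the helper name is invented for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Mirror of the length-attribute condition: a displacement outside
   [-131068, +131064] bytes forces the two-instruction (8-byte)
   sequence instead of a single 4-byte branch.  */
static bool
branch_needs_two_insns (int64_t disp)
{
  return !(disp <= 131064 && -disp <= 131068);
}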
- (define_mode_iterator SPLITF - [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT") - (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT") -- (TF "TARGET_64BIT && TARGET_FLOAT64")]) -+ (TF "TARGET_64BIT && TARGET_DOUBLE_FLOAT")]) - --;; In GPR templates, a string like "mul." will expand to "mul" in the --;; 32-bit "mul.w" and "mul.d" in the 64-bit version. -+;; In GPR templates, a string like "mul." will expand to "mul.w" in the -+;; 32-bit version and "mul.d" in the 64-bit version. - (define_mode_attr d [(SI "w") (DI "d")]) - --;; Same as d but upper-case. --(define_mode_attr D [(SI "") (DI "D")]) -- - ;; This attribute gives the length suffix for a load or store instruction. - ;; The same suffixes work for zero and sign extensions. - (define_mode_attr size [(QI "b") (HI "h") (SI "w") (DI "d")]) - (define_mode_attr SIZE [(QI "B") (HI "H") (SI "W") (DI "D")]) - --;; This attributes gives the mode mask of a SHORT. -+;; This attribute gives the mode mask of a SHORT. - (define_mode_attr mask [(QI "0x00ff") (HI "0xffff")]) - --;; This attributes gives the size (bits) of a SHORT. --(define_mode_attr qi_hi [(QI "7") (HI "15")]) -- --;; Mode attributes for GPR loads. --(define_mode_attr load [(SI "lw") (DI "ld")]) -+;; This attribute gives the size (bits) of a SHORT. -+(define_mode_attr 7_or_15 [(QI "7") (HI "15")]) - --(define_mode_attr load_l [(SI "ld.w") (DI "ld.d")]) - ;; Instruction names for stores. - (define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd")]) - --;; Similarly for LARCH IV indexed FPR loads and stores. --(define_mode_attr floadx [(SF "fldx.s") (DF "fldx.d") (V2SF "fldx.d")]) --(define_mode_attr fstorex [(SF "fstx.s") (DF "fstx.d") (V2SF "fstx.d")]) -- --;; Similarly for LOONGSON indexed GPR loads and stores. -+;; Similarly for LoongArch indexed GPR loads and stores. - (define_mode_attr loadx [(QI "ldx.b") -- (HI "ldx.h") -- (SI "ldx.w") -- (DI "ldx.d")]) -+ (HI "ldx.h") -+ (SI "ldx.w") -+ (DI "ldx.d")]) - (define_mode_attr storex [(QI "stx.b") -- (HI "stx.h") -- (SI "stx.w") -- (DI "stx.d")]) -- --;; This attribute gives the best constraint to use for registers of --;; a given mode. --(define_mode_attr reg [(SI "d") (DI "d") (FCC "z")]) -+ (HI "stx.h") -+ (SI "stx.w") -+ (DI "stx.d")]) - - ;; This attribute gives the format suffix for floating-point operations. - (define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")]) -+(define_mode_attr ifmt [(SI "w") (DI "l")]) - - ;; This attribute gives the upper-case mode name for one unit of a - ;; floating-point mode or vector mode. - (define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF") -- (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") -- (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) -+ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") -+ (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) - - ;; As above, but in lower case. - (define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf") -- (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") -- (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") -- (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") -+ (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") -+ (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") -+ (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") - (V8SF "sf") (V4DF "df")]) - - ;; This attribute gives the integer mode that has half the size of - ;; the controlling mode. 
- (define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI") -- (V2SI "SI") (V4HI "SI") (V8QI "SI") -- (TF "DI")]) -+ (V2SI "SI") (V4HI "SI") (V8QI "SI") -+ (TF "DI")]) - -+;; This attribute gives the integer prefix for some instructions templates. - (define_mode_attr p [(SI "") (DI "d")]) - --;; This attribute works around the early SB-1 rev2 core "F2" erratum: --;; --;; In certain cases, div.s and div.ps may have a rounding error --;; and/or wrong inexact flag. --;; --;; Therefore, we only allow div.s if not working around SB-1 rev2 --;; errata or if a slight loss of precision is OK. --(define_mode_attr divide_condition -- [DF (SF "flag_unsafe_math_optimizations") -- (V2SF "TARGET_SB1 && (flag_unsafe_math_optimizations)")]) -- --;; This attribute gives the conditions under which SQRT.fmt instructions --;; can be used. --(define_mode_attr sqrt_condition -- [SF DF (V2SF "TARGET_SB1")]) -- - ;; This code iterator allows signed and unsigned widening multiplications - ;; to use the same template. - (define_code_iterator any_extend [sign_extend zero_extend]) -@@ -659,13 +475,10 @@ - ;; from the same template. - (define_code_iterator any_shift [ashift ashiftrt lshiftrt]) - --;; This code iterator allows unsigned and signed division to be generated --;; from the same template. --(define_code_iterator any_div [div udiv]) -- --;; This code iterator allows unsigned and signed modulus to be generated -+;; This code iterator allows the three bitwise instructions to be generated - ;; from the same template. --(define_code_iterator any_mod [mod umod]) -+(define_code_iterator any_bitwise [and ior xor]) -+(define_code_iterator neg_bitwise [and ior]) - - ;; This code iterator allows addition and subtraction to be generated - ;; from the same template. -@@ -679,13 +492,14 @@ - ;; from the same template - (define_code_iterator addsubmul [plus minus mult]) - -+;; This code iterator allows unsigned and signed division to be generated -+;; from the same template. -+(define_code_iterator any_div [div udiv mod umod]) -+ - ;; This code iterator allows all native floating-point comparisons to be - ;; generated from the same template. --(define_code_iterator fcond [unordered uneq unlt unle eq lt le ordered ltgt ne]) -- --;; This code iterator is used for comparisons that can be implemented --;; by swapping the operands. --(define_code_iterator swapped_fcond [ge gt unge ungt]) -+(define_code_iterator fcond [unordered uneq unlt unle eq lt le -+ ordered ltgt ne ge gt unge ungt]) - - ;; Equality operators. - (define_code_iterator equality_op [eq ne]) -@@ -725,6 +539,10 @@ - (plus "add") - (minus "sub") - (mult "mul") -+ (div "div") -+ (udiv "udiv") -+ (mod "mod") -+ (umod "umod") - (return "return") - (simple_return "simple_return")]) - -@@ -736,15 +554,13 @@ - (xor "xor") - (and "and") - (plus "addu") -- (minus "subu")]) -- --;; expands to the name of the insn that implements --;; a particular code to operate on immediate values. --(define_code_attr immediate_insn [(ior "ori") -- (xor "xori") -- (and "andi")]) -+ (minus "subu") -+ (div "div") -+ (udiv "div") -+ (mod "mod") -+ (umod "mod")]) - --;; is the c.cond.fmt condition associated with a particular code. -+;; is the fcmp.cond.fmt condition associated with a particular code. - (define_code_attr fcond [(unordered "cun") - (uneq "cueq") - (unlt "cult") -@@ -754,48 +570,17 @@ - (le "sle") - (ordered "cor") - (ltgt "sne") -- (ne "cune")]) -- --;; Similar, but for swapped conditions. 
--(define_code_attr swapped_fcond [(ge "sle") -- (gt "slt") -- (unge "cule") -- (ungt "cult")]) -- --;; The value of the bit when the branch is taken for branch_bit patterns. --;; Comparison is always against zero so this depends on the operator. --(define_code_attr bbv [(eq "0") (ne "1")]) -- --;; This is the inverse value of bbv. --(define_code_attr bbinv [(eq "1") (ne "0")]) -+ (ne "cune") -+ (ge "sge") -+ (gt "sgt") -+ (unge "cuge") -+ (ungt "cugt")]) - - ;; The sel mnemonic to use depending on the condition test. - (define_code_attr sel [(eq "masknez") (ne "maskeqz")]) -+(define_code_attr fsel_invert [(eq "%2,%3") (ne "%3,%2")]) - (define_code_attr selinv [(eq "maskeqz") (ne "masknez")]) -- --;; Pipeline descriptions. --;; --;; generic.md provides a fallback for processors without a specific --;; pipeline description. It is derived from the old define_function_unit --;; version and uses the "alu" and "imuldiv" units declared below. --;; --;; Some of the processor-specific files are also derived from old --;; define_function_unit descriptions and simply override the parts of --;; generic.md that don't apply. The other processor-specific files --;; are self-contained. --(define_automaton "alu,imuldiv") - --(define_cpu_unit "alu" "alu") --(define_cpu_unit "imuldiv" "imuldiv") -- --;; Ghost instructions produce no real code and introduce no hazards. --;; They exist purely to express an effect on dataflow. --(define_insn_reservation "ghost" 0 -- (eq_attr "type" "ghost") -- "nothing") -- --(include "generic.md") -- - ;; - ;; .................... - ;; -@@ -831,37 +616,22 @@ - [(set_attr "type" "fadd") - (set_attr "mode" "")]) - --(define_expand "add3" -- [(set (match_operand:GPR 0 "register_operand") -- (plus:GPR (match_operand:GPR 1 "register_operand") -- (match_operand:GPR 2 "arith_operand")))] -- "") -- --(define_insn "*add3" -+(define_insn "add3" - [(set (match_operand:GPR 0 "register_operand" "=r,r") - (plus:GPR (match_operand:GPR 1 "register_operand" "r,r") -- (match_operand:GPR 2 "arith_operand" "r,Q")))] -+ (match_operand:GPR 2 "arith_operand" "r,I")))] - "" --{ -- if (which_alternative == 0) -- return "add.\t%0,%1,%2"; -- else -- return "addi.\t%0,%1,%2"; --} -+ "add%i2.\t%0,%1,%2"; - [(set_attr "alu_type" "add") -- (set_attr "compression" "*,*") - (set_attr "mode" "")]) - -- - (define_insn "*addsi3_extended" - [(set (match_operand:DI 0 "register_operand" "=r,r") - (sign_extend:DI - (plus:SI (match_operand:SI 1 "register_operand" "r,r") -- (match_operand:SI 2 "arith_operand" "r,Q"))))] -+ (match_operand:SI 2 "arith_operand" "r,I"))))] - "TARGET_64BIT" -- "@ -- add.w\t%0,%1,%2 -- addi.w\t%0,%1,%2" -+ "add%i2.w\t%0,%1,%2" - [(set_attr "alu_type" "add") - (set_attr "mode" "SI")]) - -@@ -885,23 +655,23 @@ - - (define_insn "sub3" - [(set (match_operand:GPR 0 "register_operand" "=r") -- (minus:GPR (match_operand:GPR 1 "register_operand" "r") -+ (minus:GPR (match_operand:GPR 1 "register_operand" "rJ") - (match_operand:GPR 2 "register_operand" "r")))] - "" -- "sub.\t%0,%1,%2" -+ "sub.\t%0,%z1,%2" - [(set_attr "alu_type" "sub") -- (set_attr "compression" "*") - (set_attr "mode" "")]) - -+ - (define_insn "*subsi3_extended" - [(set (match_operand:DI 0 "register_operand" "=r") - (sign_extend:DI -- (minus:SI (match_operand:SI 1 "register_operand" "r") -- (match_operand:SI 2 "register_operand" "r"))))] -+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") -+ (match_operand:SI 2 "register_operand" "r"))))] - "TARGET_64BIT" -- "sub.w\t%0,%1,%2" -- [(set_attr "alu_type" "sub") -- (set_attr 
"mode" "DI")]) -+ "sub.w\t%0,%z1,%2" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "SI")]) - - ;; - ;; .................... -@@ -911,17 +681,10 @@ - ;; .................... - ;; - --(define_expand "mul3" -- [(set (match_operand:SCALARF 0 "register_operand") -- (mult:SCALARF (match_operand:SCALARF 1 "register_operand") -- (match_operand:SCALARF 2 "register_operand")))] -- "" -- "") -- --(define_insn "*mul3" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f") -- (match_operand:SCALARF 2 "register_operand" "f")))] -+(define_insn "mul3" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f")))] - "" - "fmul.\t%0,%1,%2" - [(set_attr "type" "fmul") -@@ -933,20 +696,27 @@ - (match_operand:GPR 2 "register_operand" "r")))] - "" - "mul.\t%0,%1,%2" -- [(set_attr "type" "imul3") -+ [(set_attr "type" "imul") - (set_attr "mode" "")]) - -- -- - (define_insn "mulsidi3_64bit" - [(set (match_operand:DI 0 "register_operand" "=r") - (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) - (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] -- "" -+ "TARGET_64BIT" - "mul.d\t%0,%1,%2" -- [(set_attr "type" "imul3") -+ [(set_attr "type" "imul") - (set_attr "mode" "DI")]) - -+(define_insn "*mulsi3_extended" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (sign_extend:DI -+ (mult:SI (match_operand:SI 1 "register_operand" "r") -+ (match_operand:SI 2 "register_operand" "r"))))] -+ "TARGET_64BIT" -+ "mul.w\t%0,%1,%2" -+ [(set_attr "type" "imul") -+ (set_attr "mode" "SI")]) - - ;; - ;; ........................ -@@ -956,9 +726,8 @@ - ;; ........................ - ;; - -- - (define_expand "mulditi3" -- [(set (match_operand:TI 0 "register_operand") -+ [(set (match_operand:TI 0 "register_operand") - (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand")) - (any_extend:TI (match_operand:DI 2 "register_operand"))))] - "TARGET_64BIT" -@@ -975,7 +744,7 @@ - }) - - (define_insn "muldi3_highpart" -- [(set (match_operand:DI 0 "register_operand" "=r") -+ [(set (match_operand:DI 0 "register_operand" "=r") - (truncate:DI - (lshiftrt:TI - (mult:TI (any_extend:TI -@@ -989,7 +758,7 @@ - (set_attr "mode" "DI")]) - - (define_expand "mulsidi3" -- [(set (match_operand:DI 0 "register_operand" "=r") -+ [(set (match_operand:DI 0 "register_operand" "=r") - (mult:DI (any_extend:DI - (match_operand:SI 1 "register_operand" " r")) - (any_extend:DI -@@ -1005,7 +774,7 @@ - }) - - (define_insn "mulsi3_highpart" -- [(set (match_operand:SI 0 "register_operand" "=r") -+ [(set (match_operand:SI 0 "register_operand" "=r") - (truncate:SI - (lshiftrt:DI - (mult:DI (any_extend:DI -@@ -1018,97 +787,35 @@ - [(set_attr "type" "imul") - (set_attr "mode" "SI")]) - --;; Floating point multiply accumulate instructions. - --(define_expand "fma4" -- [(set (match_operand:ANYF 0 "register_operand") -- (fma:ANYF (match_operand:ANYF 1 "register_operand") -- (match_operand:ANYF 2 "register_operand") -- (match_operand:ANYF 3 "register_operand")))] -- "TARGET_HARD_FLOAT") -+;; .................... -+;; -+;; FLOATING POINT COPYSIGN -+;; -+;; .................... -+ -+;; FLOATING POINT COPYSIGN -+;; -+;; .................... 
- --(define_insn "*fma4_madd4" -+(define_insn "copysign3" - [(set (match_operand:ANYF 0 "register_operand" "=f") -- (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") -- (match_operand:ANYF 2 "register_operand" "f") -- (match_operand:ANYF 3 "register_operand" "f")))] -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f")] -+ UNSPEC_FCOPYSIGN))] - "TARGET_HARD_FLOAT" -- "fmadd.\t%0,%1,%2,%3" -- [(set_attr "type" "fmadd") -+ "fcopysign.\t%0,%1,%2" -+ [(set_attr "type" "fcopysign") - (set_attr "mode" "")]) - --;; The fms, fnma, and fnms instructions can be used even when HONOR_NANS --;; is true because while IEEE 754-2008 requires the negate operation to --;; negate the sign of a NAN and the LARCH neg instruction does not do this, --;; the fma part of the instruction has no requirement on how the sign of --;; a NAN is handled and so the final sign bit of the entire operation is --;; undefined. -- --(define_expand "fms4" -- [(set (match_operand:ANYF 0 "register_operand") -- (fma:ANYF (match_operand:ANYF 1 "register_operand") -- (match_operand:ANYF 2 "register_operand") -- (neg:ANYF (match_operand:ANYF 3 "register_operand"))))] -- "TARGET_HARD_FLOAT") -- - --(define_insn "*fms4_msub4" -- [(set (match_operand:ANYF 0 "register_operand" "=f") -- (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") -- (match_operand:ANYF 2 "register_operand" "f") -- (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] -- "TARGET_HARD_FLOAT" -- "fmsub.\t%0,%1,%2,%3" -- [(set_attr "type" "fmadd") -- (set_attr "mode" "")]) -+;; - --;; fnma is defined in GCC as (fma (neg op1) op2 op3) --;; (-op1 * op2) + op3 ==> -(op1 * op2) + op3 ==> -((op1 * op2) - op3) --;; The loongarch nmsub instructions implement -((op1 * op2) - op3) --;; This transformation means we may return the wrong signed zero --;; so we check HONOR_SIGNED_ZEROS. - --(define_expand "fnma4" -- [(set (match_operand:ANYF 0 "register_operand") -- (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand")) -- (match_operand:ANYF 2 "register_operand") -- (match_operand:ANYF 3 "register_operand")))] -- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)") - --(define_insn "*fnma4_nmsub4" -- [(set (match_operand:ANYF 0 "register_operand" "=f") -- (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) -- (match_operand:ANYF 2 "register_operand" "f") -- (match_operand:ANYF 3 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)" -- "fnmsub.\t%0,%1,%2,%3" -- [(set_attr "type" "fmadd") -- (set_attr "mode" "")]) - --;; fnms is defined as: (fma (neg op1) op2 (neg op3)) --;; ((-op1) * op2) - op3 ==> -(op1 * op2) - op3 ==> -((op1 * op2) + op3) --;; The loongarch nmadd instructions implement -((op1 * op2) + op3) --;; This transformation means we may return the wrong signed zero --;; so we check HONOR_SIGNED_ZEROS. 
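The HONOR_SIGNED_ZEROS reasoning in the comments above can be reproduced from C: GCC's fnma means ((-a) * b) + c, while fnmsub computes -((a * b) - c), and the two can disagree only in the sign of a zero result. An illustrative check:

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double a = 1.0, b = 0.0, c = 0.0;
  double fnma_val  = fma (-a, b, c);    /* ((-a) * b) + c  ==  +0.0 */
  double nmsub_val = -fma (a, b, -c);   /* -((a * b) - c)  ==  -0.0 */
  /* Prints "0 1": equal values, different zero signs.  */
  printf ("%d %d\n", signbit (fnma_val) != 0, signbit (nmsub_val) != 0);
  return 0;
}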
- --(define_expand "fnms4" -- [(set (match_operand:ANYF 0 "register_operand") -- (fma:ANYF -- (neg:ANYF (match_operand:ANYF 1 "register_operand")) -- (match_operand:ANYF 2 "register_operand") -- (neg:ANYF (match_operand:ANYF 3 "register_operand"))))] -- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)") - --(define_insn "*fnms4_nmadd4" -- [(set (match_operand:ANYF 0 "register_operand" "=f") -- (fma:ANYF -- (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) -- (match_operand:ANYF 2 "register_operand" "f") -- (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] -- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)" -- "fnmadd.\t%0,%1,%2,%3" -- [(set_attr "type" "fmadd") -- (set_attr "mode" "")]) - - ;; - ;; .................... -@@ -1118,144 +825,315 @@ - ;; .................... - ;; - -+;; Float division and modulus. - (define_expand "div3" - [(set (match_operand:ANYF 0 "register_operand") -- (div:ANYF (match_operand:ANYF 1 "reg_or_1_operand") -+ (div:ANYF (match_operand:ANYF 1 "register_operand") - (match_operand:ANYF 2 "register_operand")))] -- "" -+ "" - { -- if (const_1_operand (operands[1], mode)) -- if (!(ISA_HAS_FP_RECIP_RSQRT (mode) -- && flag_unsafe_math_optimizations)) -- operands[1] = force_reg (mode, operands[1]); -+ if (mode == SFmode -+ && TARGET_RECIP_DIV -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math -+ && flag_unsafe_math_optimizations) -+ { -+ loongarch_emit_swdivsf (operands[0], operands[1], -+ operands[2], SFmode); -+ DONE; -+ } - }) - --;; These patterns work around the early SB-1 rev2 core "F1" erratum: --;; --;; If an mftg1 or dmftg1 happens to access the floating point register --;; file at the same time a long latency operation (div, sqrt, recip, --;; sqrt) iterates an intermediate result back through the floating --;; point register file bypass, then instead returning the correct --;; register value the mftg1 or dmftg1 operation returns the intermediate --;; result of the long latency operation. --;; --;; The workaround is to insert an unconditional 'mov' from/to the --;; long latency op destination register. -- - (define_insn "*div3" - [(set (match_operand:ANYF 0 "register_operand" "=f") - (div:ANYF (match_operand:ANYF 1 "register_operand" "f") - (match_operand:ANYF 2 "register_operand" "f")))] -- "" --{ -- return "fdiv.\t%0,%1,%2"; --} -+ "" -+ "fdiv.\t%0,%1,%2" - [(set_attr "type" "fdiv") - (set_attr "mode" "") - (set_attr "insn_count" "1")]) - -+;; In 3A5000, the reciprocal operation is the same as the division operation. -+ - (define_insn "*recip3" - [(set (match_operand:ANYF 0 "register_operand" "=f") - (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") - (match_operand:ANYF 2 "register_operand" "f")))] -- "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" --{ -- return "frecip.\t%0,%2"; --} -+ "" -+ "frecip.\t%0,%2" - [(set_attr "type" "frdiv") - (set_attr "mode" "") - (set_attr "insn_count" "1")]) - -+;; In 3A6000, frecipe calculates the approximate value of the reciprocal operation -+ -+(define_insn "recipe2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] -+ UNSPEC_RECIPE))] -+ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations && TARGET_RECIP_DIV" -+ "frecipe.\t%0,%1" -+ [(set_attr "type" "frsqrte") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ - ;; Integer division and modulus. 
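The expander just below deserves a gloss: SImode division is routed through DImode registers because the 64-bit divider expects properly sign-extended 32-bit inputs, and <optab>di3_fake keeps the SImode result tied to those DImode operands. In C terms the transformation is simply:

#include <stdint.h>

/* 32-bit signed division performed on sign-extended 64-bit inputs.
   The quotient of two sign-extended 32-bit values fits back in
   32 bits (INT32_MIN / -1 aside, which is undefined for int32_t
   division anyway).  */
static int32_t
div32_via_div64 (int32_t a, int32_t b)
{
  return (int32_t) ((int64_t) a / (int64_t) b);
}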
-+(define_expand "3" -+ [(set (match_operand:GPR 0 "register_operand") -+ (any_div:GPR (match_operand:GPR 1 "register_operand") -+ (match_operand:GPR 2 "register_operand")))] -+ "" -+{ -+ if (GET_MODE (operands[0]) == SImode) -+ { -+ rtx reg1 = gen_reg_rtx (DImode); -+ rtx reg2 = gen_reg_rtx (DImode); -+ -+ operands[1] = gen_rtx_SIGN_EXTEND (word_mode, operands[1]); -+ operands[2] = gen_rtx_SIGN_EXTEND (word_mode, operands[2]); -+ -+ emit_insn (gen_rtx_SET (reg1, operands[1])); -+ emit_insn (gen_rtx_SET (reg2, operands[2])); - --(define_insn "div3" -+ emit_insn (gen_di3_fake (operands[0], reg1, reg2)); -+ DONE; -+ } -+}) -+ -+(define_insn "*3" - [(set (match_operand:GPR 0 "register_operand" "=&r") - (any_div:GPR (match_operand:GPR 1 "register_operand" "r") - (match_operand:GPR 2 "register_operand" "r")))] - "" -- { -- return loongarch_output_division ("div.\t%0,%1,%2", operands); -- } -- [(set_attr "type" "idiv3") -+{ -+ return loongarch_output_division (".\t%0,%1,%2", operands); -+} -+ [(set_attr "type" "idiv") - (set_attr "mode" "")]) - --(define_insn "mod3" -- [(set (match_operand:GPR 0 "register_operand" "=&r") -- (any_mod:GPR (match_operand:GPR 1 "register_operand" "r") -- (match_operand:GPR 2 "register_operand" "r")))] -+(define_insn "di3_fake" -+ [(set (match_operand:SI 0 "register_operand" "=&r") -+ (any_div:SI (match_operand:DI 1 "register_operand" "r") -+ (match_operand:DI 2 "register_operand" "r")))] - "" -- { -- return loongarch_output_division ("mod.\t%0,%1,%2", operands); -- } -- [(set_attr "type" "idiv3") -- (set_attr "mode" "")]) -- --;; --;; .................... --;; --;; SQUARE ROOT --;; --;; .................... -- --;; These patterns work around the early SB-1 rev2 core "F1" erratum (see --;; "*div[sd]f3" comment for details). -- --(define_insn "sqrt2" -- [(set (match_operand:ANYF 0 "register_operand" "=f") -- (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -- "" - { -- return "fsqrt.\t%0,%1"; -+ return loongarch_output_division (".w\t%0,%1,%2", operands); - } -- [(set_attr "type" "fsqrt") -- (set_attr "mode" "") -- (set_attr "insn_count" "1")]) -+ [(set_attr "type" "idiv") -+ (set_attr "mode" "SI")]) - --(define_insn "*rsqrta" -- [(set (match_operand:ANYF 0 "register_operand" "=f") -- (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") -- (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] -- "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" --{ -- return "frsqrt.\t%0,%2"; --} -- [(set_attr "type" "frsqrt") -- (set_attr "mode" "") -- (set_attr "insn_count" "1")]) -+;; Floating point multiply accumulate instructions. - --(define_insn "*rsqrtb" -+;; a * b + c -+(define_insn "fma4" - [(set (match_operand:ANYF 0 "register_operand" "=f") -- (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") -- (match_operand:ANYF 2 "register_operand" "f"))))] -- "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" --{ -- return "frsqrt.\t%0,%2"; --} -- [(set_attr "type" "frsqrt") -- (set_attr "mode" "") -- (set_attr "insn_count" "1")]) -- --;; --;; .................... --;; --;; ABSOLUTE VALUE --;; --;; .................... -- --;; Do not use the integer abs macro instruction, since that signals an --;; exception on -2147483648 (sigh). -- --;; The "legacy" (as opposed to "2008") form of ABS.fmt is an arithmetic --;; instruction that treats all NaN inputs as invalid; it does not clear --;; their sign bit. We therefore can't use that form if the signs of --;; NaNs matter. 
-+ (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f") -+ (match_operand:ANYF 3 "register_operand" "f")))] -+ "" -+ "fmadd.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) - --(define_insn "abs2" -+;; a * b - c -+(define_insn "fms4" - [(set (match_operand:ANYF 0 "register_operand" "=f") -- (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -+ (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f") -+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] - "" -- "fabs.\t%0,%1" -- [(set_attr "type" "fabs") -+ "fmsub.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; fnma is defined in GCC as (fma (neg op1) op2 op3) -+;; (-op1 * op2) + op3 ==> -(op1 * op2) + op3 ==> -((op1 * op2) - op3) -+;; The loongarch nmsub instructions implement -((op1 * op2) - op3) -+;; This transformation means we may return the wrong signed zero -+;; so we check HONOR_SIGNED_ZEROS. -+ -+;; -a * b + c -+(define_insn "fnma4" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) -+ (match_operand:ANYF 2 "register_operand" "f") -+ (match_operand:ANYF 3 "register_operand" "f")))] -+ "!HONOR_SIGNED_ZEROS (mode)" -+ "fnmsub.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; fnms is defined as: (fma (neg op1) op2 (neg op3)) -+;; ((-op1) * op2) - op3 ==> -(op1 * op2) - op3 ==> -((op1 * op2) + op3) -+;; The loongarch nmadd instructions implement -((op1 * op2) + op3) -+;; This transformation means we may return the wrong signed zero -+;; so we check HONOR_SIGNED_ZEROS. -+ -+;; -a * b - c -+(define_insn "fnms4" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (fma:ANYF -+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) -+ (match_operand:ANYF 2 "register_operand" "f") -+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] -+ "!HONOR_SIGNED_ZEROS (mode)" -+ "fnmadd.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; -(-a * b - c), modulo signed zeros -+(define_insn "*fma4" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (neg:ANYF -+ (fma:ANYF -+ (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")) -+ (match_operand:ANYF 2 "register_operand" " f") -+ (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))] -+ "!HONOR_SIGNED_ZEROS (mode)" -+ "fmadd.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; -(-a * b + c), modulo signed zeros -+(define_insn "*fms4" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (neg:ANYF -+ (fma:ANYF -+ (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")) -+ (match_operand:ANYF 2 "register_operand" " f") -+ (match_operand:ANYF 3 "register_operand" " f"))))] -+ "!HONOR_SIGNED_ZEROS (mode)" -+ "fmsub.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; -(a * b + c) -+(define_insn "*fnms4" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (neg:ANYF -+ (fma:ANYF -+ (match_operand:ANYF 1 "register_operand" " f") -+ (match_operand:ANYF 2 "register_operand" " f") -+ (match_operand:ANYF 3 "register_operand" " f"))))] -+ "" -+ "fnmadd.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; -(a * b - c) -+(define_insn "*fnma4" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (neg:ANYF -+ (fma:ANYF -+ (match_operand:ANYF 1 "register_operand" " f") -+ (match_operand:ANYF 2 
"register_operand" " f") -+ (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))] -+ "" -+ "fnmsub.\t%0,%1,%2,%3" -+ [(set_attr "type" "fmadd") -+ (set_attr "mode" "")]) -+ -+;; -+;; .................... -+;; -+;; SQUARE ROOT -+;; -+;; .................... -+ -+(define_insn "*sqrt2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -+ "" -+ "fsqrt.\t%0,%1" -+ [(set_attr "type" "fsqrt") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ -+(define_expand "sqrt2" -+ [(set (match_operand:ANYF 0 "register_operand") -+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand")))] -+ "" -+{ -+ if (mode == SFmode -+ && TARGET_RECIP_SQRT -+ && flag_unsafe_math_optimizations -+ && !optimize_insn_for_size_p () -+ && flag_finite_math_only && !flag_trapping_math) -+ { -+ loongarch_emit_swrsqrtsf (operands[0], operands[1], SFmode, 0); -+ DONE; -+ } -+}) -+ -+(define_expand "rsqrt2" -+ [(set (match_operand:ANYF 0 "register_operand") -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand")] -+ UNSPEC_RSQRT))] -+ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" -+{ -+ if (mode == SFmode -+ && TARGET_RECIP_RSQRT -+ && flag_unsafe_math_optimizations -+ && !optimize_insn_for_size_p () -+ && flag_finite_math_only && !flag_trapping_math) -+ { -+ loongarch_emit_swrsqrtsf (operands[0], operands[1], SFmode, 1); -+ DONE; -+ } -+}) -+ -+(define_insn "*rsqrt2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] -+ UNSPEC_RSQRT))] -+ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" -+ "frsqrt.\t%0,%1" -+ [(set_attr "type" "frsqrt") -+ (set_attr "mode" "")]) -+ -+(define_insn "rsqrte" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] -+ UNSPEC_RSQRTE))] -+ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations && TARGET_RECIP_SQRT" -+ "frsqrte.\t%0,%1" -+ [(set_attr "type" "frsqrte") -+ (set_attr "mode" "")]) -+ -+(define_insn "*rsqrta" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") -+ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] -+ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" -+ "frsqrt.\t%0,%2" -+ [(set_attr "type" "frsqrt") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ -+(define_insn "*rsqrtb" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") -+ (match_operand:ANYF 2 "register_operand" "f"))))] -+ "flag_unsafe_math_optimizations" -+ "frsqrt.\t%0,%2" -+ [(set_attr "type" "frsqrt") -+ (set_attr "mode" "") -+ (set_attr "insn_count" "1")]) -+ -+;; -+;; .................... -+;; -+;; ABSOLUTE VALUE -+;; -+;; .................... -+ -+(define_insn "abs2" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] -+ "" -+ "fabs.\t%0,%1" -+ [(set_attr "type" "fabs") - (set_attr "mode" "")]) - - ;; -@@ -1290,7 +1168,54 @@ - [(set_attr "type" "clz") - (set_attr "mode" "")]) - -+;; -+;; .................... -+;; -+;; MIN/MAX -+;; -+;; .................... 
-+ -+(define_insn "smax3" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f")))] -+ "" -+ "fmax.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_insn "smin3" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f")))] -+ "" -+ "fmin.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) - -+(define_insn "smaxa3" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (if_then_else:ANYF -+ (gt (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")) -+ (abs:ANYF (match_operand:ANYF 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "" -+ "fmaxa.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) -+ -+(define_insn "smina3" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (if_then_else:ANYF -+ (lt (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")) -+ (abs:ANYF (match_operand:ANYF 2 "register_operand" "f"))) -+ (match_dup 1) -+ (match_dup 2)))] -+ "" -+ "fmina.\t%0,%1,%2" -+ [(set_attr "type" "fmove") -+ (set_attr "mode" "")]) - - ;; - ;; .................... -@@ -1299,28 +1224,21 @@ - ;; - ;; .................... - --(define_insn "negsi2" -- [(set (match_operand:SI 0 "register_operand" "=r") -- (neg:SI (match_operand:SI 1 "register_operand" "r")))] -+(define_insn "neg2" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (neg:GPR (match_operand:GPR 1 "register_operand" "r")))] - "" --{ -- return "sub.w\t%0,%.,%1"; --} -+ "sub.\t%0,%.,%1" - [(set_attr "alu_type" "sub") -- (set_attr "mode" "SI")]) -- --(define_insn "negdi2" -- [(set (match_operand:DI 0 "register_operand" "=r") -- (neg:DI (match_operand:DI 1 "register_operand" "r")))] -- "TARGET_64BIT" -- "sub.d\t%0,%.,%1" -- [(set_attr "alu_type" "sub") -- (set_attr "mode" "DI")]) -+ (set_attr "mode" "")]) - --;; The "legacy" (as opposed to "2008") form of NEG.fmt is an arithmetic --;; instruction that treats all NaN inputs as invalid; it does not flip --;; their sign bit. We therefore can't use that form if the signs of --;; NaNs matter. -+(define_insn "one_cmpl2" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (not:GPR (match_operand:GPR 1 "register_operand" "r")))] -+ "" -+ "nor\t%0,%.,%1" -+ [(set_attr "alu_type" "not") -+ (set_attr "mode" "")]) - - (define_insn "neg2" - [(set (match_operand:ANYF 0 "register_operand" "=f") -@@ -1329,17 +1247,6 @@ - "fneg.\t%0,%1" - [(set_attr "type" "fneg") - (set_attr "mode" "")]) -- --(define_insn "one_cmpl2" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (not:GPR (match_operand:GPR 1 "register_operand" "r")))] -- "" --{ -- return "nor\t%0,%.,%1"; --} -- [(set_attr "alu_type" "not") -- (set_attr "compression" "*") -- (set_attr "mode" "")]) - - - ;; -@@ -1350,133 +1257,58 @@ - ;; .................... - ;; - -- --(define_expand "and3" -- [(set (match_operand:GPR 0 "register_operand") -- (and:GPR (match_operand:GPR 1 "register_operand") -- (match_operand:GPR 2 "and_reg_operand")))]) -- --;; The middle-end is not allowed to convert ANDing with 0xffff_ffff into a --;; zero_extendsidi2 because of TARGET_TRULY_NOOP_TRUNCATION, so handle these --;; here. Note that this variant does not trigger for SI mode because we --;; require a 64-bit HOST_WIDE_INT and 0xffff_ffff wouldn't be a canonical --;; sign-extended SImode value. 
--;; --;; These are possible combinations for operand 1 and 2. --;; (r=register, mem=memory, x=match, S=split): --;; --;; \ op1 r/EXT r/!EXT mem --;; op2 --;; --;; andi x x --;; 0xff x x x --;; 0xffff x x x --;; 0xffff_ffff x S x --;; low-bitmask x --;; register x x --;; register =op1 -- --(define_insn "*and3" -- [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r") -- (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "o,o,W,r,r,r,r") -- (match_operand:GPR 2 "and_operand" "Yb,Yh,Yw,K,Yx,Yw,r")))] -- " and_operands_ok (mode, operands[1], operands[2])" --{ -- int len; -- -- switch (which_alternative) -- { -- case 0: -- operands[1] = gen_lowpart (QImode, operands[1]); -- return "ld.bu\t%0,%1"; -- case 1: -- operands[1] = gen_lowpart (HImode, operands[1]); -- return "ld.hu\t%0,%1"; -- case 2: -- operands[1] = gen_lowpart (SImode, operands[1]); -- if (loongarch_14bit_shifted_offset_address_p (XEXP (operands[1], 0), SImode)) -- return "ldptr.w\t%0,%1\n\tbstrins.d\t%0,$r0,63,32"; -- else if (loongarch_12bit_offset_address_p (XEXP (operands[1], 0), SImode)) -- return "ld.wu\t%0,%1"; -- else -- gcc_unreachable (); -- case 3: -- return "andi\t%0,%1,%x2"; -- case 4: -- len = low_bitmask_len (mode, INTVAL (operands[2])); -- operands[2] = GEN_INT (len-1); -- return "bstrpick.\t%0,%1,%2,0"; -- case 5: -- return "#"; -- case 6: -- return "and\t%0,%1,%2"; -- default: -- gcc_unreachable (); -- } --} -- [(set_attr "move_type" "load,load,load,andi,pick_ins,shift_shift,logical") -- (set_attr "compression" "*,*,*,*,*,*,*") -+(define_insn "3" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (any_bitwise:GPR (match_operand:GPR 1 "register_operand" "r,r") -+ (match_operand:GPR 2 "uns_arith_operand" "r,K")))] -+ "" -+ "%i2\t%0,%1,%2" -+ [(set_attr "type" "logical") - (set_attr "mode" "")]) - --(define_expand "ior3" -- [(set (match_operand:GPR 0 "register_operand") -- (ior:GPR (match_operand:GPR 1 "register_operand") -- (match_operand:GPR 2 "uns_arith_operand")))] -+(define_insn "and3_extended" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "r") -+ (match_operand:GPR 2 "low_bitmask_operand" "Yx")))] - "" - { --}) -+ int len; - --(define_insn "*ior3" -- [(set (match_operand:GPR 0 "register_operand" "=r,r") -- (ior:GPR (match_operand:GPR 1 "register_operand" "r,r") -- (match_operand:GPR 2 "uns_arith_operand" "r,K")))] -- "" -- "@ -- or\t%0,%1,%2 -- ori\t%0,%1,%x2" -- [(set_attr "alu_type" "or") -- (set_attr "compression" "*,*") -+ len = low_bitmask_len (mode, INTVAL (operands[2])); -+ operands[2] = GEN_INT (len-1); -+ return "bstrpick.\t%0,%1,%2,0"; -+} -+ [(set_attr "move_type" "pick_ins") - (set_attr "mode" "")]) - - (define_insn "*iorhi3" - [(set (match_operand:HI 0 "register_operand" "=r,r") -- (ior:HI (match_operand:HI 1 "register_operand" "r,r") -- (match_operand:HI 2 "uns_arith_operand" "K,r")))] -+ (ior:HI (match_operand:HI 1 "register_operand" "%r,r") -+ (match_operand:HI 2 "uns_arith_operand" "r,K")))] - "" -- "@ -- ori\t%0,%1,%x2 -- or\t%0,%1,%2" -- [(set_attr "alu_type" "or") -+ "or%i2\t%0,%1,%2" -+ [(set_attr "type" "logical") - (set_attr "mode" "HI")]) - --(define_expand "xor3" -- [(set (match_operand:GPR 0 "register_operand") -- (xor:GPR (match_operand:GPR 1 "register_operand") -- (match_operand:GPR 2 "uns_arith_operand")))] -- "" -- "") -- --(define_insn "*xor3" -- [(set (match_operand:GPR 0 "register_operand" "=r,r") -- (xor:GPR (match_operand:GPR 1 "register_operand" "r,r") -- (match_operand:GPR 2 
"uns_arith_operand" "r,K")))] -- "" -- "@ -- xor\t%0,%1,%2 -- xori\t%0,%1,%x2" -- [(set_attr "alu_type" "xor") -- (set_attr "compression" "*,*") -- (set_attr "mode" "")]) -- -- - (define_insn "*nor3" - [(set (match_operand:GPR 0 "register_operand" "=r") -- (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "r")) -+ (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "%r")) - (not:GPR (match_operand:GPR 2 "register_operand" "r"))))] - "" - "nor\t%0,%1,%2" -- [(set_attr "alu_type" "nor") -+ [(set_attr "type" "logical") - (set_attr "mode" "")]) -+ -+(define_insn "n" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (neg_bitwise:GPR -+ (not:GPR (match_operand:GPR 1 "register_operand" "r")) -+ (match_operand:GPR 2 "register_operand" "r")))] -+ "" -+ "n\t%0,%2,%1" -+ [(set_attr "type" "logical") -+ (set_attr "mode" "")]) -+ - - ;; - ;; .................... -@@ -1485,163 +1317,109 @@ - ;; - ;; .................... - -- -- --(define_insn "truncdfsf2" -- [(set (match_operand:SF 0 "register_operand" "=f") -- (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -- "fcvt.s.d\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "cnv_mode" "D2S") -- (set_attr "mode" "SF")]) -- --;; Integer truncation patterns. Truncating SImode values to smaller --;; modes is a no-op, as it is for most other GCC ports. Truncating --;; DImode values to SImode is not a no-op for TARGET_64BIT since we --;; need to make sure that the lower 32 bits are properly sign-extended --;; (see TARGET_TRULY_NOOP_TRUNCATION). Truncating DImode values into modes --;; smaller than SImode is equivalent to two separate truncations: --;; --;; A B --;; DI ---> HI == DI ---> SI ---> HI --;; DI ---> QI == DI ---> SI ---> QI --;; --;; Step A needs a real instruction but step B does not. -- --(define_insn "truncdisi2" -- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,ZC,m") -- (truncate:SI (match_operand:DI 1 "register_operand" "r,r,r")))] -- "TARGET_64BIT" -- "@ -- slli.w\t%0,%1,0 -- stptr.w\t%1,%0 -- st.w\t%1,%0" -- [(set_attr "move_type" "sll0,store,store") -- (set_attr "mode" "SI")]) -- - (define_insn "truncdi2" -- [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,m") -- (truncate:SHORT (match_operand:DI 1 "register_operand" "r,r")))] -+ [(set (match_operand:SUBDI 0 "nonimmediate_operand" "=r,m,k") -+ (truncate:SUBDI (match_operand:DI 1 "register_operand" "r,r,r")))] - "TARGET_64BIT" - "@ - slli.w\t%0,%1,0 -- st.\t%1,%0" -- [(set_attr "move_type" "sll0,store") -+ st.\t%1,%0 -+ stx.\t%1,%0" -+ [(set_attr "move_type" "sll0,store,store") - (set_attr "mode" "SI")]) - --;; Combiner patterns to optimize shift/truncate combinations. 
-- --(define_insn "*ashr_trunc" -- [(set (match_operand:SUBDI 0 "register_operand" "=r") -- (truncate:SUBDI -- (ashiftrt:DI (match_operand:DI 1 "register_operand" "r") -- (match_operand:DI 2 "const_arith_operand" ""))))] -- "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)" -- "srai.d\t%0,%1,%2" -- [(set_attr "type" "shift") -- (set_attr "mode" "")]) -+(define_insn "truncdfsf2" -+ [(set (match_operand:SF 0 "register_operand" "=f") -+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] -+ "TARGET_DOUBLE_FLOAT" -+ "fcvt.s.d\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "cnv_mode" "D2S") -+ (set_attr "mode" "SF")]) - --(define_insn "*lshr32_trunc" -- [(set (match_operand:SUBDI 0 "register_operand" "=r") -- (truncate:SUBDI -- (lshiftrt:DI (match_operand:DI 1 "register_operand" "r") -- (const_int 32))))] -- "TARGET_64BIT" -- "srai.d\t%0,%1,32" -- [(set_attr "type" "shift") -- (set_attr "mode" "")]) -+;;(define_insn "truncdisi2_extended" -+;; [(set (match_operand:SI 0 "nonimmediate_operand" "=ZC") -+;; (truncate:SI (match_operand:DI 1 "register_operand" "r")))] -+;; "TARGET_64BIT" -+;; "stptr.w\t%1,%0" -+;; [(set_attr "move_type" "store") -+;; (set_attr "mode" "SI")]) - - -- - ;; - ;; .................... - ;; - ;; ZERO EXTENSION - ;; - ;; .................... -- --;; Extension insns. -- - (define_expand "zero_extendsidi2" - [(set (match_operand:DI 0 "register_operand") -- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] -+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] - "TARGET_64BIT") - --(define_insn "*zero_extendsidi2_dext" -- [(set (match_operand:DI 0 "register_operand" "=r,r,r") -- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,ZC,W")))] -+(define_insn_and_split "*zero_extendsidi2_internal" -+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") -+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,ZC,k")))] - "TARGET_64BIT" - "@ - bstrpick.d\t%0,%1,31,0 -- ldptr.w\t%0,%1\n\tlu32i.d\t%0,0 -- ld.wu\t%0,%1" -- [(set_attr "move_type" "arith,load,load") -- (set_attr "mode" "DI") -- (set_attr "insn_count" "1,2,1")]) -- --;; See the comment before the *and3 pattern why this is generated by --;; combine. 
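Several surrounding patterns (and<mode>3_extended above, the zero-extension patterns below) rely on bstrpick, which extracts a bit field [msb:lsb] and zero-fills the rest. A C model of the 64-bit form (helper name invented):

#include <stdint.h>

/* bstrpick.d rd, rj, msb, lsb: zero-extended extraction of bits
   msb..lsb (msb >= lsb) of rj.  */
static uint64_t
bstrpick_d (uint64_t rj, unsigned msb, unsigned lsb)
{
  uint64_t mask = (~0ULL) >> (63 - (msb - lsb));
  return (rj >> lsb) & mask;
}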
-- --(define_expand "zero_extend2" -- [(set (match_operand:GPR 0 "register_operand") -- (zero_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] -- "" --{ --}) -- --(define_insn "*zero_extend2" -- [(set (match_operand:GPR 0 "register_operand" "=r,r") -- (zero_extend:GPR -- (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] -- "" --{ -- switch (which_alternative) -- { -- case 0: -- return "bstrpick.\t%0,%1,,0"; -- case 1: -- return "ld.u\t%0,%1"; -- default: -- gcc_unreachable (); -+ ld.wu\t%0,%1 -+ # -+ ldx.wu\t%0,%1" -+ "&& reload_completed -+ && MEM_P (operands[1]) -+ && (loongarch_14bit_shifted_offset_address_p (XEXP (operands[1], 0), SImode) -+ && !loongarch_12bit_offset_address_p (XEXP (operands[1], 0), SImode)) -+ && !paradoxical_subreg_p (operands[0])" -+ [(set (match_dup 3) (match_dup 1)) -+ (set (match_dup 0) -+ (ior:DI (zero_extend:DI (subreg:SI (match_dup 0) 0)) -+ (match_dup 2)))] -+ { -+ operands[1] = gen_lowpart (SImode, operands[1]); -+ operands[3] = gen_lowpart (SImode, operands[0]); -+ operands[2] = const0_rtx; - } --} -- [(set_attr "move_type" "pick_ins,load") -- (set_attr "compression" "*,*") -- (set_attr "mode" "")]) -- -+ [(set_attr "move_type" "arith,load,load,load") -+ (set_attr "mode" "DI")]) - --(define_expand "zero_extendqihi2" -- [(set (match_operand:HI 0 "register_operand") -- (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] -+(define_insn "zero_extend2" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r,r") -+ (zero_extend:GPR -+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m,k")))] - "" --{ --}) -+ "@ -+ bstrpick.w\t%0,%1,,0 -+ ld.u\t%0,%1 -+ ldx.u\t%0,%1" -+ [(set_attr "move_type" "pick_ins,load,load") -+ (set_attr "mode" "")]) - --(define_insn "*zero_extendqihi2" -- [(set (match_operand:HI 0 "register_operand" "=r,r") -- (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] -+(define_insn "zero_extendqihi2" -+ [(set (match_operand:HI 0 "register_operand" "=r,r,r") -+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,k,m")))] - "" - "@ -- andi\t%0,%1,0x00ff -+ andi\t%0,%1,0xff -+ ldx.bu\t%0,%1 - ld.bu\t%0,%1" -- [(set_attr "move_type" "andi,load") -+ [(set_attr "move_type" "andi,load,load") - (set_attr "mode" "HI")]) - - ;; Combiner patterns to optimize truncate/zero_extend combinations. - - (define_insn "*zero_extend_trunc" - [(set (match_operand:GPR 0 "register_operand" "=r") -- (zero_extend:GPR -+ (zero_extend:GPR - (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] - "TARGET_64BIT" -- "bstrpick.\t%0,%1,,0" -+ "bstrpick.w\t%0,%1,,0" - [(set_attr "move_type" "pick_ins") - (set_attr "mode" "")]) - - (define_insn "*zero_extendhi_truncqi" - [(set (match_operand:HI 0 "register_operand" "=r") -- (zero_extend:HI -+ (zero_extend:HI - (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] - "TARGET_64BIT" - "andi\t%0,%1,0xff" -@@ -1655,142 +1433,77 @@ - ;; - ;; .................... - --;; Extension insns. --;; Those for integer source operand are ordered widest source type first. -- --;; When TARGET_64BIT, all SImode integer and accumulator registers --;; should already be in sign-extended form (see TARGET_TRULY_NOOP_TRUNCATION --;; and truncdisi2). We can therefore get rid of register->register --;; instructions if we constrain the source to be in the same register as --;; the destination. --;; --;; Only the pre-reload scheduler sees the type of the register alternatives; --;; we split them into nothing before the post-reload scheduler runs. 
--;; These alternatives therefore have type "move" in order to reflect --;; what happens if the two pre-reload operands cannot be tied, and are --;; instead allocated two separate GPRs. We don't distinguish between --;; the GPR and LO cases because we don't usually know during pre-reload --;; scheduling whether an operand will be LO or not. - (define_insn_and_split "extendsidi2" -- [(set (match_operand:DI 0 "register_operand" "=r,r,r") -- (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m")))] -+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") -+ (sign_extend:DI -+ (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m,k")))] - "TARGET_64BIT" -- "@ -- # -- ldptr.w\t%0,%1 -- ld.w\t%0,%1" -+{ -+ switch (which_alternative) -+ { -+ case 0: -+ return "#"; -+ case 1: -+ { -+ rtx offset = XEXP (operands[1], 0); -+ if (GET_CODE (offset) == PLUS) -+ offset = XEXP (offset, 1); -+ else -+ offset = const0_rtx; -+ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) -+ return "ld.w\t%0,%1"; -+ else -+ return "ldptr.w\t%0,%1"; -+ } -+ case 2: -+ return "ld.w\t%0,%1"; -+ case 3: -+ return "ldx.w\t%0,%1"; -+ default: -+ gcc_unreachable (); -+ } -+} - "&& reload_completed && register_operand (operands[1], VOIDmode)" - [(const_int 0)] - { - emit_note (NOTE_INSN_DELETED); - DONE; - } -- [(set_attr "move_type" "move,load,load") -+ [(set_attr "move_type" "move,load,load,load") - (set_attr "mode" "DI")]) - --(define_expand "extend2" -- [(set (match_operand:GPR 0 "register_operand") -- (sign_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] -- "") -- -- --(define_insn "*extend2_se" -- [(set (match_operand:GPR 0 "register_operand" "=r,r") -- (sign_extend:GPR -- (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] -+(define_insn "extend2" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r,r") -+ (sign_extend:GPR -+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m,k")))] - "" - "@ - ext.w.\t%0,%1 -- ld.\t%0,%1" -- [(set_attr "move_type" "signext,load") -+ ld.\t%0,%1 -+ ldx.\t%0,%1" -+ [(set_attr "move_type" "signext,load,load") - (set_attr "mode" "")]) - --(define_expand "extendqihi2" -- [(set (match_operand:HI 0 "register_operand") -- (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] -- "") -- --(define_insn "*extendqihi2_seb" -- [(set (match_operand:HI 0 "register_operand" "=r,r") -- (sign_extend:HI -- (match_operand:QI 1 "nonimmediate_operand" "r,m")))] -+(define_insn "extendqihi2" -+ [(set (match_operand:HI 0 "register_operand" "=r,r,r") -+ (sign_extend:HI -+ (match_operand:QI 1 "nonimmediate_operand" "r,m,k")))] - "" - "@ - ext.w.b\t%0,%1 -- ld.b\t%0,%1" -- [(set_attr "move_type" "signext,load") -- (set_attr "mode" "SI")]) -- --;; Combiner patterns for truncate/sign_extend combinations. The SI versions --;; use the shift/truncate patterns. 
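In the extendsidi2 output above, the asm template chooses between ld.w and ldptr.w by inspecting the address offset, because the two encodings accept different immediates. A rough C rendering of the two range tests (the bounds are my reading of the ISA encodings, not taken from this hunk):

#include <stdbool.h>
#include <stdint.h>

/* ld.w: signed 12-bit byte offset.  */
static bool
fits_ld_w (int64_t off)
{
  return off >= -2048 && off <= 2047;
}

/* ldptr.w: signed 14-bit offset scaled by 4, i.e. a multiple of 4
   within +/-32 KiB.  */
static bool
fits_ldptr_w (int64_t off)
{
  return (off & 3) == 0 && off >= -32768 && off <= 32764;
}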
-- --(define_insn_and_split "*extenddi_truncate" -- [(set (match_operand:DI 0 "register_operand" "=r") -- (sign_extend:DI -- (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] -- "TARGET_64BIT" -- "#" -- "&& reload_completed" -- [(set (match_dup 2) -- (ashift:DI (match_dup 1) -- (match_dup 3))) -- (set (match_dup 0) -- (ashiftrt:DI (match_dup 2) -- (match_dup 3)))] --{ -- operands[2] = gen_lowpart (DImode, operands[0]); -- operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); --} -- [(set_attr "move_type" "shift_shift") -- (set_attr "mode" "DI")]) -- --(define_insn_and_split "*extendsi_truncate" -- [(set (match_operand:SI 0 "register_operand" "=r") -- (sign_extend:SI -- (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] -- "TARGET_64BIT" -- "#" -- "&& reload_completed" -- [(set (match_dup 2) -- (ashift:DI (match_dup 1) -- (match_dup 3))) -- (set (match_dup 0) -- (truncate:SI (ashiftrt:DI (match_dup 2) -- (match_dup 3))))] --{ -- operands[2] = gen_lowpart (DImode, operands[0]); -- operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); --} -- [(set_attr "move_type" "shift_shift") -- (set_attr "mode" "SI")]) -- --(define_insn_and_split "*extendhi_truncateqi" -- [(set (match_operand:HI 0 "register_operand" "=r") -- (sign_extend:HI -- (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] -- "TARGET_64BIT" -- "#" -- "&& reload_completed" -- [(set (match_dup 2) -- (ashift:DI (match_dup 1) -- (const_int 56))) -- (set (match_dup 0) -- (truncate:HI (ashiftrt:DI (match_dup 2) -- (const_int 56))))] --{ -- operands[2] = gen_lowpart (DImode, operands[0]); --} -- [(set_attr "move_type" "shift_shift") -+ ld.b\t%0,%1 -+ ldx.b\t%0,%1" -+ [(set_attr "move_type" "signext,load,load") - (set_attr "mode" "SI")]) - - (define_insn "extendsfdf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - "fcvt.d.s\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "cnv_mode" "S2D") -- (set_attr "mode" "DF")]) -+ [(set_attr "type" "fcvt") -+ (set_attr "cnv_mode" "S2D") -+ (set_attr "mode" "DF")]) - - ;; - ;; .................... -@@ -1799,104 +1512,60 @@ - ;; - ;; .................... 
- --(define_expand "fix_truncdfsi2" -- [(set (match_operand:SI 0 "register_operand") -- (fix:SI (match_operand:DF 1 "register_operand")))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" --"" --) -- --(define_insn "fix_truncdfsi2_insn" -- [(set (match_operand:SI 0 "register_operand" "=f") -- (fix:SI (match_operand:DF 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -- "ftintrz.w.d %0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "DF") -- (set_attr "cnv_mode" "D2I")]) -- -- --(define_expand "fix_truncsfsi2" -- [(set (match_operand:SI 0 "register_operand") -- (fix:SI (match_operand:SF 1 "register_operand")))] -- "TARGET_HARD_FLOAT" --"" --) -- --(define_insn "fix_truncsfsi2_insn" -- [(set (match_operand:SI 0 "register_operand" "=f") -- (fix:SI (match_operand:SF 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT" -- "ftintrz.w.s %0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "SF") -- (set_attr "cnv_mode" "S2I")]) -- -- --(define_insn "fix_truncdfdi2" -- [(set (match_operand:DI 0 "register_operand" "=f") -- (fix:DI (match_operand:DF 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -- "ftintrz.l.d %0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "DF") -- (set_attr "cnv_mode" "D2I")]) -+;; conversion of a floating-point value to a integer - -+(define_insn "fix_trunc2" -+ [(set (match_operand:GPR 0 "register_operand" "=f") -+ (fix:GPR (match_operand:ANYF 1 "register_operand" "f")))] -+ "" -+ "ftintrz..\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "")]) - --(define_insn "fix_truncsfdi2" -- [(set (match_operand:DI 0 "register_operand" "=f") -- (fix:DI (match_operand:SF 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -- "ftintrz.l.s %0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "SF") -- (set_attr "cnv_mode" "S2I")]) -- -+;; conversion of an integral (or boolean) value to a floating-point value - - (define_insn "floatsidf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (float:DF (match_operand:SI 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - "ffint.d.w\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "DF") -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "DF") - (set_attr "cnv_mode" "I2D")]) - -- - (define_insn "floatdidf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (float:DF (match_operand:DI 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - "ffint.d.l\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "DF") -- (set_attr "cnv_mode" "I2D")]) -- -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "DF") -+ (set_attr "cnv_mode" "I2D")]) - - (define_insn "floatsisf2" - [(set (match_operand:SF 0 "register_operand" "=f") - (float:SF (match_operand:SI 1 "register_operand" "f")))] - "TARGET_HARD_FLOAT" - "ffint.s.w\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "SF") -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "SF") - (set_attr "cnv_mode" "I2S")]) - -- - (define_insn "floatdisf2" - [(set (match_operand:SF 0 "register_operand" "=f") - (float:SF (match_operand:DI 1 "register_operand" "f")))] -- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - "ffint.s.l\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "SF") -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "SF") - (set_attr "cnv_mode" "I2S")]) - -+;; Convert a floating-point value to an 
unsigned integer. - - (define_expand "fixuns_truncdfsi2" - [(set (match_operand:SI 0 "register_operand") - (unsigned_fix:SI (match_operand:DF 1 "register_operand")))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - { - rtx reg1 = gen_reg_rtx (DFmode); - rtx reg2 = gen_reg_rtx (DFmode); -@@ -1908,41 +1577,38 @@ - - real_2expN (&offset, 31, DFmode); - -- if (reg1) /* Turn off complaints about unreached code. */ -- { -- loongarch_emit_move (reg1, const_double_from_real_value (offset, DFmode)); -- do_pending_stack_adjust (); -+ loongarch_emit_move (reg1, -+ const_double_from_real_value (offset, DFmode)); -+ do_pending_stack_adjust (); - -- test = gen_rtx_GE (VOIDmode, operands[1], reg1); -- emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); -+ test = gen_rtx_GE (VOIDmode, operands[1], reg1); -+ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); - -- emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1])); -- emit_jump_insn (gen_rtx_SET (pc_rtx, -- gen_rtx_LABEL_REF (VOIDmode, label2))); -- emit_barrier (); -+ emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1])); -+ emit_jump_insn (gen_rtx_SET (pc_rtx, -+ gen_rtx_LABEL_REF (VOIDmode, label2))); -+ emit_barrier (); - -- emit_label (label1); -- loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); -- loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode -- (BITMASK_HIGH, SImode))); -+ emit_label (label1); -+ loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); -+ loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode -+ (BITMASK_HIGH, SImode))); - -- emit_insn (gen_fix_truncdfsi2 (operands[0], reg2)); -- emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); -+ emit_insn (gen_fix_truncdfsi2 (operands[0], reg2)); -+ emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); - -- emit_label (label2); -+ emit_label (label2); - -- /* Allow REG_NOTES to be set on last insn (labels don't have enough -- fields, and can't be used for REG_NOTES anyway). */ -- emit_use (stack_pointer_rtx); -- DONE; -- } -+ /* Allow REG_NOTES to be set on last insn (labels don't have enough -+ fields, and can't be used for REG_NOTES anyway). */ -+ emit_use (stack_pointer_rtx); -+ DONE; - }) - -- - (define_expand "fixuns_truncdfdi2" - [(set (match_operand:DI 0 "register_operand") - (unsigned_fix:DI (match_operand:DF 1 "register_operand")))] -- "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - { - rtx reg1 = gen_reg_rtx (DFmode); - rtx reg2 = gen_reg_rtx (DFmode); -@@ -1980,7 +1646,6 @@ - DONE; - }) - -- - (define_expand "fixuns_truncsfsi2" - [(set (match_operand:SI 0 "register_operand") - (unsigned_fix:SI (match_operand:SF 1 "register_operand")))] -@@ -2022,11 +1687,10 @@ - DONE; - }) - -- - (define_expand "fixuns_truncsfdi2" - [(set (match_operand:DI 0 "register_operand") - (unsigned_fix:DI (match_operand:SF 1 "register_operand")))] -- "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" -+ "TARGET_DOUBLE_FLOAT" - { - rtx reg1 = gen_reg_rtx (SFmode); - rtx reg2 = gen_reg_rtx (SFmode); -@@ -2067,35 +1731,35 @@ - ;; - ;; .................... - ;; --;; DATA MOVEMENT -+;; EXTRACT AND INSERT - ;; - ;; .................... 
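- ;; The extzv/insv patterns below map onto bstrpick/bstrins.  Operand 2
- ;; is the field width and operand 3 the starting bit; the output code
- ;; rewrites operand 2 to width + position - 1, i.e. the field's most
- ;; significant bit.  For example, extracting an 8-bit field at bit 4
- ;; of an SImode value becomes (a sketch):
- ;;
- ;;   bstrpick.w  $r4,$r5,11,4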
- - (define_expand "extzv" -- [(set (match_operand:GPR 0 "register_operand") -- (zero_extract:GPR (match_operand:GPR 1 "register_operand") -- (match_operand 2 "const_int_operand") -- (match_operand 3 "const_int_operand")))] -+ [(set (match_operand:X 0 "register_operand") -+ (zero_extract:X (match_operand:X 1 "register_operand") -+ (match_operand 2 "const_int_operand") -+ (match_operand 3 "const_int_operand")))] - "" - { - if (!loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), -- INTVAL (operands[3]))) -+ INTVAL (operands[3]))) - FAIL; - }) - - (define_insn "*extzv" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r") -- (match_operand 2 "const_int_operand" "") -- (match_operand 3 "const_int_operand" "")))] -+ [(set (match_operand:X 0 "register_operand" "=r") -+ (zero_extract:X (match_operand:X 1 "register_operand" "r") -+ (match_operand 2 "const_int_operand" "") -+ (match_operand 3 "const_int_operand" "")))] - "loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), -- INTVAL (operands[3]))" -+ INTVAL (operands[3]))" - { -- operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]) -1 ); -+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]) - 1); - return "bstrpick.\t%0,%1,%2,%3"; - } -- [(set_attr "type" "arith") -- (set_attr "mode" "")]) -+ [(set_attr "type" "arith") -+ (set_attr "mode" "")]) - - (define_expand "insv" - [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand") -@@ -2105,7 +1769,7 @@ - "" - { - if (!loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), -- INTVAL (operands[2]))) -+ INTVAL (operands[2]))) - FAIL; - }) - -@@ -2115,26 +1779,20 @@ - (match_operand:SI 2 "const_int_operand" "")) - (match_operand:GPR 3 "reg_or_0_operand" "rJ"))] - "loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), -- INTVAL (operands[2]))" -+ INTVAL (operands[2]))" - { -- operands[1] = GEN_INT (INTVAL (operands[1]) + INTVAL (operands[2]) -1 ); -+ operands[1] = GEN_INT (INTVAL (operands[1]) + INTVAL (operands[2]) - 1); - return "bstrins.\t%0,%z3,%1,%2"; - } -- [(set_attr "type" "arith") -- (set_attr "mode" "")]) -- --;; Allow combine to split complex const_int load sequences, using operand 2 --;; to store the intermediate results. See move_operand for details. --(define_split -- [(set (match_operand:GPR 0 "register_operand") -- (match_operand:GPR 1 "splittable_const_int_operand")) -- (clobber (match_operand:GPR 2 "register_operand"))] -- "" -- [(const_int 0)] --{ -- loongarch_move_integer (operands[2], operands[0], INTVAL (operands[1])); -- DONE; --}) -+ [(set_attr "type" "arith") -+ (set_attr "mode" "")]) -+ -+;; -+;; .................... -+;; -+;; DATA MOVEMENT -+;; -+;; .................... 
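-+;; In the integer move patterns below, each constraint alternative
-+;; carries a move_type that loongarch_output_move turns into one
-+;; instruction class; roughly, as a hedged sketch for DImode:
-+;;
-+;;   move    -> move        $r4,$r5    load    -> ld.d   $r4,mem
-+;;   store   -> st.d        $r4,mem    mgtf    -> movgr2fr.d  $f0,$r4
-+;;   mftg    -> movfr2gr.d  $r4,$f0    fpload  -> fld.d  $f0,mem
-+;;   fpstore -> fst.d       $f0,mem    const   -> a lu12i.w/ori-style sequence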
- - ;; 64-bit integer moves - -@@ -2151,152 +1809,46 @@ - DONE; - }) - -- - (define_insn "*movdi_32bit" -- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") -- (match_operand:DI 1 "move_operand" "r,i,ZC,r,m,r,*J*r,*m,*f,*f"))] -+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m") -+ (match_operand:DI 1 "move_operand" "r,i,w,r,*J*r,*m,*f,*f"))] - "!TARGET_64BIT - && (register_operand (operands[0], DImode) - || reg_or_0_operand (operands[1], DImode))" - { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") -- (set (attr "mode") -- (if_then_else (eq_attr "move_type" "imul") -- (const_string "SI") -- (const_string "DI")))]) -- -+ [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore") -+ (set_attr "mode" "DI")]) - - (define_insn "*movdi_64bit" -- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") -- (match_operand:DI 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f"))] -+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m") -+ (match_operand:DI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f"))] - "TARGET_64BIT - && (register_operand (operands[0], DImode) -- || reg_or_0_operand (operands[1], DImode)) -- && !((GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == LABEL_REF) -- && symbolic_operand (operands[1], VOIDmode) -- && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME))" -+ || reg_or_0_operand (operands[1], DImode))" - { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") -+ [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore") - (set_attr "mode" "DI")]) - --(define_insn "movdi_extreme" -- [(parallel [(set (match_operand:DI 0 "register_operand" "=r") -- (unspec_volatile:DI [(match_operand:DI 1 "symbolic_operand" "")] -- UNSPECV_MOVE_EXTREME)) -- (use (match_operand:DI 2 "register_operand" "=&r"))])] -- "TARGET_64BIT && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)" -- { -- if (!loongarch_global_symbol_p (operands[1]) -- || loongarch_symbol_binds_local_p (operands[1])) -- return "la.local\t%0,%2,%1"; -- else -- return "la.global\t%0,%2,%1"; -- } -- [(set_attr "move_type" "const") -- (set_attr "mode" "DI")]) - ;; 32-bit Integer moves - --;; Unlike most other insns, the move insns can't be split with --;; different predicates, because register spilling and other parts of --;; the compiler, have memoized the insn number already. -- --(define_expand "mov" -- [(set (match_operand:IMOVE32 0 "") -- (match_operand:IMOVE32 1 ""))] -- "" --{ -- if (loongarch_legitimize_move (mode, operands[0], operands[1])) -- DONE; --}) -- --;; The difference between these two is whether or not ints are allowed --;; in FP registers (off by default, use -mdebugh to enable). 
-- --(define_insn "*mov_internal" -- [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m,*r,*z") -- (match_operand:IMOVE32 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f,*z,*r"))] -- "(register_operand (operands[0], mode) -- || reg_or_0_operand (operands[1], mode))" -- { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") -- (set_attr "compression" "all,*,*,*,*,*,*,*,*,*,*,*") -- (set_attr "mode" "SI")]) -- -- -- --;; LARCH supports loading and storing a floating point register from --;; the sum of two general registers. We use two versions for each of --;; these four instructions: one where the two general registers are --;; SImode, and one where they are DImode. This is because general --;; registers will be in SImode when they hold 32-bit values, but, --;; since the 32-bit values are always sign extended, the [ls][wd]xc1 --;; instructions will still work correctly. -- --;; ??? Perhaps it would be better to support these instructions by --;; modifying TARGET_LEGITIMATE_ADDRESS_P and friends. However, since --;; these instructions can only be used to load and store floating --;; point registers, that would probably cause trouble in reload. -- --(define_insn "*_" -- [(set (match_operand:ANYF 0 "register_operand" "=f") -- (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") -- (match_operand:P 2 "register_operand" "r"))))] -- "" -- "\t%0,%1,%2" -- [(set_attr "type" "fpidxload") -- (set_attr "mode" "")]) -- --(define_insn "*_" -- [(set (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") -- (match_operand:P 2 "register_operand" "r"))) -- (match_operand:ANYF 0 "register_operand" "f"))] -- "TARGET_HARD_FLOAT" -- "\t%0,%1,%2" -- [(set_attr "type" "fpidxstore") -- (set_attr "mode" "")]) -- --;; Loongson index address load and store. --(define_insn "*_" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (mem:GPR -- (plus:P (match_operand:P 1 "register_operand" "r") -- (match_operand:P 2 "register_operand" "r"))))] -- "" -- "\t%0,%1,%2" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -- --(define_insn "*_" -- [(set (mem:GPR (plus:P (match_operand:P 1 "register_operand" "r") -- (match_operand:P 2 "register_operand" "r"))) -- (match_operand:GPR 0 "register_operand" "r"))] -- "" -- "\t%0,%1,%2" -- [(set_attr "type" "store") -- (set_attr "mode" "")]) -- --;; SHORT mode sign_extend. 
--(define_insn "*extend__" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (sign_extend:GPR -- (mem:SHORT -- (plus:P (match_operand:P 1 "register_operand" "r") -- (match_operand:P 2 "register_operand" "r")))))] -+(define_expand "movsi" -+ [(set (match_operand:SI 0 "") -+ (match_operand:SI 1 ""))] - "" -- "\t%0,%1,%2" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+{ -+ if (loongarch_legitimize_move (SImode, operands[0], operands[1])) -+ DONE; -+}) - --(define_insn "*extend_" -- [(set (mem:SHORT (plus:P (match_operand:P 1 "register_operand" "r") -- (match_operand:P 2 "register_operand" "r"))) -- (match_operand:SHORT 0 "register_operand" "r"))] -- "" -- "\t%0,%1,%2" -- [(set_attr "type" "store") -+(define_insn "*movsi_internal" -+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m,*r,*z") -+ (match_operand:SI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f,*z,*r"))] -+ "(register_operand (operands[0], SImode) -+ || reg_or_0_operand (operands[1], SImode))" -+ { return loongarch_output_move (operands[0], operands[1]); } -+ [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") - (set_attr "mode" "SI")]) - -- - ;; 16-bit Integer moves - - ;; Unlike most other insns, the move insns can't be split with -@@ -2314,13 +1866,12 @@ - }) - - (define_insn "*movhi_internal" -- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m") -- (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ"))] -+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m,r,k") -+ (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ,k,rJ"))] - "(register_operand (operands[0], HImode) - || reg_or_0_operand (operands[1], HImode))" - { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "move,const,const,load,store") -- (set_attr "compression" "all,all,*,*,*") -+ [(set_attr "move_type" "move,const,const,load,store,load,store") - (set_attr "mode" "HI")]) - - ;; 8-bit Integer moves -@@ -2340,13 +1891,12 @@ - }) - - (define_insn "*movqi_internal" -- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m") -- (match_operand:QI 1 "move_operand" "r,I,m,rJ"))] -+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,r,k") -+ (match_operand:QI 1 "move_operand" "r,I,m,rJ,k,rJ"))] - "(register_operand (operands[0], QImode) - || reg_or_0_operand (operands[1], QImode))" - { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "move,const,load,store") -- (set_attr "compression" "all,*,*,*") -+ [(set_attr "move_type" "move,const,load,store,load,store") - (set_attr "mode" "QI")]) - - ;; 32-bit floating point moves -@@ -2361,13 +1911,13 @@ - }) - - (define_insn "*movsf_hardfloat" -- [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") -- (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))] -+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m") -+ (match_operand:SF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*G*r,*m,*r"))] - "TARGET_HARD_FLOAT - && (register_operand (operands[0], SFmode) - || reg_or_0_operand (operands[1], SFmode))" - { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") -+ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store") - (set_attr "mode" "SF")]) - - (define_insn "*movsf_softfloat" -@@ -2380,7 +1930,6 @@ - [(set_attr "move_type" "move,load,store") - (set_attr "mode" "SF")]) - -- - ;; 64-bit 
floating point moves - - (define_expand "movdf" -@@ -2393,13 +1942,13 @@ - }) - - (define_insn "*movdf_hardfloat" -- [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") -- (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT -+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m") -+ (match_operand:DF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*r*G,*m,*r"))] -+ "TARGET_DOUBLE_FLOAT - && (register_operand (operands[0], DFmode) - || reg_or_0_operand (operands[1], DFmode))" - { return loongarch_output_move (operands[0], operands[1]); } -- [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") -+ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store") - (set_attr "mode" "DF")]) - - (define_insn "*movdf_softfloat" -@@ -2433,11 +1982,10 @@ - { return loongarch_output_move (operands[0], operands[1]); } - [(set_attr "move_type" "move,const,load,store") - (set (attr "mode") -- (if_then_else (eq_attr "move_type" "imul") -+ (if_then_else (eq_attr "move_type" "imul") - (const_string "SI") - (const_string "TI")))]) - -- - ;; 128-bit floating point moves - - (define_expand "movtf" -@@ -2460,11 +2008,10 @@ - [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore") - (set_attr "mode" "TF")]) - -- - (define_split - [(set (match_operand:MOVE64 0 "nonimmediate_operand") - (match_operand:MOVE64 1 "move_operand"))] -- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" - [(const_int 0)] - { - loongarch_split_move_insn (operands[0], operands[1], curr_insn); -@@ -2474,7 +2021,7 @@ - (define_split - [(set (match_operand:MOVE128 0 "nonimmediate_operand") - (match_operand:MOVE128 1 "move_operand"))] -- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" - [(const_int 0)] - { - loongarch_split_move_insn (operands[0], operands[1], curr_insn); -@@ -2484,7 +2031,7 @@ - ;; Emit a doubleword move in which exactly one of the operands is - ;; a floating-point register. We can't just emit two normal moves - ;; because of the constraints imposed by the FPU register model; --;; see loongarch_cannot_change_mode_class for details. Instead, we keep -+;; see loongarch_can_change_mode_class for details. Instead, we keep - ;; the FPR whole and use special patterns to refer to each word of - ;; the other operand. - -@@ -2516,6 +2063,108 @@ - DONE; - }) - -+;; Clear one FCC register -+ -+(define_insn "movfcc" -+ [(set (match_operand:FCC 0 "register_operand" "=z") -+ (const_int 0))] -+ "" -+ "movgr2cf\t%0,$r0") -+ -+;; Conditional move instructions. -+ -+(define_insn "*sel_using_" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (if_then_else:GPR -+ (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "r,r") -+ (const_int 0)) -+ (match_operand:GPR 2 "reg_or_0_operand" "r,J") -+ (match_operand:GPR 3 "reg_or_0_operand" "J,r")))] -+ "register_operand (operands[2], mode) -+ != register_operand (operands[3], mode)" -+ "@ -+ \t%0,%2,%1 -+ \t%0,%3,%1" -+ [(set_attr "type" "condmove") -+ (set_attr "mode" "")]) -+ -+;; fsel copies the 3rd argument when the 1st is non-zero and the 2nd -+;; argument if the 1st is zero. This means operand 2 and 3 are -+;; inverted in the instruction. 
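-+;; Concretely, for x = (cc != 0) ? a : b the pattern emits, as a
-+;; sketch (assuming fsel's usual semantics of selecting its second
-+;; source operand when the condition register is non-zero):
-+;;
-+;;   fsel  $f0,$f2,$f1,$fcc0    # $f1 holds a, $f2 holds b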
-+ -+(define_insn "*sel" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (if_then_else:ANYF -+ (equality_op:FCC (match_operand:FCC 1 "register_operand" "z") -+ (const_int 0)) -+ (match_operand:ANYF 2 "reg_or_0_operand" "f") -+ (match_operand:ANYF 3 "reg_or_0_operand" "f")))] -+ "TARGET_HARD_FLOAT" -+ "fsel\t%0,,%1" -+ [(set_attr "type" "condmove") -+ (set_attr "mode" "")]) -+ -+;; These are the main define_expand's used to make conditional moves. -+ -+(define_expand "movcc" -+ [(set (match_operand:GPR 0 "register_operand") -+ (if_then_else:GPR (match_operator 1 "comparison_operator" -+ [(match_operand:GPR 2 "reg_or_0_operand") -+ (match_operand:GPR 3 "reg_or_0_operand")])))] -+ "TARGET_COND_MOVE_INT" -+{ -+ if(loongarch_expand_conditional_move_la464 (operands)) -+ DONE; -+ else -+ FAIL; -+}) -+ -+(define_expand "movcc" -+ [(set (match_operand:ANYF 0 "register_operand") -+ (if_then_else:ANYF (match_operator 1 "comparison_operator" -+ [(match_operand:ANYF 2 "reg_or_0_operand") -+ (match_operand:ANYF 3 "reg_or_0_operand")])))] -+ "TARGET_COND_MOVE_FLOAT" -+{ -+ -+ if(loongarch_expand_conditional_move_la464 (operands)) -+ DONE; -+ else -+ FAIL; -+}) -+ -+(define_insn "lu32i_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (ior:DI -+ (zero_extend:DI -+ (subreg:SI (match_operand:DI 1 "register_operand" "0") 0)) -+ (match_operand:DI 2 "const_lu32i_operand" "u")))] -+ "TARGET_64BIT" -+ "lu32i.d\t%0,%X2>>32" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI")]) -+ -+(define_insn "lu52i_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (ior:DI -+ (and:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand 2 "lu52i_mask_operand")) -+ (match_operand 3 "const_lu52i_operand" "v")))] -+ "TARGET_64BIT" -+ "lu52i.d\t%0,%1,%X3>>52" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI")]) -+ -+;; Convert floating-point numbers to integers -+(define_insn "frint_" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] -+ UNSPEC_FRINT))] -+ "" -+ "frint.\t%0,%1" -+ [(set_attr "type" "fcvt") -+ (set_attr "mode" "")]) -+ - ;; Load the low word of operand 0 with operand 1. 
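- ;; Together with movgr2frh further down, this lets a 64-bit FP value
- ;; be assembled from two 32-bit GPR halves when GPRs are only 32 bits
- ;; wide.  A minimal sketch for building a DFmode value:
- ;;
- ;;   movgr2fr.w   $f0,$r4    # write the low 32 bits
- ;;   movgr2frh.w  $f0,$r5    # write the high 32 bits, low preserved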
- (define_insn "load_low" - [(set (match_operand:SPLITF 0 "register_operand" "=f,f") -@@ -2559,47 +2208,149 @@ - [(set_attr "move_type" "mftg,fpstore") - (set_attr "mode" "")]) - --;; Move operand 1 to the high word of operand 0 using movgr2frh, preserving the -+;; Thread-Local Storage -+ -+(define_insn "got_load_tls_gd" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_GD))] -+ "" -+ "la.tls.gd\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "got_load_tls_ld" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_LD))] -+ "" -+ "la.tls.ld\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "got_load_tls_le" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_LE))] -+ "" -+ "la.tls.le\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+(define_insn "got_load_tls_ie" -+ [(set (match_operand:P 0 "register_operand" "=r") -+ (unspec:P -+ [(match_operand:P 1 "symbolic_operand" "")] -+ UNSPEC_TLS_IE))] -+ "" -+ "la.tls.ie\t%0,%1" -+ [(set_attr "got" "load") -+ (set_attr "mode" "")]) -+ -+;; Move operand 1 to the high word of operand 0 using movgr2frh.w, preserving the - ;; value in the low word. - (define_insn "movgr2frh" - [(set (match_operand:SPLITF 0 "register_operand" "=f") - (unspec:SPLITF [(match_operand: 1 "reg_or_0_operand" "rJ") -- (match_operand:SPLITF 2 "register_operand" "0")] -- UNSPEC_MOVGR2FRH))] -- "TARGET_HARD_FLOAT && TARGET_FLOAT64" -- "movgr2frh.w\t%z1,%0" -+ (match_operand:SPLITF 2 "register_operand" "0")] -+ UNSPEC_MOVGR2FRH))] -+ "TARGET_DOUBLE_FLOAT" -+ "movgr2frh.w\t%0,%z1" - [(set_attr "move_type" "mgtf") - (set_attr "mode" "")]) - --;; Move high word of operand 1 to operand 0 using movfrh2gr. -+(define_insn "movsgr2fr" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_MOVGR2FR))] -+ "TARGET_DOUBLE_FLOAT" -+ "movgr2fr.w\t%0,%1" -+ ) -+(define_insn "movdgr2fr" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:DI 1 "register_operand" "r")] -+ UNSPEC_MOVGR2FR))] -+ "TARGET_DOUBLE_FLOAT" -+ "movgr2fr.d\t%0,%1" -+ ) -+ -+;; Move high word of operand 1 to operand 0 using movfrh2gr.s. 
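-+;; This is the read half of the split-move scheme; paired with
-+;; movfr2gr.s it moves a 64-bit FP value back into two 32-bit GPRs,
-+;; roughly:
-+;;
-+;;   movfr2gr.s   $r4,$f0    # read the low 32 bits
-+;;   movfrh2gr.s  $r5,$f0    # read the high 32 bits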
- (define_insn "movfrh2gr" - [(set (match_operand: 0 "register_operand" "=r") - (unspec: [(match_operand:SPLITF 1 "register_operand" "f")] - UNSPEC_MOVFRH2GR))] -- "TARGET_HARD_FLOAT && TARGET_FLOAT64" -+ "TARGET_DOUBLE_FLOAT" - "movfrh2gr.s\t%0,%1" - [(set_attr "move_type" "mftg") - (set_attr "mode" "")]) - -+(define_insn "movsfr2gr" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (unspec:GPR [(match_operand:SF 1 "register_operand" "f")] -+ UNSPEC_MOVFR2GR))] -+ "TARGET_DOUBLE_FLOAT" -+ "movfr2gr.s\t%0,%1" -+ ) -+(define_insn "movdfr2gr" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (unspec:GPR [(match_operand:DF 1 "register_operand" "f")] -+ UNSPEC_MOVFR2GR))] -+ "TARGET_DOUBLE_FLOAT" -+ "movfr2gr.d\t%0,%1" -+ ) -+ -+(define_insn "movfr2fcc" -+ [(set (match_operand:FCC 0 "register_operand" "=z") -+ (unspec:FCC [(match_operand:ANYF 1 "register_operand" "f")] -+ UNSPEC_MOVFR2FCC))] -+ "TARGET_HARD_FLOAT" -+ "movfr2cf\t%0,%1" -+ [(set_attr "mode" "")]) -+ -+(define_insn "movgr2fcc" -+ [(set (match_operand:FCC 0 "register_operand" "=z") -+ (unspec:FCC [(match_operand:GPR 1 "register_operand" "r")] -+ UNSPEC_MOVGR2FCC))] -+ "TARGET_HARD_FLOAT" -+ "movgr2cf\t%0,%1" -+ [(set_attr "mode" "")]) -+ -+(define_insn "movfcc2gr" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (unspec:GPR [(match_operand:FCC 1 "register_operand" "z")] -+ UNSPEC_MOVFCC2GR))] -+ "TARGET_HARD_FLOAT" -+ "movcf2gr\t%0,%1" -+ [ (set_attr "mode" "")]) -+ -+ - ;; Expand in-line code to clear the instruction cache between operand[0] and - ;; operand[1]. - (define_expand "clear_cache" - [(match_operand 0 "pmode_register_operand") - (match_operand 1 "pmode_register_operand")] - "" -- " - { -- emit_insn (gen_ibar (const0_rtx)); -+ emit_insn (gen_loongarch_ibar (const0_rtx)); - DONE; --}") -+}) - --(define_insn "ibar" -- [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_IBAR)] -+(define_insn "loongarch_ibar" -+ [(unspec_volatile:SI -+ [(match_operand 0 "const_uimm15_operand")] -+ UNSPECV_IBAR) -+ (clobber (mem:BLK (scratch)))] - "" - "ibar\t%0") - --(define_insn "dbar" -- [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_DBAR)] -+(define_insn "loongarch_dbar" -+ [(unspec_volatile:SI -+ [(match_operand 0 "const_uimm15_operand")] -+ UNSPECV_DBAR) -+ (clobber (mem:BLK (scratch)))] - "" - "dbar\t%0") - -@@ -2607,118 +2358,142 @@ - - ;; Privileged state instruction - --(define_insn "cpucfg" -+(define_insn "loongarch_cpucfg" - [(set (match_operand:SI 0 "register_operand" "=r") - (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")] -- UNSPEC_CPUCFG))] -+ UNSPECV_CPUCFG))] - "" - "cpucfg\t%0,%1" -- [(set_attr "type" "load") -- (set_attr "mode" "SI")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "SI")]) -+ -+(define_insn "loongarch_syscall" -+ [(unspec_volatile:SI -+ [(match_operand 0 "const_uimm15_operand")] -+ UNSPECV_SYSCALL) -+ (clobber (mem:BLK (scratch)))] -+ "" -+ "syscall\t%0") -+ -+(define_insn "loongarch_break" -+ [(unspec_volatile:SI -+ [(match_operand 0 "const_uimm15_operand")] -+ UNSPECV_BREAK) -+ (clobber (mem:BLK (scratch)))] -+ "" -+ "break\t%0") - --(define_insn "asrtle_d" -- [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") -- (match_operand:DI 1 "register_operand" "r")] -- UNSPEC_ASRTLE_D)] -+(define_insn "loongarch_asrtle_d" -+ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") -+ (match_operand:DI 1 "register_operand" "r")] -+ UNSPECV_ASRTLE_D)] - "TARGET_64BIT" - "asrtle.d\t%0,%1" -- [(set_attr 
"type" "load") -- (set_attr "mode" "DI")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "DI")]) - --(define_insn "asrtgt_d" -- [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") -- (match_operand:DI 1 "register_operand" "r")] -- UNSPEC_ASRTGT_D)] -+(define_insn "loongarch_asrtgt_d" -+ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") -+ (match_operand:DI 1 "register_operand" "r")] -+ UNSPECV_ASRTGT_D)] - "TARGET_64BIT" - "asrtgt.d\t%0,%1" -- [(set_attr "type" "load") -- (set_attr "mode" "DI")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "DI")]) - --(define_insn "
csrrd" -+(define_insn "loongarch_csrrd_" - [(set (match_operand:GPR 0 "register_operand" "=r") - (unspec_volatile:GPR [(match_operand 1 "const_uimm14_operand")] -- UNSPEC_CSRRD))] -+ UNSPECV_CSRRD)) -+ (clobber (mem:BLK (scratch)))] - "" - "csrrd\t%0,%1" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - --(define_insn "
csrwr" -+(define_insn "loongarch_csrwr_" - [(set (match_operand:GPR 0 "register_operand" "=r") -- (unspec_volatile:GPR -- [(match_operand:GPR 1 "register_operand" "0") -- (match_operand 2 "const_uimm14_operand")] -- UNSPEC_CSRWR))] -+ (unspec_volatile:GPR -+ [(match_operand:GPR 1 "register_operand" "0") -+ (match_operand 2 "const_uimm14_operand")] -+ UNSPECV_CSRWR)) -+ (clobber (mem:BLK (scratch)))] - "" - "csrwr\t%0,%2" -- [(set_attr "type" "store") -- (set_attr "mode" "")]) -+ [(set_attr "type" "store") -+ (set_attr "mode" "")]) - --(define_insn "
csrxchg" -+(define_insn "loongarch_csrxchg_" - [(set (match_operand:GPR 0 "register_operand" "=r") -- (unspec_volatile:GPR -- [(match_operand:GPR 1 "register_operand" "0") -- (match_operand:GPR 2 "register_operand" "q") -- (match_operand 3 "const_uimm14_operand")] -- UNSPEC_CSRXCHG))] -+ (unspec_volatile:GPR -+ [(match_operand:GPR 1 "register_operand" "0") -+ (match_operand:GPR 2 "register_operand" "q") -+ (match_operand 3 "const_uimm14_operand")] -+ UNSPECV_CSRXCHG)) -+ (clobber (mem:BLK (scratch)))] - "" - "csrxchg\t%0,%2,%3" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - --(define_insn "iocsrrd_" -+(define_insn "loongarch_iocsrrd_" - [(set (match_operand:QHWD 0 "register_operand" "=r") -- (unspec_volatile:QHWD [(match_operand:SI 1 "register_operand" "r")] -- UNSPEC_IOCSRRD))] -+ (unspec_volatile:QHWD [(match_operand:SI 1 "register_operand" "r")] -+ UNSPECV_IOCSRRD)) -+ (clobber (mem:BLK (scratch)))] - "" - "iocsrrd.\t%0,%1" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - --(define_insn "iocsrwr_" -+(define_insn "loongarch_iocsrwr_" - [(unspec_volatile:QHWD [(match_operand:QHWD 0 "register_operand" "r") -- (match_operand:SI 1 "register_operand" "r")] -- UNSPEC_IOCSRWR)] -+ (match_operand:SI 1 "register_operand" "r")] -+ UNSPECV_IOCSRWR) -+ (clobber (mem:BLK (scratch)))] - "" - "iocsrwr.\t%0,%1" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - --(define_insn "
cacop" -+(define_insn "loongarch_cacop_" - [(unspec_volatile:X [(match_operand 0 "const_uimm5_operand") -- (match_operand:X 1 "register_operand" "r") -- (match_operand 2 "const_imm12_operand")] -- UNSPEC_CACOP)] -+ (match_operand:X 1 "register_operand" "r") -+ (match_operand 2 "const_imm12_operand")] -+ UNSPECV_CACOP) -+ (clobber (mem:BLK (scratch)))] - "" - "cacop\t%0,%1,%2" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - --(define_insn "
lddir" -+(define_insn "loongarch_lddir_" - [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") -- (match_operand:X 1 "register_operand" "r") -- (match_operand 2 "const_uimm5_operand")] -- UNSPEC_LDDIR)] -+ (match_operand:X 1 "register_operand" "r") -+ (match_operand 2 "const_uimm5_operand")] -+ UNSPECV_LDDIR) -+ (clobber (mem:BLK (scratch)))] - "" - "lddir\t%0,%1,%2" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - --(define_insn "
ldpte" -+(define_insn "loongarch_ldpte_" - [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") - (match_operand 1 "const_uimm5_operand")] -- UNSPEC_LDPTE)] -+ UNSPECV_LDPTE) -+ (clobber (mem:BLK (scratch)))] - "" - "ldpte\t%0,%1" -- [(set_attr "type" "load") -- (set_attr "mode" "")]) -+ [(set_attr "type" "load") -+ (set_attr "mode" "")]) - - - ;; Block moves, see loongarch.c for more details. --;; Argument 0 is the destination --;; Argument 1 is the source --;; Argument 2 is the length --;; Argument 3 is the alignment -+;; Argument 0 is the destination. -+;; Argument 1 is the source. -+;; Argument 2 is the length. -+;; Argument 3 is the alignment. - - (define_expand "movmemsi" - [(parallel [(set (match_operand:BLK 0 "general_operand") -@@ -2740,30 +2515,19 @@ - ;; - ;; .................... - --(define_expand "3" -- [(set (match_operand:GPR 0 "register_operand") -- (any_shift:GPR (match_operand:GPR 1 "register_operand") -- (match_operand:SI 2 "arith_operand")))] -- "" --{ --}) -- --(define_insn "*3" -+(define_insn "3" - [(set (match_operand:GPR 0 "register_operand" "=r") - (any_shift:GPR (match_operand:GPR 1 "register_operand" "r") - (match_operand:SI 2 "arith_operand" "rI")))] - "" - { - if (CONST_INT_P (operands[2])) -- { - operands[2] = GEN_INT (INTVAL (operands[2]) - & (GET_MODE_BITSIZE (mode) - 1)); -- return "i.\t%0,%1,%2"; -- } else -- return ".\t%0,%1,%2"; -+ -+ return "%i2.\t%0,%1,%2"; - } - [(set_attr "type" "shift") -- (set_attr "compression" "none") - (set_attr "mode" "")]) - - (define_insn "*si3_extend" -@@ -2774,86 +2538,68 @@ - "TARGET_64BIT" - { - if (CONST_INT_P (operands[2])) -- { - operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); -- return "i.w\t%0,%1,%2"; -- } else -- return ".w\t%0,%1,%2"; -+ -+ return "%i2.w\t%0,%1,%2"; - } - [(set_attr "type" "shift") - (set_attr "mode" "SI")]) - --(define_insn "zero_extend_ashift1" -- [ (set (match_operand:DI 0 "register_operand" "=r") -- (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) -- (match_operand 2 "const_immlsa_operand" "")) -- (match_operand 3 "shift_mask_operand" "")))] --"" --"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" --[(set_attr "type" "arith") -- (set_attr "mode" "DI") -- (set_attr "insn_count" "2")]) -- --(define_insn "zero_extend_ashift2" -- [ (set (match_operand:DI 0 "register_operand" "=r") -- (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") -- (match_operand 2 "const_immlsa_operand" "")) -- (match_operand 3 "shift_mask_operand" "")))] --"" --"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" --[(set_attr "type" "arith") -- (set_attr "mode" "DI") -- (set_attr "insn_count" "2")]) -- --(define_insn "alsl_paired1" -- [(set (match_operand:DI 0 "register_operand" "=&r") -- (plus:DI (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) -- (match_operand 2 "const_immlsa_operand" "")) -- (match_operand 3 "shift_mask_operand" "")) -- (match_operand:DI 4 "register_operand" "r")))] -- "" -- "bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,%4,%2" -- [(set_attr "type" "arith") -- (set_attr "mode" "DI") -- (set_attr "insn_count" "2")]) -- --(define_insn "alsl_paired2" -- [(set (match_operand:DI 0 "register_operand" "=&r") -- (plus:DI (match_operand:DI 1 "register_operand" "r") -- (and:DI (ashift:DI (match_operand:DI 2 "register_operand" "r") -- (match_operand 3 "const_immlsa_operand" "")) -- (match_operand 4 "shift_mask_operand" ""))))] -- "" -- "bstrpick.d\t%0,%2,31,0\n\talsl.d\t%0,%0,%1,%3" -- [(set_attr "type" "arith") -- (set_attr "mode" "DI") -- 
(set_attr "insn_count" "2")]) -- --(define_insn "alsl_" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (plus:GPR (ashift:GPR (match_operand:GPR 1 "register_operand" "r") -- (match_operand 2 "const_immlsa_operand" "")) -- (match_operand:GPR 3 "register_operand" "r")))] -- "ISA_HAS_LSA" -- "alsl.\t%0,%1,%3,%2" -- [(set_attr "type" "arith") -- (set_attr "mode" "")]) -- - (define_insn "rotr3" -+ [(set (match_operand:GPR 0 "register_operand" "=r,r") -+ (rotatert:GPR (match_operand:GPR 1 "register_operand" "r,r") -+ (match_operand:SI 2 "arith_operand" "r,I")))] -+ "" -+ "rotr%i2.\t%0,%1,%2" -+ [(set_attr "type" "shift,shift") -+ (set_attr "mode" "")]) -+ -+;; The following templates were added to generate "bstrpick.d + alsl.d" -+;; instruction pairs. -+;; It is required that the values of const_immalsl_operand and -+;; immediate_operand must have the following correspondence: -+;; -+;; (immediate_operand >> const_immalsl_operand) == 0xffffffff -+ -+(define_insn "zero_extend_ashift" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") -+ (match_operand 2 "const_immalsl_operand" "")) -+ (match_operand 3 "immediate_operand" "")))] -+ "TARGET_64BIT -+ && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)" -+ "bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "2")]) -+ -+(define_insn "bstrpick_alsl_paired" -+ [(set (match_operand:DI 0 "register_operand" "=&r") -+ (plus:DI (match_operand:DI 1 "register_operand" "r") -+ (and:DI (ashift:DI (match_operand:DI 2 "register_operand" "r") -+ (match_operand 3 "const_immalsl_operand" "")) -+ (match_operand 4 "immediate_operand" ""))))] -+ "TARGET_64BIT -+ && ((INTVAL (operands[4]) >> INTVAL (operands[3])) == 0xffffffff)" -+ "bstrpick.d\t%0,%2,31,0\n\talsl.d\t%0,%0,%1,%3" -+ [(set_attr "type" "arith") -+ (set_attr "mode" "DI") -+ (set_attr "insn_count" "2")]) -+ -+(define_insn "alsl3" - [(set (match_operand:GPR 0 "register_operand" "=r") -- (rotatert:GPR (match_operand:GPR 1 "register_operand" "r") -- (match_operand:SI 2 "arith_operand" "rI")))] -+ (plus:GPR (ashift:GPR (match_operand:GPR 1 "register_operand" "r") -+ (match_operand 2 "const_immalsl_operand" "")) -+ (match_operand:GPR 3 "register_operand" "r")))] - "" --{ -- if (CONST_INT_P (operands[2])) -- { -- return "rotri.\t%0,%1,%2"; -- } else -- return "rotr.\t%0,%1,%2"; --} -- [(set_attr "type" "shift") -+ "alsl.\t%0,%1,%3,%2" -+ [(set_attr "type" "arith") - (set_attr "mode" "")]) - -+ -+ -+;; Reverse the order of bytes of operand 1 and store the result in operand 0. 
-+ - (define_insn "bswaphi2" - [(set (match_operand:HI 0 "register_operand" "=r") - (bswap:HI (match_operand:HI 1 "register_operand" "r")))] -@@ -2867,7 +2613,7 @@ - "" - "#" - "" -- [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_WSBH)) -+ [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_REVB_2H)) - (set (match_dup 0) (rotatert:SI (match_dup 0) (const_int 16)))] - "" - [(set_attr "insn_count" "2")]) -@@ -2878,28 +2624,28 @@ - "TARGET_64BIT" - "#" - "" -- [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_DSBH)) -- (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_DSHD))] -+ [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_REVB_4H)) -+ (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_REVH_D))] - "" - [(set_attr "insn_count" "2")]) - --(define_insn "wsbh" -+(define_insn "revb_2h" - [(set (match_operand:SI 0 "register_operand" "=r") -- (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_WSBH))] -+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_REVB_2H))] - "" - "revb.2h\t%0,%1" - [(set_attr "type" "shift")]) - --(define_insn "dsbh" -+(define_insn "revb_4h" - [(set (match_operand:DI 0 "register_operand" "=r") -- (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSBH))] -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_REVB_4H))] - "TARGET_64BIT" - "revb.4h\t%0,%1" - [(set_attr "type" "shift")]) - --(define_insn "dshd" -+(define_insn "revh_d" - [(set (match_operand:DI 0 "register_operand" "=r") -- (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSHD))] -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_REVH_D))] - "TARGET_64BIT" - "revh.d\t%0,%1" - [(set_attr "type" "shift")]) -@@ -2911,37 +2657,37 @@ - ;; - ;; .................... - --;; Conditional branches on floating-point equality tests. 
-+;; Conditional branches - --(define_insn "*branch_fp_fcc" -+(define_insn "*branch_fp_FCCmode" - [(set (pc) -- (if_then_else -- (match_operator 1 "equality_operator" -- [(match_operand:FCC 2 "register_operand" "z") -- (const_int 0)]) -- (label_ref (match_operand 0 "" "")) -- (pc)))] -+ (if_then_else -+ (match_operator 1 "equality_operator" -+ [(match_operand:FCC 2 "register_operand" "z") -+ (const_int 0)]) -+ (label_ref (match_operand 0 "" "")) -+ (pc)))] - "TARGET_HARD_FLOAT" - { - return loongarch_output_conditional_branch (insn, operands, -- LARCH_BRANCH ("b%F1", "%Z2%0"), -- LARCH_BRANCH ("b%W1", "%Z2%0")); -+ LARCH_BRANCH ("b%F1", "%Z2%0"), -+ LARCH_BRANCH ("b%W1", "%Z2%0")); - } - [(set_attr "type" "branch")]) - --(define_insn "*branch_fp_inverted_fcc" -+(define_insn "*branch_fp_inverted_FCCmode" - [(set (pc) -- (if_then_else -- (match_operator 1 "equality_operator" -- [(match_operand:FCC 2 "register_operand" "z") -- (const_int 0)]) -- (pc) -- (label_ref (match_operand 0 "" ""))))] -+ (if_then_else -+ (match_operator 1 "equality_operator" -+ [(match_operand:FCC 2 "register_operand" "z") -+ (const_int 0)]) -+ (pc) -+ (label_ref (match_operand 0 "" ""))))] - "TARGET_HARD_FLOAT" - { - return loongarch_output_conditional_branch (insn, operands, -- LARCH_BRANCH ("b%W1", "%Z2%0"), -- LARCH_BRANCH ("b%F1", "%Z2%0")); -+ LARCH_BRANCH ("b%W1", "%Z2%0"), -+ LARCH_BRANCH ("b%F1", "%Z2%0")); - } - [(set_attr "type" "branch")]) - -@@ -2951,28 +2697,26 @@ - [(set (pc) - (if_then_else - (match_operator 1 "order_operator" -- [(match_operand:GPR 2 "register_operand" "r,r") -- (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) -+ [(match_operand:X 2 "register_operand" "r,r") -+ (match_operand:X 3 "reg_or_0_operand" "J,r")]) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - { return loongarch_output_order_conditional_branch (insn, operands, false); } - [(set_attr "type" "branch") -- (set_attr "compact_form" "maybe,always") - (set_attr "hazard" "forbidden_slot")]) - - (define_insn "*branch_order_inverted" - [(set (pc) - (if_then_else - (match_operator 1 "order_operator" -- [(match_operand:GPR 2 "register_operand" "r,r") -- (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) -+ [(match_operand:X 2 "register_operand" "r,r") -+ (match_operand:X 3 "reg_or_0_operand" "J,r")]) - (pc) - (label_ref (match_operand 0 "" ""))))] - "" - { return loongarch_output_order_conditional_branch (insn, operands, true); } - [(set_attr "type" "branch") -- (set_attr "compact_form" "maybe,always") - (set_attr "hazard" "forbidden_slot")]) - - ;; Conditional branch on equality comparison. 
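- ;; Operand 3 may be a register or zero (%z substitutes $r0), so the
- ;; patterns below cover both the two-register beq/bne forms and
- ;; comparisons against zero, e.g. (a sketch):
- ;;
- ;;   beq  $r4,$r5,.L1
- ;;   bne  $r4,$r0,.L2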
-@@ -2981,14 +2725,13 @@ - [(set (pc) - (if_then_else - (match_operator 1 "equality_operator" -- [(match_operand:GPR 2 "register_operand" "r") -- (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) -+ [(match_operand:X 2 "register_operand" "r") -+ (match_operand:X 3 "reg_or_0_operand" "rJ")]) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - { return loongarch_output_equal_conditional_branch (insn, operands, false); } - [(set_attr "type" "branch") -- (set_attr "compact_form" "maybe") - (set_attr "hazard" "forbidden_slot")]) - - -@@ -2996,22 +2739,21 @@ - [(set (pc) - (if_then_else - (match_operator 1 "equality_operator" -- [(match_operand:GPR 2 "register_operand" "r") -- (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) -+ [(match_operand:X 2 "register_operand" "r") -+ (match_operand:X 3 "reg_or_0_operand" "rJ")]) - (pc) - (label_ref (match_operand 0 "" ""))))] - "" - { return loongarch_output_equal_conditional_branch (insn, operands, true); } - [(set_attr "type" "branch") -- (set_attr "compact_form" "maybe") - (set_attr "hazard" "forbidden_slot")]) - - - (define_expand "cbranch4" - [(set (pc) - (if_then_else (match_operator 0 "comparison_operator" -- [(match_operand:GPR 1 "register_operand") -- (match_operand:GPR 2 "nonmemory_operand")]) -+ [(match_operand:GPR 1 "register_operand") -+ (match_operand:GPR 2 "nonmemory_operand")]) - (label_ref (match_operand 3 "")) - (pc)))] - "" -@@ -3023,8 +2765,8 @@ - (define_expand "cbranch4" - [(set (pc) - (if_then_else (match_operator 0 "comparison_operator" -- [(match_operand:SCALARF 1 "register_operand") -- (match_operand:SCALARF 2 "register_operand")]) -+ [(match_operand:ANYF 1 "register_operand") -+ (match_operand:ANYF 2 "register_operand")]) - (label_ref (match_operand 3 "")) - (pc)))] - "" -@@ -3062,71 +2804,63 @@ - DONE; - }) - --(define_insn "*seq_zero_" -- [(set (match_operand:GPR2 0 "register_operand" "=r") -- (eq:GPR2 (match_operand:GPR 1 "register_operand" "r") -+(define_insn "*seq_zero_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (eq:GPR (match_operand:X 1 "register_operand" "r") - (const_int 0)))] - "" - "sltui\t%0,%1,1" - [(set_attr "type" "slt") -- (set_attr "mode" "")]) -+ (set_attr "mode" "")]) - - --(define_insn "*sne_zero_" -- [(set (match_operand:GPR2 0 "register_operand" "=r") -- (ne:GPR2 (match_operand:GPR 1 "register_operand" "r") -+(define_insn "*sne_zero_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (ne:GPR (match_operand:X 1 "register_operand" "r") - (const_int 0)))] - "" - "sltu\t%0,%.,%1" - [(set_attr "type" "slt") -- (set_attr "mode" "")]) -+ (set_attr "mode" "")]) - --(define_insn "*sgt_" -- [(set (match_operand:GPR2 0 "register_operand" "=r") -- (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r") -- (match_operand:GPR 2 "reg_or_0_operand" "rJ")))] -+(define_insn "*sgt_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (any_gt:GPR (match_operand:X 1 "register_operand" "r") -+ (match_operand:X 2 "reg_or_0_operand" "rJ")))] - "" - "slt\t%0,%z2,%1" - [(set_attr "type" "slt") -- (set_attr "mode" "")]) -+ (set_attr "mode" "")]) - -- --(define_insn "*sge_" -- [(set (match_operand:GPR2 0 "register_operand" "=r") -- (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r") -+(define_insn "*sge_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (any_ge:GPR (match_operand:X 1 "register_operand" "r") - (const_int 1)))] - "" - "slti\t%0,%.,%1" - [(set_attr "type" "slt") -- (set_attr "mode" "")]) -+ (set_attr "mode" "")]) - --(define_insn "*slt_" -- [(set (match_operand:GPR2 0 
"register_operand" "=r") -- (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r") -- (match_operand:GPR 2 "arith_operand" "rI")))] -+(define_insn "*slt_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (any_lt:GPR (match_operand:X 1 "register_operand" "r") -+ (match_operand:X 2 "arith_operand" "rI")))] - "" --{ -- if (CONST_INT_P (operands[2])) -- { -- return "slti\t%0,%1,%2"; -- } else -- return "slt\t%0,%1,%2"; --} -+ "slt%i2\t%0,%1,%2"; - [(set_attr "type" "slt") -- (set_attr "mode" "")]) -+ (set_attr "mode" "")]) - -- --(define_insn "*sle_" -- [(set (match_operand:GPR2 0 "register_operand" "=r") -- (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r") -- (match_operand:GPR 2 "sle_operand" "")))] -+(define_insn "*sle_" -+ [(set (match_operand:GPR 0 "register_operand" "=r") -+ (any_le:GPR (match_operand:X 1 "register_operand" "r") -+ (match_operand:X 2 "sle_operand" "")))] - "" - { - operands[2] = GEN_INT (INTVAL (operands[2]) + 1); - return "slti\t%0,%1,%2"; - } - [(set_attr "type" "slt") -- (set_attr "mode" "")]) -+ (set_attr "mode" "")]) - - - ;; -@@ -3136,23 +2870,15 @@ - ;; - ;; .................... - --(define_insn "s__using_fcc" -+(define_insn "s__using_FCCmode" - [(set (match_operand:FCC 0 "register_operand" "=z") -- (fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") -- (match_operand:SCALARF 2 "register_operand" "f")))] -+ (fcond:FCC (match_operand:ANYF 1 "register_operand" "f") -+ (match_operand:ANYF 2 "register_operand" "f")))] - "" - "fcmp..\t%Z0%1,%2" - [(set_attr "type" "fcmp") - (set_attr "mode" "FCC")]) - --(define_insn "s__using_fcc" -- [(set (match_operand:FCC 0 "register_operand" "=z") -- (swapped_fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") -- (match_operand:SCALARF 2 "register_operand" "f")))] -- "" -- "fcmp..\t%Z0%2,%1" -- [(set_attr "type" "fcmp") -- (set_attr "mode" "FCC")]) - - ;; - ;; .................... -@@ -3170,24 +2896,20 @@ - (define_insn "*jump_absolute" - [(set (pc) - (label_ref (match_operand 0)))] -- "TARGET_ABSOLUTE_JUMPS" -+ "!flag_pic" - { -- return LARCH_ABSOLUTE_JUMP ("b\t%l0"); -+ return "b\t%l0"; - } -- [(set_attr "type" "branch") -- (set_attr "compact_form" "maybe")]) -+ [(set_attr "type" "branch")]) - - (define_insn "*jump_pic" - [(set (pc) - (label_ref (match_operand 0)))] -- "!TARGET_ABSOLUTE_JUMPS" -+ "flag_pic" - { - return "b\t%0"; - } -- [(set_attr "type" "branch") -- (set_attr "compact_form" "maybe")]) -- -- -+ [(set_attr "type" "branch")]) - - (define_expand "indirect_jump" - [(set (pc) (match_operand 0 "register_operand"))] -@@ -3198,12 +2920,10 @@ - DONE; - }) - --(define_insn "indirect_jump_" -+(define_insn "indirect_jump" - [(set (pc) (match_operand:P 0 "register_operand" "r"))] - "" -- { -- return "jr\t%0"; -- } -+ "jr\t%0" - [(set_attr "type" "jump") - (set_attr "mode" "none")]) - -@@ -3214,25 +2934,25 @@ - "" - { - if (flag_pic) -- operands[0] = expand_simple_binop (Pmode, PLUS, operands[0], -- gen_rtx_LABEL_REF (Pmode, operands[1]), -- NULL_RTX, 0, OPTAB_DIRECT); -+ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0], -+ gen_rtx_LABEL_REF (Pmode, -+ operands[1]), -+ NULL_RTX, 0, OPTAB_DIRECT); - emit_jump_insn (PMODE_INSN (gen_tablejump, (operands[0], operands[1]))); - DONE; - }) - --(define_insn "tablejump_" -+(define_insn "tablejump" - [(set (pc) - (match_operand:P 0 "register_operand" "r")) - (use (label_ref (match_operand 1 "" "")))] - "" -- { -- return "jr\t%0"; -- } -+ "jr\t%0" - [(set_attr "type" "jump") - (set_attr "mode" "none")]) - - -+ - ;; - ;; .................... 
- ;; -@@ -3254,22 +2974,25 @@ - ;; saved or used to pass arguments. - - (define_insn "blockage" -- [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)] -+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)] - "" - "" - [(set_attr "type" "ghost") - (set_attr "mode" "none")]) - --(define_insn "probe_stack_range_" -+(define_insn "probe_stack_range" - [(set (match_operand:P 0 "register_operand" "=r") - (unspec_volatile:P [(match_operand:P 1 "register_operand" "0") - (match_operand:P 2 "register_operand" "r") -- (match_operand:P 3 "register_operand" "r")] -- UNSPEC_PROBE_STACK_RANGE))] -+ (match_operand:P 3 "register_operand" "r")] -+ UNSPECV_PROBE_STACK_RANGE))] - "" -- { return loongarch_output_probe_stack_range (operands[0], operands[2], operands[3]); } -+{ -+ return loongarch_output_probe_stack_range (operands[0], -+ operands[2], -+ operands[3]); -+} - [(set_attr "type" "unknown") -- (set_attr "can_delay" "no") - (set_attr "mode" "")]) - - (define_expand "epilogue" -@@ -3304,12 +3027,12 @@ - (define_insn "*" - [(any_return)] - "" -- { -- operands[0] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); -- return "jr\t%0"; -- } -- [(set_attr "type" "jump") -- (set_attr "mode" "none")]) -+{ -+ operands[0] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); -+ return "jr\t%0"; -+} -+ [(set_attr "type" "jump") -+ (set_attr "mode" "none")]) - - ;; Normal return. - -@@ -3317,46 +3040,18 @@ - [(any_return) - (use (match_operand 0 "pmode_register_operand" ""))] - "" -- { -- return "jr\t%0"; -- } -- [(set_attr "type" "jump") -- (set_attr "mode" "none")]) -- --;; Exception return. --(define_insn "loongarch_ertn" -- [(return) -- (unspec_volatile [(const_int 0)] UNSPEC_ERTN)] -- "" -- "ertn" -- [(set_attr "type" "trap") -- (set_attr "mode" "none")]) -- --;; Disable interrupts. --(define_insn "loongarch_di" -- [(unspec_volatile [(const_int 0)] UNSPEC_DI)] -- "" -- "di" -- [(set_attr "type" "trap") -- (set_attr "mode" "none")]) -- --;; Execution hazard barrier. --(define_insn "loongarch_ehb" -- [(unspec_volatile [(const_int 0)] UNSPEC_EHB)] -- "" -- "ehb" -- [(set_attr "type" "trap") -- (set_attr "mode" "none")]) -+ "jr\t%0" -+ [(set_attr "type" "jump") -+ (set_attr "mode" "none")]) - --;; Read GPR from previous shadow register set. --(define_insn "loongarch_rdpgpr_" -- [(set (match_operand:P 0 "register_operand" "=r") -- (unspec_volatile:P [(match_operand:P 1 "register_operand" "r")] -- UNSPEC_RDPGPR))] -+;; Exception return. -+(define_insn "loongarch_ertn" -+ [(return) -+ (unspec_volatile [(const_int 0)] UNSPECV_ERTN)] - "" -- "rdpgpr\t%0,%1" -- [(set_attr "type" "move") -- (set_attr "mode" "")]) -+ "ertn" -+ [(set_attr "type" "trap") -+ (set_attr "mode" "none")]) - - ;; This is used in compiling the unwind routines. - (define_expand "eh_return" -@@ -3366,22 +3061,22 @@ - if (GET_MODE (operands[0]) != word_mode) - operands[0] = convert_to_mode (word_mode, operands[0], 0); - if (TARGET_64BIT) -- emit_insn (gen_eh_set_lr_di (operands[0])); -+ emit_insn (gen_eh_set_ra_di (operands[0])); - else -- emit_insn (gen_eh_set_lr_si (operands[0])); -+ emit_insn (gen_eh_set_ra_si (operands[0])); - DONE; - }) - - ;; Clobber the return address on the stack. We can't expand this - ;; until we know where it will be put in the stack frame. - --(define_insn "eh_set_lr_si" -+(define_insn "eh_set_ra_si" - [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN) - (clobber (match_scratch:SI 1 "=&r"))] - "! 
TARGET_64BIT" - "#") - --(define_insn "eh_set_lr_di" -+(define_insn "eh_set_ra_di" - [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN) - (clobber (match_scratch:DI 1 "=&r"))] - "TARGET_64BIT" -@@ -3406,23 +3101,14 @@ - ;; - ;; .................... - -- - ;; Sibling calls. All these patterns use jump instructions. - --;; If TARGET_SIBCALLS, call_insn_operand will only accept constant --;; addresses if a direct jump is acceptable. Since the 'S' constraint --;; is defined in terms of call_insn_operand, the same is true of the --;; constraints. -- --;; When we use an indirect jump, we need a register that will be --;; preserved by the epilogue. -- - (define_expand "sibcall" - [(parallel [(call (match_operand 0 "") - (match_operand 1 "")) - (use (match_operand 2 "")) ;; next_arg_reg - (use (match_operand 3 ""))])] ;; struct_value_size_rtx -- "TARGET_SIBCALLS" -+ "" - { - rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); - -@@ -3433,172 +3119,170 @@ - (define_insn "sibcall_internal" - [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,a,t,h")) - (match_operand 1 "" ""))] -- "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" -+ "SIBLING_CALL_P (insn)" - { - switch (which_alternative) - { - case 0: - return "jr\t%0"; - case 1: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" -- "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" -+ "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; - else -- return "b\t%0"; -+ return "b\t%0"; - case 2: -- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -- return "b\t%0"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; -+ if (TARGET_CMODEL_TINY_STATIC) -+ return "b\t%0"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; - else -- return "la.global\t$r12,%0\n\tjr\t$r12"; -+ return "la.global\t$r12,%0\n\tjr\t$r12"; - case 3: -- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; -+ if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; - else -- return "la.global\t$r12,%0\n\tjr\t$r12"; -+ return "la.global\t$r12,%0\n\tjr\t$r12"; - case 4: -- if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -- return "b\t%%plt(%0)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" -- "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; -+ if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) -+ return "b\t%%plt(%0)"; -+ else if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; - else -- sorry ("cmodel extreme and tiny static not support plt."); -+ /* Code model "extreme" and "tiny-static" do not support plt. 
*/ -+ gcc_unreachable (); - default: - gcc_unreachable (); - } - } -- [(set_attr "jal" "indirect,direct,direct,direct,direct")]) -+ [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) - - (define_expand "sibcall_value" - [(parallel [(set (match_operand 0 "") - (call (match_operand 1 "") - (match_operand 2 ""))) - (use (match_operand 3 ""))])] ;; next_arg_reg -- "TARGET_SIBCALLS" -+ "" - { - rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); - -- /* Handle return values created by loongarch_return_fpr_pair. */ -+ /* Handle return values created by loongarch_pass_fpr_pair. */ - if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) - { -- emit_call_insn (gen_sibcall_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), -- target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); -+ rtx arg1 = XEXP (XVECEXP (operands[0],0, 0), 0); -+ rtx arg2 = XEXP (XVECEXP (operands[0],0, 1), 0); -+ -+ emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target, -+ operands[2], -+ arg2)); - } - else - { -- /* Handle return values created by loongarch_return_fpr_single. */ -+ /* Handle return values created by loongarch_return_fpr_single. */ - if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) -- operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); -- -- emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2])); -+ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); -+ -+ emit_call_insn (gen_sibcall_value_internal (operands[0], target, -+ operands[2])); - } - DONE; - }) - - (define_insn "sibcall_value_internal" - [(set (match_operand 0 "register_operand" "") -- (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) -- (match_operand 2 "" "")))] -- "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" -+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) -+ (match_operand 2 "" "")))] -+ "SIBLING_CALL_P (insn)" - { - switch (which_alternative) - { - case 0: - return "jr\t%1"; - case 1: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" -- "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.local\t$r12,$r13,%1\n\t" -- "jr\t$r12"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.local\t$r12,$r13,%1\n\tjr\t$r12"; - else -- return "b\t%1"; -+ return "b\t%1"; - case 2: -- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -- return "b\t%1"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r12,$r13,%1\n\t" -- "jr\t$r12"; -+ if (TARGET_CMODEL_TINY_STATIC) -+ return "b\t%1"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; - else -- return "la.global\t$r12,%1\n\t" -- "jr\t$r12"; -+ return "la.global\t$r12,%1\n\tjr\t$r12"; - case 3: -- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r12,$r13,%1\n\t" -- "jr\t$r12"; -+ if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; - else -- return "la.global\t$r12,%1\n\t" -- "jr\t$r12"; -+ return "la.global\t$r12,%1\n\tjr\t$r12"; - case 4: -- if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -- return " b\t%%plt(%1)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return 
"pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" -- "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) -+ return " b\t%%plt(%1)"; -+ else if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; - else -- sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ /* Code model "extreme" and "tiny-static" do not support plt. */ -+ gcc_unreachable (); - default: - gcc_unreachable (); - } - } -- [(set_attr "jal" "indirect,direct,direct,direct,direct")]) -+ [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) - - (define_insn "sibcall_value_multiple_internal" - [(set (match_operand 0 "register_operand" "") -- (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) -- (match_operand 2 "" ""))) -+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) -+ (match_operand 2 "" ""))) - (set (match_operand 3 "register_operand" "") - (call (mem:SI (match_dup 1)) - (match_dup 2)))] -- "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" -+ "SIBLING_CALL_P (insn)" - { - switch (which_alternative) - { - case 0: - return "jr\t%1"; - case 1: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" -- "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.local\t$r12,$r13,%1\n\t" -- "jr\t$r12"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.local\t$r12,$r13,%1\n\tjr\t$r12"; - else -- return "b\t%1"; -+ return "b\t%1"; - case 2: -- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -- return "b\t%1"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r12,$r13,%1\n\t" -- "jr\t$r12"; -+ if (TARGET_CMODEL_TINY_STATIC) -+ return "b\t%1"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; - else -- return "la.global\t$r12,%1\n\t" -- "jr\t$r12"; -+ return "la.global\t$r12,%1\n\tjr\t$r12"; - case 3: -- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r12,$r13,%1\n\t" -- "jr\t$r12"; -+ if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; - else -- return "la.global\t$r12,%1\n\t" -- "jr\t$r12"; -+ return "la.global\t$r12,%1\n\tjr\t$r12"; - case 4: -- if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -- return "b\t%%plt(%1)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" -- "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) -+ return "b\t%%plt(%1)"; -+ else if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; - else -- sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ /* Code model "extreme" and "tiny-static" do not support plt. 
*/ -+ gcc_unreachable (); - default: - gcc_unreachable (); - } - } -- [(set_attr "jal" "indirect,direct,direct,direct,direct")]) -+ [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) - - (define_expand "call" - [(parallel [(call (match_operand 0 "") -@@ -3612,22 +3296,6 @@ - emit_call_insn (gen_call_internal (target, operands[1])); - DONE; - }) --;; In the last case, we can generate the individual instructions with --;; a define_split. There are several things to be wary of: --;; --;; - We can't expose the load of $gp before reload. If we did, --;; it might get removed as dead, but reload can introduce new --;; uses of $gp by rematerializing constants. --;; --;; - We shouldn't restore $gp after calls that never return. --;; It isn't valid to insert instructions between a noreturn --;; call and the following barrier. --;; --;; - The splitter deliberately changes the liveness of $gp. The unsplit --;; instruction preserves $gp and so have no effect on its liveness. --;; But once we generate the separate insns, it becomes obvious that --;; $gp is not live on entry to the call. --;; - - (define_insn "call_internal" - [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,a,t,h")) -@@ -3640,46 +3308,41 @@ - case 0: - return "jirl\t$r1,%0,0"; - case 1: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" -- "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.local\t$r1,$r12,%0\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.local\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; - else -- return "bl\t%0"; -+ return "bl\t%0"; - case 2: -- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -- return "bl\t%0"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r1,$r12,%0\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_TINY_STATIC) -+ return "bl\t%0"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; - else -- return "la.global\t$r1,%0\n\t" -- "jirl\t$r1,$r1,0"; -+ return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0"; - case 3: -- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r1,$r12,%0\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; - else -- return "la.global\t$r1,%0\n\t" -- "jirl\t$r1,$r1,0"; -+ return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0"; - case 4: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" -- "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -- return "bl\t%%plt(%0)"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; -+ else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) -+ return "bl\t%%plt(%0)"; - else -- sorry ("cmodel extreme and tiny-static not support plt."); -+ /* Code model "extreme" and "tiny-static" do not support plt. 
*/ -+ gcc_unreachable (); - default: - gcc_unreachable (); - } - } -- [(set_attr "jal" "indirect,direct,direct,direct,direct") -+ [(set_attr "jirl" "indirect,direct,direct,direct,direct") - (set_attr "insn_count" "1,2,3,3,2")]) - -- - (define_expand "call_value" - [(parallel [(set (match_operand 0 "") - (call (match_operand 1 "") -@@ -3688,26 +3351,31 @@ - "" - { - rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); -- /* Handle return values created by loongarch_return_fpr_pair. */ -+ /* Handle return values created by loongarch_pass_fpr_pair. */ - if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) -- emit_call_insn (gen_call_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), -- target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); -+ { -+ rtx arg1 = XEXP (XVECEXP (operands[0], 0, 0), 0); -+ rtx arg2 = XEXP (XVECEXP (operands[0], 0, 1), 0); -+ -+ emit_call_insn (gen_call_value_multiple_internal (arg1, target, -+ operands[2], arg2)); -+ } - else - { -- /* Handle return values created by loongarch_return_fpr_single. */ -+ /* Handle return values created by loongarch_return_fpr_single. */ - if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) -- operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); -- -- emit_call_insn (gen_call_value_internal (operands[0], target, operands[2])); -+ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); -+ -+ emit_call_insn (gen_call_value_internal (operands[0], target, -+ operands[2])); - } - DONE; - }) - --;; See comment for call_internal. - (define_insn "call_value_internal" - [(set (match_operand 0 "register_operand" "") -- (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) -- (match_operand 2 "" ""))) -+ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) -+ (match_operand 2 "" ""))) - (clobber (reg:SI RETURN_ADDR_REGNUM))] - "" - { -@@ -3716,50 +3384,45 @@ - case 0: - return "jirl\t$r1,%1,0"; - case 1: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" -- "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.local\t$r1,$r12,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; - else -- return "bl\t%1"; -+ return "bl\t%1"; - case 2: -- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -- return "bl\t%1"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r1,$r12,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_TINY_STATIC) -+ return "bl\t%1"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; - else -- return "la.global\t$r1,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; - case 3: -- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r1,$r12,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; - else -- return "la.global\t$r1,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; - case 4: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" -- "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -- else if (loongarch_cmodel_var 
== LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -- return "bl\t%%plt(%1)"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) -+ return "bl\t%%plt(%1)"; - else -- sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ /* Code model "extreme" and "tiny-static" do not support plt. */ -+ gcc_unreachable (); - default: - gcc_unreachable (); - } - } -- [(set_attr "jal" "indirect,direct,direct,direct,direct") -+ [(set_attr "jirl" "indirect,direct,direct,direct,direct") - (set_attr "insn_count" "1,2,3,3,2")]) - --;; See comment for call_internal. - (define_insn "call_value_multiple_internal" - [(set (match_operand 0 "register_operand" "") -- (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) -- (match_operand 2 "" ""))) -+ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) -+ (match_operand 2 "" ""))) - (set (match_operand 3 "register_operand" "") - (call (mem:SI (match_dup 1)) - (match_dup 2))) -@@ -3771,48 +3434,43 @@ - case 0: - return "jirl\t$r1,%1,0"; - case 1: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" -- "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.local\t$r1,$r12,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; - else -- return "bl\t%1"; -+ return "bl\t%1"; - case 2: -- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) -- return "bl\t%1"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r1,$r12,%1\n\t" -- "jirl\t$r1,$r1,0 "; -+ if (TARGET_CMODEL_TINY_STATIC) -+ return "bl\t%1"; -+ else if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0 "; - else -- return "la.global\t$r1,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; - case 3: -- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) -- return "la.global\t$r1,$r12,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ if (TARGET_CMODEL_EXTREME) -+ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; - else -- return "la.global\t$r1,%1\n\t" -- "jirl\t$r1,$r1,0"; -+ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; - case 4: -- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) -- return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" -- "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -- else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) -- return "bl\t%%plt(%1)"; -+ if (TARGET_CMODEL_LARGE) -+ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" -+ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; -+ else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) -+ return "bl\t%%plt(%1)"; - else -- sorry ("loongarch cmodel extreme and tiny-static not support plt."); -+ /* Code model "extreme" and "tiny-static" do not support plt. */ -+ gcc_unreachable (); - default: - gcc_unreachable (); - } - } -- [(set_attr "jal" "indirect,direct,direct,direct,direct") -+ [(set_attr "jirl" "indirect,direct,direct,direct,direct") - (set_attr "insn_count" "1,2,3,3,2")]) - - - ;; Call subroutine returning any type. 
-- - (define_expand "untyped_call" - [(parallel [(call (match_operand 0 "") - (const_int 0)) -@@ -3842,105 +3500,109 @@ - ;; .................... - ;; - -+(define_insn "prefetch" -+ [(prefetch (match_operand 0 "address_operand" "p") -+ (match_operand 1 "const_int_operand" "n") -+ (match_operand 2 "const_int_operand" "n"))] -+ "" -+ { -+ operands[1] = loongarch_prefetch_cookie (operands[1], operands[2]); -+ return "preld\t%1,%a0"; -+ } -+ [(set_attr "type" "prefetch")]) - - (define_insn "*prefetch_indexed_" -- [(prefetch (plus:P (match_operand:P 0 "register_operand" "r") -- (match_operand:P 1 "register_operand" "r")) -- (match_operand 2 "const_int_operand" "n") -- (match_operand 3 "const_int_operand" "n"))] -- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" --{ -- operands[2] = loongarch_prefetch_cookie (operands[2], operands[3]); -- return "prefx\t%2,%1(%0)"; --} -+ [(prefetch (plus:P (match_operand 0 "register_operand" "r") -+ (match_operand 1 "register_operand" "r")) -+ (match_operand 2 "const_int_operand" "n") -+ (match_operand 3 "const_int_operand" "n"))] -+ "" -+ { -+ operands[2] = loongarch_prefetch_cookie (operands[2], operands[3]); -+ return "preldx\t%2,%1,%0"; -+ } - [(set_attr "type" "prefetchx")]) - - (define_insn "nop" - [(const_int 0)] - "" - "nop" -- [(set_attr "type" "nop") -- (set_attr "mode" "none")]) -- --;; Like nop, but commented out when outside a .set noreorder block. --(define_insn "hazard_nop" -- [(const_int 1)] -- "" -- { -- return "#nop"; -- } -- [(set_attr "type" "nop")]) -+ [(set_attr "type" "nop") -+ (set_attr "mode" "none")]) - --;; The `.insn' pseudo-op. --(define_insn "insn_pseudo" -- [(unspec_volatile [(const_int 0)] UNSPEC_INSN_PSEUDO)] -- "" -- ".insn" -- [(set_attr "mode" "none") -- (set_attr "insn_count" "0")]) -- --;; Conditional move instructions. -+;; __builtin_loongarch_movfcsr2gr: move the FCSR into operand 0. -+(define_insn "loongarch_movfcsr2gr" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand")] -+ UNSPECV_MOVFCSR2GR))] -+ "TARGET_HARD_FLOAT" -+ "movfcsr2gr\t%0,$r%1") - --(define_insn "*sel_using_" -- [(set (match_operand:GPR 0 "register_operand" "=r,r") -- (if_then_else:GPR -- (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "r,r") -- (const_int 0)) -- (match_operand:GPR 2 "reg_or_0_operand" "r,J") -- (match_operand:GPR 3 "reg_or_0_operand" "J,r")))] -- "register_operand (operands[2], mode) -- != register_operand (operands[3], mode)" -- "@ -- \t%0,%2,%1 -- \t%0,%3,%1" -- [(set_attr "type" "condmove") -- (set_attr "mode" "")]) -+;; __builtin_loongarch_movgr2fcsr: move operand 0 into the FCSR. -+(define_insn "loongarch_movgr2fcsr" -+ [(unspec_volatile [(match_operand 0 "const_uimm5_operand") -+ (match_operand:SI 1 "register_operand" "r")] -+ UNSPECV_MOVGR2FCSR)] -+ "TARGET_HARD_FLOAT" -+ "movgr2fcsr\t$r%0,%1") - --;; sel.fmt copies the 3rd argument when the 1st is non-zero and the 2nd --;; argument if the 1st is zero. This means operand 2 and 3 are --;; inverted in the instruction. 
-+(define_insn "fclass_" -+ [(set (match_operand:ANYF 0 "register_operand" "=f") -+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] -+ UNSPEC_FCLASS))] -+ "TARGET_HARD_FLOAT" -+ "fclass.\t%0,%1" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "")]) - --(define_insn "*sel" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (if_then_else:SCALARF -- (ne:FCC (match_operand:FCC 1 "register_operand" "z") -- (const_int 0)) -- (match_operand:SCALARF 2 "reg_or_0_operand" "f") -- (match_operand:SCALARF 3 "reg_or_0_operand" "f")))] -+(define_insn "bytepick_w" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:SI 1 "register_operand" "r") -+ (match_operand:SI 2 "register_operand" "r") -+ (match_operand:SI 3 "const_0_to_3_operand" "n")] -+ UNSPEC_BYTEPICK_W))] - "" -- "fsel\t%0,%3,%2,%1" -- [(set_attr "type" "condmove") -- (set_attr "mode" "")]) -+ "bytepick.w\t%0,%1,%2,%z3" -+ [(set_attr "mode" "SI")]) - --;; These are the main define_expand's used to make conditional moves. -+(define_insn "bytepick_d" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r") -+ (match_operand:DI 2 "register_operand" "r") -+ (match_operand:DI 3 "const_0_to_7_operand" "n")] -+ UNSPEC_BYTEPICK_D))] -+ "" -+ "bytepick.d\t%0,%1,%2,%z3" -+ [(set_attr "mode" "DI")]) - --(define_expand "movcc" -- [(set (match_operand:GPR 0 "register_operand") -- (if_then_else:GPR (match_operator 1 "comparison_operator" -- [(match_operand:GPR 2 "reg_or_0_operand") -- (match_operand:GPR 3 "reg_or_0_operand")])))] -- "TARGET_COND_MOVE_INT" --{ -- if (!INTEGRAL_MODE_P (GET_MODE (XEXP (operands[1], 0)))) -- FAIL; -+(define_insn "bitrev_4b" -+ [(set (match_operand:SI 0 "register_operand" "=r") -+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] -+ UNSPEC_BITREV_4B))] -+ "" -+ "bitrev.4b\t%0,%1" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "SI")]) - -- loongarch_expand_conditional_move (operands); -- DONE; --}) -+(define_insn "bitrev_8b" -+ [(set (match_operand:DI 0 "register_operand" "=r") -+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] -+ UNSPEC_BITREV_8B))] -+ "" -+ "bitrev.8b\t%0,%1" -+ [(set_attr "type" "unknown") -+ (set_attr "mode" "DI")]) - --(define_expand "movcc" -- [(set (match_operand:SCALARF 0 "register_operand") -- (if_then_else:SCALARF (match_operator 1 "comparison_operator" -- [(match_operand:SCALARF 2 "reg_or_0_operand") -- (match_operand:SCALARF 3 "reg_or_0_operand")])))] -- "TARGET_COND_MOVE_FLOAT" --{ -- if (!FLOAT_MODE_P (GET_MODE (XEXP (operands[1], 0)))) -- FAIL; -+(define_insn "stack_tie" -+ [(set (mem:BLK (scratch)) -+ (unspec:BLK [(match_operand:X 0 "register_operand" "r") -+ (match_operand:X 1 "register_operand" "r")] -+ UNSPEC_TIE))] -+ "" -+ "" -+ [(set_attr "length" "0") -+ (set_attr "type" "ghost")]) - -- loongarch_expand_conditional_move (operands); -- DONE; --}) - - (define_split - [(match_operand 0 "small_data_pattern")] -@@ -3948,97 +3610,30 @@ - [(match_dup 0)] - { operands[0] = loongarch_rewrite_small_data (operands[0]); }) - --;; Thread-Local Storage -- --(define_insn "got_load_tls_gd" -- [(set (match_operand:P 0 "register_operand" "=r") -- (unspec:P -- [(match_operand:P 1 "symbolic_operand" "")] -- UNSPEC_TLS_GD))] -- "" -- "la.tls.gd\t%0,%1" -- [(set_attr "got" "load") -- (set_attr "mode" "")]) -- --(define_insn "got_load_tls_ld" -- [(set (match_operand:P 0 "register_operand" "=r") -- (unspec:P -- [(match_operand:P 1 "symbolic_operand" "")] -- UNSPEC_TLS_LD))] -- "" -- 
"la.tls.ld\t%0,%1" -- [(set_attr "got" "load") -- (set_attr "mode" "")]) -- --(define_insn "got_load_tls_le" -- [(set (match_operand:P 0 "register_operand" "=r") -- (unspec:P -- [(match_operand:P 1 "symbolic_operand" "")] -- UNSPEC_TLS_LE))] -- "" -- "la.tls.le\t%0,%1" -- [(set_attr "got" "load") -- (set_attr "mode" "")]) -- --(define_insn "got_load_tls_ie" -- [(set (match_operand:P 0 "register_operand" "=r") -- (unspec:P -- [(match_operand:P 1 "symbolic_operand" "")] -- UNSPEC_TLS_IE))] -- "" -- "la.tls.ie\t%0,%1" -- [(set_attr "got" "load") -- (set_attr "mode" "")]) -- --(define_insn "loongarch_movfcsr2gr" -- [(set (match_operand:SI 0 "register_operand" "=r") -- (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand")] UNSPEC_MOVFCSR2GR))] -- "TARGET_HARD_FLOAT" -- "movfcsr2gr\t%0,$r%1") -- --(define_insn "loongarch_movgr2fcsr" -- [(unspec_volatile [(match_operand 0 "const_uimm5_operand") -- (match_operand:SI 1 "register_operand" "r")] -- UNSPEC_MOVGR2FCSR)] -- "TARGET_HARD_FLOAT" -- "movgr2fcsr\t$r%0,%1") -- - - ;; Match paired HI/SI/SF/DFmode load/stores. - (define_insn "*join2_load_store" -- [(set (match_operand:JOIN_MODE 0 "nonimmediate_operand" "=r,f,m,m,r,ZC") -+ [(set (match_operand:JOIN_MODE 0 "nonimmediate_operand" -+ "=&r,f,m,m,&r,ZC") - (match_operand:JOIN_MODE 1 "nonimmediate_operand" "m,m,r,f,ZC,r")) -- (set (match_operand:JOIN_MODE 2 "nonimmediate_operand" "=r,f,m,m,r,ZC") -+ (set (match_operand:JOIN_MODE 2 "nonimmediate_operand" -+ "=r,f,m,m,r,ZC") - (match_operand:JOIN_MODE 3 "nonimmediate_operand" "m,m,r,f,ZC,r"))] - "reload_completed" - { -- bool load_p = (which_alternative == 0 || which_alternative == 1); -- /* Reg-renaming pass reuses base register if it is dead after bonded loads. -- Hardware does not bond those loads, even when they are consecutive. -- However, order of the loads need to be checked for correctness. */ -- if (!load_p || !reg_overlap_mentioned_p (operands[0], operands[1])) -- { -- output_asm_insn (loongarch_output_move (operands[0], operands[1]), -- operands); -- output_asm_insn (loongarch_output_move (operands[2], operands[3]), -- &operands[2]); -- } -- else -- { -- output_asm_insn (loongarch_output_move (operands[2], operands[3]), -- &operands[2]); -- output_asm_insn (loongarch_output_move (operands[0], operands[1]), -- operands); -- } -+ /* The load destination does not overlap the source. */ -+ gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[1])); -+ output_asm_insn (loongarch_output_move (operands[0], operands[1]), -+ operands); -+ output_asm_insn (loongarch_output_move (operands[2], operands[3]), -+ &operands[2]); - return ""; - } -- [(set_attr "move_type" "load,fpload,store,fpstore,load,store") -+ [(set_attr "move_type" -+ "load,fpload,store,fpstore,load,store") - (set_attr "insn_count" "2,2,2,2,2,2")]) - --;; 2 HI/SI/SF/DF loads are joined. --;; P5600 does not support bonding of two LBs, hence QI mode is not included. --;; The loads must be non-volatile as they might be reordered at the time of asm --;; generation. -+;; 2 HI/SI/SF/DF loads are bonded. - (define_peephole2 - [(set (match_operand:JOIN_MODE 0 "register_operand") - (match_operand:JOIN_MODE 1 "non_volatile_mem_operand")) -@@ -4051,8 +3646,7 @@ - (match_dup 3))])] - "") - --;; 2 HI/SI/SF/DF stores are joined. --;; P5600 does not support bonding of two SBs, hence QI mode is not included. -+;; 2 HI/SI/SF/DF stores are bonded. 
- (define_peephole2 - [(set (match_operand:JOIN_MODE 0 "memory_operand") - (match_operand:JOIN_MODE 1 "register_operand")) -@@ -4067,25 +3661,16 @@ - - ;; Match paired HImode loads. - (define_insn "*join2_loadhi" -- [(set (match_operand:SI 0 "register_operand" "=r") -+ [(set (match_operand:SI 0 "register_operand" "=&r") - (any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand" "m"))) - (set (match_operand:SI 2 "register_operand" "=r") - (any_extend:SI (match_operand:HI 3 "non_volatile_mem_operand" "m")))] - "reload_completed" - { -- /* Reg-renaming pass reuses base register if it is dead after bonded loads. -- Hardware does not bond those loads, even when they are consecutive. -- However, order of the loads need to be checked for correctness. */ -- if (!reg_overlap_mentioned_p (operands[0], operands[1])) -- { -- output_asm_insn ("ld.h\t%0,%1", operands); -- output_asm_insn ("ld.h\t%2,%3", operands); -- } -- else -- { -- output_asm_insn ("ld.h\t%2,%3", operands); -- output_asm_insn ("ld.h\t%0,%1", operands); -- } -+ /* The load destination does not overlap the source. */ -+ gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[1])); -+ output_asm_insn ("ld.h\t%0,%1", operands); -+ output_asm_insn ("ld.h\t%2,%3", operands); - - return ""; - } -@@ -4093,7 +3678,7 @@ - (set_attr "insn_count" "2")]) - - --;; 2 HI loads are joined. -+;; 2 HI loads are bonded. - (define_peephole2 - [(set (match_operand:SI 0 "register_operand") - (any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand"))) -@@ -4107,153 +3692,10 @@ - "") - - --;; Logical AND NOT. --(define_insn "loongson_gsandn" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (and:GPR -- (not:GPR (match_operand:GPR 1 "register_operand" "r")) -- (match_operand:GPR 2 "register_operand" "r")))] -- "" -- "andn\t%0,%2,%1" -- [(set_attr "type" "logical")]) -- --;; Logical AND NOT. 
--(define_insn "loongson_gsorn" -- [(set (match_operand:GPR 0 "register_operand" "=r") -- (ior:GPR -- (not:GPR (match_operand:GPR 1 "register_operand" "r")) -- (match_operand:GPR 2 "register_operand" "r")))] -- "" -- "orn\t%0,%2,%1" -- [(set_attr "type" "logical")]) -- --(define_insn "smax3" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (smax:SCALARF (match_operand:SCALARF 1 "register_operand" "f") -- (match_operand:SCALARF 2 "register_operand" "f")))] -- "TARGET_HARD_FLOAT" -- "fmax.\t%0,%1,%2" -- [(set_attr "type" "fmove") -- (set_attr "mode" "")]) -- --(define_insn "smin3" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (smin:SCALARF (match_operand:SCALARF 1 "register_operand" "f") -- (match_operand:SCALARF 2 "register_operand" "f")))] -- "TARGET_HARD_FLOAT" -- "fmin.\t%0,%1,%2" -- [(set_attr "type" "fmove") -- (set_attr "mode" "")]) -- --(define_insn "smaxa3" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (if_then_else:SCALARF -- (gt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) -- (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) -- (match_dup 1) -- (match_dup 2)))] -- "TARGET_HARD_FLOAT" -- "fmaxa.\t%0,%1,%2" -- [(set_attr "type" "fmove") -- (set_attr "mode" "")]) -- --(define_insn "smina3" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (if_then_else:SCALARF -- (lt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) -- (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) -- (match_dup 1) -- (match_dup 2)))] -- "TARGET_HARD_FLOAT" -- "fmina.\t%0,%1,%2" -- [(set_attr "type" "fmove") -- (set_attr "mode" "")]) -- --(define_insn "frint_" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] -- UNSPEC_FRINT))] -- "" -- "frint.\t%0,%1" -- [(set_attr "type" "fcvt") -- (set_attr "mode" "")]) -- --(define_insn "fclass_" -- [(set (match_operand:SCALARF 0 "register_operand" "=f") -- (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] -- UNSPEC_FCLASS))] -- "" -- "fclass.\t%0,%1" -- [(set_attr "type" "unknown") -- (set_attr "mode" "")]) -- --(define_insn "bytepick_w" -- [(set (match_operand:SI 0 "register_operand" "=r") -- (unspec:SI [(match_operand:SI 1 "register_operand" "r") -- (match_operand:SI 2 "register_operand" "r") -- (match_operand:SI 3 "const_0_to_3_operand" "n")] -- UNSPEC_BYTEPICK_W))] -- "" -- "bytepick.w\t%0,%1,%2,%z3" -- [(set_attr "type" "dspalu") -- (set_attr "mode" "SI")]) -- --(define_insn "bytepick_d" -- [(set (match_operand:DI 0 "register_operand" "=r") -- (unspec:DI [(match_operand:DI 1 "register_operand" "r") -- (match_operand:DI 2 "register_operand" "r") -- (match_operand:DI 3 "const_0_to_7_operand" "n")] -- UNSPEC_BYTEPICK_D))] -- "" -- "bytepick.d\t%0,%1,%2,%z3" -- [(set_attr "type" "dspalu") -- (set_attr "mode" "DI")]) -- --(define_insn "bitrev_4b" -- [(set (match_operand:SI 0 "register_operand" "=r") -- (unspec:SI [(match_operand:SI 1 "register_operand" "r")] -- UNSPEC_BITREV_4B))] -- "" -- "bitrev.4b\t%0,%1" -- [(set_attr "type" "unknown") -- (set_attr "mode" "SI")]) -- --(define_insn "bitrev_8b" -- [(set (match_operand:DI 0 "register_operand" "=r") -- (unspec:DI [(match_operand:DI 1 "register_operand" "r")] -- UNSPEC_BITREV_8B))] -- "" -- "bitrev.8b\t%0,%1" -- [(set_attr "type" "unknown") -- (set_attr "mode" "DI")]) -- -- -- --(define_insn "lu32i_d" -- [(set (match_operand:DI 0 "register_operand" "=r") -- (ior:DI -- (zero_extend:DI -- (subreg:SI (match_operand:DI 1 
"register_operand" "0") 0)) -- (match_operand:DI 2 "const_lu32i_operand" "u")))] -- "TARGET_64BIT" -- "lu32i.d\t%0,%X2>>32" -- [(set_attr "type" "arith") -- (set_attr "mode" "DI")]) -- --(define_insn "lu52i_d" -- [(set (match_operand:DI 0 "register_operand" "=r") -- (ior:DI -- (and:DI (match_operand:DI 1 "register_operand" "r") -- (match_operand 2 "lu52i_mask_operand")) -- (match_operand 3 "const_lu52i_operand" "v")))] -- "TARGET_64BIT" -- "lu52i.d\t%0,%1,%X3>>52" -- [(set_attr "type" "arith") -- (set_attr "mode" "DI")]) - - (define_mode_iterator QHSD [QI HI SI DI]) - --(define_insn "crc_w__w" -+(define_insn "loongarch_crc_w__w" - [(set (match_operand:SI 0 "register_operand" "=r") - (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") - (match_operand:SI 2 "register_operand" "r")] -@@ -4263,7 +3705,7 @@ - [(set_attr "type" "unknown") - (set_attr "mode" "")]) - --(define_insn "crcc_w__w" -+(define_insn "loongarch_crcc_w__w" - [(set (match_operand:SI 0 "register_operand" "=r") - (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") - (match_operand:SI 2 "register_operand" "r")] -@@ -4277,6 +3719,9 @@ - - (include "sync.md") - -+(include "generic.md") -+(include "la464.md") -+ - ; The LoongArch SX Instructions. - (include "lsx.md") - -@@ -4286,35 +3731,6 @@ - ; The LoongArch ASX Instructions. - (include "lasx.md") - --;; Is copying of this instruction disallowed? --(define_attr "cannot_copy" "no,yes" (const_string "no")) -- --(define_insn "stack_tie" -- [(set (mem:BLK (scratch)) -- (unspec:BLK [(match_operand:X 0 "register_operand" "r") -- (match_operand:X 1 "register_operand" "r")] -- UNSPEC_TIE))] -- "" -- "" -- [(set_attr "length" "0")] --) -- --(define_insn "gpr_save" -- [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_SAVE) -- (clobber (reg:SI T0_REGNUM)) -- (clobber (reg:SI T1_REGNUM))] -- "" -- { return loongarch_output_gpr_save (INTVAL (operands[0])); }) -- --(define_insn "gpr_restore" -- [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)] -- "" -- "tail\t__loongarch_restore_%0") -- --(define_insn "gpr_restore_return" -- [(return) -- (use (match_operand 0 "pmode_register_operand" "")) -- (const_int 0)] -- "" -- "") -- -+(define_c_enum "unspec" [ -+ UNSPEC_ADDRESS_FIRST -+]) -diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt -index 660de3674..075a2d6c7 100644 ---- a/gcc/config/loongarch/loongarch.opt -+++ b/gcc/config/loongarch/loongarch.opt -@@ -1,6 +1,14 @@ -- -+; Generated by "genstr" from the template "loongarch.opt.in" -+; and definitions from "loongarch-strings". -+; -+; Please do not edit this file directly. -+; It will be automatically updated during a gcc build -+; if you change "loongarch.opt.in" or "loongarch-strings". -+; -+; Generated by "genstr" from the template "loongarch.opt.in" -+; and definitions from "loongarch-strings". - ; --; Copyright (C) 2005-2018 Free Software Foundation, Inc. -+; Copyright (C) 2020-2022 Free Software Foundation, Inc. - ; - ; This file is part of GCC. - ; -@@ -17,155 +25,225 @@ - ; You should have received a copy of the GNU General Public License - ; along with GCC; see the file COPYING3. If not see - ; . -+; - - HeaderInclude - config/loongarch/loongarch-opts.h - --mabi= --Target RejectNegative Joined Enum(loongarch_abi) Var(loongarch_abi) Init(LARCH_ABI_DEFAULT) ---mabi=ABI Generate code that conforms to the given ABI. 
-+HeaderInclude -+config/loongarch/loongarch-str.h - -+TargetVariable -+unsigned int recip_mask = 0 -+ -+; ISA related options -+;; Base ISA - Enum --Name(loongarch_abi) Type(int) --Known Loongarch ABIs (for use with the -mabi= option): -+Name(isa_base) Type(int) -+Basic ISAs of LoongArch: - - EnumValue --Enum(loongarch_abi) String(lp32) Value(ABILP32) -+Enum(isa_base) String(la64) Value(ISA_BASE_LA64V100) -+ -+;; ISA extensions / adjustments -+Enum -+Name(isa_ext_fpu) Type(int) -+FPU types of LoongArch: - - EnumValue --Enum(loongarch_abi) String(lpx32) Value(ABILPX32) -+Enum(isa_ext_fpu) String(none) Value(ISA_EXT_NONE) - - EnumValue --Enum(loongarch_abi) String(lp64) Value(ABILP64) -+Enum(isa_ext_fpu) String(32) Value(ISA_EXT_FPU32) - --march= --Target RejectNegative Joined Var(loongarch_arch_option) ToLower Enum(loongarch_arch_opt_value) ---march=ISA Generate code for the given ISA. -+EnumValue -+Enum(isa_ext_fpu) String(64) Value(ISA_EXT_FPU64) - --mbranch-cost= --Target RejectNegative Joined UInteger Var(loongarch_branch_cost) ---mbranch-cost=COST Set the cost of branches to roughly COST instructions. -+mfpu= -+Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) -+-mfpu=FPU Generate code for the given FPU. - --mcheck-zero-division --Target Report Mask(CHECK_ZERO_DIV) --Trap on integer divide by zero. -+mfpu=0 -+Target RejectNegative Alias(mfpu=,none) -+ -+msoft-float -+Target Driver Defer Var(la_deferred_options) RejectNegative Negative(msingle-float) -+Prevent the use of all hardware floating-point instructions. -+ -+msingle-float -+Target Driver Defer Var(la_deferred_options) RejectNegative Negative(mdouble-float) -+Restrict the use of hardware floating-point instructions to 32-bit operations. - - mdouble-float --Target Report RejectNegative InverseMask(SINGLE_FLOAT, DOUBLE_FLOAT) -+Target Driver Defer Var(la_deferred_options) RejectNegative Negative(msoft-float) - Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations. - --mflush-func= --Target RejectNegative Joined Var(loongarch_cache_flush_func) Init(CACHE_FLUSH_FUNC) ---mflush-func=FUNC Use FUNC to flush the cache before calling stack trampolines. -+Enum -+Name(isa_ext_simd) Type(int) -+SIMD extension levels of LoongArch: -+ -+EnumValue -+Enum(isa_ext_simd) String(none) Value(ISA_EXT_NONE) -+ -+EnumValue -+Enum(isa_ext_simd) String(lsx) Value(ISA_EXT_SIMD_LSX) - --Mask(64BIT) -+EnumValue -+Enum(isa_ext_simd) String(lasx) Value(ISA_EXT_SIMD_LASX) - --Mask(FLOAT64) -+msimd= -+Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) -+-msimd=SIMD Generate code for the given SIMD extension. - --mhard-float --Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI) --Allow the use of hardware floating-point ABI and instructions. -+mlsx -+Target Driver Defer Var(la_deferred_options) -+Enable LoongArch SIMD Extension (LSX, 128-bit). - --mlong-calls --Target Report Var(TARGET_LONG_CALLS) --Use indirect calls. -+mlasx -+Target Driver Defer Var(la_deferred_options) -+Enable LoongArch Advanced SIMD Extension (LASX, 256-bit). - --mmemcpy --Target Report Mask(MEMCPY) --Don't optimize block moves. -+;; Base target models (implies ISA & tune parameters) -+Enum -+Name(cpu_type) Type(int) -+LoongArch CPU types: - --mno-float --Target Report RejectNegative Var(TARGET_NO_FLOAT) Condition(TARGET_SUPPORTS_NO_FLOAT) --Prevent the use of all floating-point operations. 
-+EnumValue -+Enum(cpu_type) String(native) Value(CPU_NATIVE) - --mno-flush-func --Target RejectNegative --Do not use a cache-flushing function before calling stack trampolines. -+EnumValue -+Enum(cpu_type) String(abi-default) Value(CPU_ABI_DEFAULT) - --mrelax-pic-calls --Target Report Mask(RELAX_PIC_CALLS) --Try to allow the linker to turn PIC calls into direct calls. -+EnumValue -+Enum(cpu_type) String(loongarch64) Value(CPU_LOONGARCH64) - --mshared --Target Report Var(TARGET_SHARED) Init(1) --When generating -mabicalls code, make the code suitable for use in shared libraries. -+EnumValue -+Enum(cpu_type) String(la664) Value(CPU_LA664) - --msingle-float --Target Report RejectNegative Mask(SINGLE_FLOAT) --Restrict the use of hardware floating-point instructions to 32-bit operations. -+EnumValue -+Enum(cpu_type) String(la464) Value(CPU_LA464) - --msoft-float --Target Report RejectNegative Mask(SOFT_FLOAT_ABI) --Prevent the use of all hardware floating-point instructions. -+EnumValue -+Enum(cpu_type) String(la264) Value(CPU_LA264) -+ -+EnumValue -+Enum(cpu_type) String(la364) Value(CPU_LA364) - --mlra --Target Report Var(loongarch_lra_flag) Init(1) Save --Use LRA instead of reload. -+march= -+Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) -+-march=PROCESSOR Generate code for the given PROCESSOR ISA. - - mtune= --Target RejectNegative Joined Var(loongarch_tune_option) ToLower Enum(loongarch_arch_opt_value) ---mtune=PROCESSOR Optimize the output for PROCESSOR. -+Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) -+-mtune=PROCESSOR Generate optimized code for PROCESSOR. - --mframe-header-opt --Target Report Var(flag_frame_header_optimization) Optimization --Optimize frame header. - --noasmopt --Driver -+; ABI related options -+; (ISA constraints on ABI are handled dynamically) - --mstrict-align --Target Report Mask(STRICT_ALIGN) Save --Do not generate unaligned memory accesses. -+;; Base ABI -+Enum -+Name(abi_base) Type(int) -+Base ABI types for LoongArch: - --mlsx --Target Report Mask(LSX) --Use LoongArch SX Extension instructions. -+EnumValue -+Enum(abi_base) String(lp64d) Value(ABI_BASE_LP64D) - --mlasx --Target Report Var(TARGET_LASX) --Use LoongArch ASX Extension instructions. -+EnumValue -+Enum(abi_base) String(lp64f) Value(ABI_BASE_LP64F) - --malign-llsc-target --Target Report Var(TARGET_ALIGN_LLSC_TARGET) --Target align llsc target. -+EnumValue -+Enum(abi_base) String(lp64s) Value(ABI_BASE_LP64S) - --mmax-inline-memcpy-size= --Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) ---mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. -+mabi= -+Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET) -+-mabi=BASEABI Generate code that conforms to the given BASEABI. -+ -+;; Legacy option: -mabi=lp64 -+mabi=lp64 -+Target RejectNegative Mask(LP64) -+-mabi=lp64 Legacy option that enables the lp64 integer ABI. -+ -+;; ABI Extension -+Variable -+int la_opt_abi_ext = M_OPT_UNSET -+ -+mbranch-cost= -+Target RejectNegative Joined UInteger Var(loongarch_branch_cost) -+-mbranch-cost=COST Set the cost of branches to roughly COST instructions. - - mvecarg - Target Report Var(TARGET_VECARG) Init(1) - Target pass vect arg uses vector register. - -+mmemvec-cost= -+Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) -+mmemvec-cost=COST Set the cost of vector memory access instructions. 
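-+; A small usage sketch for the two cost knobs above (the file name is
-+; hypothetical; the flag spellings come from the records in this file).
-+; Note that mmemvec-cost is validated against IntegerRange(1, 5):
-+;
-+;   gcc -O2 -mbranch-cost=2 -mmemvec-cost=3 test.c
-+;
-+; Out-of-range cost values are rejected when options are parsed.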
-+ -+mveclibabi= -+Target RejectNegative Joined Var(loongarch_veclibabi_name) -+Vector library ABI to use. -+ -+mstackrealign -+Target Var(loongarch_stack_realign) Init(1) -+Realign stack in prologue. -+ -+mforce-drap -+Target Var(loongarch_force_drap) Init(0) -+Always use Dynamic Realigned Argument Pointer (DRAP) to realign stack. -+ -+mcheck-zero-division -+Target Mask(CHECK_ZERO_DIV) -+Trap on integer divide by zero. -+ - mcond-move-int --Target Report Var(TARGET_COND_MOVE_INT) Init(1) -+Target Var(TARGET_COND_MOVE_INT) Init(1) - Conditional moves for integral are enabled. - - mcond-move-float --Target Report Var(TARGET_COND_MOVE_FLOAT) Init(1) -+Target Var(TARGET_COND_MOVE_FLOAT) Init(1) - Conditional moves for float are enabled. - --; The code model option names for -mcmodel. -+mmemcpy -+Target Mask(MEMCPY) -+Prevent optimizing block moves, which is also the default behavior of -Os. - -+mstrict-align -+Target Var(TARGET_STRICT_ALIGN) Init(0) -+Do not generate unaligned memory accesses. -+ -+mmax-inline-memcpy-size= -+Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) -+-mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. -+ -+mrecip -+Target Report RejectNegative Var(loongarch_recip) -+Generate reciprocals instead of divss and sqrtss. -+ -+mrecip= -+Target Report RejectNegative Joined Var(loongarch_recip_name) -+Control generation of reciprocal estimates. -+ -+; The code model option names for -mcmodel. - Enum --Name(cmodel) Type(enum loongarch_code_model) -+Name(cmodel) Type(int) - The code model option names for -mcmodel: - - EnumValue --Enum(cmodel) String(normal) Value(LARCH_CMODEL_NORMAL) -+Enum(cmodel) String(normal) Value(CMODEL_NORMAL) - - EnumValue --Enum(cmodel) String(tiny) Value(LARCH_CMODEL_TINY) -+Enum(cmodel) String(tiny) Value(CMODEL_TINY) - - EnumValue --Enum(cmodel) String(tiny-static) Value(LARCH_CMODEL_TINY_STATIC) -+Enum(cmodel) String(tiny-static) Value(CMODEL_TINY_STATIC) - - EnumValue --Enum(cmodel) String(large) Value(LARCH_CMODEL_LARGE) -+Enum(cmodel) String(large) Value(CMODEL_LARGE) - - EnumValue --Enum(cmodel) String(extreme) Value(LARCH_CMODEL_EXTREME) -+Enum(cmodel) String(extreme) Value(CMODEL_EXTREME) - - mcmodel= --Target RejectNegative Joined Enum(cmodel) Var(loongarch_cmodel_var) Init(LARCH_CMODEL_NORMAL) Save -+Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) - Specify the code model. -diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md -index 1f7034366..2b1d6f109 100644 ---- a/gcc/config/loongarch/lsx.md -+++ b/gcc/config/loongarch/lsx.md -@@ -168,6 +168,9 @@ - ;; As ILSX but excludes V16QI. - (define_mode_iterator ILSX_DWH [V2DI V4SI V8HI]) - -+;; As LSX but excludes V16QI. -+(define_mode_iterator LSX_DWH [V2DF V4SF V2DI V4SI V8HI]) -+ - ;; As ILSX but excludes V2DI. 
- (define_mode_iterator ILSX_WHB [V4SI V8HI V16QI]) - -@@ -291,6 +294,10 @@ - (V2DI "d") - (V4SI "s")]) - -+(define_mode_attr flsxfrint -+ [(V2DF "d") -+ (V4SF "s")]) -+ - (define_mode_attr ilsxfmt - [(V2DF "l") - (V4SF "w")]) -@@ -327,6 +334,38 @@ - (V4SI "uimm5") - (V2DI "uimm6")]) - -+ -+(define_int_iterator FRINT_S [UNSPEC_LSX_VFRINTRP_S -+ UNSPEC_LSX_VFRINTRZ_S -+ UNSPEC_LSX_VFRINT -+ UNSPEC_LSX_VFRINTRM_S]) -+ -+(define_int_iterator FRINT_D [UNSPEC_LSX_VFRINTRP_D -+ UNSPEC_LSX_VFRINTRZ_D -+ UNSPEC_LSX_VFRINT -+ UNSPEC_LSX_VFRINTRM_D]) -+ -+(define_int_attr frint_pattern_s -+ [(UNSPEC_LSX_VFRINTRP_S "ceil") -+ (UNSPEC_LSX_VFRINTRZ_S "btrunc") -+ (UNSPEC_LSX_VFRINT "rint") -+ (UNSPEC_LSX_VFRINTRM_S "floor")]) -+ -+(define_int_attr frint_pattern_d -+ [(UNSPEC_LSX_VFRINTRP_D "ceil") -+ (UNSPEC_LSX_VFRINTRZ_D "btrunc") -+ (UNSPEC_LSX_VFRINT "rint") -+ (UNSPEC_LSX_VFRINTRM_D "floor")]) -+ -+(define_int_attr frint_suffix -+ [(UNSPEC_LSX_VFRINTRP_S "rp") -+ (UNSPEC_LSX_VFRINTRP_D "rp") -+ (UNSPEC_LSX_VFRINTRZ_S "rz") -+ (UNSPEC_LSX_VFRINTRZ_D "rz") -+ (UNSPEC_LSX_VFRINT "") -+ (UNSPEC_LSX_VFRINTRM_S "rm") -+ (UNSPEC_LSX_VFRINTRM_D "rm")]) -+ - (define_expand "vec_init" - [(match_operand:LSX 0 "register_operand") - (match_operand:LSX 1 "")] -@@ -513,12 +552,12 @@ - DONE; - }) - --(define_insn "lsx_vinsgr2vr_" -- [(set (match_operand:LSX 0 "register_operand" "=f") -- (vec_merge:LSX -- (vec_duplicate:LSX -+(define_insn "lsx_vinsgr2vr_" -+ [(set (match_operand:ILSX 0 "register_operand" "=f") -+ (vec_merge:ILSX -+ (vec_duplicate:ILSX - (match_operand: 1 "reg_or_0_operand" "rJ")) -- (match_operand:LSX 2 "register_operand" "0") -+ (match_operand:ILSX 2 "register_operand" "0") - (match_operand 3 "const__operand" "")))] - "ISA_HAS_LSX" - { -@@ -688,11 +727,23 @@ - DONE; - }) - --(define_insn "lsx_vshuf_" -- [(set (match_operand:ILSX_DWH 0 "register_operand" "=f") -- (unspec:ILSX_DWH [(match_operand:ILSX_DWH 1 "register_operand" "0") -- (match_operand:ILSX_DWH 2 "register_operand" "f") -- (match_operand:ILSX_DWH 3 "register_operand" "f")] -+(define_expand "vec_perm" -+ [(match_operand:LSX 0 "register_operand") -+ (match_operand:LSX 1 "register_operand") -+ (match_operand:LSX 2 "register_operand") -+ (match_operand:LSX 3 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ loongarch_expand_vec_perm (operands[0], operands[1], -+ operands[2], operands[3]); -+ DONE; -+}) -+ -+(define_insn "lsx_vshuf_" -+ [(set (match_operand:LSX_DWH 0 "register_operand" "=f") -+ (unspec:LSX_DWH [(match_operand:LSX_DWH 1 "register_operand" "0") -+ (match_operand:LSX_DWH 2 "register_operand" "f") -+ (match_operand:LSX_DWH 3 "register_operand" "f")] - UNSPEC_LSX_VSHUF))] - "ISA_HAS_LSX" - "vshuf.\t%w0,%w2,%w3" -@@ -731,7 +782,7 @@ - [(set (match_operand:LSX 0 "nonimmediate_operand") - (match_operand:LSX 1 "move_operand"))] - "reload_completed && ISA_HAS_LSX -- && loongarch_split_move_insn_p (operands[0], operands[1], insn)" -+ && loongarch_split_move_insn_p (operands[0], operands[1])" - [(const_int 0)] - { - loongarch_split_move_insn (operands[0], operands[1], curr_insn); -@@ -996,7 +1047,25 @@ - [(set_attr "type" "simd_fmul") - (set_attr "mode" "")]) - --(define_insn "div3" -+(define_expand "div3" -+ [(set (match_operand:FLSX 0 "register_operand") -+ (div:FLSX (match_operand:FLSX 1 "register_operand") -+ (match_operand:FLSX 2 "register_operand")))] -+ "ISA_HAS_LSX" -+{ -+ if (mode == V4SFmode -+ && TARGET_RECIP_VEC_DIV -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math -+ && 
flag_unsafe_math_optimizations) -+ { -+ loongarch_emit_swdivsf (operands[0], operands[1], -+ operands[2], V4SFmode); -+ DONE; -+ } -+}) -+ -+(define_insn "*div3" - [(set (match_operand:FLSX 0 "register_operand" "=f") - (div:FLSX (match_operand:FLSX 1 "register_operand" "f") - (match_operand:FLSX 2 "register_operand" "f")))] -@@ -1025,7 +1094,23 @@ - [(set_attr "type" "simd_fmadd") - (set_attr "mode" "")]) - --(define_insn "sqrt2" -+(define_expand "sqrt2" -+ [(set (match_operand:FLSX 0 "register_operand") -+ (sqrt:FLSX (match_operand:FLSX 1 "register_operand")))] -+ "ISA_HAS_LSX" -+{ -+ if (mode == V4SFmode -+ && TARGET_RECIP_VEC_SQRT -+ && flag_unsafe_math_optimizations -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math) -+ { -+ loongarch_emit_swrsqrtsf (operands[0], operands[1], V4SFmode, 0); -+ DONE; -+ } -+}) -+ -+(define_insn "*sqrt2" - [(set (match_operand:FLSX 0 "register_operand" "=f") - (sqrt:FLSX (match_operand:FLSX 1 "register_operand" "f")))] - "ISA_HAS_LSX" -@@ -1362,8 +1447,8 @@ - (V2DF "V4SI")]) - - (define_insn "lsx_vreplgr2vr_" -- [(set (match_operand:LSX 0 "register_operand" "=f,f") -- (vec_duplicate:LSX -+ [(set (match_operand:ILSX 0 "register_operand" "=f,f") -+ (vec_duplicate:ILSX - (match_operand: 1 "reg_or_0_operand" "r,J")))] - "ISA_HAS_LSX" - { -@@ -1389,7 +1474,7 @@ - DONE; - }) - --(define_insn "lsx_vflogb_" -+(define_insn "logb2" - [(set (match_operand:FLSX 0 "register_operand" "=f") - (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] - UNSPEC_LSX_VFLOGB))] -@@ -1449,6 +1534,15 @@ - [(set_attr "type" "simd_fdiv") - (set_attr "mode" "")]) - -+(define_insn "lsx_vfrecipe_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_RECIPE))] -+ "ISA_HAS_LSX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_DIV" -+ "vfrecipe.\t%w0,%w1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ - (define_insn "lsx_vfrint_" - [(set (match_operand:FLSX 0 "register_operand" "=f") - (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -@@ -1467,6 +1561,42 @@ - [(set_attr "type" "simd_fdiv") - (set_attr "mode" "")]) - -+(define_insn "lsx_vfrsqrte_" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_RSQRTE))] -+ "ISA_HAS_LSX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_SQRT" -+ "vfrsqrte.\t%w0,%w1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+(define_expand "rsqrt2" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRSQRT))] -+ "ISA_HAS_LSX" -+{ -+ if (mode == V4SFmode -+ && TARGET_RECIP_VEC_RSQRT -+ && flag_unsafe_math_optimizations -+ && optimize_insn_for_speed_p () -+ && flag_finite_math_only && !flag_trapping_math) -+ { -+ loongarch_emit_swrsqrtsf (operands[0], operands[1], V4SFmode, 1); -+ DONE; -+ } -+}) -+ -+(define_insn "*rsqrt2" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRSQRT))] -+ "ISA_HAS_LSX" -+ "vfrsqrt.\t%w0,%w1" -+ [(set_attr "type" "simd_fdiv") -+ (set_attr "mode" "")]) -+ -+ - (define_insn "lsx_vftint_s__" - [(set (match_operand: 0 "register_operand" "=f") - (unspec: [(match_operand:FLSX 1 "register_operand" "f")] -@@ -2172,8 +2302,8 @@ - - (define_insn "lsx_vreplvei__scalar" - [(set (match_operand:FLSX 0 "register_operand" "=f") -- (unspec:FLSX 
[(match_operand: 1 "register_operand" "f")] -- UNSPEC_LSX_VREPLVEI))] -+ (vec_duplicate:FLSX -+ (match_operand: 1 "register_operand" "f")))] - "ISA_HAS_LSX" - "vreplvei.\t%w0,%w1,0" - [(set_attr "type" "simd_splat") -@@ -2285,8 +2415,7 @@ - "vset.\t%Z3%w1\n\tbcnez\t%Z3%0"); - } - [(set_attr "type" "simd_branch") -- (set_attr "mode" "") -- (set_attr "compact_form" "never")]) -+ (set_attr "mode" "")]) - - (define_insn "lsx__v_" - [(set (pc) (if_then_else -@@ -2304,8 +2433,7 @@ - "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0"); - } - [(set_attr "type" "simd_branch") -- (set_attr "mode" "TI") -- (set_attr "compact_form" "never")]) -+ (set_attr "mode" "TI")]) - - ;; vec_concate - (define_expand "vec_concatv2di" -@@ -2923,8 +3051,8 @@ - (set_attr "mode" "V4SF")]) - - (define_insn "lsx_vfrintrne_s" -- [(set (match_operand:V4SI 0 "register_operand" "=f") -- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRNE_S))] - "ISA_HAS_LSX" - "vfrintrne.s\t%w0,%w1" -@@ -2932,8 +3060,8 @@ - (set_attr "mode" "V4SF")]) - - (define_insn "lsx_vfrintrne_d" -- [(set (match_operand:V2DI 0 "register_operand" "=f") -- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRNE_D))] - "ISA_HAS_LSX" - "vfrintrne.d\t%w0,%w1" -@@ -2941,8 +3069,8 @@ - (set_attr "mode" "V2DF")]) - - (define_insn "lsx_vfrintrz_s" -- [(set (match_operand:V4SI 0 "register_operand" "=f") -- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRZ_S))] - "ISA_HAS_LSX" - "vfrintrz.s\t%w0,%w1" -@@ -2950,8 +3078,8 @@ - (set_attr "mode" "V4SF")]) - - (define_insn "lsx_vfrintrz_d" -- [(set (match_operand:V2DI 0 "register_operand" "=f") -- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRZ_D))] - "ISA_HAS_LSX" - "vfrintrz.d\t%w0,%w1" -@@ -2959,8 +3087,8 @@ - (set_attr "mode" "V2DF")]) - - (define_insn "lsx_vfrintrp_s" -- [(set (match_operand:V4SI 0 "register_operand" "=f") -- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRP_S))] - "ISA_HAS_LSX" - "vfrintrp.s\t%w0,%w1" -@@ -2968,8 +3096,8 @@ - (set_attr "mode" "V4SF")]) - - (define_insn "lsx_vfrintrp_d" -- [(set (match_operand:V2DI 0 "register_operand" "=f") -- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRP_D))] - "ISA_HAS_LSX" - "vfrintrp.d\t%w0,%w1" -@@ -2977,8 +3105,8 @@ - (set_attr "mode" "V2DF")]) - - (define_insn "lsx_vfrintrm_s" -- [(set (match_operand:V4SI 0 "register_operand" "=f") -- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRM_S))] - "ISA_HAS_LSX" - "vfrintrm.s\t%w0,%w1" -@@ -2986,14 +3114,44 @@ - (set_attr "mode" "V4SF")]) - - (define_insn "lsx_vfrintrm_d" -- 
[(set (match_operand:V2DI 0 "register_operand" "=f") -- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] - UNSPEC_LSX_VFRINTRM_D))] - "ISA_HAS_LSX" - "vfrintrm.d\t%w0,%w1" - [(set_attr "type" "simd_shift") - (set_attr "mode" "V2DF")]) - -+;; Vector versions of the floating-point frint patterns. -+;; Expands to btrunc, ceil, floor, rint. -+(define_insn "v4sf2" -+ [(set (match_operand:V4SF 0 "register_operand" "=f") -+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] -+ FRINT_S))] -+ "ISA_HAS_LSX" -+ "vfrint.s\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V4SF")]) -+ -+(define_insn "v2df2" -+ [(set (match_operand:V2DF 0 "register_operand" "=f") -+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] -+ FRINT_D))] -+ "ISA_HAS_LSX" -+ "vfrint.d\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "V2DF")]) -+ -+;; Expands to round. -+(define_insn "round2" -+ [(set (match_operand:FLSX 0 "register_operand" "=f") -+ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] -+ UNSPEC_LSX_VFRINT))] -+ "ISA_HAS_LSX" -+ "vfrint.\t%w0,%w1" -+ [(set_attr "type" "simd_shift") -+ (set_attr "mode" "")]) -+ - ;; Offset load and broadcast - (define_expand "lsx_vldrepl_" - [(match_operand:LSX 0 "register_operand") -@@ -3019,6 +3177,18 @@ - (set_attr "mode" "") - (set_attr "length" "4")]) - -+(define_insn "lsx_vldrepl__insn_0" -+ [(set (match_operand:LSX 0 "register_operand" "=f") -+ (vec_duplicate:LSX -+ (mem: (match_operand:DI 1 "register_operand" "r"))))] -+ "ISA_HAS_LSX" -+{ -+ return "vldrepl.\t%w0,%1,0"; -+} -+ [(set_attr "type" "simd_load") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ - ;; Offset store by sel - (define_expand "lsx_vstelm_" - [(match_operand:LSX 0 "register_operand") -@@ -3047,6 +3217,20 @@ - (set_attr "mode" "") - (set_attr "length" "4")]) - -+;; Offset is "0" -+(define_insn "lsx_vstelm__insn_0" -+ [(set (mem: (match_operand:DI 0 "register_operand" "r")) -+ (vec_select: -+ (match_operand:LSX 1 "register_operand" "f") -+ (parallel [(match_operand:SI 2 "const__operand")])))] -+ "ISA_HAS_LSX" -+{ -+ return "vstelm.\t%w1,%0,0,%2"; -+} -+ [(set_attr "type" "simd_store") -+ (set_attr "mode" "") -+ (set_attr "length" "4")]) -+ - (define_expand "lsx_vld" - [(match_operand:V16QI 0 "register_operand") - (match_operand 1 "pmode_register_operand") -@@ -3179,3 +3363,101 @@ - } - [(set_attr "type" "simd_fcmp") - (set_attr "mode" "FCC")]) -+ -+;; Vector reduction operation -+(define_expand "reduc_plus_scal_v2di" -+ [(match_operand:DI 0 "register_operand") -+ (match_operand:V2DI 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (V2DImode); -+ emit_insn (gen_lsx_vhaddw_q_d (tmp, operands[1], operands[1])); -+ emit_insn (gen_vec_extractv2didi (operands[0], tmp, const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_plus_scal_v4si" -+ [(match_operand:SI 0 "register_operand") -+ (match_operand:V4SI 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (V2DImode); -+ rtx tmp1 = gen_reg_rtx (V2DImode); -+ emit_insn (gen_lsx_vhaddw_d_w (tmp, operands[1], operands[1])); -+ emit_insn (gen_lsx_vhaddw_q_d (tmp1, tmp, tmp)); -+ emit_insn (gen_vec_extractv4sisi (operands[0], gen_lowpart(V4SImode,tmp1), const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_plus_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:FLSX 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp 
= gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc__scal_" -+ [(any_bitwise: -+ (match_operand: 0 "register_operand") -+ (match_operand:ILSX 1 "register_operand"))] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_smax_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:LSX 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_smin_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:LSX 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_umax_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -+ -+(define_expand "reduc_umin_scal_" -+ [(match_operand: 0 "register_operand") -+ (match_operand:ILSX 1 "register_operand")] -+ "ISA_HAS_LSX" -+{ -+ rtx tmp = gen_reg_rtx (mode); -+ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); -+ emit_insn (gen_vec_extract (operands[0], tmp, -+ const0_rtx)); -+ DONE; -+}) -diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h -index fe3043e3d..2d1598536 100644 ---- a/gcc/config/loongarch/lsxintrin.h -+++ b/gcc/config/loongarch/lsxintrin.h -@@ -3291,65 +3291,65 @@ __m128i __lsx_vftintrneh_l_s(__m128 _1) - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V4SI, V4SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrne_s(__m128 _1) -+__m128 __lsx_vfrintrne_s(__m128 _1) - { -- return (__m128i)__builtin_lsx_vfrintrne_s((v4f32)_1); -+ return (__m128)__builtin_lsx_vfrintrne_s((v4f32)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V2DI, V2DF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrne_d(__m128d _1) -+__m128d __lsx_vfrintrne_d(__m128d _1) - { -- return (__m128i)__builtin_lsx_vfrintrne_d((v2f64)_1); -+ return (__m128d)__builtin_lsx_vfrintrne_d((v2f64)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V4SI, V4SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrz_s(__m128 _1) -+__m128 __lsx_vfrintrz_s(__m128 _1) - { -- return (__m128i)__builtin_lsx_vfrintrz_s((v4f32)_1); -+ return (__m128)__builtin_lsx_vfrintrz_s((v4f32)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V2DI, V2DF. 
*/ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrz_d(__m128d _1) -+__m128d __lsx_vfrintrz_d(__m128d _1) - { -- return (__m128i)__builtin_lsx_vfrintrz_d((v2f64)_1); -+ return (__m128d)__builtin_lsx_vfrintrz_d((v2f64)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V4SI, V4SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrp_s(__m128 _1) -+__m128 __lsx_vfrintrp_s(__m128 _1) - { -- return (__m128i)__builtin_lsx_vfrintrp_s((v4f32)_1); -+ return (__m128)__builtin_lsx_vfrintrp_s((v4f32)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V2DI, V2DF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrp_d(__m128d _1) -+__m128d __lsx_vfrintrp_d(__m128d _1) - { -- return (__m128i)__builtin_lsx_vfrintrp_d((v2f64)_1); -+ return (__m128d)__builtin_lsx_vfrintrp_d((v2f64)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V4SI, V4SF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrm_s(__m128 _1) -+__m128 __lsx_vfrintrm_s(__m128 _1) - { -- return (__m128i)__builtin_lsx_vfrintrm_s((v4f32)_1); -+ return (__m128)__builtin_lsx_vfrintrm_s((v4f32)_1); - } - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V2DI, V2DF. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vfrintrm_d(__m128d _1) -+__m128d __lsx_vfrintrm_d(__m128d _1) - { -- return (__m128i)__builtin_lsx_vfrintrm_d((v2f64)_1); -+ return (__m128d)__builtin_lsx_vfrintrm_d((v2f64)_1); - } - - /* Assembly instruction format: vd, rj, si8, idx. */ -@@ -4154,19 +4154,19 @@ __m128i __lsx_vsub_q(__m128i _1, __m128i _2) - - /* Assembly instruction format: vd, rj, si12. */ - /* Data types in instruction templates: V16QI, CVPOINTER, SI. */ --#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vldrepl_b((void *)(_1), (_2))) -+#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vldrepl_b((void const *)(_1), (_2))) - - /* Assembly instruction format: vd, rj, si11. */ - /* Data types in instruction templates: V8HI, CVPOINTER, SI. */ --#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m128i)__builtin_lsx_vldrepl_h((void *)(_1), (_2))) -+#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m128i)__builtin_lsx_vldrepl_h((void const *)(_1), (_2))) - - /* Assembly instruction format: vd, rj, si10. */ - /* Data types in instruction templates: V4SI, CVPOINTER, SI. */ --#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m128i)__builtin_lsx_vldrepl_w((void *)(_1), (_2))) -+#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m128i)__builtin_lsx_vldrepl_w((void const *)(_1), (_2))) - - /* Assembly instruction format: vd, rj, si9. */ - /* Data types in instruction templates: V2DI, CVPOINTER, SI. */ --#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m128i)__builtin_lsx_vldrepl_d((void *)(_1), (_2))) -+#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m128i)__builtin_lsx_vldrepl_d((void const *)(_1), (_2))) - - /* Assembly instruction format: vd, vj. */ - /* Data types in instruction templates: V16QI, V16QI. */ -@@ -4470,7 +4470,7 @@ __m128i __lsx_vextl_q_d(__m128i _1) - - /* Assembly instruction format: vd, rj, si12. 
*/ - /* Data types in instruction templates: V16QI, CVPOINTER, SI. */ --#define __lsx_vld(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vld((void *)(_1), (_2))) -+#define __lsx_vld(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vld((void const *)(_1), (_2))) - - /* Assembly instruction format: vd, rj, si12. */ - /* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI. */ -@@ -4547,9 +4547,9 @@ __m128i __lsx_vshuf_b(__m128i _1, __m128i _2, __m128i _3) - /* Assembly instruction format: vd, rj, rk. */ - /* Data types in instruction templates: V16QI, CVPOINTER, DI. */ - extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) --__m128i __lsx_vldx(void * _1, long int _2) -+__m128i __lsx_vldx(void const * _1, long int _2) - { -- return (__m128i)__builtin_lsx_vldx((void *)_1, (long int)_2); -+ return (__m128i)__builtin_lsx_vldx((void const *)_1, (long int)_2); - } - - /* Assembly instruction format: vd, rj, rk. */ -diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md -index 20638559d..daacaf003 100644 ---- a/gcc/config/loongarch/predicates.md -+++ b/gcc/config/loongarch/predicates.md -@@ -1,5 +1,7 @@ --;; Predicate definitions for LARCH. --;; Copyright (C) 2004-2018 Free Software Foundation, Inc. -+;; Predicate definitions for LoongArch target. -+;; Copyright (C) 2020-2022 Free Software Foundation, Inc. -+;; Contributed by Loongson Co. Ltd. -+;; Based on MIPS target for GNU compiler. - ;; - ;; This file is part of GCC. - ;; -@@ -19,7 +21,7 @@ - - (define_predicate "const_uns_arith_operand" - (and (match_code "const_int") -- (match_test "SMALL_OPERAND_UNSIGNED (INTVAL (op))"))) -+ (match_test "IMM12_OPERAND_UNSIGNED (INTVAL (op))"))) - - (define_predicate "uns_arith_operand" - (ior (match_operand 0 "const_uns_arith_operand") -@@ -45,7 +47,7 @@ - (ior (match_operand 0 "const_arith_operand") - (match_operand 0 "register_operand"))) - --(define_predicate "const_immlsa_operand" -+(define_predicate "const_immalsl_operand" - (and (match_code "const_int") - (match_test "IN_RANGE (INTVAL (op), 1, 4)"))) - -@@ -69,9 +71,6 @@ - (and (match_code "const_int") - (match_test "UIMM6_OPERAND (INTVAL (op))"))) - --(define_predicate "const_uimm7_operand" -- (and (match_code "const_int") -- (match_test "IN_RANGE (INTVAL (op), 0, 127)"))) - - (define_predicate "const_uimm8_operand" - (and (match_code "const_int") -@@ -85,10 +84,6 @@ - (and (match_code "const_int") - (match_test "IN_RANGE (INTVAL (op), 0, 32767)"))) - --(define_predicate "const_imm5_operand" -- (and (match_code "const_int") -- (match_test "IN_RANGE (INTVAL (op), -16, 15)"))) -- - (define_predicate "const_imm10_operand" - (and (match_code "const_int") - (match_test "IMM10_OPERAND (INTVAL (op))"))) -@@ -101,10 +96,6 @@ - (and (match_code "const_int") - (match_test "IMM13_OPERAND (INTVAL (op))"))) - --(define_predicate "reg_imm10_operand" -- (ior (match_operand 0 "const_imm10_operand") -- (match_operand 0 "register_operand"))) -- - (define_predicate "aq8b_operand" - (and (match_code "const_int") - (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) -@@ -137,6 +128,7 @@ - (and (match_code "const_int") - (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)"))) - -+ - (define_predicate "aq12b_operand" - (and (match_code "const_int") - (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)"))) -@@ -155,7 +147,7 @@ - - (define_predicate "sle_operand" - (and (match_code "const_int") -- (match_test "SMALL_OPERAND (INTVAL (op) + 1)"))) -+ (match_test 
"IMM12_OPERAND (INTVAL (op) + 1)"))) - - (define_predicate "sleu_operand" - (and (match_operand 0 "sle_operand") -@@ -223,179 +215,40 @@ - (and (match_code "const_int") - (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) - --(define_predicate "const_4_to_7_operand" -+(define_predicate "const_4_to_7_operand" - (and (match_code "const_int") - (match_test "IN_RANGE (INTVAL (op), 4, 7)"))) -- -+ - (define_predicate "const_8_to_15_operand" - (and (match_code "const_int") - (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) -- --(define_predicate "const_16_to_31_operand" -- (and (match_code "const_int") -- (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) -- --(define_predicate "qi_mask_operand" -- (and (match_code "const_int") -- (match_test "UINTVAL (op) == 0xff"))) - --(define_predicate "hi_mask_operand" -+(define_predicate "const_8_to_11_operand" - (and (match_code "const_int") -- (match_test "UINTVAL (op) == 0xffff"))) -+ (match_test "IN_RANGE (INTVAL (op), 8, 11)"))) - --(define_predicate "lu52i_mask_operand" -+(define_predicate "const_12_to_15_operand" - (and (match_code "const_int") -- (match_test "UINTVAL (op) == 0xfffffffffffff"))) -+ (match_test "IN_RANGE (INTVAL (op), 12, 15)"))) - --(define_predicate "shift_mask_operand" -+(define_predicate "const_16_to_31_operand" - (and (match_code "const_int") -- (ior (match_test "UINTVAL (op) == 0x3fffffffc") -- (match_test "UINTVAL (op) == 0x1fffffffe") -- (match_test "UINTVAL (op) == 0x7fffffff8") -- (match_test "UINTVAL (op) == 0xffffffff0")))) -- -- -+ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) - --(define_predicate "si_mask_operand" -+(define_predicate "lu52i_mask_operand" - (and (match_code "const_int") -- (match_test "UINTVAL (op) == 0xffffffff"))) -- --(define_predicate "and_load_operand" -- (ior (match_operand 0 "qi_mask_operand") -- (match_operand 0 "hi_mask_operand") -- (match_operand 0 "si_mask_operand"))) -+ (match_test "UINTVAL (op) == 0xfffffffffffff"))) - - (define_predicate "low_bitmask_operand" - (and (match_code "const_int") - (match_test "low_bitmask_len (mode, INTVAL (op)) > 12"))) - --(define_predicate "and_reg_operand" -- (ior (match_operand 0 "register_operand") -- (match_operand 0 "const_uns_arith_operand") -- (match_operand 0 "low_bitmask_operand") -- (match_operand 0 "si_mask_operand"))) -- --(define_predicate "and_operand" -- (ior (match_operand 0 "and_load_operand") -- (match_operand 0 "and_reg_operand"))) -- --(define_predicate "d_operand" -- (and (match_code "reg") -- (match_test "GP_REG_P (REGNO (op))"))) -- --(define_predicate "lwsp_swsp_operand" -- (and (match_code "mem") -- (match_test "lwsp_swsp_address_p (XEXP (op, 0), mode)"))) -- --(define_predicate "db4_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)"))) -- --(define_predicate "db7_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)"))) -- --(define_predicate "db8_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)"))) -- --(define_predicate "ib3_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)"))) -- --(define_predicate "sb4_operand" -- (and (match_code "const_int") -- (match_test "loongarch_signed_immediate_p (INTVAL (op), 4, 0)"))) -- --(define_predicate "sb5_operand" -- (and (match_code "const_int") -- (match_test "loongarch_signed_immediate_p (INTVAL (op), 5, 0)"))) -- --(define_predicate "sb8_operand" -- (and 
(match_code "const_int") -- (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) -- --(define_predicate "sd8_operand" -- (and (match_code "const_int") -- (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) -- --(define_predicate "ub4_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 0)"))) -- --(define_predicate "ub8_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 0)"))) -- --(define_predicate "uh4_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 1)"))) -- --(define_predicate "uw4_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 2)"))) -- --(define_predicate "uw5_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 5, 2)"))) -- --(define_predicate "uw6_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 6, 2)"))) -- --(define_predicate "uw8_operand" -- (and (match_code "const_int") -- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 2)"))) -- --(define_predicate "addiur2_operand" -- (and (match_code "const_int") -- (ior (match_test "INTVAL (op) == -1") -- (match_test "INTVAL (op) == 1") -- (match_test "INTVAL (op) == 4") -- (match_test "INTVAL (op) == 8") -- (match_test "INTVAL (op) == 12") -- (match_test "INTVAL (op) == 16") -- (match_test "INTVAL (op) == 20") -- (match_test "INTVAL (op) == 24")))) -- --(define_predicate "addiusp_operand" -- (and (match_code "const_int") -- (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))") -- (match_test "(IN_RANGE (INTVAL (op), -258, -3))")))) -- --(define_predicate "andi16_operand" -- (and (match_code "const_int") -- (ior (match_test "IN_RANGE (INTVAL (op), 1, 4)") -- (match_test "IN_RANGE (INTVAL (op), 7, 8)") -- (match_test "IN_RANGE (INTVAL (op), 15, 16)") -- (match_test "IN_RANGE (INTVAL (op), 31, 32)") -- (match_test "IN_RANGE (INTVAL (op), 63, 64)") -- (match_test "INTVAL (op) == 255") -- (match_test "INTVAL (op) == 32768") -- (match_test "INTVAL (op) == 65535")))) -- --(define_predicate "movep_src_register" -- (and (match_code "reg") -- (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)")) -- (match_test ("IN_RANGE (REGNO (op), 16, 20)"))))) -- --(define_predicate "movep_src_operand" -- (ior (match_operand 0 "const_0_operand") -- (match_operand 0 "movep_src_register"))) -- --(define_predicate "fcc_reload_operand" -- (and (match_code "reg,subreg") -- (match_test "ST_REG_P (true_regnum (op))"))) -- --(define_predicate "muldiv_target_operand" -- (match_operand 0 "register_operand")) -- - (define_predicate "const_call_insn_operand" - (match_code "const,symbol_ref,label_ref") - { - enum loongarch_symbol_type symbol_type; - -- if (!loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_CALL, &symbol_type)) -+ if (!loongarch_symbolic_constant_p (op, &symbol_type)) - return false; - - switch (symbol_type) -@@ -403,9 +256,9 @@ - case SYMBOL_GOT_DISP: - /* Without explicit relocs, there is no special syntax for - loading the address of a call destination into a register. -- Using "la $25,foo; jal $25" would prevent the lazy binding -- of "foo", so keep the address of global symbols with the -- jal macro. */ -+ Using "la.global JIRL_REGS,foo; jirl JIRL_REGS" would prevent the lazy -+ binding of "foo", so keep the address of global symbols with the jirl -+ macro. 
*/ - return 1; - - default: -@@ -420,7 +273,7 @@ - (define_predicate "is_const_call_local_symbol" - (and (match_operand 0 "const_call_insn_operand") - (ior (match_test "loongarch_global_symbol_p (op) == 0") -- (match_test "loongarch_symbol_binds_local_p (op) != 0")) -+ (match_test "loongarch_symbol_binds_local_p (op) != 0")) - (match_test "CONSTANT_P (op)"))) - - (define_predicate "is_const_call_weak_symbol" -@@ -446,7 +299,6 @@ - (define_predicate "splittable_const_int_operand" - (match_code "const_int") - { -- - /* Don't handle multi-word moves this way; we don't want to introduce - the individual word-mode moves until after reload. */ - if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) -@@ -454,9 +306,8 @@ - - /* Otherwise check whether the constant can be loaded in a single - instruction. */ --// return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op); -- return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op) -- && !LU52I_INT (op); -+ return !LU12I_INT (op) && !IMM12_INT (op) && !IMM12_INT_UNSIGNED (op) -+ && !LU52I_INT (op); - }) - - (define_predicate "move_operand" -@@ -504,73 +355,34 @@ - case CONST: - case SYMBOL_REF: - case LABEL_REF: -- return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)); -+ return (loongarch_symbolic_constant_p (op, &symbol_type)); - default: - return true; - } - }) - --(define_predicate "consttable_operand" -- (match_test "CONSTANT_P (op)")) -- - (define_predicate "symbolic_operand" - (match_code "const,symbol_ref,label_ref") - { - enum loongarch_symbol_type type; -- return loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type); --}) -- --(define_predicate "force_to_mem_operand" -- (match_code "const,symbol_ref,label_ref") --{ -- enum loongarch_symbol_type symbol_type; -- return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type) -- && loongarch_use_pcrel_pool_p[(int) symbol_type]); --}) -- --(define_predicate "got_disp_operand" -- (match_code "const,symbol_ref,label_ref") --{ -- enum loongarch_symbol_type type; -- return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type) -- && type == SYMBOL_GOT_DISP); -+ return loongarch_symbolic_constant_p (op, &type); - }) - --(define_predicate "symbol_ref_operand" -- (match_code "symbol_ref")) -- --(define_predicate "stack_operand" -- (and (match_code "mem") -- (match_test "loongarch_stack_address_p (XEXP (op, 0), GET_MODE (op))"))) -- -- -- - (define_predicate "equality_operator" - (match_code "eq,ne")) - --(define_predicate "extend_operator" -- (match_code "zero_extend,sign_extend")) -- --(define_predicate "trap_comparison_operator" -- (match_code "eq,ne,lt,ltu,ge,geu")) -- - (define_predicate "order_operator" - (match_code "lt,ltu,le,leu,ge,geu,gt,gtu")) - - ;; For NE, cstore uses sltu instructions in which the first operand is $0. - - (define_predicate "loongarch_cstore_operator" -- (ior (match_code "eq,gt,gtu,ge,geu,lt,ltu,le,leu") -- (match_code "ne"))) -+ (match_code "ne,eq,gt,gtu,ge,geu,lt,ltu,le,leu")) - - (define_predicate "small_data_pattern" - (and (match_code "set,parallel,unspec,unspec_volatile,prefetch") - (match_test "loongarch_small_data_pattern_p (op)"))) - --(define_predicate "mem_noofs_operand" -- (and (match_code "mem") -- (match_code "reg" "0"))) -- - ;; Return 1 if the operand is in non-volatile memory. 
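
The splittable_const_int_operand predicate a few hunks above keeps only constants that no single LoongArch instruction can load, so the move expander splits them into a multi-instruction sequence. A rough C sketch of the four single-instruction immediate classes it rules out; the helper names are invented, and the ranges approximate the LU12I_INT/IMM12_INT/IMM12_INT_UNSIGNED/LU52I_INT macros from the addi/ori/lu12i.w/lu52i.d encodings:

#include <stdbool.h>
#include <stdint.h>

static bool imm12_int (int64_t v)          /* addi: signed 12-bit immediate */
{ return v >= -2048 && v <= 2047; }

static bool imm12_int_unsigned (int64_t v) /* ori: unsigned 12-bit immediate */
{ return v >= 0 && v <= 4095; }

static bool lu12i_int (int64_t v)          /* lu12i.w: bits 31..12, low 12 bits zero */
{ return (v & 0xfff) == 0 && v >= INT32_MIN && v <= INT32_MAX; }

static bool lu52i_int (int64_t v)          /* lu52i.d: only bits 63..52 set */
{ return (v & ((INT64_C (1) << 52) - 1)) == 0; }

/* A constant is worth splitting only when no single instruction loads it.  */
static bool splittable_const_int (int64_t v)
{
  return !lu12i_int (v) && !imm12_int (v)
         && !imm12_int_unsigned (v) && !lu52i_int (v);
}
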
- (define_predicate "non_volatile_mem_operand" - (and (match_operand 0 "memory_operand") -@@ -606,12 +418,6 @@ - return loongarch_const_vector_same_int_p (op, mode, 0, 63); - }) - --(define_predicate "const_vector_same_uimm8_operand" -- (match_code "const_vector") --{ -- return loongarch_const_vector_same_int_p (op, mode, 0, 255); --}) -- - (define_predicate "par_const_vector_shf_set_operand" - (match_code "parallel") - { -diff --git a/gcc/config/loongarch/rtems.h b/gcc/config/loongarch/rtems.h -deleted file mode 100644 -index bbb70b040..000000000 ---- a/gcc/config/loongarch/rtems.h -+++ /dev/null -@@ -1,39 +0,0 @@ --/* Definitions for rtems targeting a LARCH using ELF. -- Copyright (C) 1996-2018 Free Software Foundation, Inc. -- Contributed by Joel Sherrill (joel@OARcorp.com). -- -- This file is part of GCC. -- -- GCC is free software; you can redistribute it and/or modify it -- under the terms of the GNU General Public License as published -- by the Free Software Foundation; either version 3, or (at your -- option) any later version. -- -- GCC is distributed in the hope that it will be useful, but WITHOUT -- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -- License for more details. -- -- Under Section 7 of GPL version 3, you are granted additional -- permissions described in the GCC Runtime Library Exception, version -- 3.1, as published by the Free Software Foundation. -- -- You should have received a copy of the GNU General Public License and -- a copy of the GCC Runtime Library Exception along with this program; -- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -- . */ -- --/* Specify predefined symbols in preprocessor. */ -- --#define TARGET_OS_CPP_BUILTINS() \ --do { \ -- builtin_define ("__rtems__"); \ -- builtin_define ("__USE_INIT_FINI__"); \ -- builtin_assert ("system=rtems"); \ --} while (0) -- --/* No sdata. -- * The RTEMS BSPs expect -G0 -- */ --#undef LARCH_DEFAULT_GVALUE --#define LARCH_DEFAULT_GVALUE 0 -diff --git a/gcc/config/loongarch/sde.opt b/gcc/config/loongarch/sde.opt -deleted file mode 100644 -index 321217d51..000000000 ---- a/gcc/config/loongarch/sde.opt -+++ /dev/null -@@ -1,28 +0,0 @@ --; LARCH SDE options. --; --; Copyright (C) 2010-2018 Free Software Foundation, Inc. --; --; This file is part of GCC. --; --; GCC is free software; you can redistribute it and/or modify it under --; the terms of the GNU General Public License as published by the Free --; Software Foundation; either version 3, or (at your option) any later --; version. --; --; GCC is distributed in the hope that it will be useful, but WITHOUT --; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY --; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public --; License for more details. --; --; You should have received a copy of the GNU General Public License --; along with GCC; see the file COPYING3. If not see --; . -- --; -mcode-xonly is a traditional alias for -mcode-readable=pcrel and --; -mno-data-in-code is a traditional alias for -mcode-readable=no. -- --mno-data-in-code --Target RejectNegative Alias(mcode-readable=, no) -- --mcode-xonly --Target RejectNegative Alias(mcode-readable=, pcrel) -diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md -index 5a16c4fa3..abc401339 100644 ---- a/gcc/config/loongarch/sync.md -+++ b/gcc/config/loongarch/sync.md -@@ -1,7 +1,7 @@ --;; Machine description for LARCH atomic operations. 
--;; Copyright (C) 2011-2018 Free Software Foundation, Inc.
--;; Contributed by Andrew Waterman (andrew@sifive.com).
--;; Based on LARCH target for GNU compiler.
-+;; Machine description for LoongArch atomic operations.
-+;; Copyright (C) 2020-2022 Free Software Foundation, Inc.
-+;; Contributed by Loongson Co. Ltd.
-+;; Based on MIPS and RISC-V target for GNU compiler.
- 
- ;; This file is part of GCC.
- 
-@@ -29,6 +29,7 @@
-   UNSPEC_COMPARE_AND_SWAP_NAND
-   UNSPEC_SYNC_OLD_OP
-   UNSPEC_SYNC_EXCHANGE
-+  UNSPEC_ATOMIC_LOAD
-   UNSPEC_ATOMIC_STORE
-   UNSPEC_MEMORY_BARRIER
- ])
-@@ -37,21 +38,25 @@
- (define_code_attr atomic_optab
- 	[(plus "add") (ior "or") (xor "xor") (and "and")])
- 
-+(define_mode_iterator AMO_BHWD [(QI "TARGET_uARCH_LA664")
-+				(HI "TARGET_uARCH_LA664")
-+				SI DI])
-+
- ;; This attribute gives the format suffix for atomic memory operations.
--(define_mode_attr amo [(SI "w") (DI "d")])
-+(define_mode_attr amo [(QI "b") (HI "h") (SI "w") (DI "d")])
-+
-+;; <amop> expands to the name of the atomic operand that implements a
-+;; particular code.
-+(define_code_attr amop [(ior "or") (xor "xor") (and "and") (plus "add")])
- 
--;; <amop> expands to the name of the atomic operand that implements a particular code.
--(define_code_attr amop [(ior "or")
--			(xor "xor")
--			(and "and")
--			(plus "add")])
- ;; Memory barriers.
- 
- (define_expand "mem_thread_fence"
-   [(match_operand:SI 0 "const_int_operand" "")] ;; model
-   ""
- {
--  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
-+  enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
-+  if (!is_mm_relaxed (model))
-     {
-       rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
-       MEM_VOLATILE_P (mem) = 1;
-@@ -60,37 +65,79 @@
-       DONE;
-     })
- 
--;; Until the LARCH memory model (hence its mapping from C++) is finalized,
-+;; Until the LoongArch memory model (hence its mapping from C++) is finalized,
- ;; conservatively emit a full FENCE.
- (define_insn "mem_thread_fence_1"
-   [(set (match_operand:BLK 0 "" "")
- 	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
-    (match_operand:SI 1 "const_int_operand" "")] ;; model
-   ""
--  "dbar\t0")
-+{
-+  enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
-+  if (is_mm_consume (model))
-+    return "dbar\t0x700";
-+  else if (is_mm_acquire (model))
-+    return "dbar\t0x14";
-+  else
-+    return "dbar\t0x10";
-+})
- 
- ;; Atomic memory operations.
- 
-+(define_insn "atomic_load<mode>"
-+  [(set (match_operand:QHWD 0 "register_operand" "=r")
-+	(unspec_volatile:QHWD
-+	  [(match_operand:QHWD 1 "memory_operand" "+m")
-+	   (match_operand:SI 2 "const_int_operand")] ;; model
-+	  UNSPEC_ATOMIC_LOAD))]
-+  ""
-+{
-+  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
-+  if (is_mm_relaxed (model) || is_mm_release (model))
-+    return "ld.<amo>\t%0,%1";
-+  if (is_mm_consume (model))
-+    return "ld.<amo>\t%0,%1\n\tdbar\t0x700";
-+  else
-+    return "ld.<amo>\t%0,%1\n\tdbar\t0x14";
-+})
-+
- ;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
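
The atomic_load pattern just above picks its trailing barrier from the C++ memory model: no dbar for relaxed and release, the weaker hint 0x700 for consume, and 0x14 for acquire and seq_cst (mem_thread_fence_1 makes the same distinction, with 0x10 as the full barrier). A minimal C sketch of that dispatch; the enum and the function are invented, only the template strings come from the pattern:

/* "<amo>" stands for the b/h/w/d suffix picked by the operand mode.  */
enum memmodel { MM_RELAXED, MM_CONSUME, MM_ACQUIRE, MM_RELEASE, MM_SEQ_CST };

static const char *
atomic_load_template (enum memmodel m)
{
  if (m == MM_RELAXED || m == MM_RELEASE)   /* no read-side barrier needed */
    return "ld.<amo>\t%0,%1";
  if (m == MM_CONSUME)                      /* weaker ordering hint */
    return "ld.<amo>\t%0,%1\n\tdbar\t0x700";
  return "ld.<amo>\t%0,%1\n\tdbar\t0x14";   /* acquire and seq_cst */
}
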
- (define_insn "atomic_store" -- [(set (match_operand:GPR 0 "memory_operand" "+ZB") -- (unspec_volatile:GPR -- [(match_operand:GPR 1 "reg_or_0_operand" "rJ") -+ [(set (match_operand:QHWD 0 "memory_operand" "+m") -+ (unspec_volatile:QHWD -+ [(match_operand:QHWD 1 "reg_or_0_operand" "rJ") - (match_operand:SI 2 "const_int_operand")] ;; model - UNSPEC_ATOMIC_STORE))] - "" -- "amswap%A2.\t$zero,%z1,%0" -+{ -+ enum memmodel model = memmodel_from_int (INTVAL (operands[2])); -+ if (is_mm_relaxed (model) || is_mm_acquire (model) || is_mm_consume (model)) -+ return "st.\t%z1,%0"; -+ else -+ return "dbar\t0x12\n\tst.\t%z1,%0"; -+} - [(set (attr "length") (const_int 8))]) - - (define_insn "atomic_" - [(set (match_operand:GPR 0 "memory_operand" "+ZB") - (unspec_volatile:GPR - [(any_atomic:GPR (match_dup 0) -- (match_operand:GPR 1 "reg_or_0_operand" "rJ")) -+ (match_operand:GPR 1 "reg_or_0_operand" "rJ")) - (match_operand:SI 2 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" -- "am%A2.\t$zero,%z1,%0" -+ "%J2\n\tam%A2.\t$zero,%z1,%0\n\t%K2" -+ [(set (attr "length") (const_int 8))]) -+ -+(define_insn "atomic_add" -+ [(set (match_operand:SHORT 0 "memory_operand" "+ZB") -+ (unspec_volatile:SHORT -+ [(plus:SHORT (match_dup 0) -+ (match_operand:SHORT 1 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 2 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "TARGET_uARCH_LA664" -+ "%J2\n\tamadd%A2.\t$zero,%z1,%0\n\t%K2" - [(set (attr "length") (const_int 8))]) - - (define_insn "atomic_fetch_" -@@ -99,11 +146,11 @@ - (set (match_dup 1) - (unspec_volatile:GPR - [(any_atomic:GPR (match_dup 1) -- (match_operand:GPR 2 "reg_or_0_operand" "rJ")) -+ (match_operand:GPR 2 "reg_or_0_operand" "rJ")) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" -- "am%A3.\t%0,%z2,%1" -+ "%J3\n\tam%A3.\t%0,%z2,%1\n\t%K3" - [(set (attr "length") (const_int 8))]) - - (define_insn "atomic_exchange" -@@ -115,35 +162,90 @@ - (set (match_dup 1) - (match_operand:GPR 2 "register_operand" "r"))] - "" -- "amswap%A3.\t%0,%z2,%1" -+ "%J3\n\tamswap%A3.\t%0,%z2,%1\n\t%K3" -+ [(set (attr "length") (const_int 8))]) -+ -+(define_insn "atomic_exchange_1" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (unspec_volatile:SHORT -+ [(match_operand:SHORT 1 "memory_operand" "+ZB") -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_EXCHANGE)) -+ (set (match_dup 1) -+ (match_operand:SHORT 2 "register_operand" "r"))] -+ "" -+ "%J3\n\tamswap%A3.\t%0,%z2,%1\n\t%K3" - [(set (attr "length") (const_int 8))]) - - (define_insn "atomic_cas_value_strong" - [(set (match_operand:GPR 0 "register_operand" "=&r") -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") - (match_operand:GPR 3 "reg_or_0_operand" "rJ") -- (match_operand:SI 4 "const_int_operand") ;; mod_s -- (match_operand:SI 5 "const_int_operand")] ;; mod_f -+ (match_operand:SI 4 "const_int_operand")] ;; mod_s - UNSPEC_COMPARE_AND_SWAP)) -- (clobber (match_scratch:GPR 6 "=&r"))] -+ (clobber (match_scratch:GPR 5 "=&r"))] - "" - { -- return "%G5\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "bne\t%0,%z2,2f\n\t" -- "or%i3\t%6,$zero,%3\n\t" -- "sc.\t%6,%1\n\t" -- "beq\t$zero,%6,1b\n\t" -- "b\t3f\n\t" -- "2:\n\t" -- "dbar\t0x700\n\t" -- "3:\n\t"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[4])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || 
is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("bne\t%0,%z2,2f", operands); -+ output_asm_insn ("or%i3\t%5,$zero,%3", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%5,%1", operands); -+ else -+ output_asm_insn ("screl.\t%5,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%5,1b", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ { -+ output_asm_insn ("b\t3f", operands); -+ output_asm_insn ("2:", operands); -+ output_asm_insn ("dbar\t0x700", operands); -+ output_asm_insn ("3:", operands); -+ } -+ else -+ output_asm_insn ("2:", operands); -+ return ""; -+ } -+ else -+ return "%G4\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "bne\\t%0,%z2,2f\\n\\t" -+ "or%i3\\t%5,$zero,%3\\n\\t" -+ "sc.\\t%5,%1\\n\\t" -+ "beq\\t$zero,%5,1b\\n\\t" -+ "b\\t3f\\n\\t" -+ "2:\\n\\t" -+ "dbar\\t0x700\\n\\t" -+ "3:\\n\\t"; - } - [(set (attr "length") (const_int 32))]) - -+(define_insn "atomic_cas_value_strong_3a6000" -+ [(set (match_operand:AMO_BHWD 0 "register_operand" "=&r") -+ (match_operand:AMO_BHWD 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:AMO_BHWD [(match_operand:AMO_BHWD 2 "reg_or_0_operand" "rJ") -+ (match_operand:AMO_BHWD 3 "reg_or_0_operand" "rJ") -+ (match_operand:SI 4 "const_int_operand")] ;; mod_s -+ UNSPEC_COMPARE_AND_SWAP))] -+ "TARGET_uARCH_LA664" -+ "ori\t%0,%z2,0\n\t%J4\n\tamcas%A4.\t%0,%z3,%1\n\t%K4" -+ [(set (attr "length") (const_int 32))]) -+ - (define_expand "atomic_compare_and_swap" - [(match_operand:SI 0 "register_operand" "") ;; bool output - (match_operand:GPR 1 "register_operand" "") ;; val output -@@ -155,9 +257,29 @@ - (match_operand:SI 7 "const_int_operand" "")] ;; mod_f - "" - { -- emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2], -- operands[3], operands[4], -- operands[6], operands[7])); -+ rtx mod_s, mod_f; -+ -+ mod_s = operands[6]; -+ mod_f = operands[7]; -+ -+ /* Normally the succ memory model must be stronger than fail, but in the -+ unlikely event of fail being ACQUIRE and succ being RELEASE we need to -+ promote succ to ACQ_REL so that we don't lose the acquire semantics. */ -+ -+ if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f))) -+ && is_mm_release (memmodel_from_int (INTVAL (mod_s)))) -+ mod_s = GEN_INT (MEMMODEL_ACQ_REL); -+ -+ operands[6] = mod_s; -+ -+ if (TARGET_uARCH_LA664) -+ emit_insn (gen_atomic_cas_value_strong_3a6000 (operands[1], operands[2], -+ operands[3], operands[4], -+ operands[6])); -+ else -+ emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2], -+ operands[3], operands[4], -+ operands[6])); - - rtx compare = operands[1]; - if (operands[3] != const0_rtx) -@@ -174,7 +296,8 @@ - compare = reg; - } - -- emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); -+ emit_insn (gen_rtx_SET (operands[0], -+ gen_rtx_EQ (SImode, compare, const0_rtx))); - DONE; - }) - -@@ -185,7 +308,7 @@ - "" - { - /* We have no QImode atomics, so use the address LSBs to form a mask, -- then use an aligned SImode atomic. */ -+ then use an aligned SImode atomic. 
*/ - rtx result = operands[0]; - rtx mem = operands[1]; - rtx model = operands[2]; -@@ -221,11 +344,9 @@ - DONE; - }) - -- -- - (define_insn "atomic_cas_value_cmp_and_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") - (match_operand:GPR 3 "reg_or_0_operand" "rJ") -@@ -236,23 +357,56 @@ - (clobber (match_scratch:GPR 7 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%2\n\t" -- "bne\t%7,%z4,2f\n\t" -- "and\t%7,%0,%z3\n\t" -- "or%i5\t%7,%7,%5\n\t" -- "sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b\n\t" -- "b\t3f\n\t" -- "2:\n\t" -- "dbar\t0x700\n\t" -- "3:\n\t"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%2", operands); -+ output_asm_insn ("bne\t%7,%z4,2f", operands); -+ output_asm_insn ("and\t%7,%0,%z3", operands); -+ output_asm_insn ("or%i5\t%7,%7,%5", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ { -+ output_asm_insn ("b\t3f", operands); -+ output_asm_insn ("2:", operands); -+ output_asm_insn ("dbar\t0x700", operands); -+ output_asm_insn ("3:", operands); -+ } -+ else -+ output_asm_insn ("2:", operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%2\\n\\t" -+ "bne\\t%7,%z4,2f\\n\\t" -+ "and\\t%7,%0,%z3\\n\\t" -+ "or%i5\\t%7,%7,%5\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b\\n\\t" -+ "b\\t3f\\n\\t" -+ "2:\\n\\t" -+ "dbar\\t0x700\\n\\t" -+ "3:\\n\\t"; - } - [(set (attr "length") (const_int 40))]) - -- - (define_expand "atomic_compare_and_swap" - [(match_operand:SI 0 "register_operand" "") ;; bool output - (match_operand:SHORT 1 "register_operand" "") ;; val output -@@ -264,43 +418,59 @@ - (match_operand:SI 7 "const_int_operand" "")] ;; mod_f - "" - { -- union loongarch_gen_fn_ptrs generator; -- generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[1], -- operands[2], -- operands[3], -- operands[4], -- operands[7]); -+ rtx mod_s, mod_f; - -- rtx compare = operands[1]; -- if (operands[3] != const0_rtx) -- { -- machine_mode mode = GET_MODE (operands[3]); -- rtx op1 = convert_modes (SImode, mode, operands[1], true); -- rtx op3 = convert_modes (SImode, mode, operands[3], true); -- rtx difference = gen_rtx_MINUS (SImode, op1, op3); -- compare = gen_reg_rtx (SImode); -- emit_insn (gen_rtx_SET (compare, difference)); -- } -+ mod_s = operands[6]; -+ mod_f = operands[7]; - -- if (word_mode != mode) -+ /* Normally the succ memory model must be stronger than fail, but in the -+ unlikely event of fail being ACQUIRE and succ being RELEASE we need to -+ promote succ to ACQ_REL so that we don't lose the acquire semantics. 
*/ -+ -+ if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f))) -+ && is_mm_release (memmodel_from_int (INTVAL (mod_s)))) -+ mod_s = GEN_INT (MEMMODEL_ACQ_REL); -+ -+ operands[6] = mod_s; -+ -+ if (TARGET_uARCH_LA664) -+ emit_insn (gen_atomic_cas_value_strong_3a6000 (operands[1], operands[2], -+ operands[3], operands[4], -+ operands[6])); -+ else - { -- rtx reg = gen_reg_rtx (word_mode); -- emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); -- compare = reg; -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; -+ loongarch_expand_atomic_qihi (generator, operands[1], operands[2], -+ operands[3], operands[4], operands[6]); - } - -- emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); -+ rtx compare = operands[1]; -+ if (operands[3] != const0_rtx) -+ { -+ machine_mode mode = GET_MODE (operands[3]); -+ rtx op1 = convert_modes (SImode, mode, operands[1], true); -+ rtx op3 = convert_modes (SImode, mode, operands[3], true); -+ rtx difference = gen_rtx_MINUS (SImode, op1, op3); -+ compare = gen_reg_rtx (SImode); -+ emit_insn (gen_rtx_SET (compare, difference)); -+ } -+ -+ if (word_mode != mode) -+ { -+ rtx reg = gen_reg_rtx (word_mode); -+ emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); -+ compare = reg; -+ } -+ -+ emit_insn (gen_rtx_SET (operands[0], -+ gen_rtx_EQ (SImode, compare, const0_rtx))); - DONE; - }) - -- -- -- - (define_insn "atomic_cas_value_add_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask - (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -@@ -312,24 +482,46 @@ - (clobber (match_scratch:GPR 8 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%3\n\t" -- "add.w\t%8,%0,%z5\n\t" -- "and\t%8,%8,%z2\n\t" -- "or%i8\t%7,%7,%8\n\t" -- "sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%3", operands); -+ output_asm_insn ("add.w\t%8,%0,%z5", operands); -+ output_asm_insn ("and\t%8,%8,%z2", operands); -+ output_asm_insn ("or%i8\t%7,%7,%8", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b",operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%3\\n\\t" -+ "add.w\\t%8,%0,%z5\\n\\t" -+ "and\\t%8,%8,%z2\\n\\t" -+ "or%i8\\t%7,%7,%8\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b"; - } - - [(set (attr "length") (const_int 32))]) - -- -- - (define_insn "atomic_cas_value_sub_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask - (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -@@ -341,23 +533,45 @@ - (clobber 
(match_scratch:GPR 8 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%3\n\t" -- "sub.w\t%8,%0,%z5\n\t" -- "and\t%8,%8,%z2\n\t" -- "or%i8\t%7,%7,%8\n\t" -- "sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%3", operands); -+ output_asm_insn ("sub.w\t%8,%0,%z5", operands); -+ output_asm_insn ("and\t%8,%8,%z2", operands); -+ output_asm_insn ("or%i8\t%7,%7,%8", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b", operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%3\\n\\t" -+ "sub.w\\t%8,%0,%z5\\n\\t" -+ "and\\t%8,%8,%z2\\n\\t" -+ "or%i8\\t%7,%7,%8\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b"; - } - [(set (attr "length") (const_int 32))]) - -- -- - (define_insn "atomic_cas_value_and_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask - (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -@@ -369,21 +583,45 @@ - (clobber (match_scratch:GPR 8 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%3\n\t" -- "and\t%8,%0,%z5\n\t" -- "and\t%8,%8,%z2\n\t" -- "or%i8\t%7,%7,%8\n\t" -- "sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%3", operands); -+ output_asm_insn ("and\t%8,%0,%z5", operands); -+ output_asm_insn ("and\t%8,%8,%z2", operands); -+ output_asm_insn ("or%i8\t%7,%7,%8", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b", operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%3\\n\\t" -+ "and\\t%8,%0,%z5\\n\\t" -+ "and\\t%8,%8,%z2\\n\\t" -+ "or%i8\\t%7,%7,%8\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b"; - } - [(set (attr "length") (const_int 32))]) - - (define_insn "atomic_cas_value_xor_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask - (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -@@ -395,22 +633,46 @@ - (clobber (match_scratch:GPR 8 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%3\n\t" -- "xor\t%8,%0,%z5\n\t" -- "and\t%8,%8,%z2\n\t" -- "or%i8\t%7,%7,%8\n\t" -- 
"sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%3", operands); -+ output_asm_insn ("xor\t%8,%0,%z5", operands); -+ output_asm_insn ("and\t%8,%8,%z2", operands); -+ output_asm_insn ("or%i8\t%7,%7,%8", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b", operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%3\\n\\t" -+ "xor\\t%8,%0,%z5\\n\\t" -+ "and\\t%8,%8,%z2\\n\\t" -+ "or%i8\\t%7,%7,%8\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b"; - } - - [(set (attr "length") (const_int 32))]) - - (define_insn "atomic_cas_value_or_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask - (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -@@ -422,22 +684,46 @@ - (clobber (match_scratch:GPR 8 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%3\n\t" -- "or\t%8,%0,%z5\n\t" -- "and\t%8,%8,%z2\n\t" -- "or%i8\t%7,%7,%8\n\t" -- "sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%3", operands); -+ output_asm_insn ("or\t%8,%0,%z5", operands); -+ output_asm_insn ("and\t%8,%8,%z2", operands); -+ output_asm_insn ("or%i8\t%7,%7,%8", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b", operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%3\\n\\t" -+ "or\\t%8,%0,%z5\\n\\t" -+ "and\\t%8,%8,%z2\\n\\t" -+ "or%i8\\t%7,%7,%8\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b"; - } - - [(set (attr "length") (const_int 32))]) - - (define_insn "atomic_cas_value_nand_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask - (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask -@@ -449,21 +735,45 @@ - (clobber (match_scratch:GPR 8 "=&r"))] - "" - { -- return "%G6\n\t" -- "1:\n\t" -- "ll.\t%0,%1\n\t" -- "and\t%7,%0,%3\n\t" -- "and\t%8,%0,%z5\n\t" -- "xor\t%8,%8,%z2\n\t" -- "or%i8\t%7,%7,%8\n\t" -- "sc.\t%7,%1\n\t" -- "beq\t$zero,%7,1b"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed 
(model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%3", operands); -+ output_asm_insn ("and\t%8,%0,%z5", operands); -+ output_asm_insn ("xor\t%8,%8,%z2", operands); -+ output_asm_insn ("or%i8\t%7,%7,%8", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beq\t$zero,%7,1b", operands); -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%3\\n\\t" -+ "and\\t%8,%0,%z5\\n\\t" -+ "xor\\t%8,%8,%z2\\n\\t" -+ "or%i8\\t%7,%7,%8\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beq\\t$zero,%7,1b"; - } - [(set (attr "length") (const_int 32))]) - - (define_insn "atomic_cas_value_exchange_7_" - [(set (match_operand:GPR 0 "register_operand" "=&r") -- (match_operand:GPR 1 "memory_operand" "+ZC")) -+ (match_operand:GPR 1 "memory_operand" "+ZB")) - (set (match_dup 1) - (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") - (match_operand:GPR 3 "reg_or_0_operand" "rJ") -@@ -474,13 +784,36 @@ - (clobber (match_scratch:GPR 7 "=&r"))] - "" - { -- return "%G6\\n\\t" -- "1:\\n\\t" -- "ll.\\t%0,%1\\n\\t" -- "and\\t%7,%0,%z3\\n\\t" -- "or%i5\\t%7,%7,%5\\n\\t" -- "sc.\\t%7,%1\\n\\t" -- "beqz\\t%7,1b\\n\\t"; -+ if (TARGET_uARCH_LA664) -+ { -+ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); -+ output_asm_insn ("1:",operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) -+ output_asm_insn ("ll.\t%0,%1", operands); -+ else -+ output_asm_insn ("llacq.\t%0,%1", operands); -+ -+ output_asm_insn ("and\t%7,%0,%z3", operands); -+ output_asm_insn ("or%i5\t%7,%7,%5", operands); -+ -+ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) -+ output_asm_insn ("sc.\t%7,%1", operands); -+ else -+ output_asm_insn ("screl.\t%7,%1", operands); -+ -+ output_asm_insn ("beqz\t%7,1b", operands); -+ -+ return ""; -+ } -+ else -+ return "%G6\\n\\t" -+ "1:\\n\\t" -+ "ll.\\t%0,%1\\n\\t" -+ "and\\t%7,%0,%z3\\n\\t" -+ "or%i5\\t%7,%7,%5\\n\\t" -+ "sc.\\t%7,%1\\n\\t" -+ "beqz\\t%7,1b\\n\\t"; - } - [(set (attr "length") (const_int 20))]) - -@@ -494,17 +827,30 @@ - (match_operand:SHORT 2 "register_operand"))] - "" - { -- union loongarch_gen_fn_ptrs generator; -- generator.fn_7 = gen_atomic_cas_value_exchange_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- const0_rtx, -- operands[2], -- operands[3]); -+ if (TARGET_uARCH_LA664) -+ emit_insn (gen_atomic_exchange_1 (operands[0], operands[1], operands[2], operands[3])); -+ else -+ { -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_exchange_7_si; -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ const0_rtx, operands[2], operands[3]); -+ } - DONE; - }) - -+(define_insn "atomic_fetch_add_1" -+ [(set (match_operand:SHORT 0 "register_operand" "=&r") -+ (match_operand:SHORT 1 "memory_operand" "+ZB")) -+ (set (match_dup 1) -+ (unspec_volatile:SHORT -+ [(plus:SHORT (match_dup 1) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SI 3 "const_int_operand")] ;; model -+ UNSPEC_SYNC_OLD_OP))] -+ "" -+ "%J3\n\tamadd%A3.\t%0,%z2,%1\n\t%K3" -+ [(set (attr "length") (const_int 8))]) - - (define_expand "atomic_fetch_add" - [(set (match_operand:SHORT 0 "register_operand" "=&r") 
-@@ -512,19 +858,21 @@ - (set (match_dup 1) - (unspec_volatile:SHORT - [(plus:SHORT (match_dup 1) -- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" - { -- union loongarch_gen_fn_ptrs generator; -- generator.fn_7 = gen_atomic_cas_value_add_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- operands[1], -- operands[2], -- operands[3]); -+ if (TARGET_uARCH_LA664) -+ emit_insn (gen_atomic_fetch_add_1 (operands[0], operands[1], -+ operands[2], operands[3])); -+ else -+ { -+ union loongarch_gen_fn_ptrs generator; -+ generator.fn_7 = gen_atomic_cas_value_add_7_si; -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ operands[1], operands[2], operands[3]); -+ } - DONE; - }) - -@@ -534,19 +882,15 @@ - (set (match_dup 1) - (unspec_volatile:SHORT - [(minus:SHORT (match_dup 1) -- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" - { - union loongarch_gen_fn_ptrs generator; - generator.fn_7 = gen_atomic_cas_value_sub_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- operands[1], -- operands[2], -- operands[3]); -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ operands[1], operands[2], operands[3]); - DONE; - }) - -@@ -556,19 +900,15 @@ - (set (match_dup 1) - (unspec_volatile:SHORT - [(and:SHORT (match_dup 1) -- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" - { - union loongarch_gen_fn_ptrs generator; - generator.fn_7 = gen_atomic_cas_value_and_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- operands[1], -- operands[2], -- operands[3]); -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ operands[1], operands[2], operands[3]); - DONE; - }) - -@@ -578,19 +918,15 @@ - (set (match_dup 1) - (unspec_volatile:SHORT - [(xor:SHORT (match_dup 1) -- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" - { - union loongarch_gen_fn_ptrs generator; - generator.fn_7 = gen_atomic_cas_value_xor_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- operands[1], -- operands[2], -- operands[3]); -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ operands[1], operands[2], operands[3]); - DONE; - }) - -@@ -600,19 +936,15 @@ - (set (match_dup 1) - (unspec_volatile:SHORT - [(ior:SHORT (match_dup 1) -- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) -+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" - { - union loongarch_gen_fn_ptrs generator; - generator.fn_7 = gen_atomic_cas_value_or_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- operands[1], -- operands[2], -- operands[3]); -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ operands[1], operands[2], operands[3]); - DONE; - }) - -@@ -622,18 +954,14 @@ - (set (match_dup 1) - (unspec_volatile:SHORT - [(not:SHORT (and:SHORT (match_dup 1) -- (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))) 
-+ (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))) - (match_operand:SI 3 "const_int_operand")] ;; model - UNSPEC_SYNC_OLD_OP))] - "" - { - union loongarch_gen_fn_ptrs generator; - generator.fn_7 = gen_atomic_cas_value_nand_7_si; -- loongarch_expand_atomic_qihi (generator, -- operands[0], -- operands[1], -- operands[1], -- operands[2], -- operands[3]); -+ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], -+ operands[1], operands[2], operands[3]); - DONE; - }) -diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux -index 58f27f89d..5ecf814fa 100644 ---- a/gcc/config/loongarch/t-linux -+++ b/gcc/config/loongarch/t-linux -@@ -16,8 +16,65 @@ - # along with GCC; see the file COPYING3. If not see - # . - --MULTILIB_OSDIRNAMES := ../lib64$(call if_multiarch,:loongarch64-linux-gnu) --MULTIARCH_DIRNAME := $(call if_multiarch,loongarch64-linux-gnu) -+# Multilib -+MULTILIB_OPTIONS = mabi=lp64d/mabi=lp64f/mabi=lp64s -+MULTILIB_DIRNAMES = . base/lp64f base/lp64s -+ -+# The GCC driver always gets all abi-related options on the command line. -+# (see loongarch-driver.c:driver_get_normalized_m_opts) -+comma=, -+MULTILIB_REQUIRED = $(foreach mlib,$(subst $(comma), ,$(TM_MULTILIB_CONFIG)),\ -+ $(firstword $(subst /, ,$(mlib)))) - --# haven't supported lp32 yet --MULTILIB_EXCEPTIONS = mabi=lp32 -+SPECS = specs.install -+ -+# temporary self_spec when building libraries (e.g. libgcc) -+gen_mlib_spec = $(if $(word 2,$1),\ -+ %{$(firstword $1):$(patsubst %,-%,$(wordlist 2,$(words $1),$1))}) -+ -+# clean up the result of DRIVER_SELF_SPEC to avoid conflict -+lib_build_self_spec = % $@ -+ -+# Remove lib_build_self_specs before regression tests. -+.PHONY: remove-lib-specs -+check check-host check-target $(CHECK_TARGETS) $(lang_checks): remove-lib-specs -+remove-lib-specs: -+ -mv -f specs.install specs 2>/dev/null -+ -+# Multiarch -+ifneq ($(call if_multiarch,yes),yes) -+ # Define LA_DISABLE_MULTIARCH if multiarch is disabled. -+ tm_defines += LA_DISABLE_MULTIARCH -+else -+ # Only define MULTIARCH_DIRNAME when multiarch is enabled, -+ # or it would always introduce ${target} into the search path. -+ MULTIARCH_DIRNAME = $(LA_MULTIARCH_TRIPLET) -+endif -+ -+# Don't define MULTILIB_OSDIRNAMES if multilib is disabled. -+ifeq ($(filter LA_DISABLE_MULTILIB,$(tm_defines)),) -+ -+ MULTILIB_OSDIRNAMES = \ -+ mabi.lp64d=../lib64$\ -+ $(call if_multiarch,:loongarch64-linux-gnu) -+ -+ MULTILIB_OSDIRNAMES += \ -+ mabi.lp64f=../lib64/f32$\ -+ $(call if_multiarch,:loongarch64-linux-gnuf32) -+ -+ MULTILIB_OSDIRNAMES += \ -+ mabi.lp64s=../lib64/sf$\ -+ $(call if_multiarch,:loongarch64-linux-gnusf) -+else -+ MULTILIB_OSDIRNAMES := ../lib64 -+endif -diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch -index 5689da44a..9d32fbcf6 100644 ---- a/gcc/config/loongarch/t-loongarch -+++ b/gcc/config/loongarch/t-loongarch -@@ -16,14 +16,20 @@ - # along with GCC; see the file COPYING3. If not see - # . 
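
The t-linux fragment above gives each -mabi= variant its own OS library directory under ../lib64. A quick hedged illustration of that mapping in C: the directory strings are the MULTILIB_OSDIRNAMES values from the hunk, the helper is invented, and -print-multi-os-directory in the comment is the standard GCC driver option that reports this directory:

#include <stdio.h>
#include <string.h>

static const char *
multilib_os_dir (const char *mabi)
{
  if (strcmp (mabi, "lp64f") == 0)
    return "../lib64/f32";
  if (strcmp (mabi, "lp64s") == 0)
    return "../lib64/sf";
  return "../lib64";   /* lp64d, the default ABI */
}

int
main (void)
{
  /* Matches what `gcc -mabi=lp64f -print-multi-os-directory` should print.  */
  puts (multilib_os_dir ("lp64f"));
  return 0;
}
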
- --$(srcdir)/config/loongarch/loongarch-tables.opt: $(srcdir)/config/loongarch/genopt.sh \ -- $(srcdir)/config/loongarch/loongarch-cpus.def -- $(SHELL) $(srcdir)/config/loongarch/genopt.sh $(srcdir)/config/loongarch > \ -- $(srcdir)/config/loongarch/loongarch-tables.opt -+# Canonical target triplet from config.gcc -+LA_MULTIARCH_TRIPLET = $(patsubst LA_MULTIARCH_TRIPLET=%,%,$\ -+$(filter LA_MULTIARCH_TRIPLET=%,$(tm_defines))) - --frame-header-opt.o: $(srcdir)/config/loongarch/frame-header-opt.c -- $(COMPILE) $< -- $(POSTCOMPILE) -+# String definition header -+LA_STR_H = $(srcdir)/config/loongarch/loongarch-str.h -+$(LA_STR_H): s-loongarch-str ; @true -+s-loongarch-str: $(srcdir)/config/loongarch/genopts/genstr.sh \ -+ $(srcdir)/config/loongarch/genopts/loongarch-strings -+ $(SHELL) $(srcdir)/config/loongarch/genopts/genstr.sh header \ -+ $(srcdir)/config/loongarch/genopts/loongarch-strings > \ -+ tmp-loongarch-str.h -+ $(SHELL) $(srcdir)/../move-if-change tmp-loongarch-str.h $(LA_STR_H) -+ $(STAMP) s-loongarch-str - - loongarch-c.o: $(srcdir)/config/loongarch/loongarch-c.c $(CONFIG_H) $(SYSTEM_H) \ - coretypes.h $(TM_H) $(TREE_H) output.h $(C_COMMON_H) $(TARGET_H) -@@ -31,15 +37,32 @@ loongarch-c.o: $(srcdir)/config/loongarch/loongarch-c.c $(CONFIG_H) $(SYSTEM_H) - $(srcdir)/config/loongarch/loongarch-c.c - - loongarch-builtins.o: $(srcdir)/config/loongarch/loongarch-builtins.c $(CONFIG_H) \ -- $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \ -- $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/loongarch/loongarch-ftypes.def \ -- $(srcdir)/config/loongarch/loongarch-modes.def -+ $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \ -+ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/loongarch/loongarch-ftypes.def \ -+ $(srcdir)/config/loongarch/loongarch-modes.def - $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ -- $(srcdir)/config/loongarch/loongarch-builtins.c --loongarch-d.o: $(srcdir)/config/loongarch/loongarch-d.c -- $(COMPILE) $< -- $(POSTCOMPILE) -- --comma=, --MULTILIB_OPTIONS = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG)))) --MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG)) -+ $(srcdir)/config/loongarch/loongarch-builtins.c -+ -+loongarch-driver.o : $(srcdir)/config/loongarch/loongarch-driver.c $(LA_STR_H) \ -+ $(CONFIG_H) $(SYSTEM_H) -+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -+ -+loongarch-opts.o: $(srcdir)/config/loongarch/loongarch-opts.c $(LA_STR_H) -+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -+ -+loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.c $(LA_STR_H) -+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -+ -+loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.c $(LA_STR_H) -+ $(CC) -c $(ALL_CFLAGS) $(INCLUDES) $< -+ -+$(srcdir)/config/loongarch/loongarch.opt: s-loongarch-opt ; @true -+s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \ -+ $(srcdir)/config/loongarch/genopts/loongarch.opt.in -+ $(SHELL) $(srcdir)/config/loongarch/genopts/genstr.sh opt \ -+ $(srcdir)/config/loongarch/genopts/loongarch.opt.in \ -+ > tmp-loongarch.opt -+ $(SHELL) $(srcdir)/../move-if-change tmp-loongarch.opt \ -+ $(srcdir)/config/loongarch/loongarch.opt -+ $(STAMP) s-loongarch-opt -+ -diff --git a/gcc/config/loongarch/x-native b/gcc/config/loongarch/x-native -deleted file mode 100644 -index 827d21f1a..000000000 ---- a/gcc/config/loongarch/x-native -+++ /dev/null 
-@@ -1,3 +0,0 @@ --driver-native.o : $(srcdir)/config/loongarch/driver-native.c \ -- $(CONFIG_H) $(SYSTEM_H) -- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -diff --git a/libgcc/config/loongarch/crtfastmath.c b/libgcc/config/loongarch/crtfastmath.c -index d7371de6d..5f7b298ac 100644 ---- a/libgcc/config/loongarch/crtfastmath.c -+++ b/libgcc/config/loongarch/crtfastmath.c -@@ -1,30 +1,32 @@ --/* Copyright (C) 2010-2018 Free Software Foundation, Inc. -+/* Copyright (C) 2020-2022 Free Software Foundation, Inc. -+ Contributed by Loongson Ltd. -+ Based on MIPS target for GNU compiler. - -- This file is part of GCC. -+This file is part of GCC. - -- GCC is free software; you can redistribute it and/or modify it -- under the terms of the GNU General Public License as published by -- the Free Software Foundation; either version 3, or (at your option) -- any later version. -+GCC is free software; you can redistribute it and/or modify it -+under the terms of the GNU General Public License as published by -+the Free Software Foundation; either version 3, or (at your option) -+any later version. - -- GCC is distributed in the hope that it will be useful, but WITHOUT -- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -- License for more details. -+GCC is distributed in the hope that it will be useful, but WITHOUT -+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -+License for more details. - -- Under Section 7 of GPL version 3, you are granted additional -- permissions described in the GCC Runtime Library Exception, version -- 3.1, as published by the Free Software Foundation. -+Under Section 7 of GPL version 3, you are granted additional -+permissions described in the GCC Runtime Library Exception, version -+3.1, as published by the Free Software Foundation. - -- You should have received a copy of the GNU General Public License -- and a copy of the GCC Runtime Library Exception along with this -- program; see the files COPYING3 and COPYING.RUNTIME respectively. -- If not, see . */ -+You should have received a copy of the GNU General Public License -+and a copy of the GCC Runtime Library Exception along with this -+program; see the files COPYING3 and COPYING.RUNTIME respectively. -+If not, see . */ - - #ifdef __loongarch_hard_float - - /* Rounding control. */ --#define _FPU_RC_NEAREST 0x000 /* RECOMMENDED */ -+#define _FPU_RC_NEAREST 0x000 /* RECOMMENDED. */ - #define _FPU_RC_ZERO 0x100 - #define _FPU_RC_UP 0x200 - #define _FPU_RC_DOWN 0x300 -@@ -33,18 +35,18 @@ - #define _FPU_IEEE 0x0000001F - - /* Macros for accessing the hardware control word. */ --#define _FPU_GETCW(cw) __asm__ ("movgr2fcsr %0,$r1" : "=r" (cw)) --#define _FPU_SETCW(cw) __asm__ ("movfcsr2gr %0,$r1" : : "r" (cw)) -+#define _FPU_GETCW(cw) __asm__ volatile ("movfcsr2gr %0,$r0" : "=r" (cw)) -+#define _FPU_SETCW(cw) __asm__ volatile ("movgr2fcsr $r0,%0" : : "r" (cw)) - - static void __attribute__((constructor)) - set_fast_math (void) - { - unsigned int fcr; - -- /* round to nearest, IEEE exceptions disabled. */ -+ /* Flush to zero, round to nearest, IEEE exceptions disabled. 
*/ - fcr = _FPU_RC_NEAREST; - -- _FPU_SETCW(fcr); -+ _FPU_SETCW (fcr); - } - --#endif /* __loongarch_hard_float */ -+#endif /* __loongarch_hard_float */ -diff --git a/libgcc/config/loongarch/crti.S b/libgcc/config/loongarch/crti.S -deleted file mode 100644 -index dcd05afea..000000000 ---- a/libgcc/config/loongarch/crti.S -+++ /dev/null -@@ -1,43 +0,0 @@ --/* Copyright (C) 2001-2018 Free Software Foundation, Inc. -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify it under --the terms of the GNU General Public License as published by the Free --Software Foundation; either version 3, or (at your option) any later --version. -- --GCC is distributed in the hope that it will be useful, but WITHOUT ANY --WARRANTY; without even the implied warranty of MERCHANTABILITY or --FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --for more details. -- --Under Section 7 of GPL version 3, you are granted additional --permissions described in the GCC Runtime Library Exception, version --3.1, as published by the Free Software Foundation. -- --You should have received a copy of the GNU General Public License and --a copy of the GCC Runtime Library Exception along with this program; --see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --. */ -- --/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack. -- Return spill offset of 40 and 20. Aligned to 16 bytes for n32. */ -- -- .section .init,"ax",@progbits -- .globl _init -- .type _init,@function --_init: -- addi.d $r3,$r3,-48 -- st.d $r1,$r3,40 -- addi.d $r3,$r3,48 -- jirl $r0,$r1,0 -- -- .section .fini,"ax",@progbits -- .globl _fini -- .type _fini,@function --_fini: -- addi.d $r3,$r3,-48 -- st.d $r1,$r3,40 -- addi.d $r3,$r3,48 -- jirl $r0,$r1,0 -diff --git a/libgcc/config/loongarch/crtn.S b/libgcc/config/loongarch/crtn.S -deleted file mode 100644 -index 91d9d5e7f..000000000 ---- a/libgcc/config/loongarch/crtn.S -+++ /dev/null -@@ -1,39 +0,0 @@ --/* Copyright (C) 2001-2018 Free Software Foundation, Inc. -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify it under --the terms of the GNU General Public License as published by the Free --Software Foundation; either version 3, or (at your option) any later --version. -- --GCC is distributed in the hope that it will be useful, but WITHOUT ANY --WARRANTY; without even the implied warranty of MERCHANTABILITY or --FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --for more details. -- --Under Section 7 of GPL version 3, you are granted additional --permissions described in the GCC Runtime Library Exception, version --3.1, as published by the Free Software Foundation. -- --You should have received a copy of the GNU General Public License and --a copy of the GCC Runtime Library Exception along with this program; --see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --. */ -- --/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack. -- Return spill offset of 40 and 20. Aligned to 16 bytes for n32. 
*/ -- -- -- .section .init,"ax",@progbits --init: -- ld.d $r1,$r3,40 -- addi.d $r3,$r3,48 -- jirl $r0,$r1,0 -- -- .section .fini,"ax",@progbits --fini: -- ld.d $r1,$r3,40 -- addi.d $r3,$r3,48 -- jirl $r0,$r1,0 -- -diff --git a/libgcc/config/loongarch/gthr-loongnixsde.h b/libgcc/config/loongarch/gthr-loongnixsde.h -deleted file mode 100644 -index f62b57318..000000000 ---- a/libgcc/config/loongarch/gthr-loongnixsde.h -+++ /dev/null -@@ -1,237 +0,0 @@ --/* LARCH SDE threads compatibility routines for libgcc2 and libobjc. */ --/* Compile this one with gcc. */ --/* Copyright (C) 2006-2018 Free Software Foundation, Inc. -- Contributed by Nigel Stephens -- --This file is part of GCC. -- --GCC is free software; you can redistribute it and/or modify it under --the terms of the GNU General Public License as published by the Free --Software Foundation; either version 3, or (at your option) any later --version. -- --GCC is distributed in the hope that it will be useful, but WITHOUT ANY --WARRANTY; without even the implied warranty of MERCHANTABILITY or --FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License --for more details. -- --Under Section 7 of GPL version 3, you are granted additional --permissions described in the GCC Runtime Library Exception, version --3.1, as published by the Free Software Foundation. -- --You should have received a copy of the GNU General Public License and --a copy of the GCC Runtime Library Exception along with this program; --see the files COPYING3 and COPYING.RUNTIME respectively. If not, see --. */ -- --#ifndef GCC_GTHR_LARCHSDE_H --#define GCC_GTHR_LARCHSDE_H -- --/* LARCH SDE threading API specific definitions. -- Easy, since the interface is pretty much one-to-one. */ -- --#define __GTHREADS 1 -- --#include --#include -- --#ifdef __cplusplus --extern "C" { --#endif -- --typedef __sdethread_key_t __gthread_key_t; --typedef __sdethread_once_t __gthread_once_t; --typedef __sdethread_mutex_t __gthread_mutex_t; -- --typedef struct { -- long depth; -- __sdethread_t owner; -- __sdethread_mutex_t actual; --} __gthread_recursive_mutex_t; -- --#define __GTHREAD_MUTEX_INIT __SDETHREAD_MUTEX_INITIALIZER("gthr") --#define __GTHREAD_ONCE_INIT __SDETHREAD_ONCE_INIT --static inline int --__gthread_recursive_mutex_init_function(__gthread_recursive_mutex_t *__mutex); --#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function -- --#if SUPPORTS_WEAK && GTHREAD_USE_WEAK --# define __gthrw(name) \ -- static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name))); --# define __gthrw_(name) __gthrw_ ## name --#else --# define __gthrw(name) --# define __gthrw_(name) name --#endif -- --__gthrw(__sdethread_once) --__gthrw(__sdethread_key_create) --__gthrw(__sdethread_key_delete) --__gthrw(__sdethread_getspecific) --__gthrw(__sdethread_setspecific) -- --__gthrw(__sdethread_self) -- --__gthrw(__sdethread_mutex_lock) --__gthrw(__sdethread_mutex_trylock) --__gthrw(__sdethread_mutex_unlock) -- --__gthrw(__sdethread_mutex_init) -- --__gthrw(__sdethread_threading) -- --#if SUPPORTS_WEAK && GTHREAD_USE_WEAK -- --static inline int --__gthread_active_p (void) --{ -- return !!(void *)&__sdethread_threading; --} -- --#else /* not SUPPORTS_WEAK */ -- --static inline int --__gthread_active_p (void) --{ -- return 1; --} -- --#endif /* SUPPORTS_WEAK */ -- --static inline int --__gthread_once (__gthread_once_t *__once, void (*__func) (void)) --{ -- if (__gthread_active_p ()) -- return __gthrw_(__sdethread_once) (__once, __func); -- else -- return -1; 
--} -- --static inline int --__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *)) --{ -- return __gthrw_(__sdethread_key_create) (__key, __dtor); --} -- --static inline int --__gthread_key_delete (__gthread_key_t __key) --{ -- return __gthrw_(__sdethread_key_delete) (__key); --} -- --static inline void * --__gthread_getspecific (__gthread_key_t __key) --{ -- return __gthrw_(__sdethread_getspecific) (__key); --} -- --static inline int --__gthread_setspecific (__gthread_key_t __key, const void *__ptr) --{ -- return __gthrw_(__sdethread_setspecific) (__key, __ptr); --} -- --static inline int --__gthread_mutex_destroy (__gthread_mutex_t * UNUSED(__mutex)) --{ -- return 0; --} -- --static inline int --__gthread_mutex_lock (__gthread_mutex_t *__mutex) --{ -- if (__gthread_active_p ()) -- return __gthrw_(__sdethread_mutex_lock) (__mutex); -- else -- return 0; --} -- --static inline int --__gthread_mutex_trylock (__gthread_mutex_t *__mutex) --{ -- if (__gthread_active_p ()) -- return __gthrw_(__sdethread_mutex_trylock) (__mutex); -- else -- return 0; --} -- --static inline int --__gthread_mutex_unlock (__gthread_mutex_t *__mutex) --{ -- if (__gthread_active_p ()) -- return __gthrw_(__sdethread_mutex_unlock) (__mutex); -- else -- return 0; --} -- --static inline int --__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex) --{ -- __mutex->depth = 0; -- __mutex->owner = __gthrw_(__sdethread_self) (); -- return __gthrw_(__sdethread_mutex_init) (&__mutex->actual, NULL); --} -- --static inline int --__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex) --{ -- if (__gthread_active_p ()) -- { -- __sdethread_t __me = __gthrw_(__sdethread_self) (); -- -- if (__mutex->owner != __me) -- { -- __gthrw_(__sdethread_mutex_lock) (&__mutex->actual); -- __mutex->owner = __me; -- } -- -- __mutex->depth++; -- } -- return 0; --} -- --static inline int --__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex) --{ -- if (__gthread_active_p ()) -- { -- __sdethread_t __me = __gthrw_(__sdethread_self) (); -- -- if (__mutex->owner != __me) -- { -- if (__gthrw_(__sdethread_mutex_trylock) (&__mutex->actual)) -- return 1; -- __mutex->owner = __me; -- } -- -- __mutex->depth++; -- } -- return 0; --} -- --static inline int --__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex) --{ -- if (__gthread_active_p ()) -- { -- if (--__mutex->depth == 0) -- { -- __mutex->owner = (__sdethread_t) 0; -- __gthrw_(__sdethread_mutex_unlock) (&__mutex->actual); -- } -- } -- return 0; --} -- --static inline int --__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t -- * UNUSED(__mutex)) --{ -- return 0; --} -- --#ifdef __cplusplus --} --#endif -- --#endif /* ! GCC_GTHR_LARCHSDE_H */ -diff --git a/libgcc/config/loongarch/linux-unwind.h b/libgcc/config/loongarch/linux-unwind.h -index d77dfb058..30603e44f 100644 ---- a/libgcc/config/loongarch/linux-unwind.h -+++ b/libgcc/config/loongarch/linux-unwind.h -@@ -1,5 +1,5 @@ --/* DWARF2 EH unwinding support for LARCH Linux. -- Copyright (C) 2004-2018 Free Software Foundation, Inc. -+/* DWARF2 EH unwinding support for LoongArch Linux. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. - - This file is part of GCC. - -@@ -34,26 +34,27 @@ see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see - - static _Unwind_Reason_Code - loongarch_fallback_frame_state (struct _Unwind_Context *context, -- _Unwind_FrameState *fs) -+ _Unwind_FrameState *fs) - { - u_int32_t *pc = (u_int32_t *) context->ra; - struct sigcontext *sc; - _Unwind_Ptr new_cfa; - int i; - -- /* 03822c0b dli a7, 0x8b (sigreturn) */ -- /* 002b0000 syscall 0 */ -+ /* 03822c0b li.d a7, 0x8b (sigreturn) */ -+ /* 002b0000 syscall 0 */ - if (pc[1] != 0x002b0000) - return _URC_END_OF_STACK; - if (pc[0] == 0x03822c0b) - { -- struct rt_sigframe { -+ struct rt_sigframe -+ { - u_int32_t ass[4]; /* Argument save space for o32. */ - u_int32_t trampoline[2]; - siginfo_t info; - ucontext_t uc; - } *rt_ = context->cfa; -- sc = &rt_->uc.uc_mcontext; -+ sc = (struct sigcontext *) (void *) &rt_->uc.uc_mcontext; - } - else - return _URC_END_OF_STACK; -@@ -63,17 +64,17 @@ loongarch_fallback_frame_state (struct _Unwind_Context *context, - fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__; - fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; - -- for (i = 0; i < 32; i++) { -- fs->regs.reg[i].how = REG_SAVED_OFFSET; -- fs->regs.reg[i].loc.offset -- = (_Unwind_Ptr)&(sc->sc_regs[i]) - new_cfa; -- } -+ for (i = 0; i < 32; i++) -+ { -+ fs->regs.reg[i].how = REG_SAVED_OFFSET; -+ fs->regs.reg[i].loc.offset = (_Unwind_Ptr) & (sc->sc_regs[i]) - new_cfa; -+ } - - fs->signal_frame = 1; - fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how - = REG_SAVED_VAL_OFFSET; - fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset -- = (_Unwind_Ptr)(sc->sc_pc) - new_cfa; -+ = (_Unwind_Ptr) (sc->sc_pc) - new_cfa; - fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__; - - return _URC_NO_REASON; -diff --git a/libgcc/config/loongarch/sfp-machine.h b/libgcc/config/loongarch/sfp-machine.h -index f7800a003..420f94274 100644 ---- a/libgcc/config/loongarch/sfp-machine.h -+++ b/libgcc/config/loongarch/sfp-machine.h -@@ -1,5 +1,5 @@ --/* softfp machine description for LARCH. -- Copyright (C) 2009-2018 Free Software Foundation, Inc. -+/* softfp machine description for LoongArch. -+ Copyright (C) 2020-2022 Free Software Foundation, Inc. - - This file is part of GCC. - -@@ -23,49 +23,49 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . 
*/ - - #ifdef __loongarch64 --#define _FP_W_TYPE_SIZE 64 --#define _FP_W_TYPE unsigned long long --#define _FP_WS_TYPE signed long long --#define _FP_I_TYPE long long -+#define _FP_W_TYPE_SIZE 64 -+#define _FP_W_TYPE unsigned long long -+#define _FP_WS_TYPE signed long long -+#define _FP_I_TYPE long long - - typedef int TItype __attribute__ ((mode (TI))); - typedef unsigned int UTItype __attribute__ ((mode (TI))); - #define TI_BITS (__CHAR_BIT__ * (int) sizeof (TItype)) - --#define _FP_MUL_MEAT_S(R,X,Y) \ -- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) --#define _FP_MUL_MEAT_D(R,X,Y) \ -- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) --#define _FP_MUL_MEAT_Q(R,X,Y) \ -- _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) -+#define _FP_MUL_MEAT_S(R, X, Y) \ -+ _FP_MUL_MEAT_1_wide (_FP_WFRACBITS_S, R, X, Y, umul_ppmm) -+#define _FP_MUL_MEAT_D(R, X, Y) \ -+ _FP_MUL_MEAT_1_wide (_FP_WFRACBITS_D, R, X, Y, umul_ppmm) -+#define _FP_MUL_MEAT_Q(R, X, Y) \ -+ _FP_MUL_MEAT_2_wide (_FP_WFRACBITS_Q, R, X, Y, umul_ppmm) - --#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) --#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) --#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) -+#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_udiv_norm (S, R, X, Y) -+#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_1_udiv_norm (D, R, X, Y) -+#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_2_udiv (Q, R, X, Y) - --# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) --# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1) --# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1 -+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) -+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1) -+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1 - #else --#define _FP_W_TYPE_SIZE 32 --#define _FP_W_TYPE unsigned int --#define _FP_WS_TYPE signed int --#define _FP_I_TYPE int -- --#define _FP_MUL_MEAT_S(R,X,Y) \ -- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) --#define _FP_MUL_MEAT_D(R,X,Y) \ -- _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) --#define _FP_MUL_MEAT_Q(R,X,Y) \ -- _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) -- --#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) --#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) --#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y) -- --# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) --# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 --# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 -+#define _FP_W_TYPE_SIZE 32 -+#define _FP_W_TYPE unsigned int -+#define _FP_WS_TYPE signed int -+#define _FP_I_TYPE int -+ -+#define _FP_MUL_MEAT_S(R, X, Y) \ -+ _FP_MUL_MEAT_1_wide (_FP_WFRACBITS_S, R, X, Y, umul_ppmm) -+#define _FP_MUL_MEAT_D(R, X, Y) \ -+ _FP_MUL_MEAT_2_wide (_FP_WFRACBITS_D, R, X, Y, umul_ppmm) -+#define _FP_MUL_MEAT_Q(R, X, Y) \ -+ _FP_MUL_MEAT_4_wide (_FP_WFRACBITS_Q, R, X, Y, umul_ppmm) -+ -+#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_udiv_norm (S, R, X, Y) -+#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_2_udiv (D, R, X, Y) -+#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_4_udiv (Q, R, X, Y) -+ -+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) -+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 -+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 - #endif - - /* The type of the result of a floating point comparison. 
This must -@@ -73,76 +73,80 @@ typedef unsigned int UTItype __attribute__ ((mode (TI))); - typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__))); - #define CMPtype __gcc_CMPtype - --#define _FP_NANSIGN_S 0 --#define _FP_NANSIGN_D 0 --#define _FP_NANSIGN_Q 0 -+#define _FP_NANSIGN_S 0 -+#define _FP_NANSIGN_D 0 -+#define _FP_NANSIGN_Q 0 - - #define _FP_KEEPNANFRACP 1 --# define _FP_QNANNEGATEDP 0 -+#define _FP_QNANNEGATEDP 0 - - /* NaN payloads should be preserved for NAN2008. */ --# define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ -- do \ -- { \ -- R##_s = X##_s; \ -- _FP_FRAC_COPY_##wc (R, X); \ -- R##_c = FP_CLS_NAN; \ -- } \ -+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ -+ do \ -+ { \ -+ R##_s = X##_s; \ -+ _FP_FRAC_COPY_##wc (R, X); \ -+ R##_c = FP_CLS_NAN; \ -+ } \ - while (0) - - #ifdef __loongarch_hard_float --#define FP_EX_INVALID 0x100000 --#define FP_EX_DIVZERO 0x080000 --#define FP_EX_OVERFLOW 0x040000 --#define FP_EX_UNDERFLOW 0x020000 --#define FP_EX_INEXACT 0x010000 -+#define FP_EX_INVALID 0x100000 -+#define FP_EX_DIVZERO 0x080000 -+#define FP_EX_OVERFLOW 0x040000 -+#define FP_EX_UNDERFLOW 0x020000 -+#define FP_EX_INEXACT 0x010000 - #define FP_EX_ALL \ -- (FP_EX_INVALID | FP_EX_DIVZERO | FP_EX_OVERFLOW | FP_EX_UNDERFLOW \ -- | FP_EX_INEXACT) -+ (FP_EX_INVALID | FP_EX_DIVZERO | FP_EX_OVERFLOW | FP_EX_UNDERFLOW \ -+ | FP_EX_INEXACT) - --#define FP_EX_ENABLE_SHIFT 16 --#define FP_EX_CAUSE_SHIFT 8 -+#define FP_EX_ENABLE_SHIFT 16 -+#define FP_EX_CAUSE_SHIFT 8 - --#define FP_RND_NEAREST 0x000 --#define FP_RND_ZERO 0x100 --#define FP_RND_PINF 0x200 --#define FP_RND_MINF 0x300 --#define FP_RND_MASK 0x300 -+#define FP_RND_NEAREST 0x000 -+#define FP_RND_ZERO 0x100 -+#define FP_RND_PINF 0x200 -+#define FP_RND_MINF 0x300 -+#define FP_RND_MASK 0x300 - - #define _FP_DECL_EX \ - unsigned long int _fcsr __attribute__ ((unused)) = FP_RND_NEAREST - --#define FP_INIT_ROUNDMODE \ -- do { \ -- _fcsr = __builtin_loongarch_movfcsr2gr (0); \ -- } while (0) -+#define FP_INIT_ROUNDMODE \ -+ do \ -+ { \ -+ _fcsr = __builtin_loongarch_movfcsr2gr (0); \ -+ } \ -+ while (0) - - #define FP_ROUNDMODE (_fcsr & FP_RND_MASK) - - #define FP_TRAPPING_EXCEPTIONS ((_fcsr << FP_EX_ENABLE_SHIFT) & FP_EX_ALL) - --#define FP_HANDLE_EXCEPTIONS \ -- do { \ -- _fcsr &= ~(FP_EX_ALL << FP_EX_CAUSE_SHIFT); \ -- _fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \ -- __builtin_loongarch_movgr2fcsr (0, _fcsr); \ -- } while (0) -+#define FP_HANDLE_EXCEPTIONS \ -+ do \ -+ { \ -+ _fcsr &= ~(FP_EX_ALL << FP_EX_CAUSE_SHIFT); \ -+ _fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \ -+ __builtin_loongarch_movgr2fcsr (0, _fcsr); \ -+ } \ -+ while (0) - - #else --#define FP_EX_INVALID (1 << 4) --#define FP_EX_DIVZERO (1 << 3) --#define FP_EX_OVERFLOW (1 << 2) --#define FP_EX_UNDERFLOW (1 << 1) --#define FP_EX_INEXACT (1 << 0) -+#define FP_EX_INVALID (1 << 4) -+#define FP_EX_DIVZERO (1 << 3) -+#define FP_EX_OVERFLOW (1 << 2) -+#define FP_EX_UNDERFLOW (1 << 1) -+#define FP_EX_INEXACT (1 << 0) - #endif - - #define _FP_TININESS_AFTER_ROUNDING 1 - --#define __LITTLE_ENDIAN 1234 -+#define __LITTLE_ENDIAN 1234 - --# define __BYTE_ORDER __LITTLE_ENDIAN -+#define __BYTE_ORDER __LITTLE_ENDIAN - - /* Define ALIASNAME as a strong alias for NAME. 
*/ --# define strong_alias(name, aliasname) _strong_alias(name, aliasname) --# define _strong_alias(name, aliasname) \ -+#define strong_alias(name, aliasname) _strong_alias (name, aliasname) -+#define _strong_alias(name, aliasname) \ - extern __typeof (name) aliasname __attribute__ ((alias (#name))); -diff --git a/libgcc/config/loongarch/t-elf b/libgcc/config/loongarch/t-elf -deleted file mode 100644 -index 651f10a53..000000000 ---- a/libgcc/config/loongarch/t-elf -+++ /dev/null -@@ -1,3 +0,0 @@ --# We must build libgcc2.a with -G 0, in case the user wants to link --# without the $gp register. --HOST_LIBGCC2_CFLAGS += -G 0 -diff --git a/libgcc/config/loongarch/t-loongarch b/libgcc/config/loongarch/t-loongarch -index 9508cb2fc..2a7dbf6ca 100644 ---- a/libgcc/config/loongarch/t-loongarch -+++ b/libgcc/config/loongarch/t-loongarch -@@ -5,5 +5,3 @@ softfp_int_modes := si di - softfp_extensions := - softfp_truncations := - softfp_exclude_libgcc2 := n -- --LIB2ADD_ST += $(srcdir)/config/loongarch/lib2funcs.c -diff --git a/libgcc/config/loongarch/t-sdemtk b/libgcc/config/loongarch/t-sdemtk -deleted file mode 100644 -index a06e828b5..000000000 ---- a/libgcc/config/loongarch/t-sdemtk -+++ /dev/null -@@ -1,3 +0,0 @@ --# Don't build FPBIT and DPBIT; we'll be using the SDE soft-float library. --FPBIT = --DPBIT = -diff --git a/libgcc/config/loongarch/t-vr b/libgcc/config/loongarch/t-vr -deleted file mode 100644 -index e69de29bb..000000000 --- -2.39.3 - diff --git a/gcc.spec b/gcc.spec index 5dcc5b9..e2721a9 100644 --- a/gcc.spec +++ b/gcc.spec @@ -1,11 +1,10 @@ -%define anolis_release .0.1 %global DATE 20210514 %global gitrev a3253c88425835d5b339d6998a1110a66ccd8b44 %global gcc_version 8.5.0 %global gcc_major 8 # Note, gcc_release must be integer, if you want to add suffixes to # %%{release}, append them after %%{gcc_release} on Release: line. -%global gcc_release 22 +%global gcc_release 23 %global nvptx_tools_gitrev c28050f60193b3b95a18866a96f03334e874e78f %global nvptx_newlib_gitrev aadc8eb0ec43b7cd0dd2dfb484bae63c8b05ef24 %global _unpackaged_files_terminate_build 0 @@ -14,7 +13,7 @@ # Until annobin is fixed (#1519165). 
%undefine _annotated_build %endif - +%global multilib_64_archs sparc64 ppc64 ppc64p7 x86_64 %if 0%{?rhel} > 7 %global build_ada 0 %global build_objc 0 @@ -39,32 +38,32 @@ %else %global build_libquadmath 0 %endif -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 loongarch64 +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %global build_libasan 1 %else %global build_libasan 0 %endif -%ifarch x86_64 ppc64 ppc64le aarch64 loongarch64 +%ifarch x86_64 ppc64 ppc64le aarch64 %global build_libtsan 1 %else %global build_libtsan 0 %endif -%ifarch x86_64 ppc64 ppc64le aarch64 loongarch64 +%ifarch x86_64 ppc64 ppc64le aarch64 %global build_liblsan 1 %else %global build_liblsan 0 %endif -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 loongarch64 +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %global build_libubsan 1 %else %global build_libubsan 0 %endif -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} loongarch64 +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} %global build_libatomic 1 %else %global build_libatomic 0 %endif -%ifarch %{ix86} x86_64 %{arm} alpha ppc ppc64 ppc64le ppc64p7 s390 s390x aarch64 loongarch64 +%ifarch %{ix86} x86_64 %{arm} alpha ppc ppc64 ppc64le ppc64p7 s390 s390x aarch64 %global build_libitm 1 %else %global build_libitm 0 @@ -80,7 +79,7 @@ %endif %global build_isl 1 %global build_libstdcxx_docs 1 -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} loongarch64 +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} %global attr_ifunc 1 %else %global attr_ifunc 0 @@ -102,11 +101,10 @@ %ifarch x86_64 %global multilib_32_arch i686 %endif - Summary: Various compilers (C, C++, Objective-C, ...) Name: gcc Version: %{gcc_version} -Release: %{gcc_release}%{anolis_release}%{?dist} +Release: %{gcc_release}%{?dist} # libgcc, libgfortran, libgomp, libstdc++ and crtstuff have # GCC Runtime Exception. License: GPLv3+ and GPLv3+ with exceptions and GPLv2+ with exceptions and LGPLv2+ and BSD @@ -314,20 +312,6 @@ Patch1000: nvptx-tools-no-ptxas.patch Patch1001: nvptx-tools-build.patch Patch1002: nvptx-tools-glibc.patch -Patch10000: 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch - -Patch1003: 0001-Add-LoongArch-support-for-anolis-a8-gcc.patch -Patch1004: 0002-loongarch-fix-multilib-osdirnames-to-lib64.patch -Patch1005: 0001-LoongArch-Fixup-configure-file-error.patch -Patch1006: 0002-LoongArch-Rename-config-file-for-loongarch.patch -Patch1007: LoongArch-Fix-atomic_exchange-expanding-PR107713.patch -Patch1008: Sync-to-gcc-8-vec-36.patch -Patch1009: LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch -Patch1010: Improve-specs-processing-to-allow-in-function-argume.patch -Patch1011: LoongArch-Add-sanitizer-support.patch -Patch1012: libitm-Add-LoongArch-support.patch -Patch1013: LoongArch-Add-missing-headers.patch -Patch1014: Fix-dwarf2cfi-error.patch # On ARM EABI systems, we do want -gnueabi to be part of the # target triple. @@ -954,8 +938,7 @@ cd nvptx-tools-%{nvptx_tools_gitrev} %patch1002 -p1 -b .nvptx-tools-glibc~ cd .. 
- -echo 'Anolis %{version}-%{gcc_release}%{anolis_release}' > gcc/DEV-PHASE +echo 'Red Hat %{version}-%{gcc_release}' > gcc/DEV-PHASE cp -a libstdc++-v3/config/cpu/i{4,3}86/atomicity.h @@ -987,23 +970,6 @@ fi # This test causes fork failures, because it spawns way too many threads rm -f gcc/testsuite/go.test/test/chan/goroutines.go -%patch10000 -p1 - -%ifarch loongarch64 -%patch1003 -p1 -%patch1004 -p1 -%patch1005 -p1 -%patch1006 -p1 -%patch1007 -p1 -%patch1008 -p1 -%patch1009 -p1 -%patch1010 -p1 -%patch1011 -p1 -%patch1012 -p1 -%patch1013 -p1 -%patch1014 -p1 -%endif - %build # Undo the broken autoconf change in recent Fedora versions @@ -1057,7 +1023,7 @@ CC="$CC" CXX="$CXX" CFLAGS="$OPT_FLAGS" LDFLAGS="$RPM_LD_FLAGS" \ --target nvptx-none --enable-as-accelerator-for=%{gcc_target_platform} \ --enable-languages=c,c++,fortran,lto \ --prefix=%{_prefix} --mandir=%{_mandir} --infodir=%{_infodir} \ - --with-bugurl=https://bugzilla.openanolis.cn \ + --with-bugurl=http://bugzilla.redhat.com/bugzilla \ --enable-checking=release --with-system-zlib \ --with-gcc-major-version-only --without-isl make %{?_smp_mflags} @@ -1083,19 +1049,15 @@ enablelgo=,go %endif CONFIGURE_OPTS="\ --prefix=%{_prefix} --mandir=%{_mandir} --infodir=%{_infodir} \ - --with-bugurl=https://bugzilla.openanolis.cn \ + --with-bugurl=http://bugzilla.redhat.com/bugzilla \ --enable-shared --enable-threads=posix --enable-checking=release \ %ifarch ppc64le --enable-targets=powerpcle-linux \ %endif -%ifarch ppc64le %{mips} riscv64 s390x loongarch64 - --disable-multilib \ -%else -%if 0%{?anolis} +%ifarch ppc64le %{mips} riscv64 s390x --disable-multilib \ %else --enable-multilib \ -%endif %endif --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions \ --enable-gnu-unique-object --enable-linker-build-id --with-gcc-major-version-only \ @@ -1201,17 +1163,6 @@ CONFIGURE_OPTS="\ %endif %ifnarch sparc sparcv9 ppc --build=%{gcc_target_platform} \ -%endif -%ifarch loongarch64 - --with-arch=loongarch64 \ - --with-abi=lp64 \ - --enable-tls \ - --with-long-double-128 \ - --enable-initfini-array \ - --enable-gnu-indirect-function \ - --disable-emultls \ - --disable-multilib \ - --with-linker-hash-style=gnu \ %endif " @@ -1332,8 +1283,6 @@ then echo "Unpacking annobin sources" rm -fr annobin-* tar xvf %{annobin_source_dir}/latest-annobin.tar.xz - %_update_config_guess - %_update_config_sub # Setting this as a local symbol because using %%global does not appear to work. annobin_dir=$(find . 
-maxdepth 1 -type d -name "annobin*") @@ -2585,17 +2534,6 @@ fi %{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/htmxlintrin.h %{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/vecintrin.h %endif -%ifarch loongarch64 -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/larchintrin.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/lasxintrin.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/lsxintrin.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-tune.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-def.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-protos.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-opts.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-str.h -%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-driver.h -%endif %if %{build_libmpx} %{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/libmpx.spec %endif @@ -3400,9 +3338,8 @@ fi %{ANNOBIN_GCC_PLUGIN_DIR}/gcc-annobin.so.0.0.0 %changelog -* Wed Jul 17 2024 Xue haolin 8.5.0-22.0.1 -- Rebrand for Anolis OS. -- Separate LoongArch's supported patches.(fanpeng@loongson.cn) +* Fri Feb 7 2025 Marek Polacek 8.5.0-23 +- rebuild for CVE-2020-11023 (RHEL-78274) * Thu Apr 18 2024 Marek Polacek 8.5.0-22 - fix ICE in the vectorizer (RHEL-32886) diff --git a/libitm-Add-LoongArch-support.patch b/libitm-Add-LoongArch-support.patch deleted file mode 100644 index 8cb1d50..0000000 --- a/libitm-Add-LoongArch-support.patch +++ /dev/null @@ -1,285 +0,0 @@ -From 59b72352ab2e4e16f28d5e242f83ff37257c5301 Mon Sep 17 00:00:00 2001 -From: Xing Li -Date: Fri, 6 Jan 2023 10:44:00 +0800 -Subject: [PATCH 1/2] libitm: Add LoongArch support. - -Signed-off-by: Xing Li -Signed-off-by: Yang Yujie ---- - libitm/config/loongarch/asm.h | 54 +++++++++++++ - libitm/config/loongarch/sjlj.S | 130 +++++++++++++++++++++++++++++++ - libitm/config/loongarch/target.h | 50 ++++++++++++ - libitm/configure.tgt | 2 + - 4 files changed, 236 insertions(+) - create mode 100644 libitm/config/loongarch/asm.h - create mode 100644 libitm/config/loongarch/sjlj.S - create mode 100644 libitm/config/loongarch/target.h - -diff --git a/libitm/config/loongarch/asm.h b/libitm/config/loongarch/asm.h -new file mode 100644 -index 000000000..e7f881b03 ---- /dev/null -+++ b/libitm/config/loongarch/asm.h -@@ -0,0 +1,54 @@ -+/* Copyright (C) 2014-2018 Free Software Foundation, Inc. -+ Contributed by Loongson Co. Ltd. -+ -+ This file is part of the GNU Transactional Memory Library (libitm). -+ -+ Libitm is free software; you can redistribute it and/or modify it -+ under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 3 of the License, or -+ (at your option) any later version. -+ -+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY -+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ Under Section 7 of GPL version 3, you are granted additional -+ permissions described in the GCC Runtime Library Exception, version -+ 3.1, as published by the Free Software Foundation. 
-+ -+ You should have received a copy of the GNU General Public License and -+ a copy of the GCC Runtime Library Exception along with this program; -+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+ . */ -+ -+#ifndef _LA_ASM_H -+#define _LA_ASM_H -+ -+#if defined(__loongarch64) -+# define GPR_L ld.d -+# define GPR_S st.d -+# define SZ_GPR 8 -+# define ADDSP(si) addi.d $sp, $sp, si -+#elif defined(__loongarch32) -+# define GPR_L ld.w -+# define GPR_S st.w -+# define SZ_GPR 4 -+# define ADDSP(si) addi.w $sp, $sp, si -+#else -+# error Unsupported GPR size (must be 64-bit or 32-bit). -+#endif -+ -+#if defined(__loongarch_hard_float) -+# define FPR_L fld.d -+# define FPR_S fst.d -+# define SZ_FPR 8 -+#elif defined(__loongarch_single_float) -+# define FPR_L fld.s -+# define FPR_S fst.s -+# define SZ_FPR 4 -+#else -+# define SZ_FPR 0 -+#endif -+ -+#endif /* _LA_ASM_H */ -diff --git a/libitm/config/loongarch/sjlj.S b/libitm/config/loongarch/sjlj.S -new file mode 100644 -index 000000000..e8610f9b5 ---- /dev/null -+++ b/libitm/config/loongarch/sjlj.S -@@ -0,0 +1,130 @@ -+/* Copyright (C) 2014-2018 Free Software Foundation, Inc. -+ Contributed by Loongson Co. Ltd. -+ -+ This file is part of the GNU Transactional Memory Library (libitm). -+ -+ Libitm is free software; you can redistribute it and/or modify it -+ under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 3 of the License, or -+ (at your option) any later version. -+ -+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY -+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -+ -+ Under Section 7 of GPL version 3, you are granted additional -+ permissions described in the GCC Runtime Library Exception, version -+ 3.1, as published by the Free Software Foundation. -+ -+ You should have received a copy of the GNU General Public License and -+ a copy of the GCC Runtime Library Exception along with this program; -+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+ . */ -+ -+#include "asmcfi.h" -+#include "asm.h" -+ -+ .text -+ .align 2 -+ .global _ITM_beginTransaction -+ .type _ITM_beginTransaction, @function -+ -+_ITM_beginTransaction: -+ cfi_startproc -+ move $r5, $sp -+ ADDSP(-(12*SZ_GPR+8*SZ_FPR)) -+ cfi_adjust_cfa_offset(12*SZ_GPR+8*SZ_FPR) -+ -+ /* Frame Pointer */ -+ GPR_S $fp, $sp, 0*SZ_GPR -+ cfi_rel_offset(22, 0) -+ -+ /* Return Address */ -+ GPR_S $r1, $sp, 1*SZ_GPR -+ cfi_rel_offset(1, SZ_GPR) -+ -+ /* Caller's $sp */ -+ GPR_S $r5, $sp, 2*SZ_GPR -+ -+ /* Callee-saved scratch GPRs (r23-r31) */ -+ GPR_S $s0, $sp, 3*SZ_GPR -+ GPR_S $s1, $sp, 4*SZ_GPR -+ GPR_S $s2, $sp, 5*SZ_GPR -+ GPR_S $s3, $sp, 6*SZ_GPR -+ GPR_S $s4, $sp, 7*SZ_GPR -+ GPR_S $s5, $sp, 8*SZ_GPR -+ GPR_S $s6, $sp, 9*SZ_GPR -+ GPR_S $s7, $sp, 10*SZ_GPR -+ GPR_S $s8, $sp, 11*SZ_GPR -+ -+#if !defined(__loongarch_soft_float) -+ /* Callee-saved scratch FPRs (f24-f31) */ -+ FPR_S $f24, $sp, 12*SZ_GPR + 0*SZ_FPR -+ FPR_S $f25, $sp, 12*SZ_GPR + 1*SZ_FPR -+ FPR_S $f26, $sp, 12*SZ_GPR + 2*SZ_FPR -+ FPR_S $f27, $sp, 12*SZ_GPR + 3*SZ_FPR -+ FPR_S $f28, $sp, 12*SZ_GPR + 4*SZ_FPR -+ FPR_S $f29, $sp, 12*SZ_GPR + 5*SZ_FPR -+ FPR_S $f30, $sp, 12*SZ_GPR + 6*SZ_FPR -+ FPR_S $f31, $sp, 12*SZ_GPR + 7*SZ_FPR -+#endif -+ move $fp, $sp -+ -+ /* Invoke GTM_begin_transaction with the struct we've just built. 
*/ -+ move $r5, $sp -+ bl %plt(GTM_begin_transaction) -+ -+ /* Return. (no call-saved scratch reg needs to be restored here) */ -+ GPR_L $fp, $sp, 0*SZ_GPR -+ cfi_restore(22) -+ GPR_L $r1, $sp, 1*SZ_GPR -+ cfi_restore(1) -+ -+ ADDSP(12*SZ_GPR+8*SZ_FPR) -+ cfi_adjust_cfa_offset(-(12*SZ_GPR+8*SZ_FPR)) -+ -+ jr $r1 -+ cfi_endproc -+ .size _ITM_beginTransaction, . - _ITM_beginTransaction -+ -+ .align 2 -+ .global GTM_longjmp -+ .hidden GTM_longjmp -+ .type GTM_longjmp, @function -+ -+GTM_longjmp: -+ cfi_startproc -+ GPR_L $s0, $r5, 3*SZ_GPR -+ GPR_L $s1, $r5, 4*SZ_GPR -+ GPR_L $s2, $r5, 5*SZ_GPR -+ GPR_L $s3, $r5, 6*SZ_GPR -+ GPR_L $s4, $r5, 7*SZ_GPR -+ GPR_L $s5, $r5, 8*SZ_GPR -+ GPR_L $s6, $r5, 9*SZ_GPR -+ GPR_L $s7, $r5, 10*SZ_GPR -+ GPR_L $s8, $r5, 11*SZ_GPR -+ -+#if !defined(__loongarch_soft_float) -+ /* Callee-saved scratch FPRs (f24-f31) */ -+ FPR_L $f24, $r5, 12*SZ_GPR + 0*SZ_FPR -+ FPR_L $f25, $r5, 12*SZ_GPR + 1*SZ_FPR -+ FPR_L $f26, $r5, 12*SZ_GPR + 2*SZ_FPR -+ FPR_L $f27, $r5, 12*SZ_GPR + 3*SZ_FPR -+ FPR_L $f28, $r5, 12*SZ_GPR + 4*SZ_FPR -+ FPR_L $f29, $r5, 12*SZ_GPR + 5*SZ_FPR -+ FPR_L $f30, $r5, 12*SZ_GPR + 6*SZ_FPR -+ FPR_L $f31, $r5, 12*SZ_GPR + 7*SZ_FPR -+#endif -+ -+ GPR_L $r7, $r5, 2*SZ_GPR -+ GPR_L $fp, $r5, 0*SZ_GPR -+ GPR_L $r1, $r5, 1*SZ_GPR -+ cfi_def_cfa(5, 0) -+ move $sp, $r7 -+ jr $r1 -+ cfi_endproc -+ .size GTM_longjmp, . - GTM_longjmp -+ -+#ifdef __linux__ -+.section .note.GNU-stack, "", @progbits -+#endif -diff --git a/libitm/config/loongarch/target.h b/libitm/config/loongarch/target.h -new file mode 100644 -index 000000000..2853bf203 ---- /dev/null -+++ b/libitm/config/loongarch/target.h -@@ -0,0 +1,50 @@ -+/* Copyright (C) 2014-2018 Free Software Foundation, Inc. -+ Contributed by Loongson Co. Ltd. -+ -+ This file is part of the GNU Transactional Memory Library (libitm). -+ -+ Libitm is free software; you can redistribute it and/or modify it -+ under the terms of the GNU General Public License as published by -+ the Free Software Foundation; either version 3 of the License, or -+ (at your option) any later version. -+ -+ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY -+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ more details. -+ -+ Under Section 7 of GPL version 3, you are granted additional -+ permissions described in the GCC Runtime Library Exception, version -+ 3.1, as published by the Free Software Foundation. -+ -+ You should have received a copy of the GNU General Public License and -+ a copy of the GCC Runtime Library Exception along with this program; -+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -+ . 
*/ -+ -+namespace GTM HIDDEN { -+ -+typedef struct gtm_jmpbuf -+ { -+ long int fp; /* Frame Pointer: r22 */ -+ long int pc; /* Return Address: r1 */ -+ void *cfa; /* CFA: r3 */ -+ long int gpr[9]; /* Callee-saved scratch GPRs: r23(s0)-r31(s8) */ -+ -+ /* Callee-saved scratch FPRs: f24-f31 */ -+#if defined(__loongarch_double_float) -+ double fpr[8]; -+#elif defined(__loongarch_single_float) -+ float fpr[8]; -+#endif -+ } gtm_jmpbuf; -+ -+#define HW_CACHELINE_SIZE 128 -+ -+static inline void -+cpu_relax (void) -+{ -+ __asm__ volatile ("" : : : "memory"); -+} -+ -+} // namespace GTM -diff --git a/libitm/configure.tgt b/libitm/configure.tgt -index 0cbb0974d..18a06e45f 100644 ---- a/libitm/configure.tgt -+++ b/libitm/configure.tgt -@@ -69,6 +69,8 @@ case "${target_cpu}" in - ARCH=x86 - ;; - -+ loongarch*) ARCH=loongarch ;; -+ - sh*) ARCH=sh ;; - - sparc) --- -2.39.3 - -- Gitee From 4abab6f5f65330c14435a9ce1e86b260435ee435 Mon Sep 17 00:00:00 2001 From: Zhao Hang Date: Thu, 9 Dec 2021 13:13:38 +0000 Subject: [PATCH 2/4] rebrand: Rebrand for Anolis OS --- 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch | 25 ++++++++++++++++++++ gcc.spec | 24 +++++++++++++++---- 2 files changed, 44 insertions(+), 5 deletions(-) create mode 100644 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch diff --git a/0001-gcc-anolis-Rebrand-for-OpenAnolis.patch b/0001-gcc-anolis-Rebrand-for-OpenAnolis.patch new file mode 100644 index 0000000..dfca2f2 --- /dev/null +++ b/0001-gcc-anolis-Rebrand-for-OpenAnolis.patch @@ -0,0 +1,25 @@ +From 19b22393da77dfbe6b7c792955e41620db423b2f Mon Sep 17 00:00:00 2001 +From: xuehaolin +Date: Thu, 21 Jan 2021 21:09:15 +0800 +Subject: [PATCH] gcc anolis Rebrand for OpenAnolis + +--- + zlib/contrib/minizip/configure.ac | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/zlib/contrib/minizip/configure.ac b/zlib/contrib/minizip/configure.ac +index 5b1197097..857ced9b5 100644 +--- a/zlib/contrib/minizip/configure.ac ++++ b/zlib/contrib/minizip/configure.ac +@@ -1,7 +1,7 @@ + # -*- Autoconf -*- + # Process this file with autoconf to produce a configure script. + +-AC_INIT([minizip], [1.2.11], [bugzilla.redhat.com]) ++AC_INIT([minizip], [1.2.11], [https://bugzilla.openanolis.cn]) + AC_CONFIG_SRCDIR([minizip.c]) + AM_INIT_AUTOMAKE([foreign]) + LT_INIT +-- +2.18.4 + diff --git a/gcc.spec b/gcc.spec index e2721a9..c77ee69 100644 --- a/gcc.spec +++ b/gcc.spec @@ -1,3 +1,4 @@ +%define anolis_release .0.1 %global DATE 20210514 %global gitrev a3253c88425835d5b339d6998a1110a66ccd8b44 %global gcc_version 8.5.0 @@ -13,7 +14,7 @@ # Until annobin is fixed (#1519165). %undefine _annotated_build %endif -%global multilib_64_archs sparc64 ppc64 ppc64p7 x86_64 + %if 0%{?rhel} > 7 %global build_ada 0 %global build_objc 0 @@ -101,10 +102,11 @@ %ifarch x86_64 %global multilib_32_arch i686 %endif + Summary: Various compilers (C, C++, Objective-C, ...) Name: gcc Version: %{gcc_version} -Release: %{gcc_release}%{?dist} +Release: %{gcc_release}%{anolis_release}%{?dist} # libgcc, libgfortran, libgomp, libstdc++ and crtstuff have # GCC Runtime Exception. License: GPLv3+ and GPLv3+ with exceptions and GPLv2+ with exceptions and LGPLv2+ and BSD @@ -312,6 +314,8 @@ Patch1000: nvptx-tools-no-ptxas.patch Patch1001: nvptx-tools-build.patch Patch1002: nvptx-tools-glibc.patch +Patch10000: 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch + # On ARM EABI systems, we do want -gnueabi to be part of the # target triple. @@ -938,7 +942,8 @@ cd nvptx-tools-%{nvptx_tools_gitrev} %patch1002 -p1 -b .nvptx-tools-glibc~ cd .. 
-echo 'Red Hat %{version}-%{gcc_release}' > gcc/DEV-PHASE + +echo 'Anolis %{version}-%{gcc_release}%{anolis_release}' > gcc/DEV-PHASE cp -a libstdc++-v3/config/cpu/i{4,3}86/atomicity.h @@ -970,6 +975,8 @@ fi # This test causes fork failures, because it spawns way too many threads rm -f gcc/testsuite/go.test/test/chan/goroutines.go +%patch10000 -p1 + %build # Undo the broken autoconf change in recent Fedora versions @@ -1023,7 +1030,7 @@ CC="$CC" CXX="$CXX" CFLAGS="$OPT_FLAGS" LDFLAGS="$RPM_LD_FLAGS" \ --target nvptx-none --enable-as-accelerator-for=%{gcc_target_platform} \ --enable-languages=c,c++,fortran,lto \ --prefix=%{_prefix} --mandir=%{_mandir} --infodir=%{_infodir} \ - --with-bugurl=http://bugzilla.redhat.com/bugzilla \ + --with-bugurl=https://bugzilla.openanolis.cn \ --enable-checking=release --with-system-zlib \ --with-gcc-major-version-only --without-isl make %{?_smp_mflags} @@ -1049,15 +1056,19 @@ enablelgo=,go %endif CONFIGURE_OPTS="\ --prefix=%{_prefix} --mandir=%{_mandir} --infodir=%{_infodir} \ - --with-bugurl=http://bugzilla.redhat.com/bugzilla \ + --with-bugurl=https://bugzilla.openanolis.cn \ --enable-shared --enable-threads=posix --enable-checking=release \ %ifarch ppc64le --enable-targets=powerpcle-linux \ %endif %ifarch ppc64le %{mips} riscv64 s390x --disable-multilib \ +%else +%if 0%{?anolis} + --disable-multilib \ %else --enable-multilib \ +%endif %endif --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions \ --enable-gnu-unique-object --enable-linker-build-id --with-gcc-major-version-only \ @@ -3338,6 +3349,9 @@ fi %{ANNOBIN_GCC_PLUGIN_DIR}/gcc-annobin.so.0.0.0 %changelog +* Wed Feb 12 2025 Xue haolin 8.5.0-23.0.1 +- Rebrand for Anolis OS. + * Fri Feb 7 2025 Marek Polacek 8.5.0-23 - rebuild for CVE-2020-11023 (RHEL-78274) -- Gitee From 395f358c42cd79b5eaee944be4a7385e6257b891 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Tue, 16 Jan 2024 14:18:30 +0800 Subject: [PATCH 3/4] Re-enable support for LoongArch Signed-off-by: Peng Fan --- ...-LoongArch-support-for-anolis-a8-gcc.patch | 208589 +++++++++++++++ ...LoongArch-Fixup-configure-file-error.patch | 153 + ...rch-Rename-config-file-for-loongarch.patch | 18 + ...rch-fix-multilib-osdirnames-to-lib64.patch | 27 + Fix-dwarf2cfi-error.patch | 33 + ...ocessing-to-allow-in-function-argume.patch | 220 + LoongArch-Add-missing-headers.patch | 65 + LoongArch-Add-sanitizer-support.patch | 1100 + ...x-atomic_exchange-expanding-PR107713.patch | 164 + ...-NOOP_TRUNCATION-and-fix-extendsidi2.patch | 101 + Sync-to-gcc-8-vec-36.patch | 30492 +++ gcc.spec | 65 +- libitm-Add-LoongArch-support.patch | 285 + 13 files changed, 241304 insertions(+), 8 deletions(-) create mode 100644 0001-Add-LoongArch-support-for-anolis-a8-gcc.patch create mode 100644 0001-LoongArch-Fixup-configure-file-error.patch create mode 100644 0002-LoongArch-Rename-config-file-for-loongarch.patch create mode 100644 0002-loongarch-fix-multilib-osdirnames-to-lib64.patch create mode 100644 Fix-dwarf2cfi-error.patch create mode 100644 Improve-specs-processing-to-allow-in-function-argume.patch create mode 100644 LoongArch-Add-missing-headers.patch create mode 100644 LoongArch-Add-sanitizer-support.patch create mode 100644 LoongArch-Fix-atomic_exchange-expanding-PR107713.patch create mode 100644 LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch create mode 100644 Sync-to-gcc-8-vec-36.patch create mode 100644 libitm-Add-LoongArch-support.patch diff --git a/0001-Add-LoongArch-support-for-anolis-a8-gcc.patch 
b/0001-Add-LoongArch-support-for-anolis-a8-gcc.patch new file mode 100644 index 0000000..bcc40a4 --- /dev/null +++ b/0001-Add-LoongArch-support-for-anolis-a8-gcc.patch @@ -0,0 +1,208589 @@ +From fac81a61084fa8cc05a39c8d8553accf237cbe05 Mon Sep 17 00:00:00 2001 +From: yala +Date: Fri, 10 Jun 2022 14:17:05 +0800 +Subject: [PATCH 1/2] Add LoongArch support for anolis a8 gcc + +Change-Id: Ia517b342881e0486b1cd143725f4c064962c998d +--- + config.guess | 3 + + config.sub | 8 +- + config/mt-loongnix-gnu | 1 + + config/picflag.m4 | 3 + + configure | 14 +- + configure.ac | 13 +- + .../config/loongarch/loongarch-common.c | 68 + + gcc/config.gcc | 196 +- + gcc/config.host | 12 + + gcc/config/host-linux.c | 2 + + gcc/config/loongarch/constraints.md | 389 + + gcc/config/loongarch/driver-native.c | 82 + + gcc/config/loongarch/elf.h | 50 + + gcc/config/loongarch/frame-header-opt.c | 292 + + gcc/config/loongarch/generic.md | 109 + + gcc/config/loongarch/genopt.sh | 110 + + gcc/config/loongarch/gnu-user.h | 132 + + gcc/config/loongarch/larchintrin.h | 386 + + gcc/config/loongarch/lasx.md | 4825 + + gcc/config/loongarch/lasxintrin.h | 5139 + + gcc/config/loongarch/linux-common.h | 68 + + gcc/config/loongarch/linux.h | 33 + + gcc/config/loongarch/loongarch-builtins.c | 3152 + + gcc/config/loongarch/loongarch-c.c | 135 + + gcc/config/loongarch/loongarch-cpus.def | 38 + + gcc/config/loongarch/loongarch-d.c | 31 + + gcc/config/loongarch/loongarch-ftypes.def | 719 + + gcc/config/loongarch/loongarch-modes.def | 64 + + gcc/config/loongarch/loongarch-opts.h | 34 + + gcc/config/loongarch/loongarch-protos.h | 290 + + gcc/config/loongarch/loongarch-tables.opt | 34 + + gcc/config/loongarch/loongarch.c | 10465 + + gcc/config/loongarch/loongarch.h | 2145 + + gcc/config/loongarch/loongarch.md | 4320 + + gcc/config/loongarch/loongarch.opt | 171 + + gcc/config/loongarch/lsx.md | 3181 + + gcc/config/loongarch/lsx2.md | 1091 + + gcc/config/loongarch/lsxintrin.h | 4980 + + gcc/config/loongarch/predicates.md | 639 + + gcc/config/loongarch/rtems.h | 39 + + gcc/config/loongarch/sde.opt | 28 + + gcc/config/loongarch/sync.md | 616 + + gcc/config/loongarch/t-linux | 23 + + gcc/config/loongarch/t-loongarch | 45 + + gcc/config/loongarch/x-native | 3 + + gcc/configure.ac | 35 +- + gcc/targhooks.c | 2 +- + gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C | 2 +- + gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C | 2 +- + gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C | 2 +- + gcc/testsuite/gcc.dg/20020312-2.c | 2 + + gcc/testsuite/gcc.dg/loop-8.c | 2 +- + gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c | 2 +- + .../loongarch/insn_correctness_check.c | 159432 +++++++++++++++ + .../gcc.target/loongarch/lasx-builtin.c | 1509 + + .../gcc.target/loongarch/loongarch.exp | 40 + + .../gcc.target/loongarch/lsx-builtin.c | 1461 + + .../gcc.target/loongarch/tst-asm-const.c | 16 + + .../mips/loongson-shift-count-truncated-1.c | 6 +- + gcc/testsuite/gcc.target/mips/loongson-simd.c | 4 +- + gcc/testsuite/gcc.target/mips/mips.exp | 23 + + gcc/testsuite/go.test/go-test.exp | 3 + + gcc/testsuite/lib/target-supports.exp | 64 +- + libgcc/config.host | 40 + + libgcc/config/loongarch/crtfastmath.c | 50 + + libgcc/config/loongarch/crti.S | 43 + + libgcc/config/loongarch/crtn.S | 39 + + libgcc/config/loongarch/gthr-loongnixsde.h | 237 + + libgcc/config/loongarch/lib2funcs.c | 23 + + libgcc/config/loongarch/linux-unwind.h | 81 + + libgcc/config/loongarch/sfp-machine.h | 148 + + libgcc/config/loongarch/t-crtstuff | 5 + + libgcc/config/loongarch/t-elf | 3 + + 
libgcc/config/loongarch/t-loongarch | 9 + + libgcc/config/loongarch/t-loongarch64 | 1 + + libgcc/config/loongarch/t-sdemtk | 3 + + libgcc/config/loongarch/t-softfp-tf | 3 + + libgcc/config/loongarch/t-vr | 0 + libgcc/configure.ac | 2 +- + libgomp/configure.tgt | 4 + + .../22_locale/time_get/get_date/wchar_t/4.cc | 24 +- + 81 files changed, 207456 insertions(+), 44 deletions(-) + create mode 100644 config/mt-loongnix-gnu + create mode 100644 gcc/common/config/loongarch/loongarch-common.c + create mode 100644 gcc/config/loongarch/constraints.md + create mode 100644 gcc/config/loongarch/driver-native.c + create mode 100644 gcc/config/loongarch/elf.h + create mode 100644 gcc/config/loongarch/frame-header-opt.c + create mode 100644 gcc/config/loongarch/generic.md + create mode 100644 gcc/config/loongarch/genopt.sh + create mode 100644 gcc/config/loongarch/gnu-user.h + create mode 100644 gcc/config/loongarch/larchintrin.h + create mode 100644 gcc/config/loongarch/lasx.md + create mode 100644 gcc/config/loongarch/lasxintrin.h + create mode 100644 gcc/config/loongarch/linux-common.h + create mode 100644 gcc/config/loongarch/linux.h + create mode 100644 gcc/config/loongarch/loongarch-builtins.c + create mode 100644 gcc/config/loongarch/loongarch-c.c + create mode 100644 gcc/config/loongarch/loongarch-cpus.def + create mode 100644 gcc/config/loongarch/loongarch-d.c + create mode 100644 gcc/config/loongarch/loongarch-ftypes.def + create mode 100644 gcc/config/loongarch/loongarch-modes.def + create mode 100644 gcc/config/loongarch/loongarch-opts.h + create mode 100644 gcc/config/loongarch/loongarch-protos.h + create mode 100644 gcc/config/loongarch/loongarch-tables.opt + create mode 100644 gcc/config/loongarch/loongarch.c + create mode 100644 gcc/config/loongarch/loongarch.h + create mode 100644 gcc/config/loongarch/loongarch.md + create mode 100644 gcc/config/loongarch/loongarch.opt + create mode 100644 gcc/config/loongarch/lsx.md + create mode 100644 gcc/config/loongarch/lsx2.md + create mode 100644 gcc/config/loongarch/lsxintrin.h + create mode 100644 gcc/config/loongarch/predicates.md + create mode 100644 gcc/config/loongarch/rtems.h + create mode 100644 gcc/config/loongarch/sde.opt + create mode 100644 gcc/config/loongarch/sync.md + create mode 100644 gcc/config/loongarch/t-linux + create mode 100644 gcc/config/loongarch/t-loongarch + create mode 100644 gcc/config/loongarch/x-native + create mode 100644 gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/lasx-builtin.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/loongarch.exp + create mode 100644 gcc/testsuite/gcc.target/loongarch/lsx-builtin.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/tst-asm-const.c + create mode 100644 libgcc/config/loongarch/crtfastmath.c + create mode 100644 libgcc/config/loongarch/crti.S + create mode 100644 libgcc/config/loongarch/crtn.S + create mode 100644 libgcc/config/loongarch/gthr-loongnixsde.h + create mode 100644 libgcc/config/loongarch/lib2funcs.c + create mode 100644 libgcc/config/loongarch/linux-unwind.h + create mode 100644 libgcc/config/loongarch/sfp-machine.h + create mode 100644 libgcc/config/loongarch/t-crtstuff + create mode 100644 libgcc/config/loongarch/t-elf + create mode 100644 libgcc/config/loongarch/t-loongarch + create mode 100644 libgcc/config/loongarch/t-loongarch64 + create mode 100644 libgcc/config/loongarch/t-sdemtk + create mode 100644 libgcc/config/loongarch/t-softfp-tf + create mode 100644 
libgcc/config/loongarch/t-vr + +diff --git a/config.guess b/config.guess +index 588fe82a4..edfd052ae 100755 +--- a/config.guess ++++ b/config.guess +@@ -985,6 +985,9 @@ EOF + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; ++ loongarch64:Linux:*:*) ++ echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; +diff --git a/config.sub b/config.sub +index f2632cd8a..11ee24619 100755 +--- a/config.sub ++++ b/config.sub +@@ -2,7 +2,7 @@ + # Configuration validation subroutine script. + # Copyright 1992-2018 Free Software Foundation, Inc. + +-timestamp='2018-01-01' ++timestamp='2020-01-04' + + # This file is free software; you can redistribute it and/or modify it + # under the terms of the GNU General Public License as published by +@@ -142,7 +142,7 @@ case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; +- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ ++ -dec* | -mips* | -loongarch* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ +@@ -288,6 +288,8 @@ case $basic_machine in + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ ++ | loongarch \ ++ | loongarch64 \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ +@@ -415,6 +417,8 @@ case $basic_machine in + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ ++ | loongarch-* \ ++ | loongarch64-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ +diff --git a/config/mt-loongnix-gnu b/config/mt-loongnix-gnu +new file mode 100644 +index 000000000..713c4e379 +--- /dev/null ++++ b/config/mt-loongnix-gnu +@@ -0,0 +1 @@ ++include $(srcdir)/config/mt-gnu +diff --git a/config/picflag.m4 b/config/picflag.m4 +index 8b106f9af..0aefcf619 100644 +--- a/config/picflag.m4 ++++ b/config/picflag.m4 +@@ -44,6 +44,9 @@ case "${$2}" in + # sets the default TLS model and affects inlining. + $1=-fPIC + ;; ++ loongarch*-*-*) ++ $1=-fpic ++ ;; + mips-sgi-irix6*) + # PIC is the default. + ;; +diff --git a/configure b/configure +index 060139551..633db33c5 100755 +--- a/configure ++++ b/configure +@@ -2974,7 +2974,7 @@ case "${ENABLE_GOLD}" in + # Check for target supported by gold. 
+ case "${target}" in + i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \ +- | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-*) ++ | aarch64*-*-* | tilegx*-*-* | mips*-*-* | loongarch*-*-* | s390*-*-*) + configdirs="$configdirs gold" + if test x${ENABLE_GOLD} = xdefault; then + default_ld=gold +@@ -3507,6 +3507,9 @@ case "${target}" in + mips*-*-*) + libgloss_dir=mips + ;; ++ loongarch*-*-*) ++ libgloss_dir=loongarch ++ ;; + powerpc*-*-*) + libgloss_dir=rs6000 + ;; +@@ -3863,6 +3866,12 @@ case "${target}" in + wasm32-*-*) + noconfigdirs="$noconfigdirs ld" + ;; ++ loongarch*-*-linux*) ++ ;; ++ loongarch*-*-*) ++ noconfigdirs="$noconfigdirs gprof" ++ ;; ++ + esac + + # If we aren't building newlib, then don't build libgloss, since libgloss +@@ -6905,6 +6914,9 @@ case "${target}" in + mips*-*-*linux* | mips*-*-gnu*) + target_makefile_frag="config/mt-mips-gnu" + ;; ++ loongarch*-*-*linux* | loongarch*-*-gnu*) ++ target_makefile_frag="config/mt-loongarch-gnu" ++ ;; + nios2-*-elf*) + target_makefile_frag="config/mt-nios2-elf" + ;; +diff --git a/configure.ac b/configure.ac +index c34333365..59e0fc035 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -345,7 +345,7 @@ case "${ENABLE_GOLD}" in + # Check for target supported by gold. + case "${target}" in + i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \ +- | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-*) ++ | aarch64*-*-* | tilegx*-*-* | mips*-*-* | loongarch*-*-* | s390*-*-*) + configdirs="$configdirs gold" + if test x${ENABLE_GOLD} = xdefault; then + default_ld=gold +@@ -838,6 +838,9 @@ case "${target}" in + mips*-*-*) + libgloss_dir=mips + ;; ++ loongarch*-*-*) ++ libgloss_dir=loongarch ++ ;; + powerpc*-*-*) + libgloss_dir=rs6000 + ;; +@@ -1194,6 +1197,11 @@ case "${target}" in + wasm32-*-*) + noconfigdirs="$noconfigdirs ld" + ;; ++ loongarch*-*-linux*) ++ ;; ++ loongarch*-*-*) ++ noconfigdirs="$noconfigdirs gprof" ++ ;; + esac + + # If we aren't building newlib, then don't build libgloss, since libgloss +@@ -2499,6 +2507,9 @@ case "${target}" in + mips*-*-*linux* | mips*-*-gnu*) + target_makefile_frag="config/mt-mips-gnu" + ;; ++ loongarch*-*-*linux* | loongarch*-*-gnu*) ++ target_makefile_frag="config/mt-loongarch-gnu" ++ ;; + nios2-*-elf*) + target_makefile_frag="config/mt-nios2-elf" + ;; +diff --git a/gcc/common/config/loongarch/loongarch-common.c b/gcc/common/config/loongarch/loongarch-common.c +new file mode 100644 +index 000000000..afbbc3ad0 +--- /dev/null ++++ b/gcc/common/config/loongarch/loongarch-common.c +@@ -0,0 +1,68 @@ ++/* Common hooks for LARCH. ++ Copyright (C) 1989-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. 
*/ ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++#include "common/common-target.h" ++#include "common/common-target-def.h" ++#include "opts.h" ++#include "flags.h" ++ ++#undef TARGET_OPTION_OPTIMIZATION_TABLE ++#define TARGET_OPTION_OPTIMIZATION_TABLE loongarch_option_optimization_table ++ ++/* Set default optimization options. */ ++static const struct default_options loongarch_option_optimization_table[] = ++{ ++ { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, ++ { OPT_LEVELS_NONE, 0, NULL, 0 } ++}; ++ ++/* Implement TARGET_HANDLE_OPTION. */ ++ ++static bool ++loongarch_handle_option (struct gcc_options *opts, ++ struct gcc_options *opts_set ATTRIBUTE_UNUSED, ++ const struct cl_decoded_option *decoded, ++ location_t loc ATTRIBUTE_UNUSED) ++{ ++ size_t code = decoded->opt_index; ++ ++ switch (code) ++ { ++ case OPT_mno_flush_func: ++ opts->x_loongarch_cache_flush_func = NULL; ++ return true; ++ ++ default: ++ return true; ++ } ++} ++ ++#undef TARGET_DEFAULT_TARGET_FLAGS ++#define TARGET_DEFAULT_TARGET_FLAGS \ ++ (TARGET_DEFAULT \ ++ | TARGET_CPU_DEFAULT \ ++ | MASK_CHECK_ZERO_DIV) ++#undef TARGET_HANDLE_OPTION ++#define TARGET_HANDLE_OPTION loongarch_handle_option ++ ++struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER; +diff --git a/gcc/config.gcc b/gcc/config.gcc +index a6140f900..ba061efa4 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -282,7 +282,7 @@ case ${target} in + | *-*-sysv* \ + | vax-*-vms* \ + ) +- echo "*** Configuration ${target} not supported" 1>&2 ++ echo "*** Configuration ${target} not supported " 1>&2 + exit 1 + ;; + esac +@@ -425,6 +425,13 @@ hppa*-*-*) + lm32*) + extra_options="${extra_options} g.opt" + ;; ++loongarch*-*-*) ++ cpu_type=loongarch ++ d_target_objs="loongarch-d.o" ++ extra_headers="lasxintrin.h lsxintrin.h larchintrin.h" ++ extra_objs="frame-header-opt.o loongarch-c.o loongarch-builtins.o" ++ extra_options="${extra_options} g.opt fused-madd.opt loongarch/loongarch-tables.opt" ++ ;; + m32r*-*-*) + cpu_type=m32r + extra_options="${extra_options} g.opt" +@@ -2185,6 +2192,55 @@ mips*-*-linux*) # Linux MIPS, either endian. + tmake_file="${tmake_file} mips/t-linux64" + fi + ;; ++loongarch*-*-linux*) ++ case ${with_abi} in ++ "") ++ echo "not specify ABI, default is lp64 for loongarch64" ++ with_abi=lp64 # for default ++ ;; ++ lpx32) ++ ;; ++ lp32) ++ ;; ++ lp64) ++ ;; ++ *) ++ echo "Unknown ABI used in --with-abi=$with_abi" ++ exit 1 ++ esac ++ ++ enable_multilib="yes" ++ loongarch_multilibs="${with_multilib_list}" ++ if test "$loongarch_multilibs" = "default"; then ++ loongarch_multilibs="${with_abi}" ++ fi ++ loongarch_multilibs=`echo $loongarch_multilibs | sed -e 's/,/ /g'` ++ for loongarch_multilib in ${loongarch_multilibs}; do ++ case ${loongarch_multilib} in ++ lp64 | lpx32 | lp32 ) ++ TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${loongarch_multilib}" ++ ;; ++ *) ++ echo "--with-multilib-list=${loongarch_multilib} not supported." 
++ exit 1 ++ esac ++ done ++ TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'` ++ ++ if test `for one_abi in ${loongarch_multilibs}; do if [ x\$one_abi = x$with_abi ]; then echo 1; exit 0; fi; done; echo 0;` = "0"; then ++ echo "--with-abi=${with_abi} must be one of --with-multilib-list=${with_multilib_list}" ++ exit 1 ++ fi ++ ++ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/linux-common.h" ++ extra_options="${extra_options} linux-android.opt" ++ tmake_file="${tmake_file} loongarch/t-linux" ++ gnu_ld=yes ++ gas=yes ++ # Force .init_array support. The configure script cannot always ++ # automatically detect that GAS supports it, yet we require it. ++ gcc_cv_initfini_array=yes ++ ;; + mips*-mti-elf*) + tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h mips/n32-elf.h mips/sde.h mips/mti-elf.h" + tmake_file="mips/t-mti-elf" +@@ -2239,6 +2295,31 @@ mips*-sde-elf*) + ;; + esac + ;; ++loongarch*-sde-elf*) ++ tm_file="elfos.h newlib-stdint.h ${tm_file} loongarch/elf.h loongarch/sde.h" ++# tmake_file="loongarch/t-sde" ++ extra_options="${extra_options} loongarch/sde.opt" ++ case "${with_newlib}" in ++ yes) ++ # newlib / libgloss. ++ ;; ++ *) ++ # MIPS toolkit libraries. ++ tm_file="$tm_file loongarch/sdemtk.h" ++ tmake_file="$tmake_file loongarch/t-sdemtk" ++ case ${enable_threads} in ++ "" | yes | loongarchsde) ++ thread_file='loongarchsde' ++ ;; ++ esac ++ ;; ++ esac ++ case ${target} in ++ loongarch*) ++ tm_defines="LARCH_ISA_DEFAULT=0 LARCH_ABI_DEFAULT=ABILP64" ++ ;; ++ esac ++ ;; + mipsisa32-*-elf* | mipsisa32el-*-elf* | \ + mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \ + mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \ +@@ -2524,7 +2605,7 @@ powerpc*-*-linux*) + esac + case ${target} in + powerpc64*-*-linux*spe* | powerpc64*-*-linux*paired*) +- echo "*** Configuration ${target} not supported" 1>&2 ++ echo "*** Configuration ${target} not supported " 1>&2 + exit 1 + ;; + powerpc*-*-linux*spe* | powerpc*-*-linux*paired*) +@@ -3143,7 +3224,7 @@ m32c-*-elf*) + cxx_target_objs="m32c-pragma.o" + ;; + *) +- echo "*** Configuration ${target} not supported" 1>&2 ++ echo "*** Configuration ${target} not supported " 1>&2 + exit 1 + ;; + esac +@@ -4174,7 +4255,31 @@ case "${target}" in + ;; + + mips*-*-*) +- supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci lxc1-sxc1 madd4" ++ supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci lxc1-sxc1 madd4 fix-loongson3-llsc" ++ ++ all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls lxc1-sxc1 madd4 fix-loongson3-llsc" ++ for option in $all_defaults ++ do ++ eval "val=\$with_"`echo $option | sed s/-/_/g` ++ if test -n "$val"; then ++ case " $supported_defaults " in ++ *" $option "*) ++ ;; ++ *) ++ echo "This target does not support --with-$option." 
2>&1 ++ echo "Valid --with options are: $supported_defaults" 2>&1 ++ exit 1 ++ ;; ++ esac ++ ++ if test "x$t" = x ++ then ++ t="{ \"$option\", \"$val\" }" ++ else ++ t="${t}, { \"$option\", \"$val\" }" ++ fi ++ fi ++ done + + case ${with_float} in + "" | soft | hard) +@@ -4327,6 +4432,75 @@ case "${target}" in + exit 1 + ;; + esac ++ ++ case ${with_fix_loongson3_llsc} in ++ yes) ++ with_fix_loongson3_llsc=fix-loongson3-llsc ++ ;; ++ no) ++ with_fix_loongson3_llsc=no-fix-loongson3-llsc ++ ;; ++ "") ++ ;; ++ *) ++ echo "Unknown fix-loongson3-llsc type used in --with-fix-loongson3-llsc" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ;; ++ ++ loongarch*-*-*) ++ supported_defaults="abi arch float fpu tune" ++ ++ case ${with_arch} in ++ loongarch64 | loongarch32) ++ # OK ++ default_loongarch_arch=$with_arch ++ ;; ++ "") ++ # fallback ++ default_loongarch_arch=loongarch64 ++ ;; ++ *) ++ echo "Unknown arch given in --with-arch=$with_arch, available choices are: loongarch64" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ case ${with_abi} in ++ lp64 | lp32) ++ # OK ++ default_loongarch_abi=$with_abi ++ ;; ++ "") ++ # fallback ++ default_loongarch_abi=lp64 ++ ;; ++ *) ++ echo "Unknown ABI given in --with-abi=$with_abi, available choices are: lp32 lp64" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ case ${with_float} in ++ "" | soft | hard) ++ # OK ++ ;; ++ *) ++ echo "Unknown floating point type used in --with-float=$with_float" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ case ${with_fpu} in ++ "" | single | double) ++ # OK ++ ;; ++ *) ++ echo "Unknown fpu type used in --with-fpu=$with_fpu" 1>&2 ++ exit 1 ++ ;; ++ esac + ;; + + nds32*-*-*) +@@ -4760,6 +4934,18 @@ case ${target} in + tmake_file="mips/t-mips $tmake_file" + ;; + ++ loongarch*-*-*) ++ case ${default_loongarch_arch} in ++ loongarch64) tm_defines="$tm_defines LARCH_ISA_DEFAULT=0" ;; ++ loongarch32) tm_defines="$tm_defines LARCH_ISA_DEFAULT=1" ;; ++ esac ++ case ${default_loongarch_abi} in ++ lp64) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP64" ;; ++ lp32) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP32" ;; ++ esac ++ tmake_file="loongarch/t-loongarch $tmake_file" ++ ;; ++ + powerpc*-*-* | rs6000-*-*) + # FIXME: The PowerPC port uses the value set at compile time, + # although it's only cosmetic. 
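
The LARCH_ISA_DEFAULT and LARCH_ABI_DEFAULT symbols injected through tm_defines above become ordinary preprocessor macros when the target headers are compiled. As a rough sketch of that plumbing: only the macro names and the ABILP64/ABILP32 spellings mirror the hunk above; the fallback logic below is illustrative, not the actual loongarch.h contents.

    /* Hypothetical sketch of a target header consuming the configure-time
       defaults that config.gcc injects via tm_defines.  */
    #define ABILP32 0
    #define ABILP64 1

    #ifndef LARCH_ISA_DEFAULT
    # define LARCH_ISA_DEFAULT 0        /* 0 selects loongarch64, 1 loongarch32.  */
    #endif
    #ifndef LARCH_ABI_DEFAULT
    # define LARCH_ABI_DEFAULT ABILP64  /* Chosen by --with-abi at configure time.  */
    #endif

Since the case statements above always emit both symbols (note their fallback arms), the #ifndef guards in this sketch are only a safety net.
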
+@@ -4822,7 +5008,7 @@ case ${target} in + esac + + t= +-all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls lxc1-sxc1 madd4" ++all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu nan fp_32 odd_spreg_32 divide llsc mips-plt synci tls lxc1-sxc1 madd4 fix-loongson3-llsc" + for option in $all_defaults + do + eval "val=\$with_"`echo $option | sed s/-/_/g` +diff --git a/gcc/config.host b/gcc/config.host +index c65569da2..d23dae4ac 100644 +--- a/gcc/config.host ++++ b/gcc/config.host +@@ -139,6 +139,18 @@ case ${host} in + host_extra_gcc_objs="driver-native.o" + host_xmake_file="${host_xmake_file} mips/x-native" + ;; ++ loongarch*-*-linux*) ++ host_extra_gcc_objs="driver-native.o" ++ host_xmake_file="${host_xmake_file} loongarch/x-native" ++ ;; ++ esac ++ ;; ++ loongarch*-*-linux*) ++ case ${target} in ++ loongarch*-*-linux*) ++ host_extra_gcc_objs="driver-native.o" ++ host_xmake_file="${host_xmake_file} loongarch/x-native" ++ ;; + esac + ;; + rs6000-*-* \ +diff --git a/gcc/config/host-linux.c b/gcc/config/host-linux.c +index 4696e413a..95fc19196 100644 +--- a/gcc/config/host-linux.c ++++ b/gcc/config/host-linux.c +@@ -94,6 +94,8 @@ + # define TRY_EMPTY_VM_SPACE 0x60000000 + #elif defined(__mips__) && defined(__LP64__) + # define TRY_EMPTY_VM_SPACE 0x8000000000 ++#elif defined(__loongarch__) && defined(__LP64__) ++# define TRY_EMPTY_VM_SPACE 0x8000000000 + #elif defined(__mips__) + # define TRY_EMPTY_VM_SPACE 0x60000000 + #else +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +new file mode 100644 +index 000000000..ae8596107 +--- /dev/null ++++ b/gcc/config/loongarch/constraints.md +@@ -0,0 +1,389 @@ ++;; Constraint definitions for LARCH. ++;; Copyright (C) 2006-2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++;; Register constraints ++ ++;; "a" A constant call global and noplt address. ++;; "b" ALL_REGS ++;; "c" A constant call local address. ++;; "d" - ++;; "e" JALR_REGS ++;; "f" FP_REGS ++;; "g" * ++;; "h" A constant call plt address. ++;; "i" "Matches a general integer constant." ++;; "j" SIBCALL_REGS ++;; "k" * ++;; "l" "A signed 16-bit constant ." ++;; "m" "A memory operand whose address is formed by a base register and offset ++;; that is suitable for use in instructions with the same addressing mode ++;; as @code{st.w} and @code{ld.w}." ++;; "n" "Matches a non-symbolic integer constant." ++;; "o" "Matches an offsettable memory reference." ++;; "p" "Matches a general address." ++;; "q" CSR_REGS ++;; "r" GENERAL_REGS ++;; "s" "Matches a symbolic integer constant." ++;; "t" A constant call weak address ++;; "u" - ++;; "v" - ++;; "w" "Matches any valid memory." 
++;; "x" - ++;; "y" GR_REGS ++;; "z" ST_REGS ++;; "A" - ++;; "B" - ++;; "C" - ++;; "D" - ++;; "E" "Matches a floating-point constant." ++;; "F" "Matches a floating-point constant." ++;; "G" "Floating-point zero." ++;; "H" - ++;; "I" "A signed 12-bit constant (for arithmetic instructions)." ++;; "J" "Integer zero." ++;; "K" "An unsigned 12-bit constant (for logic instructions)." ++;; "L" "A signed 32-bit constant in which the lower 12 bits are zero. ++;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu} or @code{ori}." ++;; "N" "A constant in the range -65535 to -1 (inclusive)." ++;; "O" "A signed 15-bit constant." ++;; "P" "A constant in the range 1 to 65535 (inclusive)." ++;; "Q" "A signed 12-bit constant" ++;; "R" "An address that can be used in a non-macro load or store." ++;; "S" "A constant call address." ++;; "T" - ++;; "U" - ++;; "V" "Matches a non-offsettable memory reference." ++;; "W" "A memory address based on a member of @code{BASE_REG_CLASS}. This is ++;; true for all references (although it can sometimes be implicit ++;; if @samp{!TARGET_EXPLICIT_RELOCS})." ++;; "X" "Matches anything." ++;; "Y" - ++;; "YG" ++;; "A vector zero." ++;; "YA" ++;; "An unsigned 6-bit constant." ++;; "YB" ++;; "A signed 10-bit constant." ++;; "Yb" ++;; "Yd" ++;; "A constant @code{move_operand} that can be safely loaded into @code{$25} ++;; using @code{la}." ++;; "Yh" ++;; "Yw" ++;; "Yx" ++;; "YI" ++;; "A replicated vector const in which the replicated value is in the range ++;; [-512,511]." ++;; "YC" ++;; "A replicated vector const in which the replicated value has a single ++;; bit set." ++;; "YZ" ++;; "A replicated vector const in which the replicated value has a single ++;; bit clear." ++;; "Z" - ++;; "ZC" ++;; "A memory operand whose address is formed by a base register and offset ++;; that is suitable for use in instructions with the same addressing mode ++;; as @code{ll.w} and @code{sc.w}." ++;; "ZD" ++;; "An address suitable for a @code{prefetch} instruction, or for any other ++;; instruction with the same addressing mode as @code{prefetch}." ++;; "ZR" ++;; "An address valid for loading/storing register exclusive" ++;; "ZB" ++;; "An address that is held in a general-purpose register. ++;; The offset is zero" ++ ++ ++(define_constraint "c" ++ "@internal ++ A constant call local address." ++ (match_operand 0 "is_const_call_local_symbol")) ++ ++(define_constraint "a" ++ "@internal ++ A constant call global and noplt address." ++ (match_operand 0 "is_const_call_global_noplt_symbol")) ++ ++(define_constraint "h" ++ "@internal ++ A constant call plt address." ++ (match_operand 0 "is_const_call_plt_symbol")) ++ ++(define_constraint "t" ++ "@internal ++ A constant call weak address." ++ (match_operand 0 "is_const_call_weak_symbol")) ++ ++(define_register_constraint "e" "JALR_REGS" ++ "@internal") ++ ++(define_register_constraint "q" "CSR_REGS" ++ "A general-purpose register except for $r0 and $r1 for csr.") ++ ++(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" ++ "A floating-point register (if available).") ++ ++(define_register_constraint "b" "ALL_REGS" ++ "@internal") ++ ++(define_register_constraint "j" "SIBCALL_REGS" ++ "@internal") ++ ++(define_constraint "l" ++ "A signed 16-bit constant ." 
++ (and (match_code "const_int") ++ (match_test "IMM16_OPERAND (ival)"))) ++ ++(define_register_constraint "y" "GR_REGS" ++ "Equivalent to @code{r}; retained for backwards compatibility.") ++ ++(define_register_constraint "z" "ST_REGS" ++ "A floating-point condition code register.") ++ ++(define_constraint "kf" ++ "@internal" ++ (match_operand 0 "force_to_mem_operand")) ++ ++;; This is a normal rather than a register constraint because we can ++;; never use the stack pointer as a reload register. ++(define_constraint "ks" ++ "@internal" ++ (and (match_code "reg") ++ (match_test "REGNO (op) == STACK_POINTER_REGNUM"))) ++ ++;; Integer constraints ++ ++(define_constraint "I" ++ "A signed 12-bit constant (for arithmetic instructions)." ++ (and (match_code "const_int") ++ (match_test "SMALL_OPERAND (ival)"))) ++ ++(define_constraint "J" ++ "Integer zero." ++ (and (match_code "const_int") ++ (match_test "ival == 0"))) ++ ++(define_constraint "K" ++ "An unsigned 12-bit constant (for logic instructions)." ++ (and (match_code "const_int") ++ (match_test "SMALL_OPERAND_UNSIGNED (ival)"))) ++ ++(define_constraint "u" ++ "An unsigned 12-bit constant (for logic instructions)." ++ (and (match_code "const_int") ++ (match_test "LU32I_OPERAND (ival)"))) ++ ++(define_constraint "v" ++ "An unsigned 12-bit constant (for logic instructions)." ++ (and (match_code "const_int") ++ (match_test "LU52I_OPERAND (ival)"))) ++ ++(define_constraint "L" ++ "A signed 32-bit constant in which the lower 12 bits are zero. ++ Such constants can be loaded using @code{lui}." ++ (and (match_code "const_int") ++ (match_test "LUI_OPERAND (ival)"))) ++ ++(define_constraint "M" ++ "A constant that cannot be loaded using @code{lui}, @code{addiu} ++ or @code{ori}." ++ (and (match_code "const_int") ++ (not (match_test "SMALL_OPERAND (ival)")) ++ (not (match_test "SMALL_OPERAND_UNSIGNED (ival)")) ++ (not (match_test "LUI_OPERAND (ival)")))) ++ ++(define_constraint "N" ++ "A constant in the range -65535 to -1 (inclusive)." ++ (and (match_code "const_int") ++ (match_test "ival >= -0xffff && ival < 0"))) ++ ++(define_constraint "O" ++ "A signed 15-bit constant." ++ (and (match_code "const_int") ++ (match_test "ival >= -0x4000 && ival < 0x4000"))) ++ ++(define_constraint "P" ++ "A constant in the range 1 to 65535 (inclusive)." ++ (and (match_code "const_int") ++ (match_test "ival > 0 && ival < 0x10000"))) ++ ++;; Floating-point constraints ++ ++(define_constraint "G" ++ "Floating-point zero." ++ (and (match_code "const_double") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++;; General constraints ++ ++(define_constraint "Q" ++ "@internal" ++ (match_operand 0 "const_arith_operand")) ++ ++(define_memory_constraint "R" ++ "An address that can be used in a non-macro load or store." ++ (and (match_code "mem") ++ (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) ++ ++(define_memory_constraint "m" ++ "A memory operand whose address is formed by a base register and offset ++ that is suitable for use in instructions with the same addressing mode ++ as @code{st.w} and @code{ld.w}." ++ (and (match_code "mem") ++ (match_test "loongarch_12bit_offset_address_p (XEXP (op, 0), mode)"))) ++ ++(define_constraint "S" ++ "@internal ++ A constant call address." ++ (and (match_operand 0 "call_insn_operand") ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_memory_constraint "W" ++ "@internal ++ A memory address based on a member of @code{BASE_REG_CLASS}. 
This is ++ true for allreferences (although it can sometimes be implicit ++ if @samp{!TARGET_EXPLICIT_RELOCS})." ++ (and (match_code "mem") ++ (match_operand 0 "memory_operand") ++ (and (not (match_operand 0 "stack_operand")) ++ (not (match_test "CONSTANT_P (XEXP (op, 0))"))))) ++ ++(define_constraint "YG" ++ "@internal ++ A vector zero." ++ (and (match_code "const_vector") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++(define_constraint "YA" ++ "@internal ++ An unsigned 6-bit constant." ++ (and (match_code "const_int") ++ (match_test "UIMM6_OPERAND (ival)"))) ++ ++(define_constraint "YB" ++ "@internal ++ A signed 10-bit constant." ++ (and (match_code "const_int") ++ (match_test "IMM10_OPERAND (ival)"))) ++ ++(define_constraint "Yb" ++ "@internal" ++ (match_operand 0 "qi_mask_operand")) ++ ++(define_constraint "Yd" ++ "@internal ++ A constant @code{move_operand} that can be safely loaded into @code{$25} ++ using @code{la}." ++ (and (match_operand 0 "move_operand") ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_constraint "Yh" ++ "@internal" ++ (match_operand 0 "hi_mask_operand")) ++ ++(define_constraint "Yw" ++ "@internal" ++ (match_operand 0 "si_mask_operand")) ++ ++(define_constraint "Yx" ++ "@internal" ++ (match_operand 0 "low_bitmask_operand")) ++ ++(define_constraint "YI" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-512,511]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -512, 511)"))) ++ ++(define_constraint "YC" ++ "@internal ++ A replicated vector const in which the replicated value has a single ++ bit set." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_bitimm_set_p (op, mode)"))) ++ ++(define_constraint "YZ" ++ "@internal ++ A replicated vector const in which the replicated value has a single ++ bit clear." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_bitimm_clr_p (op, mode)"))) ++ ++(define_constraint "Unv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-31,0]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -31, 0)"))) ++ ++(define_constraint "Uuv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [0,31]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 31)"))) ++ ++(define_constraint "Usv5" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [-16,15]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, -16, 15)"))) ++ ++(define_constraint "Uuv6" ++ "@internal ++ A replicated vector const in which the replicated value is in the range ++ [0,63]." ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 63)"))) ++ ++(define_constraint "Urv8" ++ "@internal ++ A replicated vector const with replicated byte values as well as elements" ++ (and (match_code "const_vector") ++ (match_test "loongarch_const_vector_same_bytes_p (op, mode)"))) ++ ++(define_memory_constraint "ZC" ++ "A memory operand whose address is formed by a base register and offset ++ that is suitable for use in instructions with the same addressing mode ++ as @code{ll.w} and @code{sc.w}." 
++  (and (match_code "mem")
++       (match_test "loongarch_14bit_shifted_offset_address_p (XEXP (op, 0), mode)")))
++
++;;(define_address_constraint "ZD"
++;;  "An address suitable for a @code{prefetch} instruction, or for any other
++;;   instruction with the same addressing mode as @code{prefetch}."
++;;  (if_then_else (match_test "ISA_HAS_9BIT_DISPLACEMENT")
++;;     (match_test "loongarch_9bit_offset_address_p (op, mode)")
++;;     (match_test "loongarch_address_insns (op, mode, false)")))
++
++(define_memory_constraint "ZR"
++  "@internal
++   An address valid for loading/storing register exclusive"
++  (match_operand 0 "mem_noofs_operand"))
++
++(define_memory_constraint "ZB"
++  "@internal
++   An address that is held in a general-purpose register.
++   The offset is zero"
++  (and (match_code "mem")
++       (match_test "GET_CODE (XEXP (op, 0)) == REG")))
++
+diff --git a/gcc/config/loongarch/driver-native.c b/gcc/config/loongarch/driver-native.c
+new file mode 100644
+index 000000000..5484ee502
+--- /dev/null
++++ b/gcc/config/loongarch/driver-native.c
+@@ -0,0 +1,82 @@
++/* Subroutines for the gcc driver.
++   Copyright (C) 2008-2018 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#define IN_TARGET_CODE 1
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++
++
++/* This function must be marked noinline; otherwise the argument cannot
++   be passed as intended.  */
++int loongson_cpucfg (int arg)
++{
++  int ret;
++  __asm__ __volatile__ ("cpucfg %0,%1\n\t" /* cpucfg $2,$4.  */
++			:"=r"(ret)
++			:"r"(arg)
++			:);
++  return ret;
++}
++
++/* This will be called by the spec parser in gcc.c when it sees
++   a %:local_cpu_detect(args) construct.  Currently it will be called
++   with either "arch" or "tune" as argument depending on if -march=native
++   or -mtune=native is to be substituted.
++
++   It returns a string containing new command line parameters to be
++   put at the place of the above two options, depending on which CPU
++   it is executed on.  E.g. "-march=la464" on an LA464 core for
++   -march=native.  If the routine can't detect a known processor,
++   the -march or -mtune option is discarded.
++
++   ARGC and ARGV are set depending on the actual arguments given
++   in the spec.
*/ ++const char * ++host_detect_local_cpu (int argc, const char **argv) ++{ ++ const char *cpu = NULL; ++ bool arch; ++ int cpucfg_arg; ++ int cpucfg_ret; ++ ++ if (argc < 1) ++ return NULL; ++ ++ arch = strcmp (argv[0], "arch") == 0; ++ if (!arch && strcmp (argv[0], "tune")) ++ return NULL; ++ ++ cpucfg_arg = 0; ++ cpucfg_ret = loongson_cpucfg (cpucfg_arg); ++ if (((cpucfg_ret >> 16) & 0xff) == 0x14) ++ { ++ if (((cpucfg_ret >> 8) & 0xff) == 0xc0) ++ cpu = "la464"; ++ else ++ cpu = NULL; ++ } ++ ++ ++ if (cpu == NULL) ++ return NULL; ++ ++ return concat ("-m", argv[0], "=", cpu, NULL); ++} +diff --git a/gcc/config/loongarch/elf.h b/gcc/config/loongarch/elf.h +new file mode 100644 +index 000000000..b7f938e31 +--- /dev/null ++++ b/gcc/config/loongarch/elf.h +@@ -0,0 +1,50 @@ ++/* Target macros for loongarch*-elf targets. ++ Copyright (C) 1994-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* LARCH assemblers don't have the usual .set foo,bar construct; ++ .set is used for assembler options instead. */ ++#undef SET_ASM_OP ++#define ASM_OUTPUT_DEF(FILE, LABEL1, LABEL2) \ ++ do \ ++ { \ ++ fputc ('\t', FILE); \ ++ assemble_name (FILE, LABEL1); \ ++ fputs (" = ", FILE); \ ++ assemble_name (FILE, LABEL2); \ ++ fputc ('\n', FILE); \ ++ } \ ++ while (0) ++ ++#undef ASM_DECLARE_OBJECT_NAME ++#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name ++ ++#undef ASM_FINISH_DECLARE_OBJECT ++#define ASM_FINISH_DECLARE_OBJECT loongarch_finish_declare_object ++ ++/* Leave the linker script to choose the appropriate libraries. */ ++#undef LIB_SPEC ++#define LIB_SPEC "" ++ ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC "crti%O%s crtbegin%O%s" ++ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC "crtend%O%s crtn%O%s" ++ ++#define NO_IMPLICIT_EXTERN_C 1 +diff --git a/gcc/config/loongarch/frame-header-opt.c b/gcc/config/loongarch/frame-header-opt.c +new file mode 100644 +index 000000000..86e5d423d +--- /dev/null ++++ b/gcc/config/loongarch/frame-header-opt.c +@@ -0,0 +1,292 @@ ++/* Analyze functions to determine if callers need to allocate a frame header ++ on the stack. The frame header is used by callees to save their arguments. ++ This optimization is specific to TARGET_OLDABI targets. For TARGET_NEWABI ++ targets, if a frame header is required, it is allocated by the callee. ++ ++ ++ Copyright (C) 2015-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it ++under the terms of the GNU General Public License as published by the ++Free Software Foundation; either version 3, or (at your option) any ++later version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. 
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++
++#define IN_TARGET_CODE 1
++
++#include "config.h"
++#include "system.h"
++#include "context.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-core.h"
++#include "tree-pass.h"
++#include "target.h"
++#include "target-globals.h"
++#include "profile-count.h"
++#include "cfg.h"
++#include "cgraph.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "gimple-iterator.h"
++#include "gimple-walk.h"
++
++static unsigned int frame_header_opt (void);
++
++namespace {
++
++const pass_data pass_data_ipa_frame_header_opt =
++{
++  IPA_PASS, /* type */
++  "frame-header-opt", /* name */
++  OPTGROUP_NONE, /* optinfo_flags */
++  TV_CGRAPHOPT, /* tv_id */
++  0, /* properties_required */
++  0, /* properties_provided */
++  0, /* properties_destroyed */
++  0, /* todo_flags_start */
++  0, /* todo_flags_finish */
++};
++
++class pass_ipa_frame_header_opt : public ipa_opt_pass_d
++{
++public:
++  pass_ipa_frame_header_opt (gcc::context *ctxt)
++    : ipa_opt_pass_d (pass_data_ipa_frame_header_opt, ctxt,
++                      NULL, /* generate_summary */
++                      NULL, /* write_summary */
++                      NULL, /* read_summary */
++                      NULL, /* write_optimization_summary */
++                      NULL, /* read_optimization_summary */
++                      NULL, /* stmt_fixup */
++                      0, /* function_transform_todo_flags_start */
++                      NULL, /* function_transform */
++                      NULL) /* variable_transform */
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++  {
++    /* This optimization has no effect if TARGET_NEWABI.  If optimize
++       is not at least 1, the data needed for the optimization is
++       not available and nothing will be done anyway.  */
++    return TARGET_OLDABI && flag_frame_header_optimization && optimize > 0;
++  }
++
++  virtual unsigned int execute (function *) { return frame_header_opt (); }
++
++}; // class pass_ipa_frame_header_opt
++
++} // anon namespace
++
++static ipa_opt_pass_d *
++make_pass_ipa_frame_header_opt (gcc::context *ctxt)
++{
++  return new pass_ipa_frame_header_opt (ctxt);
++}
++
++void
++loongarch_register_frame_header_opt (void)
++{
++  opt_pass *p = make_pass_ipa_frame_header_opt (g);
++  struct register_pass_info f = { p, "comdats", 1, PASS_POS_INSERT_AFTER };
++  register_pass (&f);
++}
++
++
++/* Return true if it is certain that this is a leaf function.  False if it is
++   not a leaf function or if it is impossible to tell.  */
++
++static bool
++is_leaf_function (function *fn)
++{
++  basic_block bb;
++  gimple_stmt_iterator gsi;
++
++  /* If we do not have a cfg for this function, be conservative and assume
++     it is not a leaf function.  */
++  if (fn->cfg == NULL)
++    return false;
++
++  FOR_EACH_BB_FN (bb, fn)
++    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++      if (is_gimple_call (gsi_stmt (gsi)))
++        return false;
++  return true;
++}
++
++/* Return true if this function has inline assembly code or if we cannot
++   be certain that it does not.  False if we know that there is no inline
++   assembly.  */
++
++static bool
++has_inlined_assembly (function *fn)
++{
++  basic_block bb;
++  gimple_stmt_iterator gsi;
++
++  /* If we do not have a cfg for this function, be conservative and assume
++     it may have inline assembly.
*/
++  if (fn->cfg == NULL)
++    return true;
++
++  FOR_EACH_BB_FN (bb, fn)
++    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++      if (gimple_code (gsi_stmt (gsi)) == GIMPLE_ASM)
++        return true;
++
++  return false;
++}
++
++/* Return true if this function will use the stack space allocated by its
++   caller or if we cannot determine for certain that it does not.  */
++
++static bool
++needs_frame_header_p (function *fn)
++{
++  tree t;
++
++  if (fn->decl == NULL)
++    return true;
++
++  if (fn->stdarg)
++    return true;
++
++  for (t = DECL_ARGUMENTS (fn->decl); t; t = TREE_CHAIN (t))
++    {
++      if (!use_register_for_decl (t))
++        return true;
++
++      /* Some 64-bit types may get copied to general registers using the frame
++         header, see loongarch_output_64bit_xfer.  Checking for SImode only may
++         be overly restrictive but it is guaranteed to be safe.  */
++      if (DECL_MODE (t) != SImode)
++        return true;
++    }
++
++  return false;
++}
++
++/* Return true if the argument stack space allocated by function FN is used
++   by at least one of its callees, or if this cannot be determined.  Return
++   false only if the space is certainly not needed.  */
++
++static bool
++callees_functions_use_frame_header (function *fn)
++{
++  basic_block bb;
++  gimple_stmt_iterator gsi;
++  gimple *stmt;
++  tree called_fn_tree;
++  function *called_fn;
++
++  if (fn->cfg == NULL)
++    return true;
++
++  FOR_EACH_BB_FN (bb, fn)
++    {
++      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++        {
++          stmt = gsi_stmt (gsi);
++          if (is_gimple_call (stmt))
++            {
++              called_fn_tree = gimple_call_fndecl (stmt);
++              if (called_fn_tree != NULL)
++                {
++                  called_fn = DECL_STRUCT_FUNCTION (called_fn_tree);
++                  if (called_fn == NULL
++                      || DECL_WEAK (called_fn_tree)
++                      || has_inlined_assembly (called_fn)
++                      || !is_leaf_function (called_fn)
++                      || !called_fn->machine->does_not_use_frame_header)
++                    return true;
++                }
++              else
++                return true;
++            }
++        }
++    }
++  return false;
++}
++
++/* Set the callers_may_not_allocate_frame flag on every function that FN
++   calls, because FN itself may not allocate a frame header for them.  */
++
++static void
++set_callers_may_not_allocate_frame (function *fn)
++{
++  basic_block bb;
++  gimple_stmt_iterator gsi;
++  gimple *stmt;
++  tree called_fn_tree;
++  function *called_fn;
++
++  if (fn->cfg == NULL)
++    return;
++
++  FOR_EACH_BB_FN (bb, fn)
++    {
++      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++        {
++          stmt = gsi_stmt (gsi);
++          if (is_gimple_call (stmt))
++            {
++              called_fn_tree = gimple_call_fndecl (stmt);
++              if (called_fn_tree != NULL)
++                {
++                  called_fn = DECL_STRUCT_FUNCTION (called_fn_tree);
++                  if (called_fn != NULL)
++                    called_fn->machine->callers_may_not_allocate_frame = true;
++                }
++            }
++        }
++    }
++  return;
++}
++
++/* Scan each function to determine those that need their frame headers.
++   Perform a second scan to determine if the allocation can be skipped
++   because none of their callees require the frame header.
*/ ++ ++static unsigned int ++frame_header_opt () ++{ ++ struct cgraph_node *node; ++ function *fn; ++ ++ FOR_EACH_DEFINED_FUNCTION (node) ++ { ++ fn = node->get_fun (); ++ if (fn != NULL) ++ fn->machine->does_not_use_frame_header = !needs_frame_header_p (fn); ++ } ++ ++ FOR_EACH_DEFINED_FUNCTION (node) ++ { ++ fn = node->get_fun (); ++ if (fn != NULL) ++ fn->machine->optimize_call_stack ++ = !callees_functions_use_frame_header (fn) && !is_leaf_function (fn); ++ } ++ ++ FOR_EACH_DEFINED_FUNCTION (node) ++ { ++ fn = node->get_fun (); ++ if (fn != NULL && fn->machine->optimize_call_stack) ++ set_callers_may_not_allocate_frame (fn); ++ } ++ ++ return 0; ++} +diff --git a/gcc/config/loongarch/generic.md b/gcc/config/loongarch/generic.md +new file mode 100644 +index 000000000..321b8e561 +--- /dev/null ++++ b/gcc/config/loongarch/generic.md +@@ -0,0 +1,109 @@ ++;; Generic DFA-based pipeline description for LARCH targets ++;; Copyright (C) 2004-2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify it ++;; under the terms of the GNU General Public License as published ++;; by the Free Software Foundation; either version 3, or (at your ++;; option) any later version. ++ ++;; GCC is distributed in the hope that it will be useful, but WITHOUT ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++;; License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++ ++;; This file is derived from the old define_function_unit description. ++;; Each reservation can be overridden on a processor-by-processor basis. 
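
To make these latency numbers concrete before reading the reservations that follow: declaring, say, a 3-cycle load next to 1-cycle ALU operations lets the instruction scheduler hide load latency behind independent work. A toy C fragment, illustrative only, since real schedules depend on the whole model and the optimization level:

    /* With a 3-cycle load latency, a pipeline-aware scheduler may place the
       independent multiply in the load's shadow rather than stalling at the
       final add.  */
    long
    f (long *p, long x, long y)
    {
      long t = *p;     /* "load" type: result ready after ~3 cycles.  */
      long u = x * y;  /* "imul" type: independent filler work.  */
      return t + u;    /* "arith" type: consumes the loaded value.  */
    }
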
++
++(define_insn_reservation "generic_alu" 1
++  (eq_attr "type" "unknown,prefetch,prefetchx,condmove,const,arith,
++                   shift,slt,clz,trap,multi,nop,logical,signext,move")
++  "alu")
++
++(define_insn_reservation "generic_load" 3
++  (eq_attr "type" "load,fpload,fpidxload")
++  "alu")
++
++(define_insn_reservation "generic_store" 1
++  (eq_attr "type" "store,fpstore,fpidxstore")
++  "alu")
++
++(define_insn_reservation "generic_xfer" 2
++  (eq_attr "type" "mftg,mgtf")
++  "alu")
++
++(define_insn_reservation "generic_branch" 1
++  (eq_attr "type" "branch,jump,call")
++  "alu")
++
++(define_insn_reservation "generic_imul" 17
++  (eq_attr "type" "imul,imul3")
++  "imuldiv*17")
++
++(define_insn_reservation "generic_fcvt" 1
++  (eq_attr "type" "fcvt")
++  "alu")
++
++(define_insn_reservation "generic_fmove" 2
++  (eq_attr "type" "fabs,fneg,fmove")
++  "alu")
++
++(define_insn_reservation "generic_fcmp" 3
++  (eq_attr "type" "fcmp")
++  "alu")
++
++(define_insn_reservation "generic_fadd" 4
++  (eq_attr "type" "fadd")
++  "alu")
++
++(define_insn_reservation "generic_fmul_single" 7
++  (and (eq_attr "type" "fmul,fmadd")
++       (eq_attr "mode" "SF"))
++  "alu")
++
++(define_insn_reservation "generic_fmul_double" 8
++  (and (eq_attr "type" "fmul,fmadd")
++       (eq_attr "mode" "DF"))
++  "alu")
++
++(define_insn_reservation "generic_fdiv_single" 23
++  (and (eq_attr "type" "fdiv,frdiv")
++       (eq_attr "mode" "SF"))
++  "alu")
++
++(define_insn_reservation "generic_fdiv_double" 36
++  (and (eq_attr "type" "fdiv,frdiv")
++       (eq_attr "mode" "DF"))
++  "alu")
++
++(define_insn_reservation "generic_fsqrt_single" 54
++  (and (eq_attr "type" "fsqrt,frsqrt")
++       (eq_attr "mode" "SF"))
++  "alu")
++
++(define_insn_reservation "generic_fsqrt_double" 112
++  (and (eq_attr "type" "fsqrt,frsqrt")
++       (eq_attr "mode" "DF"))
++  "alu")
++
++(define_insn_reservation "generic_atomic" 10
++  (eq_attr "type" "atomic")
++  "alu")
++
++;; Sync loop consists of (in order)
++;; (1) optional sync,
++;; (2) LL instruction,
++;; (3) branch and 1-2 ALU instructions,
++;; (4) SC instruction,
++;; (5) branch and ALU instruction.
++;; The net result of this reservation is a big delay with a flush of
++;; ALU pipeline.
++(define_insn_reservation "generic_sync_loop" 40
++  (eq_attr "type" "syncloop")
++  "alu*39")
+diff --git a/gcc/config/loongarch/genopt.sh b/gcc/config/loongarch/genopt.sh
+new file mode 100644
+index 000000000..272aac51d
+--- /dev/null
++++ b/gcc/config/loongarch/genopt.sh
+@@ -0,0 +1,110 @@
++#!/bin/sh
++# Generate loongarch-tables.opt from the list of CPUs in loongarch-cpus.def.
++# Copyright (C) 2011-2018 Free Software Foundation, Inc.
++#
++# This file is part of GCC.
++#
++# GCC is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3, or (at your option)
++# any later version.
++#
++# GCC is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# <http://www.gnu.org/licenses/>.
++
++cat <<EOF
++; -*- buffer-read-only: t -*-
++; Generated automatically by genopt.sh from loongarch-cpus.def.
++
++; Copyright (C) 2011-2018 Free Software Foundation, Inc.
++;
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 3, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++; WARRANTY; without even the implied warranty of MERCHANTABILITY or
++; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++; for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING3.  If not see
++; <http://www.gnu.org/licenses/>.
++ ++Enum ++Name(loongarch_arch_opt_value) Type(int) ++Known LARCH CPUs (for use with the -march= and -mtune= options): ++ ++EnumValue ++Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly ++ ++EOF ++ ++awk -F'[(, ]+' ' ++BEGIN { ++ value = 0 ++} ++ ++# Write an entry for a single string accepted as a -march= argument. ++ ++function write_one_arch_value(name, value, flags) ++{ ++ print "EnumValue" ++ print "Enum(loongarch_arch_opt_value) String(" name ") Value(" value ")" flags ++ print "" ++} ++ ++# The logic for matching CPU name variants should be the same as in GAS. ++ ++# Write an entry for a single string accepted as a -march= argument, ++# plus any variant with a final "000" replaced by "k". ++ ++function write_arch_value_maybe_k(name, value, flags) ++{ ++ write_one_arch_value(name, value, flags) ++ if (name ~ "000$") { ++ sub("000$", "k", name) ++ write_one_arch_value(name, value, "") ++ } ++} ++ ++# Write all the entries for a -march= argument. In addition to ++# replacement of a final "000" with "k", an argument starting with ++# "vr", "rm" or "r" followed by a number, or just a plain number, ++# matches a plain number or "r" followed by a plain number. ++ ++function write_all_arch_values(name, value) ++{ ++ write_arch_value_maybe_k(name, value, " Canonical") ++ cname = name ++ if (cname ~ "^vr") { ++ sub("^vr", "", cname) ++ } else if (cname ~ "^rm") { ++ sub("^rm", "", cname) ++ } else if (cname ~ "^r") { ++ sub("^r", "", cname) ++ } ++ if (cname ~ "^[0-9]") { ++ if (cname != name) ++ write_arch_value_maybe_k(cname, value, "") ++ rname = "r" cname ++ if (rname != name) ++ write_arch_value_maybe_k(rname, value, "") ++ } ++} ++ ++/^LARCH_CPU/ { ++ name = $2 ++ gsub("\"", "", name) ++ write_all_arch_values(name, value) ++ value++ ++}' $1/loongarch-cpus.def +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +new file mode 100644 +index 000000000..1304e2e97 +--- /dev/null ++++ b/gcc/config/loongarch/gnu-user.h +@@ -0,0 +1,132 @@ ++/* Definitions for LARCH systems using GNU userspace. ++ Copyright (C) 1998-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#undef WCHAR_TYPE ++#define WCHAR_TYPE "int" ++ ++#undef WCHAR_TYPE_SIZE ++#define WCHAR_TYPE_SIZE 32 ++ ++#undef ASM_DECLARE_OBJECT_NAME ++#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name ++ ++/* If we don't set MASK_ABICALLS, we can't default to PIC. */ ++/* #undef TARGET_DEFAULT */ ++/* #define TARGET_DEFAULT MASK_ABICALLS */ ++ ++#define TARGET_OS_CPP_BUILTINS() \ ++ do { \ ++ GNU_USER_TARGET_OS_CPP_BUILTINS(); \ ++ /* The GNU C++ standard library requires this. */ \ ++ if (c_dialect_cxx ()) \ ++ builtin_define ("_GNU_SOURCE"); \ ++ } while (0) ++ ++#undef SUBTARGET_CPP_SPEC ++#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" ++ ++/* A standard GNU/Linux mapping. 
On most targets, it is included in ++ CC1_SPEC itself by config/linux.h, but loongarch.h overrides CC1_SPEC ++ and provides this hook instead. */ ++#undef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC GNU_USER_TARGET_CC1_SPEC ++ ++/* -G is incompatible with -KPIC which is the default, so only allow objects ++ in the small data section if the user explicitly asks for it. */ ++#undef LARCH_DEFAULT_GVALUE ++#define LARCH_DEFAULT_GVALUE 0 ++ ++#undef GNU_USER_TARGET_LINK_SPEC ++#define GNU_USER_TARGET_LINK_SPEC "\ ++ %{G*} %{EB} %{EL} %{shared} \ ++ %{!shared: \ ++ %{!static: \ ++ %{rdynamic:-export-dynamic} \ ++ %{mabi=lp32: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP32 "} \ ++ %{mabi=lp64: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP64 "}} \ ++ %{static}} \ ++ %{mabi=lp32:-m" GNU_USER_LINK_EMULATION32 "} \ ++ %{mabi=lp64:-m" GNU_USER_LINK_EMULATION64 "}" ++ ++#undef LINK_SPEC ++#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC ++ ++/* The LARCH assembler has different syntax for .set. We set it to ++ .dummy to trap any errors. */ ++#undef SET_ASM_OP ++#define SET_ASM_OP "\t.dummy\t" ++ ++#undef ASM_OUTPUT_DEF ++#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ ++ do { \ ++ fputc ( '\t', FILE); \ ++ assemble_name (FILE, LABEL1); \ ++ fputs ( " = ", FILE); \ ++ assemble_name (FILE, LABEL2); \ ++ fputc ( '\n', FILE); \ ++ } while (0) ++ ++/* The glibc _mcount stub will save $v0 for us. Don't mess with saving ++ it, since ASM_OUTPUT_REG_PUSH/ASM_OUTPUT_REG_POP do not work in the ++ presence of $gp-relative calls. */ ++#undef ASM_OUTPUT_REG_PUSH ++#undef ASM_OUTPUT_REG_POP ++ ++#undef LIB_SPEC ++#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC ++ ++#define NO_SHARED_SPECS "" ++ ++/* -march=native handling only makes sense with compiler running on ++ a LARCH chip. */ ++#if defined(__loongarch__) ++extern const char *host_detect_local_cpu (int argc, const char **argv); ++# define EXTRA_SPEC_FUNCTIONS \ ++ { "local_cpu_detect", host_detect_local_cpu }, ++ ++# define MARCH_MTUNE_NATIVE_SPECS \ ++ " %{march=native:%. 
*/ ++ ++#ifndef _GCC_LOONGARCH_BASE_INTRIN_H ++#define _GCC_LOONGARCH_BASE_INTRIN_H ++ ++#ifdef __cplusplus ++extern "C"{ ++#endif ++ ++typedef struct drdtime{ ++ unsigned long dvalue; ++ unsigned long dtimeid; ++} __drdtime_t; ++ ++typedef struct rdtime{ ++ unsigned int value; ++ unsigned int timeid; ++} __rdtime_t; ++ ++#ifdef __loongarch64 ++extern __inline __drdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_rdtime_d (void) ++{ ++ __drdtime_t drdtime; ++ __asm__ volatile ( ++ "rdtime.d\t%[val],%[tid]\n\t" ++ : [val]"=&r"(drdtime.dvalue),[tid]"=&r"(drdtime.dtimeid) ++ : ++ ); ++ return drdtime; ++} ++#define __rdtime_d __builtin_loongarch_rdtime_d ++#endif ++ ++extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_rdtimeh_w (void) ++{ ++ __rdtime_t rdtime; ++ __asm__ volatile ( ++ "rdtimeh.w\t%[val],%[tid]\n\t" ++ : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) ++ : ++ ); ++ return rdtime; ++} ++#define __rdtimel_w __builtin_loongarch_rdtimel_w ++ ++extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_rdtimel_w (void) ++{ ++ __rdtime_t rdtime; ++ __asm__ volatile ( ++ "rdtimel.w\t%[val],%[tid]\n\t" ++ : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) ++ : ++ ); ++ return rdtime; ++} ++#define __rdtimeh_w __builtin_loongarch_rdtimeh_w ++ ++/* Assembly instruction format: rj, fcsr */ ++/* Data types in instruction templates: USI, UQI */ ++#define __movfcsr2gr(/*ui5*/_1) __builtin_loongarch_movfcsr2gr((_1)); ++ ++/* Assembly instruction format: 0, fcsr, rj */ ++/* Data types in instruction templates: VOID, UQI, USI */ ++#define __movgr2fcsr(/*ui5*/ _1, _2) __builtin_loongarch_movgr2fcsr((unsigned short)_1, (unsigned int)_2); ++ ++#ifdef __loongarch32 ++/* Assembly instruction format: ui5, rj, si12 */ ++/* Data types in instruction templates: VOID, USI, USI, SI */ ++#define __cacop(/*ui5*/ _1, /*unsigned int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_cacop((_1), (unsigned int)(_2), (_3))) ++#elif defined __loongarch64 ++/* Assembly instruction format: ui5, rj, si12 */ ++/* Data types in instruction templates: VOID, USI, UDI, SI */ ++#define __dcacop(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_dcacop((_1), (unsigned long int)(_2), (_3))) ++#else ++# error "Don't support this ABI." 
++#endif ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: USI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++unsigned int __cpucfg(unsigned int _1) ++{ ++ return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1); ++} ++ ++#ifdef __loongarch64 ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: DI, DI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __asrtle_d(long int _1, long int _2) ++{ ++ __builtin_loongarch_asrtle_d((long int)_1, (long int)_2); ++} ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: DI, DI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __asrtgt_d(long int _1, long int _2) ++{ ++ __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2); ++} ++#endif ++ ++#ifdef __loongarch32 ++/* Assembly instruction format: rd, rj, ui5 */ ++/* Data types in instruction templates: SI, SI, UQI */ ++#define __lddir(/*int*/ _1, /*ui5*/ _2) ((int)__builtin_loongarch_lddir((int)(_1), (_2))) ++#elif defined __loongarch64 ++/* Assembly instruction format: rd, rj, ui5 */ ++/* Data types in instruction templates: DI, DI, UQI */ ++#define __dlddir(/*long int*/ _1, /*ui5*/ _2) ((long int)__builtin_loongarch_dlddir((long int)(_1), (_2))) ++#else ++# error "Don't support this ABI." ++#endif ++ ++#ifdef __loongarch32 ++/* Assembly instruction format: rj, ui5 */ ++/* Data types in instruction templates: VOID, SI, UQI */ ++#define __ldpte(/*int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_ldpte((int)(_1), (_2))) ++#elif defined __loongarch64 ++/* Assembly instruction format: rj, ui5 */ ++/* Data types in instruction templates: VOID, DI, UQI */ ++#define __dldpte(/*long int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_dldpte((long int)(_1), (_2))) ++#else ++# error "Don't support this ABI." 
++#endif ++ ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, QI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crc_w_b_w(char _1, int _2) ++{ ++ return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2); ++} ++ ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, HI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crc_w_h_w(short _1, int _2) ++{ ++ return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2); ++} ++ ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, SI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crc_w_w_w(int _1, int _2) ++{ ++ return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2); ++} ++ ++#ifdef __loongarch64 ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, DI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crc_w_d_w(long int _1, int _2) ++{ ++ return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2); ++} ++#endif ++ ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, QI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crcc_w_b_w(char _1, int _2) ++{ ++ return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2); ++} ++ ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, HI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crcc_w_h_w(short _1, int _2) ++{ ++ return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2); ++} ++ ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, SI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crcc_w_w_w(int _1, int _2) ++{ ++ return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2); ++} ++ ++#ifdef __loongarch64 ++/* Assembly instruction format: rd, rj, rk */ ++/* Data types in instruction templates: SI, DI, SI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++int __crcc_w_d_w(long int _1, int _2) ++{ ++ return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2); ++} ++#endif ++ ++/* Assembly instruction format: rd, ui14 */ ++/* Data types in instruction templates: USI, USI */ ++#define __csrrd(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd((_1))) ++ ++/* Assembly instruction format: rd, ui14 */ ++/* Data types in instruction templates: USI, USI, USI */ ++#define __csrwr(/*unsigned int*/ _1, /*ui14*/ _2) ((unsigned int)__builtin_loongarch_csrwr((unsigned int)(_1), (_2))) ++ ++/* Assembly instruction format: rd, rj, ui14 */ ++/* Data types in instruction templates: USI, USI, USI, USI */ ++#define __csrxchg(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) ((unsigned int)__builtin_loongarch_csrxchg((unsigned int)(_1), (unsigned int)(_2), (_3))) ++ ++#ifdef __loongarch64 ++/* Assembly instruction format: rd, ui14 */ ++/* Data types in instruction templates: UDI, USI */ ++#define __dcsrrd(/*ui14*/ _1) ((unsigned long int)__builtin_loongarch_dcsrrd((_1))) ++ ++/* Assembly instruction format: rd, ui14 */ ++/* Data types in instruction templates: UDI, UDI, USI */ ++#define __dcsrwr(/*unsigned long int*/ _1, 
/*ui14*/ _2) ((unsigned long int)__builtin_loongarch_dcsrwr((unsigned long int)(_1), (_2))) ++ ++/* Assembly instruction format: rd, rj, ui14 */ ++/* Data types in instruction templates: UDI, UDI, UDI, USI */ ++#define __dcsrxchg(/*unsigned long int*/ _1, /*unsigned long int*/ _2, /*ui14*/ _3) ((unsigned long int)__builtin_loongarch_dcsrxchg((unsigned long int)(_1), (unsigned long int)(_2), (_3))) ++#endif ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: UQI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++unsigned char __iocsrrd_b(unsigned int _1) ++{ ++ return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1); ++} ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: UHI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++unsigned short __iocsrrd_h(unsigned int _1) ++{ ++ return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1); ++} ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: USI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++unsigned int __iocsrrd_w(unsigned int _1) ++{ ++ return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1); ++} ++ ++#ifdef __loongarch64 ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: UDI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++unsigned long int __iocsrrd_d(unsigned int _1) ++{ ++ return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1); ++} ++#endif ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: VOID, UQI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __iocsrwr_b(unsigned char _1, unsigned int _2) ++{ ++ return (void)__builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2); ++} ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: VOID, UHI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __iocsrwr_h(unsigned short _1, unsigned int _2) ++{ ++ return (void)__builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2); ++} ++ ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: VOID, USI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __iocsrwr_w(unsigned int _1, unsigned int _2) ++{ ++ return (void)__builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2); ++} ++ ++#ifdef __loongarch64 ++/* Assembly instruction format: rd, rj */ ++/* Data types in instruction templates: VOID, UDI, USI */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __iocsrwr_d(unsigned long int _1, unsigned int _2) ++{ ++ return (void)__builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2); ++} ++#endif ++ ++/* Assembly instruction format: ui15 */ ++/* Data types in instruction templates: UQI */ ++#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1)) ++ ++/* Assembly instruction format: ui15 */ ++/* Data types in instruction templates: UQI */ ++#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1)) ++ ++#define __builtin_loongarch_syscall(a) \ ++{ \ ++ __asm__ volatile ("syscall %0\n\t" \ ++ ::"I"(a)); \ ++} ++#define __syscall 
__builtin_loongarch_syscall ++ ++#define __builtin_loongarch_break(a) \ ++{ \ ++ __asm__ volatile ("break %0\n\t" \ ++ ::"I"(a)); \ ++} ++#define __break __builtin_loongarch_break ++ ++ ++extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_tlbsrch (void) ++{ ++ __asm__ volatile ("tlbsrch\n\t"); ++} ++#define __tlbsrch __builtin_loongarch_tlbsrch ++ ++extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_tlbrd (void) ++{ ++ __asm__ volatile ("tlbrd\n\t"); ++} ++#define __tlbrd __builtin_loongarch_tlbrd ++ ++extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_tlbwr (void) ++{ ++ __asm__ volatile ("tlbwr\n\t"); ++} ++#define __tlbwr __builtin_loongarch_tlbwr ++ ++extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_tlbfill (void) ++{ ++ __asm__ volatile ("tlbfill\n\t"); ++} ++#define __tlbfill __builtin_loongarch_tlbfill ++ ++extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_tlbclr (void) ++{ ++ __asm__ volatile ("tlbclr\n\t"); ++} ++#define __tlbclr __builtin_loongarch_tlbclr ++ ++extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__builtin_loongarch_tlbflush (void) ++{ ++ __asm__ volatile ("tlbflush\n\t"); ++} ++#define __tlbflush __builtin_loongarch_tlbflush ++ ++ ++#ifdef __cplusplus ++} ++#endif ++#endif /* _GCC_LOONGARCH_BASE_INTRIN_H */ +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +new file mode 100644 +index 000000000..24757aaa1 +--- /dev/null ++++ b/gcc/config/loongarch/lasx.md +@@ -0,0 +1,4825 @@ ++;; Machine Description for LARCH Loongson ASX ASE ++;; ++;; Copyright (C) 2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . 
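
Before turning to the LASX machine description, a short usage sketch for a couple of the base intrinsics declared in larchintrin.h above. The signatures are taken from the header itself; the program assumes a gcc built with this patch and only runs on LoongArch hardware:

    #include <stdio.h>
    #include <larchintrin.h>

    int
    main (void)
    {
      /* CPU identification word 0, the same word the driver's
         host_detect_local_cpu reads via cpucfg.  */
      unsigned int id = __cpucfg (0);

      /* Accumulate one 32-bit word into a CRC-32 value.  */
      int crc = __crc_w_w_w (0x12345678, 0);

      printf ("cpucfg(0) = %#x, crc = %#x\n", id, (unsigned int) crc);
      return 0;
    }
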
++;; ++ ++(define_c_enum "unspec" [ ++ UNSPEC_LASX_XVABSD_S ++ UNSPEC_LASX_XVABSD_U ++ UNSPEC_LASX_XVAVG_S ++ UNSPEC_LASX_XVAVG_U ++ UNSPEC_LASX_XVAVGR_S ++ UNSPEC_LASX_XVAVGR_U ++ UNSPEC_LASX_XVBITCLR ++ UNSPEC_LASX_XVBITCLRI ++ UNSPEC_LASX_XVBITREV ++ UNSPEC_LASX_XVBITREVI ++ UNSPEC_LASX_XVBITSET ++ UNSPEC_LASX_XVBITSETI ++ UNSPEC_LASX_XVFCMP_CAF ++ UNSPEC_LASX_XVFCLASS ++ UNSPEC_LASX_XVFCMP_CUNE ++ UNSPEC_LASX_XVFCVT ++ UNSPEC_LASX_XVFCVTH ++ UNSPEC_LASX_XVFCVTL ++ UNSPEC_LASX_XVFLOGB ++ UNSPEC_LASX_XVFRECIP ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRSQRT ++ UNSPEC_LASX_XVFCMP_SAF ++ UNSPEC_LASX_XVFCMP_SEQ ++ UNSPEC_LASX_XVFCMP_SLE ++ UNSPEC_LASX_XVFCMP_SLT ++ UNSPEC_LASX_XVFCMP_SNE ++ UNSPEC_LASX_XVFCMP_SOR ++ UNSPEC_LASX_XVFCMP_SUEQ ++ UNSPEC_LASX_XVFCMP_SULE ++ UNSPEC_LASX_XVFCMP_SULT ++ UNSPEC_LASX_XVFCMP_SUN ++ UNSPEC_LASX_XVFCMP_SUNE ++ UNSPEC_LASX_XVFTINT_S ++ UNSPEC_LASX_XVFTINT_U ++ UNSPEC_LASX_XVCLO ++ UNSPEC_LASX_XVSAT_S ++ UNSPEC_LASX_XVSAT_U ++ UNSPEC_LASX_XVREPLVE0 ++ UNSPEC_LASX_XVREPL128VEI ++ UNSPEC_LASX_XVSRAR ++ UNSPEC_LASX_XVSRARI ++ UNSPEC_LASX_XVSRLR ++ UNSPEC_LASX_XVSRLRI ++ UNSPEC_LASX_XVSSUB_S ++ UNSPEC_LASX_XVSSUB_U ++ UNSPEC_LASX_XVSHUF ++ UNSPEC_LASX_XVSHUF_B ++ UNSPEC_LASX_BRANCH ++ UNSPEC_LASX_BRANCH_V ++ ++ UNSPEC_LASX_XVMUH_S ++ UNSPEC_LASX_XVMUH_U ++ UNSPEC_LASX_MXVEXTW_U ++ UNSPEC_LASX_XVSLLWIL_S ++ UNSPEC_LASX_XVSLLWIL_U ++ UNSPEC_LASX_XVSRAN ++ UNSPEC_LASX_XVSSRAN_S ++ UNSPEC_LASX_XVSSRAN_U ++ UNSPEC_LASX_XVSRARN ++ UNSPEC_LASX_XVSSRARN_S ++ UNSPEC_LASX_XVSSRARN_U ++ UNSPEC_LASX_XVSRLN ++ UNSPEC_LASX_XVSSRLN_U ++ UNSPEC_LASX_XVSRLRN ++ UNSPEC_LASX_XVSSRLRN_U ++ UNSPEC_LASX_XVFRSTPI ++ UNSPEC_LASX_XVFRSTP ++ UNSPEC_LASX_XVSHUF4I ++ UNSPEC_LASX_XVBSRL_V ++ UNSPEC_LASX_XVBSLL_V ++ UNSPEC_LASX_XVEXTRINS ++ UNSPEC_LASX_XVMSKLTZ ++ UNSPEC_LASX_XVSIGNCOV ++ UNSPEC_LASX_XVFTINTRNE_W_S ++ UNSPEC_LASX_XVFTINTRNE_L_D ++ UNSPEC_LASX_XVFTINTRP_W_S ++ UNSPEC_LASX_XVFTINTRP_L_D ++ UNSPEC_LASX_XVFTINTRM_W_S ++ UNSPEC_LASX_XVFTINTRM_L_D ++ UNSPEC_LASX_XVFTINT_W_D ++ UNSPEC_LASX_XVFFINT_S_L ++ UNSPEC_LASX_XVFTINTRZ_W_D ++ UNSPEC_LASX_XVFTINTRP_W_D ++ UNSPEC_LASX_XVFTINTRM_W_D ++ UNSPEC_LASX_XVFTINTRNE_W_D ++ UNSPEC_LASX_XVFTINTH_L_S ++ UNSPEC_LASX_XVFTINTL_L_S ++ UNSPEC_LASX_XVFFINTH_D_W ++ UNSPEC_LASX_XVFFINTL_D_W ++ UNSPEC_LASX_XVFTINTRZH_L_S ++ UNSPEC_LASX_XVFTINTRZL_L_S ++ UNSPEC_LASX_XVFTINTRPH_L_S ++ UNSPEC_LASX_XVFTINTRPL_L_S ++ UNSPEC_LASX_XVFTINTRMH_L_S ++ UNSPEC_LASX_XVFTINTRML_L_S ++ UNSPEC_LASX_XVFTINTRNEL_L_S ++ UNSPEC_LASX_XVFTINTRNEH_L_S ++ UNSPEC_LASX_XVFRINTRNE_S ++ UNSPEC_LASX_XVFRINTRNE_D ++ UNSPEC_LASX_XVFRINTRZ_S ++ UNSPEC_LASX_XVFRINTRZ_D ++ UNSPEC_LASX_XVFRINTRP_S ++ UNSPEC_LASX_XVFRINTRP_D ++ UNSPEC_LASX_XVFRINTRM_S ++ UNSPEC_LASX_XVFRINTRM_D ++ UNSPEC_LASX_XVREPLVE0_Q ++ UNSPEC_LASX_XVPERM_W ++ UNSPEC_LASX_XVPERMI_Q ++ UNSPEC_LASX_XVPERMI_D ++ ++ UNSPEC_LASX_XVADDWEV ++ UNSPEC_LASX_XVADDWEV2 ++ UNSPEC_LASX_XVADDWEV3 ++ UNSPEC_LASX_XVSUBWEV ++ UNSPEC_LASX_XVSUBWEV2 ++ UNSPEC_LASX_XVMULWEV ++ UNSPEC_LASX_XVMULWEV2 ++ UNSPEC_LASX_XVMULWEV3 ++ UNSPEC_LASX_XVADDWOD ++ UNSPEC_LASX_XVADDWOD2 ++ UNSPEC_LASX_XVADDWOD3 ++ UNSPEC_LASX_XVSUBWOD ++ UNSPEC_LASX_XVSUBWOD2 ++ UNSPEC_LASX_XVMULWOD ++ UNSPEC_LASX_XVMULWOD2 ++ UNSPEC_LASX_XVMULWOD3 ++ UNSPEC_LASX_XVMADDWEV ++ UNSPEC_LASX_XVMADDWEV2 ++ UNSPEC_LASX_XVMADDWEV3 ++ UNSPEC_LASX_XVMADDWOD ++ UNSPEC_LASX_XVMADDWOD2 ++ UNSPEC_LASX_XVMADDWOD3 ++ UNSPEC_LASX_XVHADDW_Q_D ++ UNSPEC_LASX_XVHSUBW_Q_D ++ UNSPEC_LASX_XVHADDW_QU_DU ++ UNSPEC_LASX_XVHSUBW_QU_DU ++ UNSPEC_LASX_XVROTR ++ UNSPEC_LASX_XVADD_Q ++ 
UNSPEC_LASX_XVSUB_Q
++ UNSPEC_LASX_XVREPLVE
++ UNSPEC_LASX_XVSHUF4
++ UNSPEC_LASX_XVMSKGEZ
++ UNSPEC_LASX_XVMSKNZ
++ UNSPEC_LASX_XVEXTH_Q_D
++ UNSPEC_LASX_XVEXTH_QU_DU
++ UNSPEC_LASX_XVROTRI
++ UNSPEC_LASX_XVEXTL_Q_D
++ UNSPEC_LASX_XVSRLNI
++ UNSPEC_LASX_XVSRLRNI
++ UNSPEC_LASX_XVSSRLNI
++ UNSPEC_LASX_XVSSRLNI2
++ UNSPEC_LASX_XVSSRLRNI
++ UNSPEC_LASX_XVSSRLRNI2
++ UNSPEC_LASX_XVSRANI
++ UNSPEC_LASX_XVSRARNI
++ UNSPEC_LASX_XVSSRANI
++ UNSPEC_LASX_XVSSRANI2
++ UNSPEC_LASX_XVSSRARNI
++ UNSPEC_LASX_XVSSRARNI2
++ UNSPEC_LASX_XVPERMI
++ UNSPEC_LASX_XVINSVE0
++ UNSPEC_LASX_XVPICKVE
++ UNSPEC_LASX_XVSSRLN
++ UNSPEC_LASX_XVSSRLRN
++ UNSPEC_LASX_XVEXTL_QU_DU
++ UNSPEC_LASX_XVLDI
++ UNSPEC_LASX_XVLDX
++ UNSPEC_LASX_XVSTX
++])
++
++;; All vector modes with 256 bits.
++(define_mode_iterator LASX [V4DF V8SF V4DI V8SI V16HI V32QI])
++
++;; Same as LASX. Used by vcond to iterate two modes.
++(define_mode_iterator LASX_2 [V4DF V8SF V4DI V8SI V16HI V32QI])
++
++;; Only used for splitting insert_d and copy_{u,s}.d.
++(define_mode_iterator LASX_D [V4DI V4DF])
++
++;; Only used for splitting insert_d and copy_{u,s}.d.
++(define_mode_iterator LASX_WD [V4DI V4DF V8SI V8SF])
++
++;; Only used for copy256_{u,s}.w.
++(define_mode_iterator LASX_W [V8SI V8SF])
++
++;; Only integer modes in LASX.
++(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI])
++
++;; As ILASX but excludes V32QI.
++(define_mode_iterator ILASX_DWH [V4DI V8SI V16HI])
++
++;; As ILASX but excludes V4DI.
++(define_mode_iterator ILASX_WHB [V8SI V16HI V32QI])
++
++;; Only integer modes equal to or larger than a word.
++(define_mode_iterator ILASX_DW [V4DI V8SI])
++
++;; Only integer modes smaller than a word.
++(define_mode_iterator ILASX_HB [V16HI V32QI])
++
++;; Only floating-point modes in LASX.
++(define_mode_iterator FLASX [V4DF V8SF])
++
++;; Only used for the immediate set shuffle elements instruction.
++(define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF])
++
++;; This attribute gives the integer vector mode with the same size in Loongson ASX.
++(define_mode_attr VIMODE256
++ [(V4DF "V4DI")
++ (V8SF "V8SI")
++ (V4DI "V4DI")
++ (V8SI "V8SI")
++ (V16HI "V16HI")
++ (V32QI "V32QI")])
++
++;; This attribute gives half modes (same size) for vector modes.
++(define_mode_attr VHSMODE256
++ [(V16HI "V32QI")
++ (V8SI "V16HI")
++ (V4DI "V8SI")])
++
++;; This attribute gives half modes for vector modes.
++(define_mode_attr VHMODE256
++ [(V32QI "V16QI")
++ (V16HI "V8HI")
++ (V8SI "V4SI")
++ (V4DI "V2DI")])
++
++;; This attribute gives half float modes for vector modes.
++(define_mode_attr VFHMODE256
++ [(V8SF "V4SF")
++ (V4DF "V2DF")])
++
++;; The attribute gives double modes for vector modes in LASX.
++(define_mode_attr VDMODE256
++ [(V8SI "V4DI")
++ (V16HI "V8SI")
++ (V32QI "V16HI")])
++
++;; Extended from VDMODE256.
++(define_mode_attr VDMODEEXD256
++ [(V4DI "V4DI")
++ (V8SI "V4DI")
++ (V16HI "V8SI")
++ (V32QI "V16HI")])
++
++;; The attribute gives half modes with the same number of elements for vector modes.
++(define_mode_attr VTRUNCMODE256
++ [(V16HI "V16QI")
++ (V8SI "V8HI")
++ (V4DI "V4SI")])
++
++;; This attribute gives the mode of the result for "copy_s_b, copy_u_b" etc.
++(define_mode_attr VRES256
++ [(V4DF "DF")
++ (V8SF "SF")
++ (V4DI "DI")
++ (V8SI "SI")
++ (V16HI "SI")
++ (V32QI "SI")])
++
++;; Only used with the LASX_D iterator.
++(define_mode_attr lasx_d
++ [(V4DI "reg_or_0")
++ (V4DF "register")])
++
++;; This attribute gives the 256-bit integer vector mode with the same size.
++(define_mode_attr mode256_i
++ [(V4DF "v4di")
++ (V8SF "v8si")
++ (V4DI "v4di")
++ (V8SI "v8si")
++ (V16HI "v16hi")
++ (V32QI "v32qi")])
++
++
++;; This attribute gives the 256-bit float vector mode with the same size.
++(define_mode_attr mode256_f
++ [(V4DF "v4df")
++ (V8SF "v8sf")
++ (V4DI "v4df")
++ (V8SI "v8sf")])
++
++;; This attribute gives the suffix for LASX instructions.
++(define_mode_attr lasxfmt
++ [(V4DF "d")
++ (V8SF "w")
++ (V4DI "d")
++ (V8SI "w")
++ (V16HI "h")
++ (V32QI "b")])
++
++(define_mode_attr flasxfmt
++ [(V4DF "d")
++ (V8SF "s")])
++
++(define_mode_attr lasxfmt_u
++ [(V4DF "du")
++ (V8SF "wu")
++ (V4DI "du")
++ (V8SI "wu")
++ (V16HI "hu")
++ (V32QI "bu")])
++
++(define_mode_attr ilasxfmt
++ [(V4DF "l")
++ (V8SF "w")])
++
++(define_mode_attr ilasxfmt_u
++ [(V4DF "lu")
++ (V8SF "wu")])
++
++;; This attribute gives the suffix for integers in VHMODE256.
++(define_mode_attr hlasxfmt
++ [(V4DI "w")
++ (V8SI "h")
++ (V16HI "b")])
++
++(define_mode_attr hlasxfmt_u
++ [(V4DI "wu")
++ (V8SI "hu")
++ (V16HI "bu")])
++
++;; This attribute gives the suffix for integers in VHSMODE256.
++(define_mode_attr hslasxfmt
++ [(V4DI "w")
++ (V8SI "h")
++ (V16HI "b")])
++
++;; This attribute gives the define_insn suffix for LASX instructions that need
++;; a distinction between integer and floating point.
++(define_mode_attr lasxfmt_f
++ [(V4DF "d_f")
++ (V8SF "w_f")
++ (V4DI "d")
++ (V8SI "w")
++ (V16HI "h")
++ (V32QI "b")])
++
++(define_mode_attr flasxfmt_f
++ [(V4DF "d_f")
++ (V8SF "s_f")
++ (V4DI "d")
++ (V8SI "w")
++ (V16HI "h")
++ (V32QI "b")])
++
++;; This attribute gives the define_insn suffix for LASX instructions that need
++;; a distinction between integer and floating point.
++(define_mode_attr lasxfmt_f_wd
++ [(V4DF "d_f")
++ (V8SF "w_f")
++ (V4DI "d")
++ (V8SI "w")])
++
++;; This attribute gives the suffix for integers in VHMODE256.
++(define_mode_attr dlasxfmt
++ [(V8SI "d")
++ (V16HI "w")
++ (V32QI "h")])
++
++(define_mode_attr dlasxfmt_u
++ [(V8SI "du")
++ (V16HI "wu")
++ (V32QI "hu")])
++
++;; For VDMODEEXD256.
++(define_mode_attr dlasxqfmt
++ [(V4DI "q")
++ (V8SI "d")
++ (V16HI "w")
++ (V32QI "h")])
++
++;; This is used to form an immediate operand constraint using
++;; "const_<indeximm256>_operand".
++(define_mode_attr indeximm256
++ [(V4DF "0_to_3")
++ (V8SF "0_to_7")
++ (V4DI "0_to_3")
++ (V8SI "0_to_7")
++ (V16HI "uimm4")
++ (V32QI "uimm5")])
++
++;; This is used to form an immediate operand constraint referring to the
++;; high half, using "const_<indeximm_hi>_operand".
++(define_mode_attr indeximm_hi
++ [(V4DF "2_or_3")
++ (V8SF "4_to_7")
++ (V4DI "2_or_3")
++ (V8SI "4_to_7")
++ (V16HI "8_to_15")
++ (V32QI "16_to_31")])
++
++;; This is used to form an immediate operand constraint referring to the
++;; low half, using "const_<indeximm_lo>_operand".
++(define_mode_attr indeximm_lo
++ [(V4DF "0_or_1")
++ (V8SF "0_to_3")
++ (V4DI "0_or_1")
++ (V8SI "0_to_3")
++ (V16HI "uimm3")
++ (V32QI "uimm4")])
++
++;; This attribute represents the bitmask needed for vec_merge in LASX,
++;; using "const_<bitmask256>_operand".
++(define_mode_attr bitmask256
++ [(V4DF "exp_4")
++ (V8SF "exp_8")
++ (V4DI "exp_4")
++ (V8SI "exp_8")
++ (V16HI "exp_16")
++ (V32QI "exp_32")])
++
++;; This attribute represents the bitmask needed for vec_merge referring to
++;; the low half, using "const_<bitmask_lo>_operand".
++(define_mode_attr bitmask_lo
++ [(V4DF "exp_2")
++ (V8SF "exp_4")
++ (V4DI "exp_2")
++ (V8SI "exp_4")
++ (V16HI "exp_8")
++ (V32QI "exp_16")])
++
++
++;; This attribute is used to form an immediate operand constraint using
++;; "const_<bitimm256>_operand".
++(define_mode_attr bitimm256 ++ [(V32QI "uimm3") ++ (V16HI "uimm4") ++ (V8SI "uimm5") ++ (V4DI "uimm6")]) ++ ++ ++(define_mode_attr d2lasxfmt ++ [(V8SI "q") ++ (V16HI "d") ++ (V32QI "w")]) ++ ++(define_mode_attr d2lasxfmt_u ++ [(V8SI "qu") ++ (V16HI "du") ++ (V32QI "wu")]) ++ ++(define_mode_attr VD2MODE256 ++ [(V8SI "V4DI") ++ (V16HI "V4DI") ++ (V32QI "V8SI")]) ++ ++(define_mode_attr lasxfmt_wd ++ [(V4DI "d") ++ (V8SI "w") ++ (V16HI "w") ++ (V32QI "w")]) ++ ++(define_expand "vec_init" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++;; FIXME: Delete. ++(define_insn "vec_pack_trunc_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_concat: ++ (truncate: ++ (match_operand:ILASX_DWH 1 "register_operand" "f")) ++ (truncate: ++ (match_operand:ILASX_DWH 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "") ++ (set_attr "can_delay" "no") ++ (set_attr "length" "8")]) ++ ++(define_expand "vec_unpacks_hi_v8sf" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LASX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, true/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_lo_v8sf" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LASX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, false/*high_p*/); ++}) ++ ++ ++(define_expand "vec_unpacks_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacks_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_insn "lasx_xvinsgr2vr_" ++ [(set (match_operand:LASX_WD 0 "register_operand" "=f") ++ (vec_merge:LASX_WD ++ (vec_duplicate:LASX_WD ++ (match_operand: 1 "reg_or_0_operand" "rJ")) ++ (match_operand:LASX_WD 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LASX" ++{ ++#if 0 ++ if (!TARGET_64BIT && (mode == V4DImode || mode == V4DFmode)) ++ return "#"; ++ else ++#endif ++ return "xvinsgr2vr.\t%u0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "vec_concatv4di" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_concat:V4DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ 
[(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv8si" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (match_operand:V4SI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv16hi" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (match_operand:V8HI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv32qi" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "0") ++ (match_operand:V16QI 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "vec_concatv4df" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (vec_concat:V4DF ++ (match_operand:V2DF 1 "register_operand" "0") ++ (match_operand:V2DF 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "vec_concatv8sf" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "0") ++ (match_operand:V4SF 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++{ ++ return "xvpermi.q\t%u0,%u2,0x20"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++;; xshuf.w ++(define_insn "lasx_xvperm_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI ++ [(match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVPERM_W))] ++ "ISA_HAS_LASX" ++ "xvperm.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V8SI")]) ++ ++;; xvpermi.d ++(define_insn "lasx_xvpermi_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI ++ [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_D))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++;; xvpermi.q ++(define_insn "lasx_xvpermi_q_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "0") ++ (match_operand:LASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_Q))] ++ "ISA_HAS_LASX" ++ "xvpermi.q\t%u0,%u2,%3" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickve2gr_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (any_extend:DI ++ (vec_select:DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand" "")]))))] ++ "ISA_HAS_LASX" ++ "xvpickve2gr.d\t%0,%u1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V4DI")]) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ if (mode == SImode || mode == DImode) ++ { ++ emit_insn(gen_lasx_xvpickve2gr_ (operands[0], operands[1], operands[2])); ++ } ++ else 
++ { ++ HOST_WIDE_INT size_0 = GET_MODE_SIZE (GET_MODE (operands[0])); ++ HOST_WIDE_INT size_1 = GET_MODE_SIZE (GET_MODE (operands[1])); ++ HOST_WIDE_INT val = INTVAL (operands[2]); ++ ++ /* High part */ ++ if (val >= size_1/size_0/2 ) ++ { ++ rtx dest1 = gen_reg_rtx (GET_MODE (operands[1])); ++ rtx pos = GEN_INT( val - size_1/size_0/2); ++ emit_insn (gen_lasx_xvpermi_q_ (dest1, dest1, operands[1], GEN_INT(1))); ++ rtx dest2 = gen_reg_rtx (SImode); ++ emit_insn (gen_lsx_vpickve2gr_ (dest2, ++ gen_lowpart(mode, dest1), ++ pos)); ++ emit_move_insn (operands[0], ++ gen_lowpart (mode, dest2)); ++ } ++ else ++ { ++ rtx dest1 = gen_reg_rtx (SImode); ++ emit_insn (gen_lsx_vpickve2gr_ (dest1, ++ gen_lowpart(mode, operands[1]), ++ operands[2])); ++ emit_move_insn (operands[0], ++ gen_lowpart (mode, dest1)); ++ } ++ } ++ DONE; ++}) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLASX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx temp; ++ HOST_WIDE_INT val = INTVAL (operands[2]); ++ ++ if (val == 0) ++ temp = operands[1]; ++ else ++ { ++ temp = gen_reg_rtx (mode); ++ emit_insn (gen_lasx_xvpickve_ (temp, operands[1], operands[2])); ++ } ++ emit_insn (gen_lasx_vec_extract_ (operands[0], temp)); ++ DONE; ++}) ++ ++(define_insn_and_split "lasx_vec_extract_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_select: ++ (match_operand:FLASX 1 "register_operand" "f") ++ (parallel [(const_int 0)])))] ++ "ISA_HAS_LASX" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 0) (match_dup 1))] ++{ ++ operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); ++} ++ [(set_attr "move_type" "fmove") ++ (set_attr "mode" "")]) ++ ++;; FIXME: 256?? ++(define_expand "vcondu" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:ILASX 4 "register_operand") ++ (match_operand:ILASX 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++;; FIXME: 256?? 
++(define_expand "vcond" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:LASX_2 4 "register_operand") ++ (match_operand:LASX_2 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++;; Same as vcond_ ++(define_expand "vcond_mask" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:LASX_2 4 "register_operand") ++ (match_operand:LASX_2 5 "register_operand")])] ++ "ISA_HAS_LASX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++(define_expand "lasx_xvrepli" ++ [(match_operand:ILASX 0 "register_operand") ++ (match_operand 1 "const_imm10_operand")] ++ "ISA_HAS_LASX" ++{ ++ if (mode == V32QImode) ++ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), ++ mode)); ++ emit_move_insn (operands[0], ++ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); ++ DONE; ++}) ++ ++(define_expand "mov" ++ [(set (match_operand:LASX 0) ++ (match_operand:LASX 1))] ++ "ISA_HAS_LASX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++ ++(define_expand "movmisalign" ++ [(set (match_operand:LASX 0) ++ (match_operand:LASX 1))] ++ "ISA_HAS_LASX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++;; 256-bit LASX modes can only exist in LASX registers or memory. ++(define_insn "mov_lasx" ++ [(set (match_operand:LASX 0 "nonimmediate_operand" "=f,f,R,*r,*f") ++ (match_operand:LASX 1 "move_operand" "fYGYI,R,f,*f,*r"))] ++ "ISA_HAS_LASX" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") ++ (set_attr "mode" "") ++ (set_attr "can_delay" "no,yes,yes,yes,yes") ++ (set_attr "length" "8,4,4,4,4")]) ++ ++ ++(define_split ++ [(set (match_operand:LASX 0 "nonimmediate_operand") ++ (match_operand:LASX 1 "move_operand"))] ++ "reload_completed && ISA_HAS_LASX ++ && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++;; Offset load ++(define_expand "lasx_mxld_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); ++ DONE; ++}) ++ ++;; Offset store ++(define_expand "lasx_mxst_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]); ++ DONE; ++}) ++ ++ ++ ++ ++ ++ ++;; LASX ++(define_insn "add3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") ++ (plus:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] ++ "ISA_HAS_LASX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ 
return "xvadd.\t%u0,%u1,%u2"; ++ case 1: ++ { ++ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0)); ++ ++ operands[2] = GEN_INT (-val); ++ return "xvsubi.\t%u0,%u1,%d2"; ++ } ++ case 2: ++ return "xvaddi.\t%u0,%u1,%E2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (minus:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsub.\t%u0,%u1,%u2 ++ xvsubi.\t%u0,%u1,%E2" ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (mult:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvmul.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmadd_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (plus:ILASX (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand:ILASX 3 "register_operand" "f")) ++ (match_operand:ILASX 1 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ "xvmadd.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_insn "lasx_xvmsub_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (minus:ILASX (match_operand:ILASX 1 "register_operand" "0") ++ (mult:ILASX (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand:ILASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvmsub.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (div:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ { return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "udiv3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (udiv:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ { return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "mod3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (mod:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ { return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "umod3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (umod:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ { return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "xor3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f,f") ++ (xor:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvxor.v\t%u0,%u1,%u2 ++ xvbitrevi.%v0\t%u0,%u1,%V2 ++ 
xvxori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "ior3" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") ++ (ior:LASX ++ (match_operand:LASX 1 "register_operand" "f,f,f") ++ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvor.v\t%u0,%u1,%u2 ++ xvbitseti.%v0\t%u0,%u1,%V2 ++ xvori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "and3" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f,f") ++ (and:LASX ++ (match_operand:LASX 1 "register_operand" "f,f,f") ++ (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] ++ "ISA_HAS_LASX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "xvand.v\t%u0,%u1,%u2"; ++ case 1: ++ { ++ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); ++ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); ++ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); ++ return "xvbitclri.%v0\t%u0,%u1,%V2"; ++ } ++ case 2: ++ return "xvandi.b\t%u0,%u1,%B2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (not:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvnor.v\t%u0,%u1,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "OI")]) ++ ++;; LASX ++(define_insn "vlshr3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (lshiftrt:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsrl.\t%u0,%u1,%u2 ++ xvsrli.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; LASX ">>" ++(define_insn "vashr3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ashiftrt:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsra.\t%u0,%u1,%u2 ++ xvsrai.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; LASX "<<" ++(define_insn "vashl3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ashift:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvsll.\t%u0,%u1,%u2 ++ xvslli.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "add3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (plus:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (minus:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfsub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (mult:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmul.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fmul") ++ (set_attr "mode" "")]) ++ ++(define_insn 
"div3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (div:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfdiv.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "fma4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ "xvfmadd.\t%u0,%u1,%u2,%u0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fnma4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (neg:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "0")))] ++ "ISA_HAS_LASX" ++ "xvfnmsub.\t%u0,%u1,%u2,%u0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sqrt2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvadda_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (plus:ILASX (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")) ++ (abs:ILASX (match_operand:ILASX 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvadda.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "ssadd3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ss_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "usadd3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (us_plus:ILASX (match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsadd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvabsd_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVABSD_S))] ++ "ISA_HAS_LASX" ++ "xvabsd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvabsd_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVABSD_U))] ++ "ISA_HAS_LASX" ++ "xvabsd.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavg_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVG_S))] ++ "ISA_HAS_LASX" ++ "xvavg.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavg_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVG_U))] ++ "ISA_HAS_LASX" ++ "xvavg.\t%u0,%u1,%u2" ++ [(set_attr "type" 
"simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavgr_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVGR_S))] ++ "ISA_HAS_LASX" ++ "xvavgr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvavgr_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVAVGR_U))] ++ "ISA_HAS_LASX" ++ "xvavgr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitclr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITCLR))] ++ "ISA_HAS_LASX" ++ "xvbitclr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitclri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITCLRI))] ++ "ISA_HAS_LASX" ++ "xvbitclri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitrev_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITREV))] ++ "ISA_HAS_LASX" ++ "xvbitrev.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitrevi_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITREVI))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitsel_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (ior:ILASX (and:ILASX (not:ILASX ++ (match_operand:ILASX 3 "register_operand" "f")) ++ (match_operand:ILASX 1 "register_operand" "f")) ++ (and:ILASX (match_dup 3) ++ (match_operand:ILASX 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvbitsel.v\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitseli_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (ior:V32QI (and:V32QI (not:V32QI ++ (match_operand:V32QI 1 "register_operand" "0")) ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (and:V32QI (match_dup 1) ++ (match_operand:V32QI 3 "const_vector_same_val_operand" "Urv8"))))] ++ "ISA_HAS_LASX" ++ "xvbitseli.b\t%u0,%u2,%B3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvbitset_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVBITSET))] ++ "ISA_HAS_LASX" ++ "xvbitset.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbitseti_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVBITSETI))] ++ "ISA_HAS_LASX" ++ 
"xvbitseti.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvs_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (ICC:ILASX ++ (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvs.\t%u0,%u1,%u2 ++ xvs.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_cmp" ++ [(set (match_operand:ILASX 0 "register_operand") ++ (match_operator:ILASX 1 "" ++ [(match_operand:ILASX 2 "register_operand") ++ (match_operand:ILASX 3 "register_operand")]))] ++ "ISA_HAS_LASX" ++{ ++ bool ok = loongarch_expand_int_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_expand "vec_cmp" ++ [(set (match_operand:FLASX 0 "register_operand") ++ (match_operator:FLASX 1 "" ++ [(match_operand:FLASX 2 "register_operand") ++ (match_operand:FLASX 3 "register_operand")]))] ++ "ISA_HAS_LASX" ++{ ++ bool ok = loongarch_expand_fp_vec_cmp (operands); ++ gcc_assert (ok); ++ DONE; ++}) ++ ++(define_insn "lasx_xvfclass_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCLASS))] ++ "ISA_HAS_LASX" ++ "xvfclass.\t%u0,%u1" ++ [(set_attr "type" "simd_fclass") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfcmp_caf_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCMP_CAF))] ++ "ISA_HAS_LASX" ++ "xvfcmp.caf.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfcmp_cune_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCMP_CUNE))] ++ "ISA_HAS_LASX" ++ "xvfcmp.cune.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_int_iterator FSC256_UNS [UNSPEC_LASX_XVFCMP_SAF UNSPEC_LASX_XVFCMP_SUN UNSPEC_LASX_XVFCMP_SOR ++ UNSPEC_LASX_XVFCMP_SEQ UNSPEC_LASX_XVFCMP_SNE UNSPEC_LASX_XVFCMP_SUEQ ++ UNSPEC_LASX_XVFCMP_SUNE UNSPEC_LASX_XVFCMP_SULE UNSPEC_LASX_XVFCMP_SULT ++ UNSPEC_LASX_XVFCMP_SLE UNSPEC_LASX_XVFCMP_SLT]) ++ ++(define_int_attr fsc256 ++ [(UNSPEC_LASX_XVFCMP_SAF "saf") ++ (UNSPEC_LASX_XVFCMP_SUN "sun") ++ (UNSPEC_LASX_XVFCMP_SOR "sor") ++ (UNSPEC_LASX_XVFCMP_SEQ "seq") ++ (UNSPEC_LASX_XVFCMP_SNE "sne") ++ (UNSPEC_LASX_XVFCMP_SUEQ "sueq") ++ (UNSPEC_LASX_XVFCMP_SUNE "sune") ++ (UNSPEC_LASX_XVFCMP_SULE "sule") ++ (UNSPEC_LASX_XVFCMP_SULT "sult") ++ (UNSPEC_LASX_XVFCMP_SLE "sle") ++ (UNSPEC_LASX_XVFCMP_SLT "slt")]) ++ ++(define_insn "lasx_xvfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vfcond: (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfcmp..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")] ++ FSC256_UNS))] ++ "ISA_HAS_LASX" ++ "xvfcmp..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++ ++(define_mode_attr fint256 ++ [(V8SF "v8si") ++ (V4DF "v4di")]) ++ ++(define_mode_attr FINTCNV256 ++ [(V8SF "I2S") ++ (V4DF "I2D")]) ++ ++(define_mode_attr 
FINTCNV256_2 ++ [(V8SF "S2I") ++ (V4DF "D2I")]) ++ ++(define_insn "float2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (float:FLASX (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvffint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "floatuns2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unsigned_float:FLASX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvffint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr FFQ256 ++ [(V4SF "V16HI") ++ (V2DF "V8SI")]) ++ ++(define_insn "lasx_xvreplgr2vr_" ++ [(set (match_operand:LASX 0 "register_operand" "=f,f") ++ (vec_duplicate:LASX ++ (match_operand: 1 "reg_or_0_operand" "r,J")))] ++ "ISA_HAS_LASX" ++{ ++ if (which_alternative == 1) ++ return "xvldi.b\t%u0,0" ; ++ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "xvreplgr2vr.\t%u0,%z1"; ++} ++ [(set_attr "type" "simd_fill") ++ (set_attr "mode" "") ++ (set_attr "can_delay" "no") ++ (set_attr "length" "8")]) ++ ++(define_insn "lasx_xvflogb_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFLOGB))] ++ "ISA_HAS_LASX" ++ "xvflogb.\t%u0,%u1" ++ [(set_attr "type" "simd_flog2") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "smax3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (smax:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmax.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfmaxa_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (if_then_else:FLASX ++ (gt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LASX" ++ "xvfmaxa.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (smin:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmin.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfmina_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (if_then_else:FLASX ++ (lt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f")) ++ (abs:FLASX (match_operand:FLASX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LASX" ++ "xvfmina.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrecip_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRECIP))] ++ "ISA_HAS_LASX" ++ "xvfrecip.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrint_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINT))] ++ "ISA_HAS_LASX" ++ "xvfrint.\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrsqrt_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 
1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSQRT))] ++ "ISA_HAS_LASX" ++ "xvfrsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftint_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_S))] ++ "ISA_HAS_LASX" ++ "xvftint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftint_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_U))] ++ "ISA_HAS_LASX" ++ "xvftint..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++ ++ ++(define_insn "fix_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (fix: (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvftintrz..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "fixuns_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unsigned_fix: (match_operand:FLASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvftintrz..\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvhw_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsub:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvhw_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsub:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvhw_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsub:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvhw.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ 
++(define_insn "lasx_xvpackev_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 32) ++ (const_int 2) (const_int 34) ++ (const_int 4) (const_int 36) ++ (const_int 6) (const_int 38) ++ (const_int 8) (const_int 40) ++ (const_int 10) (const_int 42) ++ (const_int 12) (const_int 44) ++ (const_int 14) (const_int 46) ++ (const_int 16) (const_int 48) ++ (const_int 18) (const_int 50) ++ (const_int 20) (const_int 52) ++ (const_int 22) (const_int 54) ++ (const_int 24) (const_int 56) ++ (const_int 26) (const_int 58) ++ (const_int 28) (const_int 60) ++ (const_int 30) (const_int 62)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++ ++(define_insn "lasx_xvpackev_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 2) (const_int 18) ++ (const_int 4) (const_int 20) ++ (const_int 6) (const_int 22) ++ (const_int 8) (const_int 24) ++ (const_int 10) (const_int 26) ++ (const_int 12) (const_int 28) ++ (const_int 14) (const_int 30)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpackev_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpackev_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpackev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvh_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 8) (const_int 40) ++ (const_int 9) (const_int 41) ++ (const_int 10) (const_int 42) ++ (const_int 11) (const_int 43) ++ (const_int 12) (const_int 44) ++ (const_int 13) (const_int 45) ++ (const_int 14) (const_int 46) ++ (const_int 15) (const_int 47) ++ (const_int 24) (const_int 56) ++ (const_int 25) (const_int 57) ++ (const_int 26) (const_int 58) ++ (const_int 27) (const_int 59) ++ (const_int 28) (const_int 60) ++ (const_int 29) (const_int 61) ++ (const_int 30) (const_int 62) ++ (const_int 31) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvilvh_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 
"register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 4) (const_int 20) ++ (const_int 5) (const_int 21) ++ (const_int 6) (const_int 22) ++ (const_int 7) (const_int 23) ++ (const_int 12) (const_int 28) ++ (const_int 13) (const_int 29) ++ (const_int 14) (const_int 30) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvilvh_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 10) ++ (const_int 3) (const_int 11) ++ (const_int 6) (const_int 14) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvilvh_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 10) ++ (const_int 3) (const_int 11) ++ (const_int 6) (const_int 14) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++ ++(define_insn "lasx_xvilvh_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_select:V4DI ++ (vec_concat:V8DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lasx_xvilvh_d_f" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (vec_select:V4DF ++ (vec_concat:V8DF ++ (match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LASX" ++ "xvilvh.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvpackod_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 33) ++ (const_int 3) (const_int 35) ++ (const_int 5) (const_int 37) ++ (const_int 7) (const_int 39) ++ (const_int 9) (const_int 41) ++ (const_int 11) (const_int 43) ++ (const_int 13) (const_int 45) ++ (const_int 15) (const_int 47) ++ (const_int 17) (const_int 49) ++ (const_int 19) (const_int 51) ++ (const_int 21) (const_int 53) ++ (const_int 23) (const_int 55) ++ (const_int 25) (const_int 57) ++ (const_int 27) (const_int 59) ++ (const_int 29) (const_int 61) ++ (const_int 31) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++ ++(define_insn "lasx_xvpackod_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 17) ++ (const_int 3) (const_int 19) ++ (const_int 5) (const_int 21) ++ (const_int 7) (const_int 23) ++ 
(const_int 9) (const_int 25) ++ (const_int 11) (const_int 27) ++ (const_int 13) (const_int 29) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++ ++(define_insn "lasx_xvpackod_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++ ++(define_insn "lasx_xvpackod_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpackod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvl_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 32) ++ (const_int 1) (const_int 33) ++ (const_int 2) (const_int 34) ++ (const_int 3) (const_int 35) ++ (const_int 4) (const_int 36) ++ (const_int 5) (const_int 37) ++ (const_int 6) (const_int 38) ++ (const_int 7) (const_int 39) ++ (const_int 16) (const_int 48) ++ (const_int 17) (const_int 49) ++ (const_int 18) (const_int 50) ++ (const_int 19) (const_int 51) ++ (const_int 20) (const_int 52) ++ (const_int 21) (const_int 53) ++ (const_int 22) (const_int 54) ++ (const_int 23) (const_int 55)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvilvl_h" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 1) (const_int 17) ++ (const_int 2) (const_int 18) ++ (const_int 3) (const_int 19) ++ (const_int 8) (const_int 24) ++ (const_int 9) (const_int 25) ++ (const_int 10) (const_int 26) ++ (const_int 11) (const_int 27)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvilvl_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 4) (const_int 12) ++ (const_int 5) (const_int 13)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvilvl_w_f" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 4) (const_int 
12) ++ (const_int 5) (const_int 13)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvilvl_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_select:V4DI ++ (vec_concat:V8DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvilvl_d_f" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (vec_select:V4DF ++ (vec_concat:V8DF ++ (match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LASX" ++ "xvilvl.d\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (smax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmax.\t%u0,%u1,%u2 ++ xvmaxi.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umax3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (umax:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmax.\t%u0,%u1,%u2 ++ xvmaxi.\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (smin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmin.\t%u0,%u1,%u2 ++ xvmini.\t%u0,%u1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umin3" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (umin:ILASX (match_operand:ILASX 1 "register_operand" "f,f") ++ (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LASX" ++ "@ ++ xvmin.\t%u0,%u1,%u2 ++ xvmini.\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvclo_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVCLO))] ++ "ISA_HAS_LASX" ++ "xvclo.\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "clz2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (clz:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvclz.\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvnor_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (and:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f,f")) ++ (not:ILASX (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] ++ "ISA_HAS_LASX" ++ "@ ++ xvnor.v\t%u0,%u1,%u2 ++ xvnori.b\t%u0,%u1,%B2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpickev_b" ++[(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 
"register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 32) (const_int 34) ++ (const_int 36) (const_int 38) ++ (const_int 40) (const_int 42) ++ (const_int 44) (const_int 46) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30) ++ (const_int 48) (const_int 50) ++ (const_int 52) (const_int 54) ++ (const_int 56) (const_int 58) ++ (const_int 60) (const_int 62)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvpickev_h" ++[(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpickev_w" ++[(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 8) (const_int 10) ++ (const_int 4) (const_int 6) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpickev_w_f" ++[(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 8) (const_int 10) ++ (const_int 4) (const_int 6) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LASX" ++ "xvpickev.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvpickod_b" ++[(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_select:V32QI ++ (vec_concat:V64QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 33) (const_int 35) ++ (const_int 37) (const_int 39) ++ (const_int 41) (const_int 43) ++ (const_int 45) (const_int 47) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31) ++ (const_int 49) (const_int 51) ++ (const_int 53) (const_int 55) ++ (const_int 57) (const_int 59) ++ (const_int 61) (const_int 63)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.b\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvpickod_h" ++[(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_select:V16HI ++ (vec_concat:V32HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (match_operand:V16HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ 
(const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.h\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvpickod_w" ++[(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_select:V8SI ++ (vec_concat:V16SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 9) (const_int 11) ++ (const_int 5) (const_int 7) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvpickod_w_f" ++[(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_select:V8SF ++ (vec_concat:V16SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 9) (const_int 11) ++ (const_int 5) (const_int 7) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LASX" ++ "xvpickod.w\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "popcount2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (popcount:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvpcnt.\t%u0,%u1" ++ [(set_attr "type" "simd_pcnt") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvsat_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSAT_S))] ++ "ISA_HAS_LASX" ++ "xvsat.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsat_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSAT_U))] ++ "ISA_HAS_LASX" ++ "xvsat.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i_" ++ [(set (match_operand:LASX_WHB_W 0 "register_operand" "=f") ++ (unspec:LASX_WHB_W [(match_operand:LASX_WHB_W 1 "register_operand" "f") ++ (match_operand 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVSHUF4I))] ++ "ISA_HAS_LASX" ++ "xvshuf4i.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "lasx_xvsrar_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRAR))] ++ "ISA_HAS_LASX" ++ "xvsrar.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrari_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSRARI))] ++ "ISA_HAS_LASX" ++ "xvsrari.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLR))] ++ "ISA_HAS_LASX" ++ "xvsrlr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlri_" ++ [(set (match_operand:ILASX 0 
"register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSRLRI))] ++ "ISA_HAS_LASX" ++ "xvsrlri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssub_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSUB_S))] ++ "ISA_HAS_LASX" ++ "xvssub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssub_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSUB_U))] ++ "ISA_HAS_LASX" ++ "xvssub.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf_" ++ [(set (match_operand:ILASX_DWH 0 "register_operand" "=f") ++ (unspec:ILASX_DWH [(match_operand: 1 "register_operand" "0") ++ (match_operand:ILASX_DWH 2 "register_operand" "f") ++ (match_operand:ILASX_DWH 3 "register_operand" "f")] ++ UNSPEC_LASX_XVSHUF))] ++ "ISA_HAS_LASX" ++ "xvshuf.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f") ++ (match_operand:V32QI 2 "register_operand" "f") ++ (match_operand:V32QI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVSHUF_B))] ++ "ISA_HAS_LASX" ++ "xvshuf.b\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvreplve0_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (vec_select: ++ (match_operand:LASX 1 "register_operand" "f") ++ (parallel [(const_int 0)]))))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvrepl128vei_b_internal" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (vec_duplicate:V32QI ++ (vec_select:V32QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_uimm4_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_operand 3 "const_16_to_31_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 16)" ++ "xvrepl128vei.b\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvrepl128vei_h_internal" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (vec_duplicate:V16HI ++ (vec_select:V16HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_uimm3_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_dup 2) ++ (match_operand 3 "const_8_to_15_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3) (match_dup 3) (match_dup 3) ++ (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL 
(operands[3]) - INTVAL (operands[2])) == 8)" ++ "xvrepl128vei.h\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvrepl128vei_w_internal" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (vec_duplicate:V8SI ++ (vec_select:V8SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand" "") ++ (match_dup 2) (match_dup 2) (match_dup 2) ++ (match_operand 3 "const_4_to_7_operand" "") ++ (match_dup 3) (match_dup 3) (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 4)" ++ "xvrepl128vei.w\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvrepl128vei_d_internal" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (vec_duplicate:V4DI ++ (vec_select:V4DI ++ (match_operand:V4DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_or_1_operand" "") ++ (match_dup 2) ++ (match_operand 3 "const_2_or_3_operand" "") ++ (match_dup 3)]))))] ++ "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 2)" ++ "xvrepl128vei.d\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvrepl128vei_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVREPL128VEI))] ++ "ISA_HAS_LASX" ++ "xvrepl128vei.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++ (define_insn "lasx_xvreplve0__scalar" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand: 1 "register_operand" "f")] ++ UNSPEC_LASX_XVREPLVE0))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvreplve0_q" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVREPLVE0_Q))] ++ "ISA_HAS_LASX" ++ "xvreplve0.q\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvfcvt_h_s" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (unspec:V16HI [(match_operand:V8SF 1 "register_operand" "f") ++ (match_operand:V8SF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVT))] ++ "ISA_HAS_LASX" ++ "xvfcvt.h.s\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvfcvt_s_d" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVT))] ++ "ISA_HAS_LASX" ++ "xvfcvt.s.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "vec_pack_trunc_v4df" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (vec_concat:V8SF ++ (float_truncate:V4SF (match_operand:V4DF 1 "register_operand" "f")) ++ (float_truncate:V4SF (match_operand:V4DF 2 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF") ++ (set_attr "can_delay" "no") ++ (set_attr "length" "8")]) ++ ++;; Define for builtin function. 
++(define_insn "lasx_xvfcvth_s_h" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTH))] ++ "ISA_HAS_LASX" ++ "xvfcvth.s.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvth_d_s" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTH))] ++ "ISA_HAS_LASX" ++ "xvfcvth.d.s\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF")]) ++ ++;; Define for gen insn. ++(define_insn "lasx_xvfcvth_d_insn" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "can_delay" "no") ++ (set_attr "length" "12")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvtl_s_h" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTL))] ++ "ISA_HAS_LASX" ++ "xvfcvtl.s.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SF")]) ++ ++;; Define for builtin function. ++(define_insn "lasx_xvfcvtl_d_s" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFCVTL))] ++ "ISA_HAS_LASX" ++ "xvfcvtl.d.s\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF")]) ++ ++;; Define for gen insn. 
++(define_insn "lasx_xvfcvtl_d_insn" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DF") ++ (set_attr "can_delay" "no") ++ (set_attr "length" "8")]) ++ ++(define_code_attr lasxbr ++ [(eq "xbz") ++ (ne "xbnz")]) ++ ++(define_code_attr lasxeq_v ++ [(eq "eqz") ++ (ne "nez")]) ++ ++(define_code_attr lasxne_v ++ [(eq "nez") ++ (ne "eqz")]) ++ ++(define_code_attr lasxeq ++ [(eq "anyeqz") ++ (ne "allnez")]) ++ ++(define_code_attr lasxne ++ [(eq "allnez") ++ (ne "anyeqz")]) ++ ++(define_insn "lasx__" ++ [(set (pc) (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] ++ UNSPEC_LASX_BRANCH) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "xvset.\t%Z3%u1\n\tbcnez\t%Z3%0", ++ "xvset.\t%z3%u1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "") ++ (set_attr "compact_form" "never")]) ++ ++(define_insn "lasx__v_" ++ [(set (pc) (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LASX 1 "register_operand" "f")] ++ UNSPEC_LASX_BRANCH_V) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LASX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0", ++ "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "") ++ (set_attr "compact_form" "never")]) ++ ++ ++ ++ ++;; loongson-asx. 
++(define_insn "lasx_vext2xv_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7) ++ (const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.h.b\t%u0,%u1" ++[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_vext2xv_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.w.h\t%u0,%u1" ++[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_vext2xv_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.w\t%u0,%u1" ++[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_vext2xv_w_b" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3) ++ (const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.w.b\t%u0,%u1" ++[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_vext2xv_d_h" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.h\t%u0,%u1" ++[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_vext2xv_d_b" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LASX" ++ "vext2xv.d.b\t%u0,%u1" ++[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DI")]) ++ ++ ++;; Extend loongson-sx to loongson-asx. 
++(define_insn "xvandn3" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (and:LASX (not:LASX (match_operand:LASX 1 "register_operand" "f")) ++ (match_operand:LASX 2 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvandn.v\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "abs2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (abs:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvsigncov.\t%u0,%u1,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "neg2" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (neg:ILASX (match_operand:ILASX 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvneg.\t%u0,%u1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmuh_s_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMUH_S))] ++ "ISA_HAS_LASX" ++ "xvmuh.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmuh_u_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMUH_U))] ++ "ISA_HAS_LASX" ++ "xvmuh.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_mxvextw_u_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_MXVEXTW_U))] ++ "ISA_HAS_LASX" ++ "mxvextw_u.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvsllwil_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSLLWIL_S))] ++ "ISA_HAS_LASX" ++ "xvsllwil..\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsllwil_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVSLLWIL_U))] ++ "ISA_HAS_LASX" ++ "xvsllwil..\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsran__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRAN))] ++ "ISA_HAS_LASX" ++ "xvsran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssran_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRAN_S))] ++ "ISA_HAS_LASX" ++ "xvssran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssran_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRAN_U))] ++ "ISA_HAS_LASX" ++ "xvssran..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrarn__" ++ [(set (match_operand: 0 
"register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRARN))] ++ "ISA_HAS_LASX" ++ "xvsrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarn_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRARN_S))] ++ "ISA_HAS_LASX" ++ "xvssrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRARN_U))] ++ "ISA_HAS_LASX" ++ "xvssrarn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLN))] ++ "ISA_HAS_LASX" ++ "xvsrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrln_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLN_U))] ++ "ISA_HAS_LASX" ++ "xvssrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSRLRN))] ++ "ISA_HAS_LASX" ++ "xvsrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLRN_U))] ++ "ISA_HAS_LASX" ++ "xvssrlrn..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrstpi_" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") ++ (match_operand:ILASX_HB 2 "register_operand" "f") ++ (match_operand 3 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVFRSTPI))] ++ "ISA_HAS_LASX" ++ "xvfrstpi.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvfrstp_" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0") ++ (match_operand:ILASX_HB 2 "register_operand" "f") ++ (match_operand:ILASX_HB 3 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSTP))] ++ "ISA_HAS_LASX" ++ "xvfrstp.\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvshuf4i_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_XVSHUF4I))] ++ "ISA_HAS_LASX" ++ "xvshuf4i.d\t%u0,%u2,%3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V4DI")]) ++ 
++(define_insn "lasx_xvbsrl_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVBSRL_V))] ++ "ISA_HAS_LASX" ++ "xvbsrl.v\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvbsll_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LASX_XVBSLL_V))] ++ "ISA_HAS_LASX" ++ "xvbsll.v\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextrins_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVEXTRINS))] ++ "ISA_HAS_LASX" ++ "xvextrins.\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvmskltz_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKLTZ))] ++ "ISA_HAS_LASX" ++ "xvmskltz.\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsigncov_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSIGNCOV))] ++ "ISA_HAS_LASX" ++ "xvsigncov.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "copysign3" ++ [(set (match_dup 4) ++ (and:FLASX ++ (not:FLASX (match_dup 3)) ++ (match_operand:FLASX 1 "register_operand"))) ++ (set (match_dup 5) ++ (and:FLASX (match_dup 3) ++ (match_operand:FLASX 2 "register_operand"))) ++ (set (match_operand:FLASX 0 "register_operand") ++ (ior:FLASX (match_dup 4) (match_dup 5)))] ++ "ISA_HAS_LASX" ++{ ++ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); ++ ++ operands[4] = gen_reg_rtx (mode); ++ operands[5] = gen_reg_rtx (mode); ++}) ++ ++ ++(define_insn "absv4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (abs:V4DF (match_operand:V4DF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitclri.d\t%u0,%u1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "absv8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (abs:V8SF (match_operand:V8SF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitclri.w\t%u0,%u1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "negv4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (neg:V4DF (match_operand:V4DF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.d\t%u0,%u1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "negv8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (neg:V8SF (match_operand:V8SF 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvbitrevi.w\t%u0,%u1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "xvfmadd4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvfmadd.\t%u0,%u1,$u2,%u3" ++ [(set_attr "type" 
"simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "xvfmsub4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (fma:FLASX (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfmsub.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "xvfnmsub4_nmsub4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (neg:FLASX ++ (fma:FLASX ++ (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (neg:FLASX (match_operand:FLASX 3 "register_operand" "f")))))] ++ "ISA_HAS_LASX" ++ "xvfnmsub.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "xvfnmadd4_nmadd4" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (neg:FLASX ++ (fma:FLASX ++ (match_operand:FLASX 1 "register_operand" "f") ++ (match_operand:FLASX 2 "register_operand" "f") ++ (match_operand:FLASX 3 "register_operand" "f"))))] ++ "ISA_HAS_LASX" ++ "xvfnmadd.\t%u0,%u1,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvftintrne_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrne.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrne_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrne.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrp_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrp.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrp_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrp.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrm_w_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_W_S))] ++ "ISA_HAS_LASX" ++ "xvftintrm.w.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrm_l_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_L_D))] ++ "ISA_HAS_LASX" ++ "xvftintrm.l.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftint_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINT_W_D))] ++ "ISA_HAS_LASX" ++ "xvftint.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvffint_s_l" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 
"register_operand" "f")] ++ UNSPEC_LASX_XVFFINT_S_L))] ++ "ISA_HAS_LASX" ++ "xvffint.s.l\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvftintrz_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZ_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrz.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrp_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRP_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrp.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrm_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRM_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrm.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftintrne_w_d" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f") ++ (match_operand:V4DF 2 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNE_W_D))] ++ "ISA_HAS_LASX" ++ "xvftintrne.w.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvftinth_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftinth.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvffinth_d_w" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINTH_D_W))] ++ "ISA_HAS_LASX" ++ "xvffinth.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvffintl_d_w" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFFINTL_D_W))] ++ "ISA_HAS_LASX" ++ "xvffintl.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvftintrzh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrzh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrzl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRZL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrzl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lasx_xvftintrph_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 
"register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRPH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrph.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lasx_xvftintrpl_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRPL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrpl.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrmh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRMH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrmh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrml_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRML_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrml.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrneh_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNEH_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrneh.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvftintrnel_l_s" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFTINTRNEL_L_S))] ++ "ISA_HAS_LASX" ++ "xvftintrnel.l.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrne_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRNE_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrne.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrne_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRNE_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrne.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrz_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRZ_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrz.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrz_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRZ_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrz.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrp_s" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRP_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrp.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrp_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRP_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrp.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++(define_insn "lasx_xvfrintrm_s" ++ [(set (match_operand:V8SI 0 
"register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRM_S))] ++ "ISA_HAS_LASX" ++ "xvfrintrm.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "lasx_xvfrintrm_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINTRM_D))] ++ "ISA_HAS_LASX" ++ "xvfrintrm.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++;; Offset load and broadcast ++(define_expand "lasx_xvldrepl_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 2 "aq12_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvldrepl__insn ++ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_insn "lasx_xvldrepl__insn" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "aq12_operand" )))))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldrepl.\t%u0,%1,%2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;;XVADDWEV.H.B XVSUBWEV.H.B XVMULWEV.H.B ++;;XVADDWEV.H.BU XVSUBWEV.H.BU XVMULWEV.H.BU ++(define_insn "lasx_xvwev_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsubmul:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWEV.W.H XVSUBWEV.W.H XVMULWEV.W.H ++;;XVADDWEV.W.HU XVSUBWEV.W.HU XVMULWEV.W.HU ++(define_insn "lasx_xvwev_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsubmul:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWEV.D.W XVSUBWEV.D.W XVMULWEV.D.W ++;;XVADDWEV.D.WU XVSUBWEV.D.WU XVMULWEV.D.WU ++(define_insn "lasx_xvwev_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsubmul:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ 
(const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvaddwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvsubwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWEV))] ++ "ISA_HAS_LASX" ++ "xvsubwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvmulwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++ ++;;XVADDWOD.H.B XVSUBWOD.H.B XVMULWOD.H.B ++;;XVADDWOD.H.BU XVSUBWOD.H.BU XVMULWOD.H.BU ++(define_insn "lasx_xvwod_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addsubmul:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.h.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWOD.W.H XVSUBWOD.W.H XVMULWOD.W.H ++;;XVADDWOD.W.HU XVSUBWOD.W.HU XVMULWOD.W.HU ++(define_insn "lasx_xvwod_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addsubmul:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.w.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++ ++;;XVADDWOD.D.W XVSUBWOD.D.W XVMULWOD.D.W ++;;XVADDWOD.D.WU XVSUBWOD.D.WU XVMULWOD.D.WU ++(define_insn "lasx_xvwod_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addsubmul:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ 
(match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvaddwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvsubwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWOD))] ++ "ISA_HAS_LASX" ++ "xvsubwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvmulwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvaddwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV2))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvsubwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWEV2))] ++ "ISA_HAS_LASX" ++ "xvsubwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmulwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV2))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvaddwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD2))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUBWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvsubwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUBWOD2))] ++ "ISA_HAS_LASX" ++ "xvsubwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmulwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ 
UNSPEC_LASX_XVMULWOD2))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWEV.H.BU.B XVMULWEV.H.BU.B ++(define_insn "lasx_xvwev_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addmul:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.h.bu.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWEV.W.HU.H XVMULWEV.W.HU.H ++(define_insn "lasx_xvwev_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addmul:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.w.hu.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWEV.D.WU.W XVMULWEV.D.WU.W ++(define_insn "lasx_xvwev_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addmul:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvwev.d.wu.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.H.BU.B XVMULWOD.H.BU.B ++(define_insn "lasx_xvwod_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (addmul:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.h.bu.b\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVADDWOD.W.HU.H XVMULWOD.W.HU.H ++(define_insn 
"lasx_xvwod_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (addmul:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.w.hu.h\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVADDWOD.D.WU.W XVMULWOD.D.WU.W ++(define_insn "lasx_xvwod_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (addmul:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LASX" ++ "xvwod.d.wu.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.H.B XVMADDWEV.H.BU ++(define_insn "lasx_xvmaddwev_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.h.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWEV.W.H XVMADDWEV.W.HU ++(define_insn "lasx_xvmaddwev_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.w.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWEV.D.W XVMADDWEV.D.WU ++(define_insn "lasx_xvmaddwev_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ 
(match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.d.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.D ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.H.B XVMADDWOD.H.BU ++(define_insn "lasx_xvmaddwod_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.h.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWOD.W.H XVMADDWOD.W.HU ++(define_insn "lasx_xvmaddwod_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.w.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWOD.D.W XVMADDWOD.D.WU ++(define_insn "lasx_xvmaddwod_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.d.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.D ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.d\t%u0,%u2,%u3" ++ 
[(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV2))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.du\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.DU ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD2))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.du\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.H.BU.B ++(define_insn "lasx_xvmaddwev_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.h.bu.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWEV.W.HU.H ++(define_insn "lasx_xvmaddwev_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.w.hu.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWEV.D.WU.W ++(define_insn "lasx_xvmaddwev_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.d.wu.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWEV.Q.DU.D ++;;TODO2 ++(define_insn "lasx_xvmaddwev_q_du_d" ++ [(set (match_operand:V4DI 0 
"register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWEV3))] ++ "ISA_HAS_LASX" ++ "xvmaddwev.q.du.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.H.BU.B ++(define_insn "lasx_xvmaddwod_h_bu_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (plus:V16HI ++ (match_operand:V16HI 1 "register_operand" "0") ++ (mult:V16HI ++ (zero_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))) ++ (sign_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.h.bu.b\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V16HI")]) ++ ++;;XVMADDWOD.W.HU.H ++(define_insn "lasx_xvmaddwod_w_hu_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (plus:V8SI ++ (match_operand:V8SI 1 "register_operand" "0") ++ (mult:V8SI ++ (zero_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.w.hu.h\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8SI")]) ++ ++;;XVMADDWOD.D.WU.W ++(define_insn "lasx_xvmaddwod_d_wu_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (plus:V4DI ++ (match_operand:V4DI 1 "register_operand" "0") ++ (mult:V4DI ++ (zero_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.d.wu.w\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMADDWOD.Q.DU.D ++;;TODO2 ++(define_insn "lasx_xvmaddwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0") ++ (match_operand:V4DI 2 "register_operand" "f") ++ (match_operand:V4DI 3 "register_operand" "f")] ++ UNSPEC_LASX_XVMADDWOD3))] ++ "ISA_HAS_LASX" ++ "xvmaddwod.q.du.d\t%u0,%u2,%u3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHADDW.Q.D ++;;TODO2 ++(define_insn "lasx_xvhaddw_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHADDW_Q_D))] ++ 
"ISA_HAS_LASX" ++ "xvhaddw.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHSUBW.Q.D ++;;TODO2 ++(define_insn "lasx_xvhsubw_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHSUBW_Q_D))] ++ "ISA_HAS_LASX" ++ "xvhsubw.q.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHADDW.QU.DU ++;;TODO2 ++(define_insn "lasx_xvhaddw_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHADDW_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvhaddw.qu.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVHSUBW.QU.DU ++;;TODO2 ++(define_insn "lasx_xvhsubw_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVHSUBW_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvhsubw.qu.du\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVROTR.B XVROTR.H XVROTR.W XVROTR.D ++;;TODO-478 ++(define_insn "lasx_xvrotr_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand:ILASX 2 "register_operand" "f")] ++ UNSPEC_LASX_XVROTR))] ++ "ISA_HAS_LASX" ++ "xvrotr.\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++;;XVADD.Q ++;;TODO2 ++(define_insn "lasx_xvadd_q" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADD_Q))] ++ "ISA_HAS_LASX" ++ "xvadd.q\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSUB.Q ++;;TODO2 ++(define_insn "lasx_xvsub_q" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSUB_Q))] ++ "ISA_HAS_LASX" ++ "xvsub.q\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVSSRLN.B.H XVSSRLN.H.W XVSSRLN.W.D ++(define_insn "lasx_xvssrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") ++ (match_operand:ILASX_DWH 2 "register_operand" "f")] ++ UNSPEC_LASX_XVSSRLN))] ++ "ISA_HAS_LASX" ++ "xvssrln..\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++;;XVREPLVE.B XVREPLVE.H XVREPLVE.W XVREPLVE.D ++(define_insn "lasx_xvreplve_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand:SI 2 "register_operand" "r")] ++ UNSPEC_LASX_XVREPLVE))] ++ "ISA_HAS_LASX" ++ "xvreplve.\t%u0,%u1,%z2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++;;XVADDWEV.Q.DU.D ++(define_insn "lasx_xvaddwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWEV3))] ++ "ISA_HAS_LASX" ++ "xvaddwev.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVADDWOD.Q.DU.D 
++(define_insn "lasx_xvaddwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVADDWOD3))] ++ "ISA_HAS_LASX" ++ "xvaddwod.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWEV.Q.DU.D ++(define_insn "lasx_xvmulwev_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWEV3))] ++ "ISA_HAS_LASX" ++ "xvmulwev.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;;XVMULWOD.Q.DU.D ++(define_insn "lasx_xvmulwod_q_du_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f") ++ (match_operand:V4DI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVMULWOD3))] ++ "ISA_HAS_LASX" ++ "xvmulwod.q.du.d\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvpickve2gr_w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (any_extend:SI ++ (vec_select:SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_7_operand" "")]))))] ++ "ISA_HAS_LASX" ++ "xvpickve2gr.w\t%0,%u1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V8SI")]) ++ ++ ++(define_insn "lasx_xvmskgez_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKGEZ))] ++ "ISA_HAS_LASX" ++ "xvmskgez.b\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvmsknz_b" ++ [(set (match_operand:V32QI 0 "register_operand" "=f") ++ (unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVMSKNZ))] ++ "ISA_HAS_LASX" ++ "xvmsknz.b\t%u0,%u1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V32QI")]) ++ ++(define_insn "lasx_xvexth_h_b" ++ [(set (match_operand:V16HI 0 "register_operand" "=f") ++ (any_extend:V16HI ++ (vec_select:V16QI ++ (match_operand:V32QI 1 "register_operand" "f") ++ (parallel [(const_int 16) (const_int 17) ++ (const_int 18) (const_int 19) ++ (const_int 20) (const_int 21) ++ (const_int 22) (const_int 23) ++ (const_int 24) (const_int 25) ++ (const_int 26) (const_int 27) ++ (const_int 28) (const_int 29) ++ (const_int 30) (const_int 31)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.h.b\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V16HI")]) ++ ++(define_insn "lasx_xvexth_w_h" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (any_extend:V8SI ++ (vec_select:V8HI ++ (match_operand:V16HI 1 "register_operand" "f") ++ (parallel [(const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.w.h\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8SI")]) ++ ++(define_insn "lasx_xvexth_d_w" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LASX" ++ "xvexth.d.w\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvexth_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI 
[(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTH_Q_D))] ++ "ISA_HAS_LASX" ++ "xvexth.q.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvexth_qu_du" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTH_QU_DU))] ++ "ISA_HAS_LASX" ++ "xvexth.qu.du\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvrotri_" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LASX_XVROTRI))] ++ "ISA_HAS_LASX" ++ "xvrotri.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvextl_q_d" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")] ++ UNSPEC_LASX_XVEXTL_Q_D))] ++ "ISA_HAS_LASX" ++ "xvextl.q.d\t%u0,%u1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4DI")]) ++ ++(define_insn "lasx_xvsrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRLNI))] ++ "ISA_HAS_LASX" ++ "xvsrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRLRNI))] ++ "ISA_HAS_LASX" ++ "xvsrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLNI))] ++ "ISA_HAS_LASX" ++ "xvssrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLNI2))] ++ "ISA_HAS_LASX" ++ "xvssrlni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLRNI))] ++ "ISA_HAS_LASX" ++ "xvssrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrlrni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRLRNI2))] ++ "ISA_HAS_LASX" ++ "xvssrlrni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX 
[(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRANI))] ++ "ISA_HAS_LASX" ++ "xvsrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvsrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSRARNI))] ++ "ISA_HAS_LASX" ++ "xvsrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRANI))] ++ "ISA_HAS_LASX" ++ "xvssrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrani__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRANI2))] ++ "ISA_HAS_LASX" ++ "xvssrani..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRARNI))] ++ "ISA_HAS_LASX" ++ "xvssrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvssrarni__" ++ [(set (match_operand:ILASX 0 "register_operand" "=f") ++ (unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0") ++ (match_operand:ILASX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVSSRARNI2))] ++ "ISA_HAS_LASX" ++ "xvssrarni..\t%u0,%u2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi_w" ++ [(set (match_operand:V8SI 0 "register_operand" "=f") ++ (unspec:V8SI [(match_operand:V8SI 1 "register_operand" "0") ++ (match_operand:V8SI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVPERMI))] ++ "ISA_HAS_LASX" ++ "xvpermi.w\t%u0,%u2,%3" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V8SI")]) ++ ++(define_expand "lasx_xvld" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (V32QImode, addr)); ++ DONE; ++}) ++ ++(define_expand "lasx_xvst" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (V32QImode, addr), operands[0]); ++ DONE; ++}) ++ ++(define_expand "lasx_xvstelm_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand 3 "const__operand") ++ (match_operand 2 "aq8_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LASX" 
++{
++  emit_insn (gen_lasx_xvstelm__insn
++	     (operands[1], operands[2], operands[0], operands[3]));
++  DONE;
++})
++
++(define_insn "lasx_xvstelm__insn"
++  [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r")
++		       (match_operand 1 "aq8_operand" )))
++	(vec_select:
++	  (match_operand:LASX 2 "register_operand" "f")
++	  (parallel [(match_operand 3 "const__operand" "")])))]
++
++  "ISA_HAS_LASX"
++{
++  return "xvstelm.\t%u2,%0,%1,%3";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++(define_insn "lasx_xvinsve0_"
++  [(set (match_operand:ILASX_DW 0 "register_operand" "=f")
++	(unspec:ILASX_DW [(match_operand:ILASX_DW 1 "register_operand" "0")
++			  (match_operand:ILASX_DW 2 "register_operand" "f")
++			  (match_operand 3 "const__operand" "")]
++			 UNSPEC_LASX_XVINSVE0))]
++  "ISA_HAS_LASX"
++  "xvinsve0.\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpickve_"
++  [(set (match_operand:LASX_WD 0 "register_operand" "=f")
++	(unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f")
++			 (match_operand 2 "const__operand" "")]
++			UNSPEC_LASX_XVPICKVE))]
++  "ISA_HAS_LASX"
++  "xvpickve.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlrn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++		  (match_operand:ILASX_DWH 2 "register_operand" "f")]
++		 UNSPEC_LASX_XVSSRLRN))]
++  "ISA_HAS_LASX"
++  "xvssrlrn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "xvorn3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(ior:ILASX (not:ILASX (match_operand:ILASX 2 "register_operand" "f"))
++		   (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvorn.v\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvextl_qu_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVEXTL_QU_DU))]
++  "ISA_HAS_LASX"
++  "xvextl.qu.du\t%u0,%u1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvldi"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand 1 "const_imm13_operand")]
++		     UNSPEC_LASX_XVLDI))]
++  "ISA_HAS_LASX"
++{
++  HOST_WIDE_INT val = INTVAL (operands[1]);
++  if (val < 0)
++    {
++      HOST_WIDE_INT mode_val = (val & 0xf00) >> 8;
++      if (mode_val < 13)
++	return "xvldi\t%u0,%1";
++      else
++	sorry ("for const_imm13_operand, only values 0000 through 1100 "
++	       "are supported in bits 12..9 when bit 13 is 1.");
++    }
++  else
++    return "xvldi\t%u0,%1";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvldx"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(unspec:V32QI [(match_operand:DI 1 "register_operand" "r")
++		       (match_operand:DI 2 "reg_or_0_operand" "rJ")]
++		      UNSPEC_LASX_XVLDX))]
++  "ISA_HAS_LASX"
++{
++  return "xvldx\t%u0,%1,%z2";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvstx"
++  [(set (mem:V32QI (plus:DI (match_operand:DI 1 "register_operand" "r")
++			    (match_operand:DI 2 "reg_or_0_operand" "rJ")))
++	(unspec:V32QI [(match_operand:V32QI 0 "register_operand" "f")]
++		      UNSPEC_LASX_XVSTX))]
++
++  "ISA_HAS_LASX"
++{
++  return "xvstx\t%u0,%1,%z2";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "DI")])
++
+diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h
+new file mode 100644
+index 000000000..185eee869
+--- /dev/null
++++ b/gcc/config/loongarch/lasxintrin.h
+@@ -0,0 +1,5139 @@
++/* LARCH Loongson ASX intrinsics include file.
++
++   Copyright (C) 2018 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published
++   by the Free Software Foundation; either version 3, or (at your
++   option) any later version.
++
++   GCC is distributed in the hope that it will be useful, but WITHOUT
++   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++   License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   <http://www.gnu.org/licenses/>.  */
++
++#ifndef _GCC_LOONGSON_ASXINTRIN_H
++#define _GCC_LOONGSON_ASXINTRIN_H 1
++
++#if defined(__loongarch_asx)
++
++typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32)));
++typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1)));
++typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1)));
++typedef short v16i16 __attribute__ ((vector_size(32), aligned(32)));
++typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2)));
++typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2)));
++typedef int v8i32 __attribute__ ((vector_size(32), aligned(32)));
++typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4)));
++typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4)));
++typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32)));
++typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8)));
++typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8)));
++typedef float v8f32 __attribute__ ((vector_size(32), aligned(32)));
++typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4)));
++typedef double v4f64 __attribute__ ((vector_size(32), aligned(32)));
++typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8)));
++
++typedef float __m256 __attribute__ ((__vector_size__ (32),
++				     __may_alias__));
++typedef long long __m256i __attribute__ ((__vector_size__ (32),
++					  __may_alias__));
++typedef double __m256d __attribute__ ((__vector_size__ (32),
++				       __may_alias__));
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.
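++
++   As a minimal usage sketch, assuming a toolchain built with this patch
++   and the option that enables LASX (-mlasx here; the function name is
++   illustrative):
++
++     __m256i
++     shift_each_byte (__m256i v, __m256i amounts)
++     {
++       // Shifts each of the 32 byte lanes of v left by the value in the
++       // corresponding byte lane of amounts.
++       return __lasx_xvsll_b (v, amounts);
++     }
++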
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsll_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsll_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvslli_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvslli_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslli_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvslli_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsra_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsra_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. 
*/ ++#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrai_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrai_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrai_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrai_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrar_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrar_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrari_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrari_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrari_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrari_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrl_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrl_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrli_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrli_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrli_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrli_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlr_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlr_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsrlri_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. 
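++
++   The immediate forms that follow are macros rather than inline
++   functions: the shift amount is encoded in the instruction itself
++   (the ui3-ui6 fields), so it must be an integer constant expression.
++   A minimal sketch (the function name is illustrative):
++
++     __m256i
++     halve_each_halfword (__m256i v)
++     {
++       // Rounding logical right shift of each 16-bit lane by 1.
++       return __lasx_xvsrlri_h (v, 1);
++     }
++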
*/ ++#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsrlri_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsrlri_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsrlri_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_b((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitclr_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitclr_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvbitclri_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvbitclri_h((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbitclri_w((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvbitclri_d((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_b((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
*/ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitset_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitset_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvbitseti_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvbitseti_h((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbitseti_w((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvbitseti_d((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_b((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitrev_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvbitrev_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_h((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. 
*/ ++#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_w((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvbitrevi_d((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_bu((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_hu((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_wu((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvaddi_du((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. 
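++
++   A minimal sketch pairing this with __lasx_xvadd_w above (the function
++   name is illustrative):
++
++     __m256i
++     sum_and_diff (__m256i a, __m256i b, __m256i *diff)
++     {
++       *diff = __lasx_xvsub_w (a, b);   // eight 32-bit lane subtractions
++       return __lasx_xvadd_w (a, b);    // eight 32-bit lane additions
++     }
++
++   The (v8i32) casts in these wrappers only reinterpret the 256-bit
++   value; they perform no data conversion.
++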
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_bu((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_hu((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_wu((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsubi_du((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. 
*/ ++#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmax_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmax_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_bu((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_hu((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_wu((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmaxi_du((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
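*/
++
++/* Usage sketch, assuming LASX is enabled: the plain forms order lanes as
++   signed values, the _bu.._du forms as unsigned, and the xvmaxi_*/xvmini_*
++   variants take a small immediate in place of a second vector.
++
++     __m256i x = __lasx_xvreplgr2vr_b (-1);
++     __m256i y = __lasx_xvreplgr2vr_b (1);
++     __m256i s = __lasx_xvmax_b (x, y);    // signed:   max(-1, 1)  = 1
++     __m256i u = __lasx_xvmax_bu (x, y);   // unsigned: max(255, 1) = 255
++*/
++
++/* Data types in instruction templates: V4DI, V4DI, V4DI.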
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvmini_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmin_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmin_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_bu((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_hu((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_wu((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvmini_du((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvseq_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvseq_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvseqi_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. 
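*/
++
++/* Usage sketch, assuming LASX is enabled: the comparisons return a per-lane
++   mask, all ones where the predicate holds and all zeros elsewhere, which
++   pairs naturally with xvbitsel_v further below.
++
++     __m256i a  = __lasx_xvreplgr2vr_w (3);
++     __m256i b  = __lasx_xvreplgr2vr_w (7);
++     __m256i eq = __lasx_xvseq_w (a, b);   // 0x00000000 per word (3 != 7)
++     __m256i lt = __lasx_xvslt_w (a, b);   // 0xffffffff per word (3 < 7)
++*/
++
++/* Data types in instruction templates: V32QI, V32QI, QI.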
*/ ++#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslti_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvslt_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvslt_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, UV32QI, UQI. */ ++#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_bu((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, UV16HI, UQI. */ ++#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_hu((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, UV8SI, UQI. */ ++#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_wu((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, UV4DI, UQI. */ ++#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslti_du((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V32QI, V32QI, QI. */ ++#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V16HI, V16HI, QI. */ ++#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V8SI, V8SI, QI. */ ++#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, si5. */ ++/* Data types in instruction templates: V4DI, V4DI, QI. */ ++#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) ((__m256i)__builtin_lasx_xvslei_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsle_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsle_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, UV32QI, UQI. */ ++#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_bu((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, UV16HI, UQI. 
*/ ++#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_hu((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, UV8SI, UQI. */ ++#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_wu((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, UV4DI, UQI. */ ++#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvslei_du((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsat_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsat_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsat_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsat_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsat_bu((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UQI. */ ++#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsat_hu((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UQI. */ ++#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsat_wu((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UQI. */ ++#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvsat_du((v4u64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
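*/
++
++/* Usage sketch, assuming LASX is enabled: xvsat_* clamps each signed lane to
++   the range of an (imm+1)-bit signed integer; the _bu.._du forms clamp
++   unsigned lanes to an (imm+1)-bit unsigned range.
++
++     __m256i v = __lasx_xvreplgr2vr_h (1000);
++     __m256i s = __lasx_xvsat_h (v, 7);    // clamp to [-128, 127]: 1000 -> 127
++*/
++
++/* Data types in instruction templates: V4DI, V4DI, V4DI.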
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadda_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadda_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsadd_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsadd_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
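*/
++
++/* Usage sketch, assuming LASX is enabled: xvadda_* adds the absolute values
++   of its operands, and the xvsadd_* family saturates at the type bounds
++   instead of wrapping.
++
++     __m256i v = __lasx_xvreplgr2vr_b (-50);
++     __m256i a = __lasx_xvadda_b (v, v);             // |-50| + |-50| = 100
++     __m256i s = __lasx_xvsadd_b (__lasx_xvreplgr2vr_b (100),
++                                  __lasx_xvreplgr2vr_b (100));   // pins at 127
++*/
++
++/* Assembly instruction format: xd, xj, xk.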
*/ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavg_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavg_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvavgr_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvavgr_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
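*/
++
++/* Usage sketch, assuming LASX is enabled: xvavg_* computes (a + b) >> 1 and
++   xvavgr_* computes (a + b + 1) >> 1, i.e. the rounding average; xvssub_bu
++   and friends stop at zero instead of wrapping below it.
++
++     __m256i a = __lasx_xvreplgr2vr_b (3);
++     __m256i b = __lasx_xvreplgr2vr_b (4);
++     __m256i f = __lasx_xvavg_b (a, b);    // (3 + 4) >> 1     = 3
++     __m256i r = __lasx_xvavgr_b (a, b);   // (3 + 4 + 1) >> 1 = 4
++     __m256i z = __lasx_xvssub_bu (a, b);  // 3 - 4 clamps to 0
++*/
++
++/* Assembly instruction format: xd, xj, xk.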
*/ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssub_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssub_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvabsd_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvabsd_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
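*/
++
++/* Usage sketch, assuming LASX is enabled: xvabsd_* yields the absolute
++   difference |a - b| per lane, ordering as signed or (for _bu.._du) as
++   unsigned values.
++
++     __m256i a = __lasx_xvreplgr2vr_b (5);
++     __m256i b = __lasx_xvreplgr2vr_b (9);
++     __m256i d = __lasx_xvabsd_b (a, b);   // |5 - 9| = 4 per byte
++*/
++
++/* Data types in instruction templates: V16HI, V16HI, V16HI.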
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmul_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmul_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_b((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_h((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_w((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmadd_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmadd_d((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_b((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_h((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_w((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. 
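*/
++
++/* Usage sketch, assuming LASX is enabled: the first operand of xvmadd_* and
++   xvmsub_* is the accumulator, giving _1 + _2 * _3 and _1 - _2 * _3; like
++   xvmul_*, only the low half of each lane product is kept.
++
++     __m256i acc = __lasx_xvreplgr2vr_w (10);
++     __m256i x   = __lasx_xvreplgr2vr_w (3);
++     __m256i y   = __lasx_xvreplgr2vr_w (4);
++     acc = __lasx_xvmadd_w (acc, x, y);    // 10 + 3 * 4 = 22 per word
++*/
++
++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI.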
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsub_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmsub_d((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvdiv_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvdiv_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. 
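*/
++
++/* Usage sketch, assuming LASX is enabled: xvdiv_* divides lane-wise,
++   truncating toward zero as C division does; the _bu.._du forms divide
++   unsigned lanes.
++
++     __m256i n = __lasx_xvreplgr2vr_w (-7);
++     __m256i d = __lasx_xvreplgr2vr_w (2);
++     __m256i q = __lasx_xvdiv_w (n, d);    // -7 / 2 = -3 per word
++*/
++
++/* Data types in instruction templates: V8SI, V16HI, V16HI.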
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_hu_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_hu_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_wu_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_wu_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_du_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_du_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_hu_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_hu_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_wu_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_wu_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
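*/
++
++/* Usage sketch, assuming LASX is enabled: the widening horizontal forms add
++   (or subtract) odd-indexed lanes of the first operand and even-indexed
++   lanes of the second, extending into the doubled element width; passing the
++   same vector twice therefore sums adjacent pairs.
++
++     __m256i v = __lasx_xvreplgr2vr_h (300);
++     __m256i p = __lasx_xvhaddw_w_h (v, v);   // 300 + 300 = 600 per word
++*/
++
++/* Data types in instruction templates: V4DI, UV8SI, UV8SI.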
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_du_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_du_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmod_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmod_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. 
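*/
++
++/* Usage sketch, assuming LASX is enabled: xvmod_* returns the remainder left
++   by the matching xvdiv_*, so n == d * (n / d) + n % d holds per lane.
++
++     __m256i n = __lasx_xvreplgr2vr_w (-7);
++     __m256i d = __lasx_xvreplgr2vr_w (2);
++     __m256i r = __lasx_xvmod_w (n, d);    // -7 % 2 = -1 per word
++*/
++
++/* Data types in instruction templates: V8SI, V8SI, UQI.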
*/ ++#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui1. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) ((__m256i)__builtin_lasx_xvrepl128vei_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickev_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickev_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpickod_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpickod_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. 
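*/
++
++/* Usage sketch, assuming LASX is enabled: xvpickev_*/xvpickod_* gather the
++   even- or odd-indexed lanes of the two sources, and xvrepl128vei_* splats
++   one selected element; as the "128" hints, these 256-bit shuffles treat the
++   two 128-bit halves of a register independently.
++
++     __m256i v = __lasx_xvreplgr2vr_w (42);
++     __m256i e = __lasx_xvpickev_w (v, v);        // every lane still 42
++     __m256i r = __lasx_xvrepl128vei_w (v, 1);    // element 1 of each half
++*/
++
++/* Data types in instruction templates: V16HI, V16HI, V16HI.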
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvh_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvh_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvilvl_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvilvl_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackev_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackev_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. 
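*/
++
++/* Usage sketch, assuming LASX is enabled: xvilvl_*/xvilvh_* interleave the
++   low (or high) elements of the two sources, again within each 128-bit half.
++
++     __m256i a = __lasx_xvreplgr2vr_b (1);
++     __m256i b = __lasx_xvreplgr2vr_b (2);
++     __m256i i = __lasx_xvilvl_b (a, b);   // bytes alternate 2, 1, 2, 1, ...
++*/
++
++/* Assembly instruction format: xd, xj, xk.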
*/ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpackod_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvpackod_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_b((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_h((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_w((v8i32)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvshuf_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvshuf_d((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvand_v(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvand_v((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvandi_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. 
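*/
++
++/* Usage sketch, assuming LASX is enabled: the _v forms are plain 256-bit
++   bitwise operations, independent of element size, while xvandi_b (and the
++   ori/nori/xori variants below) applies an 8-bit immediate to every byte.
++
++     __m256i m  = __lasx_xvreplgr2vr_w (0x0f0f0f0f);
++     __m256i v  = __lasx_xvreplgr2vr_w (0x12345678);
++     __m256i lo = __lasx_xvand_v (v, m);       // 0x02040608 per word
++     __m256i hi = __lasx_xvandi_b (v, 0xf0);   // 0x10305070 per word
++*/
++
++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI.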
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvor_v(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvor_v((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvori_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvnor_v(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvnor_v((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvnori_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvxor_v(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvxor_v((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UQI. */ ++#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvxori_b((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvbitsel_v(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvbitsel_v((v32u8)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI, USI. */ ++#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvbitseli_b((v32u8)(_1), (v32u8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, USI. */ ++#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvshuf4i_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V16HI, V16HI, USI. */ ++#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvshuf4i_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, USI. */ ++#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvshuf4i_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V32QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_b(int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_b((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V16HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_h(int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_h((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. 
*/ ++/* Data types in instruction templates: V8SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_w(int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_w((int)_1); ++} ++ ++/* Assembly instruction format: xd, rj. */ ++/* Data types in instruction templates: V4DI, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplgr2vr_d(long int _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplgr2vr_d((long int)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvpcnt_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvpcnt_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclo_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclo_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvclz_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvclz_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfadd_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfadd_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfadd_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfadd_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfsub_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfsub_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfsub_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfsub_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmul_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmul_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmul_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmul_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfdiv_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfdiv_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfdiv_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfdiv_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcvt_h_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcvt_h_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvt_s_d(__m256d _1, __m256d _2) ++{ ++ return (__m256)__builtin_lasx_xvfcvt_s_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmin_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmin_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmin_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmin_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmina_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmina_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmina_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmina_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmax_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmax_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmax_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmax_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmaxa_s(__m256 _1, __m256 _2) ++{ ++ return (__m256)__builtin_lasx_xvfmaxa_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmaxa_d(__m256d _1, __m256d _2) ++{ ++ return (__m256d)__builtin_lasx_xvfmaxa_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfclass_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfclass_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfclass_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfclass_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfsqrt_s(__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfsqrt_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfsqrt_d(__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfsqrt_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrecip_s(__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrecip_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrecip_d(__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrecip_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrint_s(__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrint_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrint_d(__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrint_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfrsqrt_s(__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvfrsqrt_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfrsqrt_d(__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvfrsqrt_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvflogb_s(__m256 _1) ++{ ++ return (__m256)__builtin_lasx_xvflogb_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvflogb_d(__m256d _1) ++{ ++ return (__m256d)__builtin_lasx_xvflogb_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvth_s_h(__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvfcvth_s_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfcvth_d_s(__m256 _1) ++{ ++ return (__m256d)__builtin_lasx_xvfcvth_d_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfcvtl_s_h(__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvfcvtl_s_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfcvtl_d_s(__m256 _1) ++{ ++ return (__m256d)__builtin_lasx_xvfcvtl_d_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_w_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_w_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_l_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_l_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_wu_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_wu_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_lu_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftint_lu_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_w_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_w_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_l_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_l_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_wu_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_wu_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_lu_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_lu_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_w(__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffint_d_l(__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffint_d_l((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V8SF, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_wu(__m256i _1) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_wu((v8u32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffint_d_lu(__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffint_d_lu((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V32QI, V32QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_b(__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_b((v32i8)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V16HI, V16HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_h(__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_h((v16i16)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V8SI, V8SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_w(__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_w((v8i32)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, rk. */ ++/* Data types in instruction templates: V4DI, V4DI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve_d(__m256i _1, int _2) ++{ ++ return (__m256i)__builtin_lasx_xvreplve_d((v4i64)_1, (int)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvpermi_w((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvandn_v(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvandn_v((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvneg_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvneg_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmuh_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmuh_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V16HI, V32QI, UQI. */ ++#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsllwil_h_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V8SI, V16HI, UQI. */ ++#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsllwil_w_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V4DI, V8SI, UQI. */ ++#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsllwil_d_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. 
*/ ++/* Data types in instruction templates: UV16HI, UV32QI, UQI. */ ++#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvsllwil_hu_bu((v32u8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV8SI, UV16HI, UQI. */ ++#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvsllwil_wu_hu((v16u16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV4DI, UV8SI, UQI. */ ++#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvsllwil_du_wu((v8u32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsran_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsran_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_bu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_bu_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_hu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_hu_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssran_wu_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssran_wu_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrarn_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrarn_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_bu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_bu_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_hu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_hu_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrarn_wu_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrarn_wu_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrln_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrln_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_bu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_bu_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_hu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_hu_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_wu_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_wu_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsrlrn_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsrlrn_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV32QI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_bu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_bu_h((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_hu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_hu_w((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_wu_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_wu_d((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, UQI. */ ++#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvfrstpi_b((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, UQI. */ ++#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvfrstpi_h((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrstp_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvfrstp_b((v32i8)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrstp_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvfrstp_h((v16i16)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvshuf4i_d((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbsrl_v((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvbsll_v((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_b((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_h((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_w((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ ++#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvextrins_d((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskltz_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskltz_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsigncov_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsigncov_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmadd_s(__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfmadd_s((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmadd_d(__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfmadd_d((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfmsub_s(__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfmsub_s((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfmsub_d(__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfmsub_d((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfnmadd_s(__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfnmadd_s((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfnmadd_d(__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfnmadd_d((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V8SF, V8SF, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvfnmsub_s(__m256 _1, __m256 _2, __m256 _3) ++{ ++ return (__m256)__builtin_lasx_xvfnmsub_s((v8f32)_1, (v8f32)_2, (v8f32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk, xa. */ ++/* Data types in instruction templates: V4DF, V4DF, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvfnmsub_d(__m256d _1, __m256d _2, __m256d _3) ++{ ++ return (__m256d)__builtin_lasx_xvfnmsub_d((v4f64)_1, (v4f64)_2, (v4f64)_3); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_w_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_w_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_l_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_l_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_w_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_w_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_l_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_l_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_w_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_w_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_l_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_l_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftint_w_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftint_w_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SF, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256 __lasx_xvffint_s_l(__m256i _1, __m256i _2) ++{ ++ return (__m256)__builtin_lasx_xvffint_s_l((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrz_w_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrz_w_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrp_w_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrp_w_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrm_w_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrm_w_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrne_w_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvftintrne_w_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftinth_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftinth_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintl_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintl_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffinth_d_w(__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffinth_d_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DF, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256d __lasx_xvffintl_d_w(__m256i _1) ++{ ++ return (__m256d)__builtin_lasx_xvffintl_d_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrzh_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrzh_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrzl_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrzl_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrph_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrph_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrpl_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrpl_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrmh_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrmh_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrml_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrml_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrneh_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrneh_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvftintrnel_l_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvftintrnel_l_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrne_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrne_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrne_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrne_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrz_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrz_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. 
*/ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrz_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrz_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrp_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrp_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrp_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrp_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrm_s(__m256 _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrm_s((v8f32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfrintrm_d(__m256d _1) ++{ ++ return (__m256i)__builtin_lasx_xvfrintrm_d((v4f64)_1); ++} ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, SI. */ ++#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvld((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ ++#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) ((void)__builtin_lasx_xvst((v32i8)(_1), (void *)(_2), (_3))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_b((v32i8)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V16HI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_h((v16i16)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V8SI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_w((v8i32)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V4DI, CVPOINTER, SI, UQI. */ ++#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lasx_xvstelm_d((v4i64)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, UQI. */ ++#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) ((__m256i)__builtin_lasx_xvinsve0_w((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, UQI. 
*/ ++#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) ((__m256i)__builtin_lasx_xvinsve0_d((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvpickve_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) ((__m256i)__builtin_lasx_xvpickve_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrlrn_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrlrn_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_b_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_b_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_h_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_h_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvssrln_w_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvssrln_w_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvorn_v(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvorn_v((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, i13. */ ++/* Data types in instruction templates: V4DI, HI. */ ++#define __lasx_xvldi(/*i13*/ _1) ((__m256i)__builtin_lasx_xvldi((_1))) ++ ++/* Assembly instruction format: xd, rj, rk. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvldx(void * _1, long int _2) ++{ ++ return (__m256i)__builtin_lasx_xvldx((void *)_1, (long int)_2); ++} ++ ++/* Assembly instruction format: xd, rj, rk. */ ++/* Data types in instruction templates: VOID, V32QI, CVPOINTER, DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __lasx_xvstx(__m256i _1, void * _2, long int _3) ++{ ++ return (void)__builtin_lasx_xvstx((v32i8)_1, (void *)_2, (long int)_3); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvextl_qu_du(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvextl_qu_du((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, rj, ui3. */ ++/* Data types in instruction templates: V8SI, V8SI, SI, UQI. */ ++#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) ((__m256i)__builtin_lasx_xvinsgr2vr_w((v8i32)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: xd, rj, ui2. */ ++/* Data types in instruction templates: V4DI, V4DI, DI, UQI. */ ++#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) ((__m256i)__builtin_lasx_xvinsgr2vr_d((v4i64)(_1), (long int)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvreplve0_q(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvreplve0_q((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_h_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_h_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_w_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_w_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V32QI. 
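A minimal usage sketch for the load/store and broadcast intrinsics above; this is an editor's illustration, not part of the patch. It assumes the header is installed as <lasxintrin.h>, that LASX code generation is enabled (upstream GCC spells the option -mlasx), and that the element-wise add __lasx_xvadd_w is declared earlier in this same header:

    #include <lasxintrin.h>

    /* Add the scalar s to every int32 element of a[0..n), eight lanes
       per iteration.  Assumes n is a multiple of 8.  */
    void add_scalar (int *a, int n, int s)
    {
      /* Splat s: insert it into lane 0 of a zero vector, then
         broadcast lane 0 to all eight word lanes.  */
      __m256i vs = __lasx_xvreplve0_w (__lasx_xvinsgr2vr_w (__lasx_xvldi (0), s, 0));
      for (int i = 0; i < n; i += 8)
        {
          __m256i v = __lasx_xvld (a + i, 0);   /* 256-bit load, offset 0  */
          v = __lasx_xvadd_w (v, vs);           /* lane-wise 32-bit add    */
          __lasx_xvst (v, a + i, 0);            /* 256-bit store, offset 0 */
        }
    }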
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_w_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_w_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_d_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_d_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_hu_bu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_hu_bu((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_wu_hu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_wu_hu((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_wu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_wu((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_wu_bu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_wu_bu((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_hu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_hu((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_vext2xv_du_bu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_vext2xv_du_bu((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) ((__m256i)__builtin_lasx_xvpermi_q((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui8. */ ++/* Data types in instruction templates: V4DI, V4DI, USI. */ ++#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) ((__m256i)__builtin_lasx_xvpermi_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvperm_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvperm_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, rj, si12. */ ++/* Data types in instruction templates: V32QI, CVPOINTER, SI. 
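Judging by the instruction names, the vext2xv_* intrinsics above widen the lowest source elements across the whole 256-bit register, unlike the xvexth_* family later in the header, which works on the high half. A scalar sketch of the assumed semantics of __lasx_vext2xv_w_b (sign extension; the *u_*u forms zero-extend):

    /* Reference model: widen the low 8 signed bytes of the source to
       8 sign-extended 32-bit words (assumed semantics, inferred from
       the instruction name only).  */
    void vext2xv_w_b_ref (int dst[8], const signed char src[32])
    {
      for (int i = 0; i < 8; i++)
        dst[i] = src[i];
    }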
*/ ++#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvldrepl_b((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si11. */ ++/* Data types in instruction templates: V16HI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m256i)__builtin_lasx_xvldrepl_h((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si10. */ ++/* Data types in instruction templates: V8SI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m256i)__builtin_lasx_xvldrepl_w((void *)(_1), (_2))) ++ ++/* Assembly instruction format: xd, rj, si9. */ ++/* Data types in instruction templates: V4DI, CVPOINTER, SI. */ ++#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m256i)__builtin_lasx_xvldrepl_d((void *)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui3. */ ++/* Data types in instruction templates: SI, V8SI, UQI. */ ++#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) ((int)__builtin_lasx_xvpickve2gr_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui3. */ ++/* Data types in instruction templates: USI, V8SI, UQI. */ ++#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) ((unsigned int)__builtin_lasx_xvpickve2gr_wu((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui2. */ ++/* Data types in instruction templates: DI, V4DI, UQI. */ ++#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) ((long int)__builtin_lasx_xvpickve2gr_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: rd, xj, ui2. */ ++/* Data types in instruction templates: UDI, V4DI, UQI. */ ++#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) ((unsigned long int)__builtin_lasx_xvpickve2gr_du((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
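The xvldrepl_* macros above load one scalar element and, per the "load, replicate" naming, broadcast it to every lane; the immediate field narrows from si12 to si9 as the element grows because the encoded offset is scaled by the element size. A small sketch of this assumed behavior, paired with the extract macros defined just after them:

    #include <lasxintrin.h>

    /* Broadcast *p to all eight word lanes, then read lane 3 back;
       the result should equal *p (assumed semantics).  */
    int roundtrip (const int *p)
    {
      __m256i v = __lasx_xvldrepl_w (p, 0);
      return __lasx_xvpickve2gr_w (v, 3);
    }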
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_q_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_q_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_d_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_d_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_w_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_w_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwev_h_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwev_h_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_q_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_q_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_d_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_d_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_w_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_w_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsubwod_h_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsubwod_h_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, UV8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_wu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_wu((v8u32)_1, (v8u32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_hu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_hu((v16u16)_1, (v16u16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_bu(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_bu((v32u8)_1, (v32u8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_d_wu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_d_wu_w((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_w_hu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_w_hu_h((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_h_bu_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_h_bu_b((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_d_wu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_d_wu_w((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_w_hu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_w_hu_h((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_h_bu_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_h_bu_b((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_d_wu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_d_wu_w((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. 
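The naming in the widening arithmetic block above follows a fixed pattern: "wev" operates on even-numbered source lanes and "wod" on odd-numbered ones, widening each pair to the next element size before adding, subtracting, or multiplying (a "_wu_w"-style suffix mixes an unsigned first operand with a signed second). A scalar reference sketch of the assumed semantics of __lasx_xvaddwev_d_w and __lasx_xvaddwod_d_w:

    /* Reference model for xvaddwev.d.w / xvaddwod.d.w: even (resp.
       odd) int32 lanes are sign-extended to 64 bits and added
       (assumed semantics, inferred from the instruction names).  */
    void addw_ref (long long ev[4], long long od[4],
                   const int a[8], const int b[8])
    {
      for (int i = 0; i < 4; i++)
        {
          ev[i] = (long long) a[2 * i]     + (long long) b[2 * i];
          od[i] = (long long) a[2 * i + 1] + (long long) b[2 * i + 1];
        }
    }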
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_w_hu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_w_hu_h((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_h_bu_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_h_bu_b((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_d_wu_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_d_wu_w((v8u32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_w_hu_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_w_hu_h((v16u16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_h_bu_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_h_bu_b((v32u8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhaddw_qu_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhaddw_qu_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_q_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_q_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvhsubw_qu_du(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvhsubw_qu_du((v4u64)_1, (v4u64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_d((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_w((v4i64)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_h((v8i32)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_b((v16i16)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_du(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_du((v4u64)_1, (v4u64)_2, (v4u64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_wu(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_wu((v4u64)_1, (v8u32)_2, (v8u32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_hu(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_hu((v8u32)_1, (v16u16)_2, (v16u16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_bu(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_bu((v16u16)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_d((v4i64)_1, (v4i64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_w((v4i64)_1, (v8i32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V16HI, V16HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_h((v8i32)_1, (v16i16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_b((v16i16)_1, (v32i8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_du(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_du((v4u64)_1, (v4u64)_2, (v4u64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV4DI, UV4DI, UV8SI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_wu(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_wu((v4u64)_1, (v8u32)_2, (v8u32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV8SI, UV8SI, UV16HI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_hu(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_hu((v8u32)_1, (v16u16)_2, (v16u16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: UV16HI, UV16HI, UV32QI, UV32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_bu(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_bu((v16u16)_1, (v32u8)_2, (v32u8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_q_du_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_q_du_d((v4i64)_1, (v4u64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_d_wu_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w((v4i64)_1, (v8u32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_w_hu_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h((v8i32)_1, (v16u16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwev_h_bu_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b((v16i16)_1, (v32u8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_q_du_d(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_q_du_d((v4i64)_1, (v4u64)_2, (v4i64)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, UV8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_d_wu_w(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w((v4i64)_1, (v8u32)_2, (v8i32)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, UV16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_w_hu_h(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h((v8i32)_1, (v16u16)_2, (v16i16)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, UV32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmaddwod_h_bu_b(__m256i _1, __m256i _2, __m256i _3) ++{ ++ return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b((v16i16)_1, (v32u8)_2, (v32i8)_3); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_b(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_b((v32i8)_1, (v32i8)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_h(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_h((v16i16)_1, (v16i16)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_w(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_w((v8i32)_1, (v8i32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvrotr_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvrotr_d((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvadd_q(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvadd_q((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI. 
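The widening multiply-accumulate intrinsics above (xvmaddwev_*/xvmaddwod_*) pair naturally: issuing both accumulates the products of every lane. A sketch of an int16 dot product built only from intrinsics declared in this header (illustrative; assumes n is a multiple of 16 and that the 32-bit accumulator lanes do not overflow):

    #include <lasxintrin.h>

    long long dot_i16 (const short *a, const short *b, int n)
    {
      __m256i acc = __lasx_xvldi (0);                /* zero accumulator   */
      for (int i = 0; i < n; i += 16)
        {
          __m256i va = __lasx_xvld (a + i, 0);
          __m256i vb = __lasx_xvld (b + i, 0);
          acc = __lasx_xvmaddwev_w_h (acc, va, vb);  /* even-lane products */
          acc = __lasx_xvmaddwod_w_h (acc, va, vb);  /* odd-lane products  */
        }
      int tmp[8] __attribute__ ((aligned (32)));
      __lasx_xvst (acc, tmp, 0);                     /* spill the 8 lanes  */
      long long s = 0;
      for (int j = 0; j < 8; j++)
        s += tmp[j];                                 /* horizontal sum     */
      return s;
    }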
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvsub_q(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvsub_q((v4i64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwev_q_du_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwev_q_du_d((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvaddwod_q_du_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvaddwod_q_du_d((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwev_q_du_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwev_q_du_d((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, UV4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmulwod_q_du_d(__m256i _1, __m256i _2) ++{ ++ return (__m256i)__builtin_lasx_xvmulwod_q_du_d((v4u64)_1, (v4i64)_2); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmskgez_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmskgez_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V32QI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvmsknz_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvmsknz_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V16HI, V32QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_h_b(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_h_b((v32i8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V8SI, V16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_w_h(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_w_h((v16i16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_d_w(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_d_w((v8i32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_q_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_q_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV16HI, UV32QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_hu_bu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_hu_bu((v32u8)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV8SI, UV16HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_wu_hu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_wu_hu((v16u16)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV8SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_du_wu(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_du_wu((v8u32)_1); ++} ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: UV4DI, UV4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvexth_qu_du(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvexth_qu_du((v4u64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V32QI, V32QI, UQI. */ ++#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) ((__m256i)__builtin_lasx_xvrotri_b((v32i8)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V16HI, V16HI, UQI. */ ++#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) ((__m256i)__builtin_lasx_xvrotri_h((v16i16)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V8SI, V8SI, UQI. */ ++#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) ((__m256i)__builtin_lasx_xvrotri_w((v8i32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V4DI, V4DI, UQI. */ ++#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) ((__m256i)__builtin_lasx_xvrotri_d((v4i64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj. */ ++/* Data types in instruction templates: V4DI, V4DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvextl_q_d(__m256i _1) ++{ ++ return (__m256i)__builtin_lasx_xvextl_q_d((v4i64)_1); ++} ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrlni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrlni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrlni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrlni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. 
*/ ++#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrlrni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlni_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlni_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlni_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlni_du_q((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. 
*/ ++#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrlrni_du_q((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrani_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrani_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrani_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrani_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvsrarni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvsrarni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. 
*/ ++#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvsrarni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvsrarni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrani_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrani_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrani_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. */ ++#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrani_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrani_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrani_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrani_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrani_du_q((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: V32QI, V32QI, V32QI, USI. */ ++#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrarni_b_h((v32i8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: V16HI, V16HI, V16HI, USI. */ ++#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrarni_h_w((v16i16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: V8SI, V8SI, V8SI, USI. */ ++#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrarni_w_d((v8i32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: V4DI, V4DI, V4DI, USI. 
*/ ++#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrarni_d_q((v4i64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui4. */ ++/* Data types in instruction templates: UV32QI, UV32QI, V32QI, USI. */ ++#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) ((__m256i)__builtin_lasx_xvssrarni_bu_h((v32u8)(_1), (v32i8)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui5. */ ++/* Data types in instruction templates: UV16HI, UV16HI, V16HI, USI. */ ++#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) ((__m256i)__builtin_lasx_xvssrarni_hu_w((v16u16)(_1), (v16i16)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui6. */ ++/* Data types in instruction templates: UV8SI, UV8SI, V8SI, USI. */ ++#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) ((__m256i)__builtin_lasx_xvssrarni_wu_d((v8u32)(_1), (v8i32)(_2), (_3))) ++ ++/* Assembly instruction format: xd, xj, ui7. */ ++/* Data types in instruction templates: UV4DI, UV4DI, V4DI, USI. */ ++#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) ((__m256i)__builtin_lasx_xvssrarni_du_q((v4u64)(_1), (v4i64)(_2), (_3))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbnz_b(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_b((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV4DI. */ ++#define __lasx_xbnz_d(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_d((v4u64)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV16HI. */ ++#define __lasx_xbnz_h(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_h((v16u16)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbnz_v(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_v((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV8SI. */ ++#define __lasx_xbnz_w(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_w((v8u32)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbz_b(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_b((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV4DI. */ ++#define __lasx_xbz_d(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_d((v4u64)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV16HI. */ ++#define __lasx_xbz_h(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_h((v16u16)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV32QI. */ ++#define __lasx_xbz_v(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_v((v32u8)(_1))) ++ ++/* Assembly instruction format: cd, xj. */ ++/* Data types in instruction templates: SI, UV8SI. */ ++#define __lasx_xbz_w(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_w((v8u32)(_1))) ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_caf_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_caf_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_caf_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_caf_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_ceq_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_ceq_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_ceq_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_ceq_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cle_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cle_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cle_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cle_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_clt_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_clt_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_clt_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_clt_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cne_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cne_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cne_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cne_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cor_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cor_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cor_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cor_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cueq_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cueq_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cueq_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cueq_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cule_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cule_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cule_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cule_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cult_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cult_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cult_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cult_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cun_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cun_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cune_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cune_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cune_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cune_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_cun_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_cun_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_saf_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_saf_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_saf_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_saf_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_seq_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_seq_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_seq_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_seq_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sle_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sle_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sle_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sle_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_slt_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_slt_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_slt_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_slt_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sne_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sne_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sne_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sne_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sor_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sor_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sor_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sor_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sueq_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sueq_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sueq_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sueq_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sule_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sule_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sule_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sule_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sult_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sult_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sult_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sult_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sun_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sun_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V4DI, V4DF, V4DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sune_d(__m256d _1, __m256d _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sune_d((v4f64)_1, (v4f64)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sune_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sune_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, xk. */ ++/* Data types in instruction templates: V8SI, V8SF, V8SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m256i __lasx_xvfcmp_sun_s(__m256 _1, __m256 _2) ++{ ++ return (__m256i)__builtin_lasx_xvfcmp_sun_s((v8f32)_1, (v8f32)_2); ++} ++ ++/* Assembly instruction format: xd, xj, ui2. */ ++/* Data types in instruction templates: V4DF, V4DF, UQI. */ ++#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) ((__m256d)__builtin_lasx_xvpickve_d_f((v4f64)(_1), (_2))) ++ ++/* Assembly instruction format: xd, xj, ui3. */ ++/* Data types in instruction templates: V8SF, V8SF, UQI. */ ++#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) ((__m256)__builtin_lasx_xvpickve_w_f((v8f32)(_1), (_2))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V32QI, HI. */ ++#define __lasx_xvrepli_b(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_b((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V4DI, HI. */ ++#define __lasx_xvrepli_d(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_d((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V16HI, HI. */ ++#define __lasx_xvrepli_h(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_h((_1))) ++ ++/* Assembly instruction format: xd, si10. */ ++/* Data types in instruction templates: V8SI, HI. */ ++#define __lasx_xvrepli_w(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_w((_1))) ++ ++#endif /* defined(__loongarch_asx). */ ++#endif /* _GCC_LOONGSON_ASXINTRIN_H. */ +diff --git a/gcc/config/loongarch/linux-common.h b/gcc/config/loongarch/linux-common.h +new file mode 100644 +index 000000000..9e1a1b50f +--- /dev/null ++++ b/gcc/config/loongarch/linux-common.h +@@ -0,0 +1,68 @@ ++/* Definitions for LARCH running Linux-based GNU systems with ELF format. ++ Copyright (C) 2012-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#undef TARGET_OS_CPP_BUILTINS ++#define TARGET_OS_CPP_BUILTINS() \ ++ do { \ ++ GNU_USER_TARGET_OS_CPP_BUILTINS(); \ ++ /* The GNU C++ standard library requires this. 
*/ \ ++ if (c_dialect_cxx ()) \ ++ builtin_define ("_GNU_SOURCE"); \ ++ ANDROID_TARGET_OS_CPP_BUILTINS(); \ ++ } while (0) ++ ++#define EXTRA_TARGET_D_OS_VERSIONS() \ ++ ANDROID_TARGET_D_OS_VERSIONS(); ++ ++#undef LINK_SPEC ++#define LINK_SPEC \ ++ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LINK_SPEC, \ ++ GNU_USER_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) ++ ++#undef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC \ ++ LINUX_OR_ANDROID_CC (GNU_USER_TARGET_CC1_SPEC, \ ++ GNU_USER_TARGET_CC1_SPEC " " ANDROID_CC1_SPEC) ++ ++#undef CC1PLUS_SPEC ++#define CC1PLUS_SPEC \ ++ LINUX_OR_ANDROID_CC ("", ANDROID_CC1PLUS_SPEC) ++ ++#undef LIB_SPEC ++#define LIB_SPEC \ ++ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LIB_SPEC, \ ++ GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC " " ANDROID_LIB_SPEC) ++ ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC \ ++ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_STARTFILE_SPEC, ANDROID_STARTFILE_SPEC) ++ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC \ ++ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_MATHFILE_SPEC " " \ ++ GNU_USER_TARGET_ENDFILE_SPEC, \ ++ GNU_USER_TARGET_MATHFILE_SPEC " " \ ++ ANDROID_ENDFILE_SPEC) ++ ++/* Define this to be nonzero if static stack checking is supported. */ ++#define STACK_CHECK_STATIC_BUILTIN 1 ++ ++/* FIXME*/ ++/* The default value isn't sufficient in 64-bit mode. */ ++#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) +diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h +new file mode 100644 +index 000000000..520a8ef32 +--- /dev/null ++++ b/gcc/config/loongarch/linux.h +@@ -0,0 +1,33 @@ ++/* Definitions for LARCH running Linux-based GNU systems with ELF format. ++ Copyright (C) 1998-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#define GNU_USER_LINK_EMULATION32 "elf32loongarch" ++#define GNU_USER_LINK_EMULATION64 "elf64loongarch" ++ ++#define GLIBC_DYNAMIC_LINKERLP32 \ ++ "/lib32/ld.so.1" ++#define GLIBC_DYNAMIC_LINKERLP64 \ ++ "/lib64/ld.so.1" ++ ++#define GNU_USER_DYNAMIC_LINKERLP32 GLIBC_DYNAMIC_LINKERLP32 ++#define GNU_USER_DYNAMIC_LINKERLP64 GLIBC_DYNAMIC_LINKERLP64 ++ ++ ++#undef TARGET_ASM_FILE_END ++#define TARGET_ASM_FILE_END file_end_indicate_exec_stack +diff --git a/gcc/config/loongarch/loongarch-builtins.c b/gcc/config/loongarch/loongarch-builtins.c +new file mode 100644 +index 000000000..9fa68b11f +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-builtins.c +@@ -0,0 +1,3152 @@ ++ ++/* Subroutines used for expanding LOONGARCH builtins. ++ Copyright (C) 2011-2018 Free Software Foundation, Inc. ++ Contributed by Andrew Waterman (andrew@sifive.com). ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. 
++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "backend.h" ++#include "target.h" ++#include "rtl.h" ++#include "tree.h" ++#include "memmodel.h" ++#include "gimple.h" ++#include "cfghooks.h" ++#include "df.h" ++#include "tm_p.h" ++#include "stringpool.h" ++#include "attribs.h" ++#include "optabs.h" ++#include "regs.h" ++#include "emit-rtl.h" ++#include "recog.h" ++#include "cgraph.h" ++#include "diagnostic.h" ++#include "insn-attr.h" ++#include "output.h" ++#include "alias.h" ++#include "fold-const.h" ++#include "varasm.h" ++#include "stor-layout.h" ++#include "calls.h" ++#include "explow.h" ++#include "expr.h" ++#include "libfuncs.h" ++#include "reload.h" ++#include "common/common-target.h" ++#include "langhooks.h" ++#include "cfgrtl.h" ++#include "cfganal.h" ++#include "sched-int.h" ++#include "gimplify.h" ++#include "target-globals.h" ++#include "tree-pass.h" ++#include "context.h" ++#include "builtins.h" ++#include "rtl-iter.h" ++ ++/* This file should be included last. */ ++#include "target-def.h" ++/* Macros to create an enumeration identifier for a function prototype. */ ++#define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B ++#define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C ++#define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D ++#define LARCH_FTYPE_NAME4(A, B, C, D, E) LARCH_##A##_FTYPE_##B##_##C##_##D##_##E ++ ++/* Classifies the prototype of a built-in function. */ ++enum loongarch_function_type { ++#define DEF_LARCH_FTYPE(NARGS, LIST) LARCH_FTYPE_NAME##NARGS LIST, ++#include "config/loongarch/loongarch-ftypes.def" ++#undef DEF_LARCH_FTYPE ++ LARCH_MAX_FTYPE_MAX ++}; ++ ++/* Specifies how a built-in function should be converted into rtl. */ ++enum loongarch_builtin_type { ++ /* The function corresponds directly to an .md pattern. The return ++ value is mapped to operand 0 and the arguments are mapped to ++ operands 1 and above. */ ++ LARCH_BUILTIN_DIRECT, ++ ++ /* The function corresponds directly to an .md pattern. There is no return ++ value and the arguments are mapped to operands 0 and above. */ ++ LARCH_BUILTIN_DIRECT_NO_TARGET, ++ ++ /* The function corresponds to an LSX conditional branch instruction ++ combined with a compare instruction. */ ++ LARCH_BUILTIN_LSX_TEST_BRANCH, ++ ++ /* For generating LoongArch LSX. */ ++ LARCH_BUILTIN_LSX, ++ ++ /* For generating LoongArch LASX. */ ++ LARCH_BUILTIN_LASX, ++ ++ /* The function corresponds to an LASX conditional branch instruction ++ combined with a compare instruction. */ ++ LARCH_BUILTIN_LASX_TEST_BRANCH, ++ ++}; ++ ++/* Invoke MACRO (COND) for each C.cond.fmt condition. */ ++#define LARCH_FP_CONDITIONS(MACRO) \ ++ MACRO (f), \ ++ MACRO (un), \ ++ MACRO (eq), \ ++ MACRO (ueq), \ ++ MACRO (olt), \ ++ MACRO (ult), \ ++ MACRO (ole), \ ++ MACRO (ule), \ ++ MACRO (sf), \ ++ MACRO (ngle), \ ++ MACRO (seq), \ ++ MACRO (ngl), \ ++ MACRO (lt), \ ++ MACRO (nge), \ ++ MACRO (le), \ ++ MACRO (ngt) ++ ++/* Enumerates the codes above as LARCH_FP_COND_. 
*/ ++#define DECLARE_LARCH_COND(X) LARCH_FP_COND_ ## X ++enum loongarch_fp_condition { ++ LARCH_FP_CONDITIONS (DECLARE_LARCH_COND) ++}; ++#undef DECLARE_LARCH_COND ++ ++/* Index X provides the string representation of LARCH_FP_COND_. */ ++#define STRINGIFY(X) #X ++const char *const loongarch_fp_conditions[16] = { ++ LARCH_FP_CONDITIONS (STRINGIFY) ++}; ++#undef STRINGIFY ++/* Declare an availability predicate for built-in functions that require ++ * COND to be true. NAME is the main part of the predicate's name. */ ++#define AVAIL_ALL(NAME, COND) \ ++ static unsigned int \ ++ loongarch_builtin_avail_##NAME (void) \ ++ { \ ++ return (COND) ? 1 : 0; \ ++ } ++ ++static unsigned int ++loongarch_builtin_avail_default (void) ++{ ++ return 1; ++} ++/* This structure describes a single built-in function. */ ++struct loongarch_builtin_description { ++ /* The code of the main .md file instruction. See loongarch_builtin_type ++ for more information. */ ++ enum insn_code icode; ++ ++ /* The floating-point comparison code to use with ICODE, if any. */ ++ enum loongarch_fp_condition cond; ++ ++ /* The name of the built-in function. */ ++ const char *name; ++ ++ /* Specifies how the function should be expanded. */ ++ enum loongarch_builtin_type builtin_type; ++ ++ /* The function's prototype. */ ++ enum loongarch_function_type function_type; ++ ++ /* Whether the function is available. */ ++ unsigned int (*avail) (void); ++}; ++ ++AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) ++AVAIL_ALL (lsx, TARGET_LSX) ++AVAIL_ALL (lasx, TARGET_LASX) ++ ++/* Construct a loongarch_builtin_description from the given arguments. ++ ++ INSN is the name of the associated instruction pattern, without the ++ leading CODE_FOR_loongarch_. ++ ++ CODE is the floating-point condition code associated with the ++ function. It can be 'f' if the field is not applicable. ++ ++ NAME is the name of the function itself, without the leading ++ "__builtin_loongarch_". ++ ++ BUILTIN_TYPE and FUNCTION_TYPE are loongarch_builtin_description fields. ++ ++ AVAIL is the name of the availability predicate, without the leading ++ loongarch_builtin_avail_. */ ++#define LARCH_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \ ++ FUNCTION_TYPE, AVAIL) \ ++ { CODE_FOR_loongarch_ ## INSN, LARCH_FP_COND_ ## COND, \ ++ "__builtin_loongarch_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \ ++ loongarch_builtin_avail_ ## AVAIL } ++ ++/* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT function ++ mapped to instruction CODE_FOR_loongarch_, FUNCTION_TYPE and AVAIL ++ are as for LARCH_BUILTIN. */ ++#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ ++ LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) ++ ++/* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT_NO_TARGET ++ function mapped to instruction CODE_FOR_loongarch_, FUNCTION_TYPE ++ and AVAIL are as for LARCH_BUILTIN. */ ++#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ ++ LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, AVAIL) ++ ++/* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++ ++/* Define an LSX LARCH_BUILTIN_LSX_TEST_BRANCH function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. 
*/ ++#define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++/* Define an LSX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lsx_ ++ for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lsx } ++ ++/* Define an LASX LARCH_BUILTIN_DIRECT function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* Define an LASX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* Define an LASX LARCH_BUILTIN_LASX_TEST_BRANCH function __builtin_lasx_ ++ for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description ++ field. */ ++#define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ ++ { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ ++ "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH, \ ++ FUNCTION_TYPE, loongarch_builtin_avail_lasx } ++ ++/* LoongArch BASE instructions define CODE_FOR_loongarch_xxx */ ++#define CODE_FOR_loongarch_fmax_sf CODE_FOR_smaxsf3 ++#define CODE_FOR_loongarch_fmax_df CODE_FOR_smaxdf3 ++#define CODE_FOR_loongarch_fmin_sf CODE_FOR_sminsf3 ++#define CODE_FOR_loongarch_fmin_df CODE_FOR_smindf3 ++#define CODE_FOR_loongarch_fmaxa_sf CODE_FOR_smaxasf3 ++#define CODE_FOR_loongarch_fmaxa_df CODE_FOR_smaxadf3 ++#define CODE_FOR_loongarch_fmina_sf CODE_FOR_sminasf3 ++#define CODE_FOR_loongarch_fmina_df CODE_FOR_sminadf3 ++#define CODE_FOR_loongarch_fclass_s CODE_FOR_fclass_s ++#define CODE_FOR_loongarch_fclass_d CODE_FOR_fclass_d ++#define CODE_FOR_loongarch_frint_s CODE_FOR_frint_s ++#define CODE_FOR_loongarch_frint_d CODE_FOR_frint_d ++#define CODE_FOR_loongarch_bytepick_w CODE_FOR_bytepick_w ++#define CODE_FOR_loongarch_bytepick_d CODE_FOR_bytepick_d ++#define CODE_FOR_loongarch_bitrev_4b CODE_FOR_bitrev_4b ++#define CODE_FOR_loongarch_bitrev_8b CODE_FOR_bitrev_8b ++ ++/* LoongArch support crc */ ++#define CODE_FOR_loongarch_crc_w_b_w CODE_FOR_crc_w_b_w ++#define CODE_FOR_loongarch_crc_w_h_w CODE_FOR_crc_w_h_w ++#define CODE_FOR_loongarch_crc_w_w_w CODE_FOR_crc_w_w_w ++#define CODE_FOR_loongarch_crc_w_d_w CODE_FOR_crc_w_d_w ++#define CODE_FOR_loongarch_crcc_w_b_w CODE_FOR_crcc_w_b_w ++#define CODE_FOR_loongarch_crcc_w_h_w CODE_FOR_crcc_w_h_w ++#define CODE_FOR_loongarch_crcc_w_w_w CODE_FOR_crcc_w_w_w ++#define CODE_FOR_loongarch_crcc_w_d_w CODE_FOR_crcc_w_d_w ++ ++/* Privileged state instruction */ ++#define CODE_FOR_loongarch_cpucfg CODE_FOR_cpucfg ++#define CODE_FOR_loongarch_asrtle_d CODE_FOR_asrtle_d ++#define CODE_FOR_loongarch_asrtgt_d CODE_FOR_asrtgt_d ++#define CODE_FOR_loongarch_csrrd CODE_FOR_csrrd ++#define CODE_FOR_loongarch_dcsrrd CODE_FOR_dcsrrd ++#define CODE_FOR_loongarch_csrwr CODE_FOR_csrwr ++#define 
CODE_FOR_loongarch_dcsrwr CODE_FOR_dcsrwr ++#define CODE_FOR_loongarch_csrxchg CODE_FOR_csrxchg ++#define CODE_FOR_loongarch_dcsrxchg CODE_FOR_dcsrxchg ++#define CODE_FOR_loongarch_iocsrrd_b CODE_FOR_iocsrrd_b ++#define CODE_FOR_loongarch_iocsrrd_h CODE_FOR_iocsrrd_h ++#define CODE_FOR_loongarch_iocsrrd_w CODE_FOR_iocsrrd_w ++#define CODE_FOR_loongarch_iocsrrd_d CODE_FOR_iocsrrd_d ++#define CODE_FOR_loongarch_iocsrwr_b CODE_FOR_iocsrwr_b ++#define CODE_FOR_loongarch_iocsrwr_h CODE_FOR_iocsrwr_h ++#define CODE_FOR_loongarch_iocsrwr_w CODE_FOR_iocsrwr_w ++#define CODE_FOR_loongarch_iocsrwr_d CODE_FOR_iocsrwr_d ++#define CODE_FOR_loongarch_lddir CODE_FOR_lddir ++#define CODE_FOR_loongarch_dlddir CODE_FOR_dlddir ++#define CODE_FOR_loongarch_ldpte CODE_FOR_ldpte ++#define CODE_FOR_loongarch_dldpte CODE_FOR_dldpte ++#define CODE_FOR_loongarch_cacop CODE_FOR_cacop ++#define CODE_FOR_loongarch_dcacop CODE_FOR_dcacop ++#define CODE_FOR_loongarch_dbar CODE_FOR_dbar ++#define CODE_FOR_loongarch_ibar CODE_FOR_ibar ++ ++/* LoongArch SX define CODE_FOR_lsx_xxx */ ++#define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 ++#define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 ++#define CODE_FOR_lsx_vsadd_w CODE_FOR_ssaddv4si3 ++#define CODE_FOR_lsx_vsadd_d CODE_FOR_ssaddv2di3 ++#define CODE_FOR_lsx_vsadd_bu CODE_FOR_usaddv16qi3 ++#define CODE_FOR_lsx_vsadd_hu CODE_FOR_usaddv8hi3 ++#define CODE_FOR_lsx_vsadd_wu CODE_FOR_usaddv4si3 ++#define CODE_FOR_lsx_vsadd_du CODE_FOR_usaddv2di3 ++#define CODE_FOR_lsx_vadd_b CODE_FOR_addv16qi3 ++#define CODE_FOR_lsx_vadd_h CODE_FOR_addv8hi3 ++#define CODE_FOR_lsx_vadd_w CODE_FOR_addv4si3 ++#define CODE_FOR_lsx_vadd_d CODE_FOR_addv2di3 ++#define CODE_FOR_lsx_vaddi_bu CODE_FOR_addv16qi3 ++#define CODE_FOR_lsx_vaddi_hu CODE_FOR_addv8hi3 ++#define CODE_FOR_lsx_vaddi_wu CODE_FOR_addv4si3 ++#define CODE_FOR_lsx_vaddi_du CODE_FOR_addv2di3 ++#define CODE_FOR_lsx_vand_v CODE_FOR_andv16qi3 ++#define CODE_FOR_lsx_vandi_b CODE_FOR_andv16qi3 ++#define CODE_FOR_lsx_bnz_v CODE_FOR_lsx_bnz_v_b ++#define CODE_FOR_lsx_bz_v CODE_FOR_lsx_bz_v_b ++#define CODE_FOR_lsx_vbitsel_v CODE_FOR_lsx_vbitsel_b ++#define CODE_FOR_lsx_vseqi_b CODE_FOR_lsx_vseq_b ++#define CODE_FOR_lsx_vseqi_h CODE_FOR_lsx_vseq_h ++#define CODE_FOR_lsx_vseqi_w CODE_FOR_lsx_vseq_w ++#define CODE_FOR_lsx_vseqi_d CODE_FOR_lsx_vseq_d ++#define CODE_FOR_lsx_vslti_b CODE_FOR_lsx_vslt_b ++#define CODE_FOR_lsx_vslti_h CODE_FOR_lsx_vslt_h ++#define CODE_FOR_lsx_vslti_w CODE_FOR_lsx_vslt_w ++#define CODE_FOR_lsx_vslti_d CODE_FOR_lsx_vslt_d ++#define CODE_FOR_lsx_vslti_bu CODE_FOR_lsx_vslt_bu ++#define CODE_FOR_lsx_vslti_hu CODE_FOR_lsx_vslt_hu ++#define CODE_FOR_lsx_vslti_wu CODE_FOR_lsx_vslt_wu ++#define CODE_FOR_lsx_vslti_du CODE_FOR_lsx_vslt_du ++#define CODE_FOR_lsx_vslei_b CODE_FOR_lsx_vsle_b ++#define CODE_FOR_lsx_vslei_h CODE_FOR_lsx_vsle_h ++#define CODE_FOR_lsx_vslei_w CODE_FOR_lsx_vsle_w ++#define CODE_FOR_lsx_vslei_d CODE_FOR_lsx_vsle_d ++#define CODE_FOR_lsx_vslei_bu CODE_FOR_lsx_vsle_bu ++#define CODE_FOR_lsx_vslei_hu CODE_FOR_lsx_vsle_hu ++#define CODE_FOR_lsx_vslei_wu CODE_FOR_lsx_vsle_wu ++#define CODE_FOR_lsx_vslei_du CODE_FOR_lsx_vsle_du ++#define CODE_FOR_lsx_vdiv_b CODE_FOR_divv16qi3 ++#define CODE_FOR_lsx_vdiv_h CODE_FOR_divv8hi3 ++#define CODE_FOR_lsx_vdiv_w CODE_FOR_divv4si3 ++#define CODE_FOR_lsx_vdiv_d CODE_FOR_divv2di3 ++#define CODE_FOR_lsx_vdiv_bu CODE_FOR_udivv16qi3 ++#define CODE_FOR_lsx_vdiv_hu CODE_FOR_udivv8hi3 ++#define CODE_FOR_lsx_vdiv_wu CODE_FOR_udivv4si3 ++#define CODE_FOR_lsx_vdiv_du 
CODE_FOR_udivv2di3 ++#define CODE_FOR_lsx_vfadd_s CODE_FOR_addv4sf3 ++#define CODE_FOR_lsx_vfadd_d CODE_FOR_addv2df3 ++#define CODE_FOR_lsx_vftintrz_w_s CODE_FOR_fix_truncv4sfv4si2 ++#define CODE_FOR_lsx_vftintrz_l_d CODE_FOR_fix_truncv2dfv2di2 ++#define CODE_FOR_lsx_vftintrz_wu_s CODE_FOR_fixuns_truncv4sfv4si2 ++#define CODE_FOR_lsx_vftintrz_lu_d CODE_FOR_fixuns_truncv2dfv2di2 ++#define CODE_FOR_lsx_vffint_s_w CODE_FOR_floatv4siv4sf2 ++#define CODE_FOR_lsx_vffint_d_l CODE_FOR_floatv2div2df2 ++#define CODE_FOR_lsx_vffint_s_wu CODE_FOR_floatunsv4siv4sf2 ++#define CODE_FOR_lsx_vffint_d_lu CODE_FOR_floatunsv2div2df2 ++#define CODE_FOR_lsx_vfsub_s CODE_FOR_subv4sf3 ++#define CODE_FOR_lsx_vfsub_d CODE_FOR_subv2df3 ++#define CODE_FOR_lsx_vfmul_s CODE_FOR_mulv4sf3 ++#define CODE_FOR_lsx_vfmul_d CODE_FOR_mulv2df3 ++#define CODE_FOR_lsx_vfdiv_s CODE_FOR_divv4sf3 ++#define CODE_FOR_lsx_vfdiv_d CODE_FOR_divv2df3 ++#define CODE_FOR_lsx_vfmax_s CODE_FOR_smaxv4sf3 ++#define CODE_FOR_lsx_vfmax_d CODE_FOR_smaxv2df3 ++#define CODE_FOR_lsx_vfmin_s CODE_FOR_sminv4sf3 ++#define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3 ++#define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2 ++#define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2 ++#define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3 ++#define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3 ++#define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3 ++#define CODE_FOR_lsx_vmax_d CODE_FOR_smaxv2di3 ++#define CODE_FOR_lsx_vmaxi_b CODE_FOR_smaxv16qi3 ++#define CODE_FOR_lsx_vmaxi_h CODE_FOR_smaxv8hi3 ++#define CODE_FOR_lsx_vmaxi_w CODE_FOR_smaxv4si3 ++#define CODE_FOR_lsx_vmaxi_d CODE_FOR_smaxv2di3 ++#define CODE_FOR_lsx_vmax_bu CODE_FOR_umaxv16qi3 ++#define CODE_FOR_lsx_vmax_hu CODE_FOR_umaxv8hi3 ++#define CODE_FOR_lsx_vmax_wu CODE_FOR_umaxv4si3 ++#define CODE_FOR_lsx_vmax_du CODE_FOR_umaxv2di3 ++#define CODE_FOR_lsx_vmaxi_bu CODE_FOR_umaxv16qi3 ++#define CODE_FOR_lsx_vmaxi_hu CODE_FOR_umaxv8hi3 ++#define CODE_FOR_lsx_vmaxi_wu CODE_FOR_umaxv4si3 ++#define CODE_FOR_lsx_vmaxi_du CODE_FOR_umaxv2di3 ++#define CODE_FOR_lsx_vmin_b CODE_FOR_sminv16qi3 ++#define CODE_FOR_lsx_vmin_h CODE_FOR_sminv8hi3 ++#define CODE_FOR_lsx_vmin_w CODE_FOR_sminv4si3 ++#define CODE_FOR_lsx_vmin_d CODE_FOR_sminv2di3 ++#define CODE_FOR_lsx_vmini_b CODE_FOR_sminv16qi3 ++#define CODE_FOR_lsx_vmini_h CODE_FOR_sminv8hi3 ++#define CODE_FOR_lsx_vmini_w CODE_FOR_sminv4si3 ++#define CODE_FOR_lsx_vmini_d CODE_FOR_sminv2di3 ++#define CODE_FOR_lsx_vmin_bu CODE_FOR_uminv16qi3 ++#define CODE_FOR_lsx_vmin_hu CODE_FOR_uminv8hi3 ++#define CODE_FOR_lsx_vmin_wu CODE_FOR_uminv4si3 ++#define CODE_FOR_lsx_vmin_du CODE_FOR_uminv2di3 ++#define CODE_FOR_lsx_vmini_bu CODE_FOR_uminv16qi3 ++#define CODE_FOR_lsx_vmini_hu CODE_FOR_uminv8hi3 ++#define CODE_FOR_lsx_vmini_wu CODE_FOR_uminv4si3 ++#define CODE_FOR_lsx_vmini_du CODE_FOR_uminv2di3 ++#define CODE_FOR_lsx_vmod_b CODE_FOR_modv16qi3 ++#define CODE_FOR_lsx_vmod_h CODE_FOR_modv8hi3 ++#define CODE_FOR_lsx_vmod_w CODE_FOR_modv4si3 ++#define CODE_FOR_lsx_vmod_d CODE_FOR_modv2di3 ++#define CODE_FOR_lsx_vmod_bu CODE_FOR_umodv16qi3 ++#define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3 ++#define CODE_FOR_lsx_vmod_wu CODE_FOR_umodv4si3 ++#define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3 ++#define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3 ++#define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3 ++#define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3 ++#define CODE_FOR_lsx_vmul_d CODE_FOR_mulv2di3 ++#define CODE_FOR_lsx_vclz_b CODE_FOR_clzv16qi2 ++#define CODE_FOR_lsx_vclz_h CODE_FOR_clzv8hi2 ++#define CODE_FOR_lsx_vclz_w 
CODE_FOR_clzv4si2 ++#define CODE_FOR_lsx_vclz_d CODE_FOR_clzv2di2 ++#define CODE_FOR_lsx_vnor_v CODE_FOR_lsx_nor_b ++#define CODE_FOR_lsx_vor_v CODE_FOR_iorv16qi3 ++#define CODE_FOR_lsx_vori_b CODE_FOR_iorv16qi3 ++#define CODE_FOR_lsx_vnori_b CODE_FOR_lsx_nor_b ++#define CODE_FOR_lsx_vpcnt_b CODE_FOR_popcountv16qi2 ++#define CODE_FOR_lsx_vpcnt_h CODE_FOR_popcountv8hi2 ++#define CODE_FOR_lsx_vpcnt_w CODE_FOR_popcountv4si2 ++#define CODE_FOR_lsx_vpcnt_d CODE_FOR_popcountv2di2 ++#define CODE_FOR_lsx_vxor_v CODE_FOR_xorv16qi3 ++#define CODE_FOR_lsx_vxori_b CODE_FOR_xorv16qi3 ++#define CODE_FOR_lsx_vsll_b CODE_FOR_vashlv16qi3 ++#define CODE_FOR_lsx_vsll_h CODE_FOR_vashlv8hi3 ++#define CODE_FOR_lsx_vsll_w CODE_FOR_vashlv4si3 ++#define CODE_FOR_lsx_vsll_d CODE_FOR_vashlv2di3 ++#define CODE_FOR_lsx_vslli_b CODE_FOR_vashlv16qi3 ++#define CODE_FOR_lsx_vslli_h CODE_FOR_vashlv8hi3 ++#define CODE_FOR_lsx_vslli_w CODE_FOR_vashlv4si3 ++#define CODE_FOR_lsx_vslli_d CODE_FOR_vashlv2di3 ++#define CODE_FOR_lsx_vsra_b CODE_FOR_vashrv16qi3 ++#define CODE_FOR_lsx_vsra_h CODE_FOR_vashrv8hi3 ++#define CODE_FOR_lsx_vsra_w CODE_FOR_vashrv4si3 ++#define CODE_FOR_lsx_vsra_d CODE_FOR_vashrv2di3 ++#define CODE_FOR_lsx_vsrai_b CODE_FOR_vashrv16qi3 ++#define CODE_FOR_lsx_vsrai_h CODE_FOR_vashrv8hi3 ++#define CODE_FOR_lsx_vsrai_w CODE_FOR_vashrv4si3 ++#define CODE_FOR_lsx_vsrai_d CODE_FOR_vashrv2di3 ++#define CODE_FOR_lsx_vsrl_b CODE_FOR_vlshrv16qi3 ++#define CODE_FOR_lsx_vsrl_h CODE_FOR_vlshrv8hi3 ++#define CODE_FOR_lsx_vsrl_w CODE_FOR_vlshrv4si3 ++#define CODE_FOR_lsx_vsrl_d CODE_FOR_vlshrv2di3 ++#define CODE_FOR_lsx_vsrli_b CODE_FOR_vlshrv16qi3 ++#define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3 ++#define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3 ++#define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3 ++#define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3 ++#define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3 ++#define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3 ++#define CODE_FOR_lsx_vsub_d CODE_FOR_subv2di3 ++#define CODE_FOR_lsx_vsubi_bu CODE_FOR_subv16qi3 ++#define CODE_FOR_lsx_vsubi_hu CODE_FOR_subv8hi3 ++#define CODE_FOR_lsx_vsubi_wu CODE_FOR_subv4si3 ++#define CODE_FOR_lsx_vsubi_du CODE_FOR_subv2di3 ++ ++#define CODE_FOR_lsx_vpackod_d CODE_FOR_lsx_vilvh_d ++#define CODE_FOR_lsx_vpackev_d CODE_FOR_lsx_vilvl_d ++#define CODE_FOR_lsx_vpickod_d CODE_FOR_lsx_vilvh_d ++#define CODE_FOR_lsx_vpickev_d CODE_FOR_lsx_vilvl_d ++ ++#define CODE_FOR_lsx_vrepli_b CODE_FOR_lsx_vrepliv16qi ++#define CODE_FOR_lsx_vrepli_h CODE_FOR_lsx_vrepliv8hi ++#define CODE_FOR_lsx_vrepli_w CODE_FOR_lsx_vrepliv4si ++#define CODE_FOR_lsx_vrepli_d CODE_FOR_lsx_vrepliv2di ++#define CODE_FOR_lsx_vsat_b CODE_FOR_lsx_vsat_s_b ++#define CODE_FOR_lsx_vsat_h CODE_FOR_lsx_vsat_s_h ++#define CODE_FOR_lsx_vsat_w CODE_FOR_lsx_vsat_s_w ++#define CODE_FOR_lsx_vsat_d CODE_FOR_lsx_vsat_s_d ++#define CODE_FOR_lsx_vsat_bu CODE_FOR_lsx_vsat_u_bu ++#define CODE_FOR_lsx_vsat_hu CODE_FOR_lsx_vsat_u_hu ++#define CODE_FOR_lsx_vsat_wu CODE_FOR_lsx_vsat_u_wu ++#define CODE_FOR_lsx_vsat_du CODE_FOR_lsx_vsat_u_du ++#define CODE_FOR_lsx_vavg_b CODE_FOR_lsx_vavg_s_b ++#define CODE_FOR_lsx_vavg_h CODE_FOR_lsx_vavg_s_h ++#define CODE_FOR_lsx_vavg_w CODE_FOR_lsx_vavg_s_w ++#define CODE_FOR_lsx_vavg_d CODE_FOR_lsx_vavg_s_d ++#define CODE_FOR_lsx_vavg_bu CODE_FOR_lsx_vavg_u_bu ++#define CODE_FOR_lsx_vavg_hu CODE_FOR_lsx_vavg_u_hu ++#define CODE_FOR_lsx_vavg_wu CODE_FOR_lsx_vavg_u_wu ++#define CODE_FOR_lsx_vavg_du CODE_FOR_lsx_vavg_u_du ++#define CODE_FOR_lsx_vavgr_b CODE_FOR_lsx_vavgr_s_b ++#define 
CODE_FOR_lsx_vavgr_h CODE_FOR_lsx_vavgr_s_h ++#define CODE_FOR_lsx_vavgr_w CODE_FOR_lsx_vavgr_s_w ++#define CODE_FOR_lsx_vavgr_d CODE_FOR_lsx_vavgr_s_d ++#define CODE_FOR_lsx_vavgr_bu CODE_FOR_lsx_vavgr_u_bu ++#define CODE_FOR_lsx_vavgr_hu CODE_FOR_lsx_vavgr_u_hu ++#define CODE_FOR_lsx_vavgr_wu CODE_FOR_lsx_vavgr_u_wu ++#define CODE_FOR_lsx_vavgr_du CODE_FOR_lsx_vavgr_u_du ++#define CODE_FOR_lsx_vssub_b CODE_FOR_lsx_vssub_s_b ++#define CODE_FOR_lsx_vssub_h CODE_FOR_lsx_vssub_s_h ++#define CODE_FOR_lsx_vssub_w CODE_FOR_lsx_vssub_s_w ++#define CODE_FOR_lsx_vssub_d CODE_FOR_lsx_vssub_s_d ++#define CODE_FOR_lsx_vssub_bu CODE_FOR_lsx_vssub_u_bu ++#define CODE_FOR_lsx_vssub_hu CODE_FOR_lsx_vssub_u_hu ++#define CODE_FOR_lsx_vssub_wu CODE_FOR_lsx_vssub_u_wu ++#define CODE_FOR_lsx_vssub_du CODE_FOR_lsx_vssub_u_du ++#define CODE_FOR_lsx_vabsd_b CODE_FOR_lsx_vabsd_s_b ++#define CODE_FOR_lsx_vabsd_h CODE_FOR_lsx_vabsd_s_h ++#define CODE_FOR_lsx_vabsd_w CODE_FOR_lsx_vabsd_s_w ++#define CODE_FOR_lsx_vabsd_d CODE_FOR_lsx_vabsd_s_d ++#define CODE_FOR_lsx_vabsd_bu CODE_FOR_lsx_vabsd_u_bu ++#define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu ++#define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu ++#define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du ++#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s ++#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d ++#define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s ++#define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d ++#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3 ++#define CODE_FOR_lsx_vorn_v CODE_FOR_vornv16qi3 ++#define CODE_FOR_lsx_vneg_b CODE_FOR_vnegv16qi2 ++#define CODE_FOR_lsx_vneg_h CODE_FOR_vnegv8hi2 ++#define CODE_FOR_lsx_vneg_w CODE_FOR_vnegv4si2 ++#define CODE_FOR_lsx_vneg_d CODE_FOR_vnegv2di2 ++#define CODE_FOR_lsx_vshuf4i_d CODE_FOR_lsx_vshuf4i_d ++#define CODE_FOR_lsx_vbsrl_v CODE_FOR_lsx_vbsrl_b ++#define CODE_FOR_lsx_vbsll_v CODE_FOR_lsx_vbsll_b ++#define CODE_FOR_lsx_vfmadd_s CODE_FOR_vfmaddv4sf4 ++#define CODE_FOR_lsx_vfmadd_d CODE_FOR_vfmaddv2df4 ++#define CODE_FOR_lsx_vfmsub_s CODE_FOR_vfmsubv4sf4 ++#define CODE_FOR_lsx_vfmsub_d CODE_FOR_vfmsubv2df4 ++#define CODE_FOR_lsx_vfnmadd_s CODE_FOR_vfnmaddv4sf4_nmadd4 ++#define CODE_FOR_lsx_vfnmadd_d CODE_FOR_vfnmaddv2df4_nmadd4 ++#define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4 ++#define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4 ++ ++#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b ++#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h ++#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w ++#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d ++#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu ++#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu ++#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu ++#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du ++#define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b ++#define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h ++#define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w ++#define CODE_FOR_lsx_vsllwil_hu_bu CODE_FOR_lsx_vsllwil_u_hu_bu ++#define CODE_FOR_lsx_vsllwil_wu_hu CODE_FOR_lsx_vsllwil_u_wu_hu ++#define CODE_FOR_lsx_vsllwil_du_wu CODE_FOR_lsx_vsllwil_u_du_wu ++#define CODE_FOR_lsx_vssran_b_h CODE_FOR_lsx_vssran_s_b_h ++#define CODE_FOR_lsx_vssran_h_w CODE_FOR_lsx_vssran_s_h_w ++#define CODE_FOR_lsx_vssran_w_d CODE_FOR_lsx_vssran_s_w_d ++#define CODE_FOR_lsx_vssran_bu_h CODE_FOR_lsx_vssran_u_bu_h ++#define CODE_FOR_lsx_vssran_hu_w CODE_FOR_lsx_vssran_u_hu_w ++#define 
CODE_FOR_lsx_vssran_wu_d CODE_FOR_lsx_vssran_u_wu_d ++#define CODE_FOR_lsx_vssrarn_b_h CODE_FOR_lsx_vssrarn_s_b_h ++#define CODE_FOR_lsx_vssrarn_h_w CODE_FOR_lsx_vssrarn_s_h_w ++#define CODE_FOR_lsx_vssrarn_w_d CODE_FOR_lsx_vssrarn_s_w_d ++#define CODE_FOR_lsx_vssrarn_bu_h CODE_FOR_lsx_vssrarn_u_bu_h ++#define CODE_FOR_lsx_vssrarn_hu_w CODE_FOR_lsx_vssrarn_u_hu_w ++#define CODE_FOR_lsx_vssrarn_wu_d CODE_FOR_lsx_vssrarn_u_wu_d ++#define CODE_FOR_lsx_vssrln_bu_h CODE_FOR_lsx_vssrln_u_bu_h ++#define CODE_FOR_lsx_vssrln_hu_w CODE_FOR_lsx_vssrln_u_hu_w ++#define CODE_FOR_lsx_vssrln_wu_d CODE_FOR_lsx_vssrln_u_wu_d ++#define CODE_FOR_lsx_vssrlrn_bu_h CODE_FOR_lsx_vssrlrn_u_bu_h ++#define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w ++#define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d ++ ++/* LoongArch ASX define CODE_FOR_lasx_mxxx */ ++#define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3 ++#define CODE_FOR_lasx_xvsadd_h CODE_FOR_ssaddv16hi3 ++#define CODE_FOR_lasx_xvsadd_w CODE_FOR_ssaddv8si3 ++#define CODE_FOR_lasx_xvsadd_d CODE_FOR_ssaddv4di3 ++#define CODE_FOR_lasx_xvsadd_bu CODE_FOR_usaddv32qi3 ++#define CODE_FOR_lasx_xvsadd_hu CODE_FOR_usaddv16hi3 ++#define CODE_FOR_lasx_xvsadd_wu CODE_FOR_usaddv8si3 ++#define CODE_FOR_lasx_xvsadd_du CODE_FOR_usaddv4di3 ++#define CODE_FOR_lasx_xvadd_b CODE_FOR_addv32qi3 ++#define CODE_FOR_lasx_xvadd_h CODE_FOR_addv16hi3 ++#define CODE_FOR_lasx_xvadd_w CODE_FOR_addv8si3 ++#define CODE_FOR_lasx_xvadd_d CODE_FOR_addv4di3 ++#define CODE_FOR_lasx_xvaddi_bu CODE_FOR_addv32qi3 ++#define CODE_FOR_lasx_xvaddi_hu CODE_FOR_addv16hi3 ++#define CODE_FOR_lasx_xvaddi_wu CODE_FOR_addv8si3 ++#define CODE_FOR_lasx_xvaddi_du CODE_FOR_addv4di3 ++#define CODE_FOR_lasx_xvand_v CODE_FOR_andv32qi3 ++#define CODE_FOR_lasx_xvandi_b CODE_FOR_andv32qi3 ++#define CODE_FOR_lasx_xvbitsel_v CODE_FOR_lasx_xvbitsel_b ++#define CODE_FOR_lasx_xvseqi_b CODE_FOR_lasx_xvseq_b ++#define CODE_FOR_lasx_xvseqi_h CODE_FOR_lasx_xvseq_h ++#define CODE_FOR_lasx_xvseqi_w CODE_FOR_lasx_xvseq_w ++#define CODE_FOR_lasx_xvseqi_d CODE_FOR_lasx_xvseq_d ++#define CODE_FOR_lasx_xvslti_b CODE_FOR_lasx_xvslt_b ++#define CODE_FOR_lasx_xvslti_h CODE_FOR_lasx_xvslt_h ++#define CODE_FOR_lasx_xvslti_w CODE_FOR_lasx_xvslt_w ++#define CODE_FOR_lasx_xvslti_d CODE_FOR_lasx_xvslt_d ++#define CODE_FOR_lasx_xvslti_bu CODE_FOR_lasx_xvslt_bu ++#define CODE_FOR_lasx_xvslti_hu CODE_FOR_lasx_xvslt_hu ++#define CODE_FOR_lasx_xvslti_wu CODE_FOR_lasx_xvslt_wu ++#define CODE_FOR_lasx_xvslti_du CODE_FOR_lasx_xvslt_du ++#define CODE_FOR_lasx_xvslei_b CODE_FOR_lasx_xvsle_b ++#define CODE_FOR_lasx_xvslei_h CODE_FOR_lasx_xvsle_h ++#define CODE_FOR_lasx_xvslei_w CODE_FOR_lasx_xvsle_w ++#define CODE_FOR_lasx_xvslei_d CODE_FOR_lasx_xvsle_d ++#define CODE_FOR_lasx_xvslei_bu CODE_FOR_lasx_xvsle_bu ++#define CODE_FOR_lasx_xvslei_hu CODE_FOR_lasx_xvsle_hu ++#define CODE_FOR_lasx_xvslei_wu CODE_FOR_lasx_xvsle_wu ++#define CODE_FOR_lasx_xvslei_du CODE_FOR_lasx_xvsle_du ++#define CODE_FOR_lasx_xvdiv_b CODE_FOR_divv32qi3 ++#define CODE_FOR_lasx_xvdiv_h CODE_FOR_divv16hi3 ++#define CODE_FOR_lasx_xvdiv_w CODE_FOR_divv8si3 ++#define CODE_FOR_lasx_xvdiv_d CODE_FOR_divv4di3 ++#define CODE_FOR_lasx_xvdiv_bu CODE_FOR_udivv32qi3 ++#define CODE_FOR_lasx_xvdiv_hu CODE_FOR_udivv16hi3 ++#define CODE_FOR_lasx_xvdiv_wu CODE_FOR_udivv8si3 ++#define CODE_FOR_lasx_xvdiv_du CODE_FOR_udivv4di3 ++#define CODE_FOR_lasx_xvfadd_s CODE_FOR_addv8sf3 ++#define CODE_FOR_lasx_xvfadd_d CODE_FOR_addv4df3 ++#define CODE_FOR_lasx_xvftintrz_w_s 
CODE_FOR_fix_truncv8sfv8si2 ++#define CODE_FOR_lasx_xvftintrz_l_d CODE_FOR_fix_truncv4dfv4di2 ++#define CODE_FOR_lasx_xvftintrz_wu_s CODE_FOR_fixuns_truncv8sfv8si2 ++#define CODE_FOR_lasx_xvftintrz_lu_d CODE_FOR_fixuns_truncv4dfv4di2 ++#define CODE_FOR_lasx_xvffint_s_w CODE_FOR_floatv8siv8sf2 ++#define CODE_FOR_lasx_xvffint_d_l CODE_FOR_floatv4div4df2 ++#define CODE_FOR_lasx_xvffint_s_wu CODE_FOR_floatunsv8siv8sf2 ++#define CODE_FOR_lasx_xvffint_d_lu CODE_FOR_floatunsv4div4df2 ++#define CODE_FOR_lasx_xvfsub_s CODE_FOR_subv8sf3 ++#define CODE_FOR_lasx_xvfsub_d CODE_FOR_subv4df3 ++#define CODE_FOR_lasx_xvfmul_s CODE_FOR_mulv8sf3 ++#define CODE_FOR_lasx_xvfmul_d CODE_FOR_mulv4df3 ++#define CODE_FOR_lasx_xvfdiv_s CODE_FOR_divv8sf3 ++#define CODE_FOR_lasx_xvfdiv_d CODE_FOR_divv4df3 ++#define CODE_FOR_lasx_xvfmax_s CODE_FOR_smaxv8sf3 ++#define CODE_FOR_lasx_xvfmax_d CODE_FOR_smaxv4df3 ++#define CODE_FOR_lasx_xvfmin_s CODE_FOR_sminv8sf3 ++#define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3 ++#define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2 ++#define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2 ++#define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3 ++#define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3 ++#define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3 ++#define CODE_FOR_lasx_xvmax_d CODE_FOR_smaxv4di3 ++#define CODE_FOR_lasx_xvmaxi_b CODE_FOR_smaxv32qi3 ++#define CODE_FOR_lasx_xvmaxi_h CODE_FOR_smaxv16hi3 ++#define CODE_FOR_lasx_xvmaxi_w CODE_FOR_smaxv8si3 ++#define CODE_FOR_lasx_xvmaxi_d CODE_FOR_smaxv4di3 ++#define CODE_FOR_lasx_xvmax_bu CODE_FOR_umaxv32qi3 ++#define CODE_FOR_lasx_xvmax_hu CODE_FOR_umaxv16hi3 ++#define CODE_FOR_lasx_xvmax_wu CODE_FOR_umaxv8si3 ++#define CODE_FOR_lasx_xvmax_du CODE_FOR_umaxv4di3 ++#define CODE_FOR_lasx_xvmaxi_bu CODE_FOR_umaxv32qi3 ++#define CODE_FOR_lasx_xvmaxi_hu CODE_FOR_umaxv16hi3 ++#define CODE_FOR_lasx_xvmaxi_wu CODE_FOR_umaxv8si3 ++#define CODE_FOR_lasx_xvmaxi_du CODE_FOR_umaxv4di3 ++#define CODE_FOR_lasx_xvmin_b CODE_FOR_sminv32qi3 ++#define CODE_FOR_lasx_xvmin_h CODE_FOR_sminv16hi3 ++#define CODE_FOR_lasx_xvmin_w CODE_FOR_sminv8si3 ++#define CODE_FOR_lasx_xvmin_d CODE_FOR_sminv4di3 ++#define CODE_FOR_lasx_xvmini_b CODE_FOR_sminv32qi3 ++#define CODE_FOR_lasx_xvmini_h CODE_FOR_sminv16hi3 ++#define CODE_FOR_lasx_xvmini_w CODE_FOR_sminv8si3 ++#define CODE_FOR_lasx_xvmini_d CODE_FOR_sminv4di3 ++#define CODE_FOR_lasx_xvmin_bu CODE_FOR_uminv32qi3 ++#define CODE_FOR_lasx_xvmin_hu CODE_FOR_uminv16hi3 ++#define CODE_FOR_lasx_xvmin_wu CODE_FOR_uminv8si3 ++#define CODE_FOR_lasx_xvmin_du CODE_FOR_uminv4di3 ++#define CODE_FOR_lasx_xvmini_bu CODE_FOR_uminv32qi3 ++#define CODE_FOR_lasx_xvmini_hu CODE_FOR_uminv16hi3 ++#define CODE_FOR_lasx_xvmini_wu CODE_FOR_uminv8si3 ++#define CODE_FOR_lasx_xvmini_du CODE_FOR_uminv4di3 ++#define CODE_FOR_lasx_xvmod_b CODE_FOR_modv32qi3 ++#define CODE_FOR_lasx_xvmod_h CODE_FOR_modv16hi3 ++#define CODE_FOR_lasx_xvmod_w CODE_FOR_modv8si3 ++#define CODE_FOR_lasx_xvmod_d CODE_FOR_modv4di3 ++#define CODE_FOR_lasx_xvmod_bu CODE_FOR_umodv32qi3 ++#define CODE_FOR_lasx_xvmod_hu CODE_FOR_umodv16hi3 ++#define CODE_FOR_lasx_xvmod_wu CODE_FOR_umodv8si3 ++#define CODE_FOR_lasx_xvmod_du CODE_FOR_umodv4di3 ++#define CODE_FOR_lasx_xvmul_b CODE_FOR_mulv32qi3 ++#define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3 ++#define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3 ++#define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3 ++#define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2 ++#define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2 ++#define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2 
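/* Illustrative sketch -- editor's note, not part of the original patch.
   The CODE_FOR_* aliases in this block let a builtin reuse a generic .md
   pattern instead of requiring a dedicated one.  For example, a table
   entry such as

     LASX_BUILTIN (xvadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI)

   (the function-type name here is hypothetical) expands to
   CODE_FOR_lasx_xvadd_w, which the alias earlier in this block resolves
   to CODE_FOR_addv8si3 -- the standard "addv8si3" vector-add pattern --
   so expanding __builtin_lasx_xvadd_w emits an ordinary V8SI add insn.  */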
++#define CODE_FOR_lasx_xvclz_d CODE_FOR_clzv4di2 ++#define CODE_FOR_lasx_xvnor_v CODE_FOR_lasx_xvnor_b ++#define CODE_FOR_lasx_xvor_v CODE_FOR_iorv32qi3 ++#define CODE_FOR_lasx_xvori_b CODE_FOR_iorv32qi3 ++#define CODE_FOR_lasx_xvnori_b CODE_FOR_lasx_xvnor_b ++#define CODE_FOR_lasx_xvpcnt_b CODE_FOR_popcountv32qi2 ++#define CODE_FOR_lasx_xvpcnt_h CODE_FOR_popcountv16hi2 ++#define CODE_FOR_lasx_xvpcnt_w CODE_FOR_popcountv8si2 ++#define CODE_FOR_lasx_xvpcnt_d CODE_FOR_popcountv4di2 ++#define CODE_FOR_lasx_xvxor_v CODE_FOR_xorv32qi3 ++#define CODE_FOR_lasx_xvxori_b CODE_FOR_xorv32qi3 ++#define CODE_FOR_lasx_xvsll_b CODE_FOR_vashlv32qi3 ++#define CODE_FOR_lasx_xvsll_h CODE_FOR_vashlv16hi3 ++#define CODE_FOR_lasx_xvsll_w CODE_FOR_vashlv8si3 ++#define CODE_FOR_lasx_xvsll_d CODE_FOR_vashlv4di3 ++#define CODE_FOR_lasx_xvslli_b CODE_FOR_vashlv32qi3 ++#define CODE_FOR_lasx_xvslli_h CODE_FOR_vashlv16hi3 ++#define CODE_FOR_lasx_xvslli_w CODE_FOR_vashlv8si3 ++#define CODE_FOR_lasx_xvslli_d CODE_FOR_vashlv4di3 ++#define CODE_FOR_lasx_xvsra_b CODE_FOR_vashrv32qi3 ++#define CODE_FOR_lasx_xvsra_h CODE_FOR_vashrv16hi3 ++#define CODE_FOR_lasx_xvsra_w CODE_FOR_vashrv8si3 ++#define CODE_FOR_lasx_xvsra_d CODE_FOR_vashrv4di3 ++#define CODE_FOR_lasx_xvsrai_b CODE_FOR_vashrv32qi3 ++#define CODE_FOR_lasx_xvsrai_h CODE_FOR_vashrv16hi3 ++#define CODE_FOR_lasx_xvsrai_w CODE_FOR_vashrv8si3 ++#define CODE_FOR_lasx_xvsrai_d CODE_FOR_vashrv4di3 ++#define CODE_FOR_lasx_xvsrl_b CODE_FOR_vlshrv32qi3 ++#define CODE_FOR_lasx_xvsrl_h CODE_FOR_vlshrv16hi3 ++#define CODE_FOR_lasx_xvsrl_w CODE_FOR_vlshrv8si3 ++#define CODE_FOR_lasx_xvsrl_d CODE_FOR_vlshrv4di3 ++#define CODE_FOR_lasx_xvsrli_b CODE_FOR_vlshrv32qi3 ++#define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3 ++#define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3 ++#define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3 ++#define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3 ++#define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3 ++#define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3 ++#define CODE_FOR_lasx_xvsub_d CODE_FOR_subv4di3 ++#define CODE_FOR_lasx_xvsubi_bu CODE_FOR_subv32qi3 ++#define CODE_FOR_lasx_xvsubi_hu CODE_FOR_subv16hi3 ++#define CODE_FOR_lasx_xvsubi_wu CODE_FOR_subv8si3 ++#define CODE_FOR_lasx_xvsubi_du CODE_FOR_subv4di3 ++#define CODE_FOR_lasx_xvpackod_d CODE_FOR_lasx_xvilvh_d ++#define CODE_FOR_lasx_xvpackev_d CODE_FOR_lasx_xvilvl_d ++#define CODE_FOR_lasx_xvpickod_d CODE_FOR_lasx_xvilvh_d ++#define CODE_FOR_lasx_xvpickev_d CODE_FOR_lasx_xvilvl_d ++#define CODE_FOR_lasx_xvrepli_b CODE_FOR_lasx_xvrepliv32qi ++#define CODE_FOR_lasx_xvrepli_h CODE_FOR_lasx_xvrepliv16hi ++#define CODE_FOR_lasx_xvrepli_w CODE_FOR_lasx_xvrepliv8si ++#define CODE_FOR_lasx_xvrepli_d CODE_FOR_lasx_xvrepliv4di ++ ++#define CODE_FOR_lasx_xvandn_v CODE_FOR_xvandnv32qi3 ++#define CODE_FOR_lasx_xvorn_v CODE_FOR_xvornv32qi3 ++#define CODE_FOR_lasx_xvneg_b CODE_FOR_negv32qi2 ++#define CODE_FOR_lasx_xvneg_h CODE_FOR_negv16hi2 ++#define CODE_FOR_lasx_xvneg_w CODE_FOR_negv8si2 ++#define CODE_FOR_lasx_xvneg_d CODE_FOR_negv4di2 ++#define CODE_FOR_lasx_xvbsrl_v CODE_FOR_lasx_xvbsrl_b ++#define CODE_FOR_lasx_xvbsll_v CODE_FOR_lasx_xvbsll_b ++#define CODE_FOR_lasx_xvfmadd_s CODE_FOR_xvfmaddv8sf4 ++#define CODE_FOR_lasx_xvfmadd_d CODE_FOR_xvfmaddv4df4 ++#define CODE_FOR_lasx_xvfmsub_s CODE_FOR_xvfmsubv8sf4 ++#define CODE_FOR_lasx_xvfmsub_d CODE_FOR_xvfmsubv4df4 ++#define CODE_FOR_lasx_xvfnmadd_s CODE_FOR_xvfnmaddv8sf4_nmadd4 ++#define CODE_FOR_lasx_xvfnmadd_d CODE_FOR_xvfnmaddv4df4_nmadd4 ++#define 
CODE_FOR_lasx_xvfnmsub_s CODE_FOR_xvfnmsubv8sf4_nmsub4 ++#define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4 ++ ++#define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi ++#define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b ++#define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b ++ ++#define CODE_FOR_lasx_xvssub_b CODE_FOR_lasx_xvssub_s_b ++#define CODE_FOR_lasx_xvssub_h CODE_FOR_lasx_xvssub_s_h ++#define CODE_FOR_lasx_xvssub_w CODE_FOR_lasx_xvssub_s_w ++#define CODE_FOR_lasx_xvssub_d CODE_FOR_lasx_xvssub_s_d ++#define CODE_FOR_lasx_xvssub_bu CODE_FOR_lasx_xvssub_u_bu ++#define CODE_FOR_lasx_xvssub_hu CODE_FOR_lasx_xvssub_u_hu ++#define CODE_FOR_lasx_xvssub_wu CODE_FOR_lasx_xvssub_u_wu ++#define CODE_FOR_lasx_xvssub_du CODE_FOR_lasx_xvssub_u_du ++#define CODE_FOR_lasx_xvabsd_b CODE_FOR_lasx_xvabsd_s_b ++#define CODE_FOR_lasx_xvabsd_h CODE_FOR_lasx_xvabsd_s_h ++#define CODE_FOR_lasx_xvabsd_w CODE_FOR_lasx_xvabsd_s_w ++#define CODE_FOR_lasx_xvabsd_d CODE_FOR_lasx_xvabsd_s_d ++#define CODE_FOR_lasx_xvabsd_bu CODE_FOR_lasx_xvabsd_u_bu ++#define CODE_FOR_lasx_xvabsd_hu CODE_FOR_lasx_xvabsd_u_hu ++#define CODE_FOR_lasx_xvabsd_wu CODE_FOR_lasx_xvabsd_u_wu ++#define CODE_FOR_lasx_xvabsd_du CODE_FOR_lasx_xvabsd_u_du ++#define CODE_FOR_lasx_xvavg_b CODE_FOR_lasx_xvavg_s_b ++#define CODE_FOR_lasx_xvavg_h CODE_FOR_lasx_xvavg_s_h ++#define CODE_FOR_lasx_xvavg_w CODE_FOR_lasx_xvavg_s_w ++#define CODE_FOR_lasx_xvavg_d CODE_FOR_lasx_xvavg_s_d ++#define CODE_FOR_lasx_xvavg_bu CODE_FOR_lasx_xvavg_u_bu ++#define CODE_FOR_lasx_xvavg_hu CODE_FOR_lasx_xvavg_u_hu ++#define CODE_FOR_lasx_xvavg_wu CODE_FOR_lasx_xvavg_u_wu ++#define CODE_FOR_lasx_xvavg_du CODE_FOR_lasx_xvavg_u_du ++#define CODE_FOR_lasx_xvavgr_b CODE_FOR_lasx_xvavgr_s_b ++#define CODE_FOR_lasx_xvavgr_h CODE_FOR_lasx_xvavgr_s_h ++#define CODE_FOR_lasx_xvavgr_w CODE_FOR_lasx_xvavgr_s_w ++#define CODE_FOR_lasx_xvavgr_d CODE_FOR_lasx_xvavgr_s_d ++#define CODE_FOR_lasx_xvavgr_bu CODE_FOR_lasx_xvavgr_u_bu ++#define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu ++#define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu ++#define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du ++#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b ++#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h ++#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w ++#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d ++#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu ++#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu ++#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu ++#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du ++#define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h ++#define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w ++#define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d ++#define CODE_FOR_lasx_xvssran_bu_h CODE_FOR_lasx_xvssran_u_bu_h ++#define CODE_FOR_lasx_xvssran_hu_w CODE_FOR_lasx_xvssran_u_hu_w ++#define CODE_FOR_lasx_xvssran_wu_d CODE_FOR_lasx_xvssran_u_wu_d ++#define CODE_FOR_lasx_xvssrarn_b_h CODE_FOR_lasx_xvssrarn_s_b_h ++#define CODE_FOR_lasx_xvssrarn_h_w CODE_FOR_lasx_xvssrarn_s_h_w ++#define CODE_FOR_lasx_xvssrarn_w_d CODE_FOR_lasx_xvssrarn_s_w_d ++#define CODE_FOR_lasx_xvssrarn_bu_h CODE_FOR_lasx_xvssrarn_u_bu_h ++#define CODE_FOR_lasx_xvssrarn_hu_w CODE_FOR_lasx_xvssrarn_u_hu_w ++#define CODE_FOR_lasx_xvssrarn_wu_d CODE_FOR_lasx_xvssrarn_u_wu_d ++#define CODE_FOR_lasx_xvssrln_bu_h CODE_FOR_lasx_xvssrln_u_bu_h ++#define CODE_FOR_lasx_xvssrln_hu_w CODE_FOR_lasx_xvssrln_u_hu_w 
++#define CODE_FOR_lasx_xvssrln_wu_d CODE_FOR_lasx_xvssrln_u_wu_d ++#define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h ++#define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w ++#define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d ++#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s ++#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d ++#define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s ++#define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d ++#define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b ++#define CODE_FOR_lasx_xvsllwil_w_h CODE_FOR_lasx_xvsllwil_s_w_h ++#define CODE_FOR_lasx_xvsllwil_d_w CODE_FOR_lasx_xvsllwil_s_d_w ++#define CODE_FOR_lasx_xvsllwil_hu_bu CODE_FOR_lasx_xvsllwil_u_hu_bu ++#define CODE_FOR_lasx_xvsllwil_wu_hu CODE_FOR_lasx_xvsllwil_u_wu_hu ++#define CODE_FOR_lasx_xvsllwil_du_wu CODE_FOR_lasx_xvsllwil_u_du_wu ++#define CODE_FOR_lasx_xvsat_b CODE_FOR_lasx_xvsat_s_b ++#define CODE_FOR_lasx_xvsat_h CODE_FOR_lasx_xvsat_s_h ++#define CODE_FOR_lasx_xvsat_w CODE_FOR_lasx_xvsat_s_w ++#define CODE_FOR_lasx_xvsat_d CODE_FOR_lasx_xvsat_s_d ++#define CODE_FOR_lasx_xvsat_bu CODE_FOR_lasx_xvsat_u_bu ++#define CODE_FOR_lasx_xvsat_hu CODE_FOR_lasx_xvsat_u_hu ++#define CODE_FOR_lasx_xvsat_wu CODE_FOR_lasx_xvsat_u_wu ++#define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du ++ ++static const struct loongarch_builtin_description loongarch_builtins[] = { ++#define LARCH_MOVFCSR2GR 0 ++ DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float), ++#define LARCH_MOVGR2FCSR 1 ++ DIRECT_NO_TARGET_BUILTIN (movgr2fcsr, LARCH_VOID_FTYPE_UQI_USI, hard_float), ++ ++ DIRECT_NO_TARGET_BUILTIN (cacop, LARCH_VOID_FTYPE_USI_USI_SI, default), ++ DIRECT_NO_TARGET_BUILTIN (dcacop, LARCH_VOID_FTYPE_USI_UDI_SI, default), ++ DIRECT_NO_TARGET_BUILTIN (dbar, LARCH_VOID_FTYPE_USI, default), ++ DIRECT_NO_TARGET_BUILTIN (ibar, LARCH_VOID_FTYPE_USI, default), ++ ++ DIRECT_BUILTIN (fmax_sf, LARCH_SF_FTYPE_SF_SF, hard_float), ++ DIRECT_BUILTIN (fmax_df, LARCH_DF_FTYPE_DF_DF, hard_float), ++ DIRECT_BUILTIN (fmin_sf, LARCH_SF_FTYPE_SF_SF, hard_float), ++ DIRECT_BUILTIN (fmin_df, LARCH_DF_FTYPE_DF_DF, hard_float), ++ DIRECT_BUILTIN (fmaxa_sf, LARCH_SF_FTYPE_SF_SF, hard_float), ++ DIRECT_BUILTIN (fmaxa_df, LARCH_DF_FTYPE_DF_DF, hard_float), ++ DIRECT_BUILTIN (fmina_sf, LARCH_SF_FTYPE_SF_SF, hard_float), ++ DIRECT_BUILTIN (fmina_df, LARCH_DF_FTYPE_DF_DF, hard_float), ++ DIRECT_BUILTIN (fclass_s, LARCH_SF_FTYPE_SF, hard_float), ++ DIRECT_BUILTIN (fclass_d, LARCH_DF_FTYPE_DF, hard_float), ++ DIRECT_BUILTIN (frint_s, LARCH_SF_FTYPE_SF, hard_float), ++ DIRECT_BUILTIN (frint_d, LARCH_DF_FTYPE_DF, hard_float), ++ DIRECT_BUILTIN (bytepick_w, LARCH_SI_FTYPE_SI_SI_QI, default), ++ DIRECT_BUILTIN (bytepick_d, LARCH_DI_FTYPE_DI_DI_QI, default), ++ DIRECT_BUILTIN (bitrev_4b, LARCH_SI_FTYPE_SI, default), ++ DIRECT_BUILTIN (bitrev_8b, LARCH_DI_FTYPE_DI, default), ++ DIRECT_BUILTIN (cpucfg, LARCH_USI_FTYPE_USI, default), ++ DIRECT_BUILTIN (asrtle_d, LARCH_VOID_FTYPE_DI_DI, default), ++ DIRECT_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default), ++ DIRECT_BUILTIN (dlddir, LARCH_DI_FTYPE_DI_UQI, default), ++ DIRECT_BUILTIN (lddir, LARCH_SI_FTYPE_SI_UQI, default), ++ DIRECT_NO_TARGET_BUILTIN (dldpte, LARCH_VOID_FTYPE_DI_UQI, default), ++ DIRECT_NO_TARGET_BUILTIN (ldpte, LARCH_VOID_FTYPE_SI_UQI, default), ++ ++ /* CRC Instrinsic */ ++ ++ DIRECT_BUILTIN (crc_w_b_w, LARCH_SI_FTYPE_QI_SI, default), ++ DIRECT_BUILTIN (crc_w_h_w, 
LARCH_SI_FTYPE_HI_SI, default), ++ DIRECT_BUILTIN (crc_w_w_w, LARCH_SI_FTYPE_SI_SI, default), ++ DIRECT_BUILTIN (crc_w_d_w, LARCH_SI_FTYPE_DI_SI, default), ++ DIRECT_BUILTIN (crcc_w_b_w, LARCH_SI_FTYPE_QI_SI, default), ++ DIRECT_BUILTIN (crcc_w_h_w, LARCH_SI_FTYPE_HI_SI, default), ++ DIRECT_BUILTIN (crcc_w_w_w, LARCH_SI_FTYPE_SI_SI, default), ++ DIRECT_BUILTIN (crcc_w_d_w, LARCH_SI_FTYPE_DI_SI, default), ++ ++ DIRECT_BUILTIN (csrrd, LARCH_USI_FTYPE_USI, default), ++ DIRECT_BUILTIN (dcsrrd, LARCH_UDI_FTYPE_USI, default), ++ DIRECT_BUILTIN (csrwr, LARCH_USI_FTYPE_USI_USI, default), ++ DIRECT_BUILTIN (dcsrwr, LARCH_UDI_FTYPE_UDI_USI, default), ++ DIRECT_BUILTIN (csrxchg, LARCH_USI_FTYPE_USI_USI_USI, default), ++ DIRECT_BUILTIN (dcsrxchg, LARCH_UDI_FTYPE_UDI_UDI_USI, default), ++ DIRECT_BUILTIN (iocsrrd_b, LARCH_UQI_FTYPE_USI, default), ++ DIRECT_BUILTIN (iocsrrd_h, LARCH_UHI_FTYPE_USI, default), ++ DIRECT_BUILTIN (iocsrrd_w, LARCH_USI_FTYPE_USI, default), ++ DIRECT_BUILTIN (iocsrrd_d, LARCH_UDI_FTYPE_USI, default), ++ DIRECT_NO_TARGET_BUILTIN (iocsrwr_b, LARCH_VOID_FTYPE_UQI_USI, default), ++ DIRECT_NO_TARGET_BUILTIN (iocsrwr_h, LARCH_VOID_FTYPE_UHI_USI, default), ++ DIRECT_NO_TARGET_BUILTIN (iocsrwr_w, LARCH_VOID_FTYPE_USI_USI, default), ++ DIRECT_NO_TARGET_BUILTIN (iocsrwr_d, LARCH_VOID_FTYPE_UDI_USI, default), ++ ++ /* Built-in functions for LSX. */ ++ LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsll_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsll_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslli_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vslli_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vslli_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vslli_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsra_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsra_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsra_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsra_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrai_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrai_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrai_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrai_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrar_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrar_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrar_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrar_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrari_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrari_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrari_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrari_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrli_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrli_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrli_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrli_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsrlr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsrlr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrlr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrlr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsrlri_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsrlri_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsrlri_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsrlri_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN 
(vbitclr_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitclr_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitclr_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitclr_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitclri_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitclri_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitclri_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitclri_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vbitset_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitset_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitset_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitset_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitseti_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitseti_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitseti_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitseti_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vbitrev_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitrev_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vbitrev_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vbitrev_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vbitrevi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitrevi_h, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vbitrevi_w, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vbitrevi_d, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vaddi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vaddi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vaddi_du, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubi_bu, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsubi_hu, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsubi_wu, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsubi_du, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vmax_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmax_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmax_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmax_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmaxi_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vmaxi_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vmaxi_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vmaxi_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vmax_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmax_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmax_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmax_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaxi_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vmaxi_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vmaxi_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vmaxi_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vmin_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmin_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmin_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmin_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmini_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vmini_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vmini_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vmini_d, 
LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vmin_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmin_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmin_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmin_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmini_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vmini_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vmini_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vmini_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vseq_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vseq_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vseq_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vseq_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vseqi_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vseqi_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vseqi_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vseqi_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vslti_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vslt_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vslt_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vslt_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vslt_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslti_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vslti_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vslti_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vslt_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vslt_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vslt_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vslt_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vslti_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vslti_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vslti_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vslti_du, LARCH_V2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vsle_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsle_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsle_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsle_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vslei_b, LARCH_V16QI_FTYPE_V16QI_QI), ++ LSX_BUILTIN (vslei_h, LARCH_V8HI_FTYPE_V8HI_QI), ++ LSX_BUILTIN (vslei_w, LARCH_V4SI_FTYPE_V4SI_QI), ++ LSX_BUILTIN (vslei_d, LARCH_V2DI_FTYPE_V2DI_QI), ++ LSX_BUILTIN (vsle_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsle_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsle_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsle_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vslei_bu, LARCH_V16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vslei_hu, LARCH_V8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vslei_wu, LARCH_V4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vslei_du, LARCH_V2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vsat_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsat_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsat_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsat_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vsat_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vsat_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vsat_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vsat_du, LARCH_UV2DI_FTYPE_UV2DI_UQI), ++ LSX_BUILTIN (vadda_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vadda_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vadda_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vadda_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsadd_bu, 
LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsadd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsadd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsadd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vavg_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vavg_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vavg_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vavg_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vavg_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vavg_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vavg_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vavg_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vavgr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vavgr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vavgr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vavgr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vavgr_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vavgr_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vavgr_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vavgr_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vssub_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vssub_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssub_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssub_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssub_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vssub_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssub_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssub_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vabsd_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vabsd_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vabsd_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vabsd_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vabsd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vabsd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vabsd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vabsd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmul_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmul_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmul_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmul_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vmadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vmadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vmadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vmsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vmsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vmsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vdiv_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vdiv_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vdiv_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vdiv_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vdiv_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vdiv_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vdiv_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vdiv_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vhaddw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vhaddw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vhaddw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vhaddw_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vhaddw_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vhaddw_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vhsubw_h_b, 
LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vhsubw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vhsubw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vhsubw_hu_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vhsubw_wu_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vhsubw_du_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmod_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmod_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmod_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmod_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vreplve_b, LARCH_V16QI_FTYPE_V16QI_SI), ++ LSX_BUILTIN (vreplve_h, LARCH_V8HI_FTYPE_V8HI_SI), ++ LSX_BUILTIN (vreplve_w, LARCH_V4SI_FTYPE_V4SI_SI), ++ LSX_BUILTIN (vreplve_d, LARCH_V2DI_FTYPE_V2DI_SI), ++ LSX_BUILTIN (vreplvei_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vreplvei_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vreplvei_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vreplvei_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vpickev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpickev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpickev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpickev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpickod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpickod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpickod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpickod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vilvh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vilvh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vilvh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vilvh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vilvl_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vilvl_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vilvl_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vilvl_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpackev_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpackev_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpackev_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpackev_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vpackod_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vpackod_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vpackod_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vpackod_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vshuf_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vshuf_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI), ++ LSX_BUILTIN (vshuf_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vand_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vandi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vnor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vnori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vxor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vxori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vbitsel_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI), ++ LSX_BUILTIN (vbitseli_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_USI), ++ LSX_BUILTIN (vshuf4i_b, LARCH_V16QI_FTYPE_V16QI_USI), ++ LSX_BUILTIN (vshuf4i_h, LARCH_V8HI_FTYPE_V8HI_USI), ++ LSX_BUILTIN (vshuf4i_w, LARCH_V4SI_FTYPE_V4SI_USI), ++ LSX_BUILTIN (vreplgr2vr_b, 
LARCH_V16QI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_h, LARCH_V8HI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_w, LARCH_V4SI_FTYPE_SI), ++ LSX_BUILTIN (vreplgr2vr_d, LARCH_V2DI_FTYPE_DI), ++ LSX_BUILTIN (vpcnt_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vpcnt_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vpcnt_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vpcnt_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vclo_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vclo_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vclo_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vclo_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vclz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vclz_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vclz_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vclz_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vpickve2gr_b, LARCH_SI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vpickve2gr_h, LARCH_SI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vpickve2gr_w, LARCH_SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vpickve2gr_d, LARCH_DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vpickve2gr_bu, LARCH_USI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vpickve2gr_hu, LARCH_USI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vpickve2gr_wu, LARCH_USI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vpickve2gr_du, LARCH_UDI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vinsgr2vr_b, LARCH_V16QI_FTYPE_V16QI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_h, LARCH_V8HI_FTYPE_V8HI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_w, LARCH_V4SI_FTYPE_V4SI_SI_UQI), ++ LSX_BUILTIN (vinsgr2vr_d, LARCH_V2DI_FTYPE_V2DI_DI_UQI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_b, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_h, LARCH_SI_FTYPE_UV8HI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_w, LARCH_SI_FTYPE_UV4SI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_d, LARCH_SI_FTYPE_UV2DI), ++ LSX_BUILTIN_TEST_BRANCH (bz_b, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bz_h, LARCH_SI_FTYPE_UV8HI), ++ LSX_BUILTIN_TEST_BRANCH (bz_w, LARCH_SI_FTYPE_UV4SI), ++ LSX_BUILTIN_TEST_BRANCH (bz_d, LARCH_SI_FTYPE_UV2DI), ++ LSX_BUILTIN_TEST_BRANCH (bz_v, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN_TEST_BRANCH (bnz_v, LARCH_SI_FTYPE_UV16QI), ++ LSX_BUILTIN (vrepli_b, LARCH_V16QI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_h, LARCH_V8HI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_w, LARCH_V4SI_FTYPE_HI), ++ LSX_BUILTIN (vrepli_d, LARCH_V2DI_FTYPE_HI), ++ LSX_BUILTIN (vfcmp_caf_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_caf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_ceq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_ceq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_clt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_clt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_cule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_cule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_saf_s, 
LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_saf_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sor_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sor_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sun_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sun_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sune_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sune_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_seq_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_seq_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sne_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sne_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_slt_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_slt_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sult_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sult_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sle_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sle_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcmp_sule_s, LARCH_V4SI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcmp_sule_d, LARCH_V2DI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmul_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmul_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfdiv_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfdiv_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfcvt_h_s, LARCH_V8HI_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfcvt_s_d, LARCH_V4SF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmin_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmin_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmina_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmina_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmax_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmax_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfmaxa_s, LARCH_V4SF_FTYPE_V4SF_V4SF), ++ LSX_BUILTIN (vfmaxa_d, LARCH_V2DF_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vfclass_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfclass_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfsqrt_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfsqrt_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrecip_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrecip_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrint_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrint_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrsqrt_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrsqrt_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vflogb_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vflogb_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfcvth_s_h, LARCH_V4SF_FTYPE_V8HI), ++ LSX_BUILTIN (vfcvth_d_s, LARCH_V2DF_FTYPE_V4SF), ++ LSX_BUILTIN (vfcvtl_s_h, LARCH_V4SF_FTYPE_V8HI), ++ LSX_BUILTIN (vfcvtl_d_s, LARCH_V2DF_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftint_wu_s, LARCH_UV4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftint_lu_d, LARCH_UV2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrz_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrz_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrz_wu_s, LARCH_UV4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrz_lu_d, LARCH_UV2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vffint_s_w, LARCH_V4SF_FTYPE_V4SI), ++ LSX_BUILTIN 
(vffint_d_l, LARCH_V2DF_FTYPE_V2DI), ++ LSX_BUILTIN (vffint_s_wu, LARCH_V4SF_FTYPE_UV4SI), ++ LSX_BUILTIN (vffint_d_lu, LARCH_V2DF_FTYPE_UV2DI), ++ ++ ++ LSX_BUILTIN (vandn_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vneg_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vneg_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vneg_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vneg_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vmuh_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmuh_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmuh_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmuh_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmuh_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmuh_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmuh_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmuh_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsllwil_h_b, LARCH_V8HI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vsllwil_w_h, LARCH_V4SI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vsllwil_d_w, LARCH_V2DI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vsllwil_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UQI), ++ LSX_BUILTIN (vsllwil_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UQI), ++ LSX_BUILTIN (vsllwil_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UQI), ++ LSX_BUILTIN (vsran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssran_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssran_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssran_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrarn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrarn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrarn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrln_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrln_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrln_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrlrn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vssrlrn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vssrlrn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vfrstpi_b, LARCH_V16QI_FTYPE_V16QI_V16QI_UQI), ++ LSX_BUILTIN (vfrstpi_h, LARCH_V8HI_FTYPE_V8HI_V8HI_UQI), ++ LSX_BUILTIN (vfrstp_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vfrstp_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI), ++ LSX_BUILTIN (vshuf4i_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vbsrl_v, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vbsll_v, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vextrins_b, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vextrins_h, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vextrins_w, 
LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vextrins_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vmskltz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vmskltz_h, LARCH_V8HI_FTYPE_V8HI), ++ LSX_BUILTIN (vmskltz_w, LARCH_V4SI_FTYPE_V4SI), ++ LSX_BUILTIN (vmskltz_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vsigncov_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsigncov_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsigncov_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsigncov_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vfmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfnmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfnmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vfnmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF), ++ LSX_BUILTIN (vfnmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF), ++ LSX_BUILTIN (vftintrne_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrne_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrp_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrp_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftintrm_w_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrm_l_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vftint_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vffint_s_l, LARCH_V4SF_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vftintrz_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrp_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrm_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintrne_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF), ++ LSX_BUILTIN (vftintl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftinth_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vffinth_d_w, LARCH_V2DF_FTYPE_V4SI), ++ LSX_BUILTIN (vffintl_d_w, LARCH_V2DF_FTYPE_V4SI), ++ LSX_BUILTIN (vftintrzl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrzh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrpl_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrph_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrml_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrz_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrz_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrp_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrp_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrm_s, LARCH_V4SI_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrm_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI), ++ LSX_NO_TARGET_BUILTIN (vstelm_d, LARCH_VOID_FTYPE_V2DI_CVPOINTER_SI_UQI), ++ LSX_BUILTIN (vaddwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vaddwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vaddwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vaddwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vaddwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vaddwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vaddwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vaddwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ 
LSX_BUILTIN (vaddwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vaddwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vaddwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vaddwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vaddwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vaddwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vaddwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vaddwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vsubwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsubwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsubwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsubwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vsubwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vsubwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vsubwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsubwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsubwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vsubwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vsubwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vsubwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vaddwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vaddwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsubwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsubwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vsubwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vaddwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vaddwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ ++ LSX_BUILTIN (vmulwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmulwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmulwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmulwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vmulwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vmulwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vmulwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmulwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmulwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmulwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI), ++ LSX_BUILTIN (vmulwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI), ++ LSX_BUILTIN (vmulwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI), ++ LSX_BUILTIN (vmulwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vmulwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vmulwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vmulwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI), ++ LSX_BUILTIN (vmulwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI), ++ LSX_BUILTIN (vmulwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI), ++ LSX_BUILTIN (vmulwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmulwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vmulwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmulwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmulwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vmulwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI), ++ LSX_BUILTIN (vhaddw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vhaddw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vhsubw_q_d, 
LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vhsubw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwev_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), ++ LSX_BUILTIN (vmaddwev_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), ++ LSX_BUILTIN (vmaddwev_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), ++ LSX_BUILTIN (vmaddwev_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), ++ LSX_BUILTIN (vmaddwev_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), ++ LSX_BUILTIN (vmaddwev_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), ++ LSX_BUILTIN (vmaddwod_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI), ++ LSX_BUILTIN (vmaddwod_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI), ++ LSX_BUILTIN (vmaddwod_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI), ++ LSX_BUILTIN (vmaddwod_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI), ++ LSX_BUILTIN (vmaddwod_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI), ++ LSX_BUILTIN (vmaddwod_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI), ++ LSX_BUILTIN (vmaddwev_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), ++ LSX_BUILTIN (vmaddwev_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), ++ LSX_BUILTIN (vmaddwev_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), ++ LSX_BUILTIN (vmaddwod_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI), ++ LSX_BUILTIN (vmaddwod_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI), ++ LSX_BUILTIN (vmaddwod_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI), ++ LSX_BUILTIN (vmaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI), ++ LSX_BUILTIN (vmaddwev_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwod_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI), ++ LSX_BUILTIN (vmaddwev_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), ++ LSX_BUILTIN (vmaddwod_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI), ++ LSX_BUILTIN (vrotr_b, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vrotr_h, LARCH_V8HI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vrotr_w, LARCH_V4SI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vrotr_d, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vadd_q, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vsub_q, LARCH_V2DI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vldrepl_b, LARCH_V16QI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_h, LARCH_V8HI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_w, LARCH_V4SI_FTYPE_CVPOINTER_SI), ++ LSX_BUILTIN (vldrepl_d, LARCH_V2DI_FTYPE_CVPOINTER_SI), ++ ++ LSX_BUILTIN (vmskgez_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vmsknz_b, LARCH_V16QI_FTYPE_V16QI), ++ LSX_BUILTIN (vexth_h_b, LARCH_V8HI_FTYPE_V16QI), ++ LSX_BUILTIN (vexth_w_h, LARCH_V4SI_FTYPE_V8HI), ++ LSX_BUILTIN (vexth_d_w, LARCH_V2DI_FTYPE_V4SI), ++ LSX_BUILTIN (vexth_q_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vexth_hu_bu, LARCH_UV8HI_FTYPE_UV16QI), ++ LSX_BUILTIN (vexth_wu_hu, LARCH_UV4SI_FTYPE_UV8HI), ++ LSX_BUILTIN (vexth_du_wu, LARCH_UV2DI_FTYPE_UV4SI), ++ LSX_BUILTIN (vexth_qu_du, LARCH_UV2DI_FTYPE_UV2DI), ++ LSX_BUILTIN (vrotri_b, LARCH_V16QI_FTYPE_V16QI_UQI), ++ LSX_BUILTIN (vrotri_h, LARCH_V8HI_FTYPE_V8HI_UQI), ++ LSX_BUILTIN (vrotri_w, LARCH_V4SI_FTYPE_V4SI_UQI), ++ LSX_BUILTIN (vrotri_d, LARCH_V2DI_FTYPE_V2DI_UQI), ++ LSX_BUILTIN (vextl_q_d, LARCH_V2DI_FTYPE_V2DI), ++ LSX_BUILTIN (vsrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vsrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrlrni_d_q, 
LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrlrni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrlrni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrlrni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrlrni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vsrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vsrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vsrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vsrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vsrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrani_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrani_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrani_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrani_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vssrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI), ++ LSX_BUILTIN (vssrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI), ++ LSX_BUILTIN (vssrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vssrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI), ++ LSX_BUILTIN (vssrarni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI), ++ LSX_BUILTIN (vssrarni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI), ++ LSX_BUILTIN (vssrarni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI), ++ LSX_BUILTIN (vssrarni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI), ++ LSX_BUILTIN (vpermi_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI), ++ LSX_BUILTIN (vld, LARCH_V16QI_FTYPE_CVPOINTER_SI), ++ LSX_NO_TARGET_BUILTIN (vst, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI), ++ LSX_BUILTIN (vssrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vssrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI), ++ LSX_BUILTIN (vssrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI), ++ LSX_BUILTIN (vssrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI), ++ LSX_BUILTIN (vorn_v, LARCH_V16QI_FTYPE_V16QI_V16QI), ++ LSX_BUILTIN (vldi, LARCH_V2DI_FTYPE_HI), ++ LSX_BUILTIN (vshuf_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI), ++ LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI), ++ LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI), ++ LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI), ++ ++ /* Built-in functions for LASX */ ++ LASX_BUILTIN (xvsll_b, 
LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsll_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsll_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsll_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslli_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvslli_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvslli_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvslli_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsra_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsra_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsra_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsra_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrai_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrai_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrai_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrai_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrar_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrar_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrar_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrar_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrari_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrari_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrari_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrari_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrli_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrli_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrli_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrli_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsrlr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsrlr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrlr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrlr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsrlri_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsrlri_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsrlri_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsrlri_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvbitclr_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitclr_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitclr_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitclr_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitclri_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitclri_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitclri_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitclri_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvbitset_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitset_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitset_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitset_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitseti_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitseti_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitseti_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitseti_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvbitrev_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitrev_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvbitrev_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvbitrev_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvbitrevi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ 
LASX_BUILTIN (xvbitrevi_h, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvbitrevi_w, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvbitrevi_d, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvaddi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvaddi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvaddi_du, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubi_bu, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsubi_hu, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsubi_wu, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsubi_du, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvmax_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmax_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmax_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmax_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmaxi_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvmaxi_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvmaxi_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvmaxi_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvmax_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmax_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmax_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmax_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaxi_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvmaxi_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvmaxi_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvmaxi_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvmin_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmin_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmin_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmin_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmini_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvmini_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvmini_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvmini_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvmin_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmin_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmin_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmin_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmini_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvmini_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvmini_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvmini_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvseq_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvseq_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvseq_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvseq_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvseqi_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvseqi_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvseqi_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvseqi_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvslt_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvslt_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvslt_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ 
LASX_BUILTIN (xvslt_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslti_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvslti_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvslti_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvslti_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvslt_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvslt_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvslt_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvslt_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvslti_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvslti_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvslti_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvslti_du, LARCH_V4DI_FTYPE_UV4DI_UQI), ++ LASX_BUILTIN (xvsle_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsle_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsle_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsle_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvslei_b, LARCH_V32QI_FTYPE_V32QI_QI), ++ LASX_BUILTIN (xvslei_h, LARCH_V16HI_FTYPE_V16HI_QI), ++ LASX_BUILTIN (xvslei_w, LARCH_V8SI_FTYPE_V8SI_QI), ++ LASX_BUILTIN (xvslei_d, LARCH_V4DI_FTYPE_V4DI_QI), ++ LASX_BUILTIN (xvsle_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsle_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsle_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsle_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvslei_bu, LARCH_V32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvslei_hu, LARCH_V16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvslei_wu, LARCH_V8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvslei_du, LARCH_V4DI_FTYPE_UV4DI_UQI), ++ ++ LASX_BUILTIN (xvsat_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsat_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsat_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsat_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvsat_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvsat_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvsat_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvsat_du, LARCH_UV4DI_FTYPE_UV4DI_UQI), ++ ++ LASX_BUILTIN (xvadda_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvadda_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvadda_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvadda_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsadd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsadd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsadd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsadd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvavg_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvavg_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvavg_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvavg_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvavg_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvavg_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvavg_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvavg_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvavgr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvavgr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvavgr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvavgr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvavgr_bu, 
LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvavgr_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvavgr_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvavgr_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvssub_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvssub_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssub_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssub_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssub_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvssub_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssub_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssub_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvabsd_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvabsd_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvabsd_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvabsd_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvabsd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvabsd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvabsd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvabsd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ LASX_BUILTIN (xvmul_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmul_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmul_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmul_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvmadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvmadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvmadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvmsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvmsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvmsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvdiv_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvdiv_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvdiv_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvdiv_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvdiv_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvdiv_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvdiv_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvdiv_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvhaddw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvhaddw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvhaddw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvhaddw_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvhaddw_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvhaddw_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvhsubw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvhsubw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvhsubw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvhsubw_hu_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvhsubw_wu_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvhsubw_du_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmod_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmod_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmod_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN 
(xvmod_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ ++ ++ LASX_BUILTIN (xvrepl128vei_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvrepl128vei_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvrepl128vei_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvrepl128vei_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpickev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpickev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpickev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpickod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpickod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpickod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpickod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvilvh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvilvh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvilvh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvilvh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvilvl_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvilvl_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvilvl_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvilvl_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpackev_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpackev_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpackev_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpackev_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvpackod_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvpackod_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvpackod_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvpackod_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvshuf_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvshuf_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvshuf_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI), ++ LASX_BUILTIN (xvshuf_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvand_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvandi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvnor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvnori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvxor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvxori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI), ++ LASX_BUILTIN (xvbitsel_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvbitseli_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_USI), ++ ++ LASX_BUILTIN (xvshuf4i_b, LARCH_V32QI_FTYPE_V32QI_USI), ++ LASX_BUILTIN (xvshuf4i_h, LARCH_V16HI_FTYPE_V16HI_USI), ++ LASX_BUILTIN (xvshuf4i_w, LARCH_V8SI_FTYPE_V8SI_USI), ++ ++ LASX_BUILTIN (xvreplgr2vr_b, LARCH_V32QI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_h, LARCH_V16HI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_w, LARCH_V8SI_FTYPE_SI), ++ LASX_BUILTIN (xvreplgr2vr_d, LARCH_V4DI_FTYPE_DI), ++ LASX_BUILTIN (xvpcnt_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvpcnt_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvpcnt_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvpcnt_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvclo_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvclo_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvclo_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvclo_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvclz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvclz_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvclz_w, LARCH_V8SI_FTYPE_V8SI), ++ 
LASX_BUILTIN (xvclz_d, LARCH_V4DI_FTYPE_V4DI), ++ ++ LASX_BUILTIN (xvrepli_b, LARCH_V32QI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_h, LARCH_V16HI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_w, LARCH_V8SI_FTYPE_HI), ++ LASX_BUILTIN (xvrepli_d, LARCH_V4DI_FTYPE_HI), ++ LASX_BUILTIN (xvfcmp_caf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_caf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_ceq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_ceq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_clt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_clt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_cule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_cule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_saf_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_saf_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sor_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sor_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sun_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sun_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sune_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sune_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_seq_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_seq_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sne_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sne_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_slt_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_slt_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sult_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sult_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sle_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sle_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcmp_sule_s, LARCH_V8SI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcmp_sule_d, LARCH_V4DI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmul_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmul_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfdiv_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfdiv_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfcvt_h_s, LARCH_V16HI_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfcvt_s_d, LARCH_V8SF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmin_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN 
(xvfmin_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmina_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmina_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmax_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmax_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfmaxa_s, LARCH_V8SF_FTYPE_V8SF_V8SF), ++ LASX_BUILTIN (xvfmaxa_d, LARCH_V4DF_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvfclass_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfclass_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfsqrt_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfsqrt_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrecip_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrecip_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrint_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrint_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrsqrt_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrsqrt_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvflogb_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvflogb_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfcvth_s_h, LARCH_V8SF_FTYPE_V16HI), ++ LASX_BUILTIN (xvfcvth_d_s, LARCH_V4DF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfcvtl_s_h, LARCH_V8SF_FTYPE_V16HI), ++ LASX_BUILTIN (xvfcvtl_d_s, LARCH_V4DF_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftint_wu_s, LARCH_UV8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftint_lu_d, LARCH_UV4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrz_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrz_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrz_wu_s, LARCH_UV8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrz_lu_d, LARCH_UV4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvffint_s_w, LARCH_V8SF_FTYPE_V8SI), ++ LASX_BUILTIN (xvffint_d_l, LARCH_V4DF_FTYPE_V4DI), ++ LASX_BUILTIN (xvffint_s_wu, LARCH_V8SF_FTYPE_UV8SI), ++ LASX_BUILTIN (xvffint_d_lu, LARCH_V4DF_FTYPE_UV4DI), ++ ++ LASX_BUILTIN (xvreplve_b, LARCH_V32QI_FTYPE_V32QI_SI), ++ LASX_BUILTIN (xvreplve_h, LARCH_V16HI_FTYPE_V16HI_SI), ++ LASX_BUILTIN (xvreplve_w, LARCH_V8SI_FTYPE_V8SI_SI), ++ LASX_BUILTIN (xvreplve_d, LARCH_V4DI_FTYPE_V4DI_SI), ++ LASX_BUILTIN (xvpermi_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ ++ LASX_BUILTIN (xvandn_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvneg_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvneg_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvneg_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvneg_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvmuh_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmuh_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmuh_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmuh_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmuh_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmuh_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmuh_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmuh_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsllwil_h_b, LARCH_V16HI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvsllwil_w_h, LARCH_V8SI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvsllwil_d_w, LARCH_V4DI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvsllwil_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UQI), /* FIXME: U? 
*/ ++ LASX_BUILTIN (xvsllwil_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UQI), ++ LASX_BUILTIN (xvsllwil_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UQI), ++ LASX_BUILTIN (xvsran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssran_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssran_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssran_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrarn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrarn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrarn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrln_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrln_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrln_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrlrn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvssrlrn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvssrlrn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvfrstpi_b, LARCH_V32QI_FTYPE_V32QI_V32QI_UQI), ++ LASX_BUILTIN (xvfrstpi_h, LARCH_V16HI_FTYPE_V16HI_V16HI_UQI), ++ LASX_BUILTIN (xvfrstp_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI), ++ LASX_BUILTIN (xvfrstp_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI), ++ LASX_BUILTIN (xvshuf4i_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvbsrl_v, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvbsll_v, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvextrins_b, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvextrins_h, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvextrins_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvextrins_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvmskltz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvmskltz_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvmskltz_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvmskltz_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvsigncov_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsigncov_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsigncov_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsigncov_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvfmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfnmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfnmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvfnmsub_s, 
LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF), ++ LASX_BUILTIN (xvfnmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrne_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrne_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrp_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrp_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftintrm_w_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrm_l_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvftint_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvffint_s_l, LARCH_V8SF_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvftintrz_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrp_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrm_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftintrne_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF), ++ LASX_BUILTIN (xvftinth_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvffinth_d_w, LARCH_V4DF_FTYPE_V8SI), ++ LASX_BUILTIN (xvffintl_d_w, LARCH_V4DF_FTYPE_V8SI), ++ LASX_BUILTIN (xvftintrzh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrzl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrph_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrpl_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrmh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrz_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrz_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrp_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrp_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrm_s, LARCH_V8SI_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrm_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI), ++ LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_h, LARCH_VOID_FTYPE_V16HI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_w, LARCH_VOID_FTYPE_V8SI_CVPOINTER_SI_UQI), ++ LASX_NO_TARGET_BUILTIN (xvstelm_d, LARCH_VOID_FTYPE_V4DI_CVPOINTER_SI_UQI), ++ LASX_BUILTIN (xvinsve0_w, LARCH_V8SI_FTYPE_V8SI_V8SI_UQI), ++ LASX_BUILTIN (xvinsve0_d, LARCH_V4DI_FTYPE_V4DI_V4DI_UQI), ++ LASX_BUILTIN (xvpickve_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickve_w_f, LARCH_V8SF_FTYPE_V8SF_UQI), ++ LASX_BUILTIN (xvpickve_d_f, LARCH_V4DF_FTYPE_V4DF_UQI), ++ LASX_BUILTIN (xvssrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvssrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvssrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvssrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvorn_v, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvldi, LARCH_V4DI_FTYPE_HI), ++ LASX_BUILTIN (xvldx, LARCH_V32QI_FTYPE_CVPOINTER_DI), ++ LASX_NO_TARGET_BUILTIN (xvstx, LARCH_VOID_FTYPE_V32QI_CVPOINTER_DI), ++ LASX_BUILTIN (xvextl_qu_du, LARCH_UV4DI_FTYPE_UV4DI), ++ ++ /* LASX */ ++ LASX_BUILTIN (xvinsgr2vr_w, LARCH_V8SI_FTYPE_V8SI_SI_UQI), ++ LASX_BUILTIN (xvinsgr2vr_d, LARCH_V4DI_FTYPE_V4DI_DI_UQI), ++ ++ LASX_BUILTIN (xvreplve0_b, 
LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvreplve0_h, LARCH_V16HI_FTYPE_V16HI), ++ LASX_BUILTIN (xvreplve0_w, LARCH_V8SI_FTYPE_V8SI), ++ LASX_BUILTIN (xvreplve0_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvreplve0_q, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_h_b, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_w_h, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_d_w, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (vext2xv_w_b, LARCH_V8SI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_d_h, LARCH_V4DI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_d_b, LARCH_V4DI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_hu_bu, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_wu_hu, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_du_wu, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (vext2xv_wu_bu, LARCH_V8SI_FTYPE_V32QI), ++ LASX_BUILTIN (vext2xv_du_hu, LARCH_V4DI_FTYPE_V16HI), ++ LASX_BUILTIN (vext2xv_du_bu, LARCH_V4DI_FTYPE_V32QI), ++ LASX_BUILTIN (xvpermi_q, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvpermi_d, LARCH_V4DI_FTYPE_V4DI_USI), ++ LASX_BUILTIN (xvperm_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_b, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_h, LARCH_SI_FTYPE_UV16HI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_w, LARCH_SI_FTYPE_UV8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_d, LARCH_SI_FTYPE_UV4DI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_b, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_h, LARCH_SI_FTYPE_UV16HI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_w, LARCH_SI_FTYPE_UV8SI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_d, LARCH_SI_FTYPE_UV4DI), ++ LASX_BUILTIN_TEST_BRANCH (xbz_v, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN_TEST_BRANCH (xbnz_v, LARCH_SI_FTYPE_UV32QI), ++ LASX_BUILTIN (xvldrepl_b, LARCH_V32QI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_h, LARCH_V16HI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_w, LARCH_V8SI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvldrepl_d, LARCH_V4DI_FTYPE_CVPOINTER_SI), ++ LASX_BUILTIN (xvpickve2gr_w, LARCH_SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve2gr_wu, LARCH_USI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvpickve2gr_d, LARCH_DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvpickve2gr_du, LARCH_UDI_FTYPE_V4DI_UQI), ++ ++ ++ LASX_BUILTIN (xvaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvaddwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvaddwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvaddwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvaddwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvaddwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvaddwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsubwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsubwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsubwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsubwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsubwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsubwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsubwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmulwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmulwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmulwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmulwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmulwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmulwev_d_wu, 
LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmulwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmulwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvaddwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvaddwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvaddwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvaddwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvaddwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvaddwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvsubwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsubwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvsubwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvsubwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvsubwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvsubwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvsubwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvsubwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmulwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvmulwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvmulwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvmulwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvmulwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmulwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmulwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmulwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI), ++ LASX_BUILTIN (xvaddwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvaddwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvaddwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvmulwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvmulwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvmulwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvaddwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvaddwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvaddwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvmulwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI), ++ LASX_BUILTIN (xvmulwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI), ++ LASX_BUILTIN (xvmulwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI), ++ LASX_BUILTIN (xvhaddw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvhaddw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvhsubw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvhsubw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmaddwev_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), ++ LASX_BUILTIN (xvmaddwev_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), ++ LASX_BUILTIN (xvmaddwev_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), ++ LASX_BUILTIN (xvmaddwev_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwev_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmaddwev_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmaddwev_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI), ++ LASX_BUILTIN (xvmaddwod_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI), ++ LASX_BUILTIN (xvmaddwod_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI), ++ LASX_BUILTIN (xvmaddwod_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI), ++ LASX_BUILTIN 
(xvmaddwod_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI), ++ LASX_BUILTIN (xvmaddwod_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI), ++ LASX_BUILTIN (xvmaddwod_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI), ++ LASX_BUILTIN (xvmaddwod_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI), ++ LASX_BUILTIN (xvmaddwev_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), ++ LASX_BUILTIN (xvmaddwev_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), ++ LASX_BUILTIN (xvmaddwev_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), ++ LASX_BUILTIN (xvmaddwev_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), ++ LASX_BUILTIN (xvmaddwod_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI), ++ LASX_BUILTIN (xvmaddwod_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI), ++ LASX_BUILTIN (xvmaddwod_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI), ++ LASX_BUILTIN (xvmaddwod_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI), ++ LASX_BUILTIN (xvrotr_b, LARCH_V32QI_FTYPE_V32QI_V32QI), ++ LASX_BUILTIN (xvrotr_h, LARCH_V16HI_FTYPE_V16HI_V16HI), ++ LASX_BUILTIN (xvrotr_w, LARCH_V8SI_FTYPE_V8SI_V8SI), ++ LASX_BUILTIN (xvrotr_d, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvadd_q, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvsub_q, LARCH_V4DI_FTYPE_V4DI_V4DI), ++ LASX_BUILTIN (xvaddwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvaddwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmulwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmulwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI), ++ LASX_BUILTIN (xvmskgez_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvmsknz_b, LARCH_V32QI_FTYPE_V32QI), ++ LASX_BUILTIN (xvexth_h_b, LARCH_V16HI_FTYPE_V32QI), ++ LASX_BUILTIN (xvexth_w_h, LARCH_V8SI_FTYPE_V16HI), ++ LASX_BUILTIN (xvexth_d_w, LARCH_V4DI_FTYPE_V8SI), ++ LASX_BUILTIN (xvexth_q_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvexth_hu_bu, LARCH_UV16HI_FTYPE_UV32QI), ++ LASX_BUILTIN (xvexth_wu_hu, LARCH_UV8SI_FTYPE_UV16HI), ++ LASX_BUILTIN (xvexth_du_wu, LARCH_UV4DI_FTYPE_UV8SI), ++ LASX_BUILTIN (xvexth_qu_du, LARCH_UV4DI_FTYPE_UV4DI), ++ LASX_BUILTIN (xvrotri_b, LARCH_V32QI_FTYPE_V32QI_UQI), ++ LASX_BUILTIN (xvrotri_h, LARCH_V16HI_FTYPE_V16HI_UQI), ++ LASX_BUILTIN (xvrotri_w, LARCH_V8SI_FTYPE_V8SI_UQI), ++ LASX_BUILTIN (xvrotri_d, LARCH_V4DI_FTYPE_V4DI_UQI), ++ LASX_BUILTIN (xvextl_q_d, LARCH_V4DI_FTYPE_V4DI), ++ LASX_BUILTIN (xvsrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN 
(xvssrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrlrni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrlrni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrlrni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrlrni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvsrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvsrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvsrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvsrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrani_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrani_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrani_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrani_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI), ++ LASX_BUILTIN (xvssrarni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI), ++ LASX_BUILTIN (xvssrarni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI), ++ LASX_BUILTIN (xvssrarni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI), ++ LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), ++}; ++ ++ ++/* MODE is a vector mode whose elements have type TYPE. Return the type ++ of the vector itself. */ ++ ++static tree ++loongarch_builtin_vector_type (tree type, machine_mode mode) ++{ ++ static tree types[2 * (int) MAX_MACHINE_MODE]; ++ int mode_index; ++ ++ mode_index = (int) mode; ++ ++ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)) ++ mode_index += MAX_MACHINE_MODE; ++ ++ if (types[mode_index] == NULL_TREE) ++ types[mode_index] = build_vector_type_for_mode (type, mode); ++ return types[mode_index]; ++} ++ ++/* Return a type for 'const volatile void *'. */ ++ ++static tree ++loongarch_build_cvpointer_type (void) ++{ ++ static tree cache; ++ ++ if (cache == NULL_TREE) ++ cache = build_pointer_type (build_qualified_type ++ (void_type_node, ++ TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)); ++ return cache; ++} ++ ++/* Source-level argument types. */ ++#define LARCH_ATYPE_VOID void_type_node ++#define LARCH_ATYPE_INT integer_type_node ++#define LARCH_ATYPE_POINTER ptr_type_node ++#define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type () ++#define LARCH_ATYPE_BOOLEAN boolean_type_node ++/* Standard mode-based argument types. 
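++   Each LARCH_ATYPE_* macro maps an argument-type code used in
++   loongarch-ftypes.def onto the corresponding front-end tree node;
++   e.g. LARCH_ATYPE_UQI below selects unsigned_intQI_type_node, so a
++   UQI builtin argument is an unsigned 8-bit integer at the source level.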
*/ ++#define LARCH_ATYPE_QI intQI_type_node ++#define LARCH_ATYPE_UQI unsigned_intQI_type_node ++#define LARCH_ATYPE_HI intHI_type_node ++#define LARCH_ATYPE_UHI unsigned_intHI_type_node ++#define LARCH_ATYPE_SI intSI_type_node ++#define LARCH_ATYPE_USI unsigned_intSI_type_node ++#define LARCH_ATYPE_DI intDI_type_node ++#define LARCH_ATYPE_UDI unsigned_intDI_type_node ++#define LARCH_ATYPE_SF float_type_node ++#define LARCH_ATYPE_DF double_type_node ++ ++/* Vector argument types. */ ++#define LARCH_ATYPE_V2SF loongarch_builtin_vector_type (float_type_node, V2SFmode) ++#define LARCH_ATYPE_V2HI loongarch_builtin_vector_type (intHI_type_node, V2HImode) ++#define LARCH_ATYPE_V2SI loongarch_builtin_vector_type (intSI_type_node, V2SImode) ++#define LARCH_ATYPE_V4QI loongarch_builtin_vector_type (intQI_type_node, V4QImode) ++#define LARCH_ATYPE_V4HI loongarch_builtin_vector_type (intHI_type_node, V4HImode) ++#define LARCH_ATYPE_V8QI loongarch_builtin_vector_type (intQI_type_node, V8QImode) ++ ++#define LARCH_ATYPE_V2DI \ ++ loongarch_builtin_vector_type (long_long_integer_type_node, V2DImode) ++#define LARCH_ATYPE_V4SI loongarch_builtin_vector_type (intSI_type_node, V4SImode) ++#define LARCH_ATYPE_V8HI loongarch_builtin_vector_type (intHI_type_node, V8HImode) ++#define LARCH_ATYPE_V16QI loongarch_builtin_vector_type (intQI_type_node, V16QImode) ++#define LARCH_ATYPE_V2DF loongarch_builtin_vector_type (double_type_node, V2DFmode) ++#define LARCH_ATYPE_V4SF loongarch_builtin_vector_type (float_type_node, V4SFmode) ++ ++/* LoongArch ASX. */ ++#define LARCH_ATYPE_V4DI \ ++ loongarch_builtin_vector_type (long_long_integer_type_node, V4DImode) ++#define LARCH_ATYPE_V8SI loongarch_builtin_vector_type (intSI_type_node, V8SImode) ++#define LARCH_ATYPE_V16HI loongarch_builtin_vector_type (intHI_type_node, V16HImode) ++#define LARCH_ATYPE_V32QI loongarch_builtin_vector_type (intQI_type_node, V32QImode) ++#define LARCH_ATYPE_V4DF loongarch_builtin_vector_type (double_type_node, V4DFmode) ++#define LARCH_ATYPE_V8SF loongarch_builtin_vector_type (float_type_node, V8SFmode) ++ ++#define LARCH_ATYPE_UV2DI \ ++ loongarch_builtin_vector_type (long_long_unsigned_type_node, V2DImode) ++#define LARCH_ATYPE_UV4SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V4SImode) ++#define LARCH_ATYPE_UV8HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V8HImode) ++#define LARCH_ATYPE_UV16QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V16QImode) ++ ++#define LARCH_ATYPE_UV4DI \ ++ loongarch_builtin_vector_type (long_long_unsigned_type_node, V4DImode) ++#define LARCH_ATYPE_UV8SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V8SImode) ++#define LARCH_ATYPE_UV16HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V16HImode) ++#define LARCH_ATYPE_UV32QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V32QImode) ++ ++#define LARCH_ATYPE_UV2SI \ ++ loongarch_builtin_vector_type (unsigned_intSI_type_node, V2SImode) ++#define LARCH_ATYPE_UV4HI \ ++ loongarch_builtin_vector_type (unsigned_intHI_type_node, V4HImode) ++#define LARCH_ATYPE_UV8QI \ ++ loongarch_builtin_vector_type (unsigned_intQI_type_node, V8QImode) ++ ++/* LARCH_FTYPE_ATYPESN takes N LARCH_FTYPES-like type codes and lists ++ their associated LARCH_ATYPEs. 
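++   For example, LARCH_FTYPE_ATYPES2 (V8SI, V8SI, V8SI) expands to
++   LARCH_ATYPE_V8SI, LARCH_ATYPE_V8SI, LARCH_ATYPE_V8SI, which
++   loongarch_build_function_type below passes straight to
++   build_function_type_list.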
*/ ++#define LARCH_FTYPE_ATYPES1(A, B) \ ++ LARCH_ATYPE_##A, LARCH_ATYPE_##B ++ ++#define LARCH_FTYPE_ATYPES2(A, B, C) \ ++ LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C ++ ++#define LARCH_FTYPE_ATYPES3(A, B, C, D) \ ++ LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C, LARCH_ATYPE_##D ++ ++#define LARCH_FTYPE_ATYPES4(A, B, C, D, E) \ ++ LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C, LARCH_ATYPE_##D, \ ++ LARCH_ATYPE_##E ++ ++/* Index I is the function declaration for loongarch_builtins[I], or null if the ++ function isn't defined on this target. */ ++static GTY(()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; ++/* Get the index I of the function declaration for loongarch_builtin_decls[I] ++ using the instruction code or return null if not defined for the target. */ ++static GTY(()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; ++ ++/* Return the function type associated with function prototype TYPE. */ ++ ++static tree ++loongarch_build_function_type (enum loongarch_function_type type) ++{ ++ static tree types[(int) LARCH_MAX_FTYPE_MAX]; ++ ++ if (types[(int) type] == NULL_TREE) ++ switch (type) ++ { ++#define DEF_LARCH_FTYPE(NUM, ARGS) \ ++ case LARCH_FTYPE_NAME##NUM ARGS: \ ++ types[(int) type] \ ++ = build_function_type_list (LARCH_FTYPE_ATYPES##NUM ARGS, \ ++ NULL_TREE); \ ++ break; ++#include "config/loongarch/loongarch-ftypes.def" ++#undef DEF_LARCH_FTYPE ++ default: ++ gcc_unreachable (); ++ } ++ ++ return types[(int) type]; ++} ++ ++/* Implement TARGET_INIT_BUILTINS. */ ++ ++void ++loongarch_init_builtins (void) ++{ ++ const struct loongarch_builtin_description *d; ++ unsigned int i; ++ ++ /* Iterate through all of the bdesc arrays, initializing all of the ++ builtin functions. */ ++ for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++) ++ { ++ d = &loongarch_builtins[i]; ++ if (d->avail ()) ++ { ++ loongarch_builtin_decls[i] ++ = add_builtin_function (d->name, ++ loongarch_build_function_type (d->function_type), ++ i, BUILT_IN_MD, NULL, NULL); ++ loongarch_get_builtin_decl_index[d->icode] = i; ++ } ++ } ++} ++ ++/* Implement TARGET_BUILTIN_DECL. */ ++ ++tree ++loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED) ++{ ++ if (code >= ARRAY_SIZE (loongarch_builtins)) ++ return error_mark_node; ++ return loongarch_builtin_decls[code]; ++} ++ ++/* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION. */ ++ ++tree ++loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, tree type_in) ++{ ++ machine_mode in_mode, out_mode; ++ int in_n, out_n; ++ ++ if (TREE_CODE (type_out) != VECTOR_TYPE ++ || TREE_CODE (type_in) != VECTOR_TYPE ++ || !ISA_HAS_LSX) ++ return NULL_TREE; ++ ++ out_mode = TYPE_MODE (TREE_TYPE (type_out)); ++ out_n = TYPE_VECTOR_SUBPARTS (type_out); ++ in_mode = TYPE_MODE (TREE_TYPE (type_in)); ++ in_n = TYPE_VECTOR_SUBPARTS (type_in); ++ ++ /* INSN is the name of the associated instruction pattern, without ++ the leading CODE_FOR_. 
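++   For example, LARCH_GET_BUILTIN (lsx_vfsqrt_d) retrieves the decl that
++   loongarch_init_builtins registered for CODE_FOR_lsx_vfsqrt_d.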
*/ ++#define LARCH_GET_BUILTIN(INSN) \ ++ loongarch_builtin_decls[loongarch_get_builtin_decl_index[CODE_FOR_##INSN]] ++ ++ switch (fn) ++ { ++ case BUILT_IN_SQRT: ++ if (out_mode == DFmode && out_n == 2 ++ && in_mode == DFmode && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfsqrt_d); ++ break; ++ case BUILT_IN_SQRTF: ++ if (out_mode == SFmode && out_n == 4 ++ && in_mode == SFmode && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfsqrt_s); ++ break; ++ default: ++ break; ++ } ++ ++ return NULL_TREE; ++} ++ ++/* Take argument ARGNO from EXP's argument list and convert it into ++ an expand operand. Store the operand in *OP. */ ++ ++static void ++loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp, ++ unsigned int argno) ++{ ++ tree arg; ++ rtx value; ++ ++ arg = CALL_EXPR_ARG (exp, argno); ++ value = expand_normal (arg); ++ create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg))); ++} ++ ++/* Return a const_int vector of VAL with mode MODE. */ ++ ++rtx ++loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val) ++{ ++ rtx c = gen_int_mode (val, GET_MODE_INNER (mode)); ++ return gen_const_vec_duplicate (mode, c); ++} ++ ++/* Expand instruction ICODE as part of a built-in function sequence. ++ Use the first NOPS elements of OPS as the instruction's operands. ++ HAS_TARGET_P is true if operand 0 is a target; it is false if the ++ instruction has no target. ++ ++ Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */ ++ ++static rtx ++loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, ++ struct expand_operand *ops, bool has_target_p) ++{ ++ machine_mode imode; ++ int rangelo = 0, rangehi = 0, error_opno = 0; ++ rtx sireg; ++ ++ switch (icode) ++ { ++ case CODE_FOR_lsx_vaddi_bu: ++ case CODE_FOR_lsx_vaddi_hu: ++ case CODE_FOR_lsx_vaddi_wu: ++ case CODE_FOR_lsx_vaddi_du: ++ case CODE_FOR_lsx_vslti_bu: ++ case CODE_FOR_lsx_vslti_hu: ++ case CODE_FOR_lsx_vslti_wu: ++ case CODE_FOR_lsx_vslti_du: ++ case CODE_FOR_lsx_vslei_bu: ++ case CODE_FOR_lsx_vslei_hu: ++ case CODE_FOR_lsx_vslei_wu: ++ case CODE_FOR_lsx_vslei_du: ++ case CODE_FOR_lsx_vmaxi_bu: ++ case CODE_FOR_lsx_vmaxi_hu: ++ case CODE_FOR_lsx_vmaxi_wu: ++ case CODE_FOR_lsx_vmaxi_du: ++ case CODE_FOR_lsx_vmini_bu: ++ case CODE_FOR_lsx_vmini_hu: ++ case CODE_FOR_lsx_vmini_wu: ++ case CODE_FOR_lsx_vmini_du: ++ case CODE_FOR_lsx_vsubi_bu: ++ case CODE_FOR_lsx_vsubi_hu: ++ case CODE_FOR_lsx_vsubi_wu: ++ case CODE_FOR_lsx_vsubi_du: ++ case CODE_FOR_lasx_xvaddi_bu: ++ case CODE_FOR_lasx_xvaddi_hu: ++ case CODE_FOR_lasx_xvaddi_wu: ++ case CODE_FOR_lasx_xvaddi_du: ++ case CODE_FOR_lasx_xvslti_bu: ++ case CODE_FOR_lasx_xvslti_hu: ++ case CODE_FOR_lasx_xvslti_wu: ++ case CODE_FOR_lasx_xvslti_du: ++ case CODE_FOR_lasx_xvslei_bu: ++ case CODE_FOR_lasx_xvslei_hu: ++ case CODE_FOR_lasx_xvslei_wu: ++ case CODE_FOR_lasx_xvslei_du: ++ case CODE_FOR_lasx_xvmaxi_bu: ++ case CODE_FOR_lasx_xvmaxi_hu: ++ case CODE_FOR_lasx_xvmaxi_wu: ++ case CODE_FOR_lasx_xvmaxi_du: ++ case CODE_FOR_lasx_xvmini_bu: ++ case CODE_FOR_lasx_xvmini_hu: ++ case CODE_FOR_lasx_xvmini_wu: ++ case CODE_FOR_lasx_xvmini_du: ++ case CODE_FOR_lasx_xvsubi_bu: ++ case CODE_FOR_lasx_xvsubi_hu: ++ case CODE_FOR_lasx_xvsubi_wu: ++ case CODE_FOR_lasx_xvsubi_du: ++ gcc_assert (has_target_p && nops == 3); ++ /* We only generate a vector of constants iff the second argument ++ is an immediate. We also validate the range of the immediate. 
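++         For these unsigned-immediate forms the accepted range is
++         [0, 31]; an out-of-range immediate is reported through
++         error_opno and the error call at the end of this function
++         rather than being silently truncated.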
++         */
++      if (CONST_INT_P (ops[2].value))
++        {
++          rangelo = 0;
++          rangehi = 31;
++          if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
++            {
++              ops[2].mode = ops[0].mode;
++              ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++                                                             INTVAL (ops[2].value));
++            }
++          else
++            error_opno = 2;
++        }
++      break;
++
++    case CODE_FOR_lsx_vseqi_b:
++    case CODE_FOR_lsx_vseqi_h:
++    case CODE_FOR_lsx_vseqi_w:
++    case CODE_FOR_lsx_vseqi_d:
++    case CODE_FOR_lsx_vslti_b:
++    case CODE_FOR_lsx_vslti_h:
++    case CODE_FOR_lsx_vslti_w:
++    case CODE_FOR_lsx_vslti_d:
++    case CODE_FOR_lsx_vslei_b:
++    case CODE_FOR_lsx_vslei_h:
++    case CODE_FOR_lsx_vslei_w:
++    case CODE_FOR_lsx_vslei_d:
++    case CODE_FOR_lsx_vmaxi_b:
++    case CODE_FOR_lsx_vmaxi_h:
++    case CODE_FOR_lsx_vmaxi_w:
++    case CODE_FOR_lsx_vmaxi_d:
++    case CODE_FOR_lsx_vmini_b:
++    case CODE_FOR_lsx_vmini_h:
++    case CODE_FOR_lsx_vmini_w:
++    case CODE_FOR_lsx_vmini_d:
++    case CODE_FOR_lasx_xvseqi_b:
++    case CODE_FOR_lasx_xvseqi_h:
++    case CODE_FOR_lasx_xvseqi_w:
++    case CODE_FOR_lasx_xvseqi_d:
++    case CODE_FOR_lasx_xvslti_b:
++    case CODE_FOR_lasx_xvslti_h:
++    case CODE_FOR_lasx_xvslti_w:
++    case CODE_FOR_lasx_xvslti_d:
++    case CODE_FOR_lasx_xvslei_b:
++    case CODE_FOR_lasx_xvslei_h:
++    case CODE_FOR_lasx_xvslei_w:
++    case CODE_FOR_lasx_xvslei_d:
++    case CODE_FOR_lasx_xvmaxi_b:
++    case CODE_FOR_lasx_xvmaxi_h:
++    case CODE_FOR_lasx_xvmaxi_w:
++    case CODE_FOR_lasx_xvmaxi_d:
++    case CODE_FOR_lasx_xvmini_b:
++    case CODE_FOR_lasx_xvmini_h:
++    case CODE_FOR_lasx_xvmini_w:
++    case CODE_FOR_lasx_xvmini_d:
++      gcc_assert (has_target_p && nops == 3);
++      /* We only generate a vector of constants iff the second argument
++         is an immediate.  We also validate the range of the immediate.  */
++      if (CONST_INT_P (ops[2].value))
++        {
++          rangelo = -16;
++          rangehi = 15;
++          if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
++            {
++              ops[2].mode = ops[0].mode;
++              ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++                                                             INTVAL (ops[2].value));
++            }
++          else
++            error_opno = 2;
++        }
++      break;
++
++    case CODE_FOR_lsx_vandi_b:
++    case CODE_FOR_lsx_vori_b:
++    case CODE_FOR_lsx_vnori_b:
++    case CODE_FOR_lsx_vxori_b:
++    case CODE_FOR_lasx_xvandi_b:
++    case CODE_FOR_lasx_xvori_b:
++    case CODE_FOR_lasx_xvnori_b:
++    case CODE_FOR_lasx_xvxori_b:
++      gcc_assert (has_target_p && nops == 3);
++      if (!CONST_INT_P (ops[2].value))
++        break;
++      ops[2].mode = ops[0].mode;
++      ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++                                                     INTVAL (ops[2].value));
++      break;
++
++    case CODE_FOR_lsx_vbitseli_b:
++    case CODE_FOR_lasx_xvbitseli_b:
++      gcc_assert (has_target_p && nops == 4);
++      if (!CONST_INT_P (ops[3].value))
++        break;
++      ops[3].mode = ops[0].mode;
++      ops[3].value = loongarch_gen_const_int_vector (ops[3].mode,
++                                                     INTVAL (ops[3].value));
++      break;
++
++    case CODE_FOR_lsx_vreplgr2vr_b:
++    case CODE_FOR_lsx_vreplgr2vr_h:
++    case CODE_FOR_lsx_vreplgr2vr_w:
++    case CODE_FOR_lsx_vreplgr2vr_d:
++    case CODE_FOR_lasx_xvreplgr2vr_b:
++    case CODE_FOR_lasx_xvreplgr2vr_h:
++    case CODE_FOR_lasx_xvreplgr2vr_w:
++    case CODE_FOR_lasx_xvreplgr2vr_d:
++      /* Map the built-ins to vector fill operations.  We need to fix up
++         the mode for the element being inserted.
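++         The incoming scalar still has the mode of the source-level
++         argument, so lowpart_subreg below retypes it to the vector's
++         inner mode before the fill pattern sees it.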
*/ ++ gcc_assert (has_target_p && nops == 2); ++ imode = GET_MODE_INNER (ops[0].mode); ++ ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode); ++ ops[1].mode = imode; ++ break; ++ ++ case CODE_FOR_lsx_vilvh_b: ++ case CODE_FOR_lsx_vilvh_h: ++ case CODE_FOR_lsx_vilvh_w: ++ case CODE_FOR_lsx_vilvh_d: ++ case CODE_FOR_lsx_vilvl_b: ++ case CODE_FOR_lsx_vilvl_h: ++ case CODE_FOR_lsx_vilvl_w: ++ case CODE_FOR_lsx_vilvl_d: ++ case CODE_FOR_lsx_vpackev_b: ++ case CODE_FOR_lsx_vpackev_h: ++ case CODE_FOR_lsx_vpackev_w: ++ case CODE_FOR_lsx_vpackod_b: ++ case CODE_FOR_lsx_vpackod_h: ++ case CODE_FOR_lsx_vpackod_w: ++ case CODE_FOR_lsx_vpickev_b: ++ case CODE_FOR_lsx_vpickev_h: ++ case CODE_FOR_lsx_vpickev_w: ++ case CODE_FOR_lsx_vpickod_b: ++ case CODE_FOR_lsx_vpickod_h: ++ case CODE_FOR_lsx_vpickod_w: ++ case CODE_FOR_lasx_xvilvh_b: ++ case CODE_FOR_lasx_xvilvh_h: ++ case CODE_FOR_lasx_xvilvh_w: ++ case CODE_FOR_lasx_xvilvh_d: ++ case CODE_FOR_lasx_xvilvl_b: ++ case CODE_FOR_lasx_xvilvl_h: ++ case CODE_FOR_lasx_xvilvl_w: ++ case CODE_FOR_lasx_xvilvl_d: ++ case CODE_FOR_lasx_xvpackev_b: ++ case CODE_FOR_lasx_xvpackev_h: ++ case CODE_FOR_lasx_xvpackev_w: ++ case CODE_FOR_lasx_xvpackod_b: ++ case CODE_FOR_lasx_xvpackod_h: ++ case CODE_FOR_lasx_xvpackod_w: ++ case CODE_FOR_lasx_xvpickev_b: ++ case CODE_FOR_lasx_xvpickev_h: ++ case CODE_FOR_lasx_xvpickev_w: ++ case CODE_FOR_lasx_xvpickod_b: ++ case CODE_FOR_lasx_xvpickod_h: ++ case CODE_FOR_lasx_xvpickod_w: ++ /* Swap the operands 1 and 2 for interleave operations. Built-ins follow ++ convention of ISA, which have op1 as higher component and op2 as lower ++ component. However, the VEC_PERM op in tree and vec_concat in RTL ++ expects first operand to be lower component, because of which this ++ swap is needed for builtins. */ ++ gcc_assert (has_target_p && nops == 3); ++ std::swap (ops[1], ops[2]); ++ break; ++ ++ case CODE_FOR_lsx_vslli_b: ++ case CODE_FOR_lsx_vslli_h: ++ case CODE_FOR_lsx_vslli_w: ++ case CODE_FOR_lsx_vslli_d: ++ case CODE_FOR_lsx_vsrai_b: ++ case CODE_FOR_lsx_vsrai_h: ++ case CODE_FOR_lsx_vsrai_w: ++ case CODE_FOR_lsx_vsrai_d: ++ case CODE_FOR_lsx_vsrli_b: ++ case CODE_FOR_lsx_vsrli_h: ++ case CODE_FOR_lsx_vsrli_w: ++ case CODE_FOR_lsx_vsrli_d: ++ case CODE_FOR_lasx_xvslli_b: ++ case CODE_FOR_lasx_xvslli_h: ++ case CODE_FOR_lasx_xvslli_w: ++ case CODE_FOR_lasx_xvslli_d: ++ case CODE_FOR_lasx_xvsrai_b: ++ case CODE_FOR_lasx_xvsrai_h: ++ case CODE_FOR_lasx_xvsrai_w: ++ case CODE_FOR_lasx_xvsrai_d: ++ case CODE_FOR_lasx_xvsrli_b: ++ case CODE_FOR_lasx_xvsrli_h: ++ case CODE_FOR_lasx_xvsrli_w: ++ case CODE_FOR_lasx_xvsrli_d: ++ gcc_assert (has_target_p && nops == 3); ++ if (CONST_INT_P (ops[2].value)) ++ { ++ rangelo = 0; ++ rangehi = GET_MODE_UNIT_BITSIZE (ops[0].mode) - 1; ++ if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi)) ++ { ++ ops[2].mode = ops[0].mode; ++ ops[2].value = loongarch_gen_const_int_vector (ops[2].mode, ++ INTVAL (ops[2].value)); ++ } ++ else ++ error_opno = 2; ++ } ++ break; ++ ++ case CODE_FOR_lsx_vinsgr2vr_b: ++ case CODE_FOR_lsx_vinsgr2vr_h: ++ case CODE_FOR_lsx_vinsgr2vr_w: ++ case CODE_FOR_lsx_vinsgr2vr_d: ++ /* Map the built-ins to insert operations. We need to swap operands, ++ fix up the mode for the element being inserted, and generate ++ a bit mask for vec_merge. 
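++         The mask is the vec_merge element selector: inserting into
++         element N sets bit N, i.e. ops[3] becomes GEN_INT (1 << N).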
++         */
++      gcc_assert (has_target_p && nops == 4);
++      std::swap (ops[1], ops[2]);
++      imode = GET_MODE_INNER (ops[0].mode);
++      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
++      ops[1].mode = imode;
++      rangelo = 0;
++      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
++      if (CONST_INT_P (ops[3].value)
++          && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
++        ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
++      else
++        error_opno = 2;
++      break;
++
++    case CODE_FOR_lsx_vshuf4i_b:
++    case CODE_FOR_lsx_vshuf4i_h:
++    case CODE_FOR_lsx_vshuf4i_w:
++    case CODE_FOR_lsx_vshuf4i_w_f:
++      gcc_assert (has_target_p && nops == 3);
++      ops[2].value = loongarch_gen_const_int_vector_shuffle (ops[0].mode,
++                                                             INTVAL (ops[2].value));
++      break;
++
++    case CODE_FOR_lasx_xvinsgr2vr_w:
++    case CODE_FOR_lasx_xvinsgr2vr_d:
++      /* Map the built-ins to insert operations.  We need to swap operands,
++         fix up the mode for the element being inserted, and generate
++         a bit mask for vec_merge.  */
++      gcc_assert (has_target_p && nops == 4);
++      std::swap (ops[1], ops[2]);
++      imode = GET_MODE_INNER (ops[0].mode);
++      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
++      ops[1].mode = imode;
++      rangelo = 0;
++      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
++      if (CONST_INT_P (ops[3].value)
++          && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
++        ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
++      else
++        error_opno = 2;
++      break;
++
++    default:
++      break;
++    }
++
++  if (error_opno != 0)
++    {
++      error ("argument %d to the built-in must be a constant"
++             " in range %d to %d", error_opno, rangelo, rangehi);
++      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
++    }
++  else if (!maybe_expand_insn (icode, nops, ops))
++    {
++      error ("invalid argument to built-in function");
++      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
++    }
++  return has_target_p ? ops[0].value : const0_rtx;
++}
++
++/* Expand a LARCH_BUILTIN_DIRECT or LARCH_BUILTIN_DIRECT_NO_TARGET function;
++   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
++   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
++   suggests a good place to put the result.  */
++
++static rtx
++loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
++                                 bool has_target_p)
++{
++  struct expand_operand ops[MAX_RECOG_OPERANDS];
++  int opno, argno;
++
++  /* Map any target to operand 0.  */
++  opno = 0;
++  if (has_target_p)
++    create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
++
++  /* Map the arguments to the other operands.
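++     The gcc_assert below checks that the call supplies exactly the
++     number of operands the .md pattern expects
++     (insn_data[icode].n_generator_args, counting any target already
++     mapped to operand 0 above).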
*/ ++ gcc_assert (opno + call_expr_nargs (exp) ++ == insn_data[icode].n_generator_args); ++ for (argno = 0; argno < call_expr_nargs (exp); argno++) ++ loongarch_prepare_builtin_arg (&ops[opno++], exp, argno); ++ ++ return loongarch_expand_builtin_insn (icode, opno, ops, has_target_p); ++} ++ ++/* Expand an LSX built-in for a compare and branch instruction specified by ++ ICODE, set a general-purpose register to 1 if the branch was taken, ++ 0 otherwise. */ ++ ++static rtx ++loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp) ++{ ++ struct expand_operand ops[3]; ++ rtx_insn *cbranch; ++ rtx_code_label *true_label, *done_label; ++ rtx cmp_result; ++ ++ true_label = gen_label_rtx (); ++ done_label = gen_label_rtx (); ++ ++ create_input_operand (&ops[0], true_label, TYPE_MODE (TREE_TYPE (exp))); ++ loongarch_prepare_builtin_arg (&ops[1], exp, 0); ++ create_fixed_operand (&ops[2], const0_rtx); ++ ++ /* Make sure that the operand 1 is a REG. */ ++ if (GET_CODE (ops[1].value) != REG) ++ ops[1].value = force_reg (ops[1].mode, ops[1].value); ++ ++ if ((cbranch = maybe_gen_insn (icode, 3, ops)) == NULL_RTX) ++ error ("failed to expand built-in function"); ++ ++ cmp_result = gen_reg_rtx (SImode); ++ ++ /* First assume that CMP_RESULT is false. */ ++ loongarch_emit_move (cmp_result, const0_rtx); ++ ++ /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise. */ ++ emit_jump_insn (cbranch); ++ emit_jump_insn (gen_jump (done_label)); ++ emit_barrier (); ++ ++ /* Set CMP_RESULT to true if the branch was taken. */ ++ emit_label (true_label); ++ loongarch_emit_move (cmp_result, const1_rtx); ++ ++ emit_label (done_label); ++ return cmp_result; ++} ++ ++/* Implement TARGET_EXPAND_BUILTIN. */ ++ ++rtx ++loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, ++ machine_mode mode, int ignore) ++{ ++ tree fndecl; ++ unsigned int fcode, avail; ++ const struct loongarch_builtin_description *d; ++ ++ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); ++ fcode = DECL_FUNCTION_CODE (fndecl); ++ gcc_assert (fcode < ARRAY_SIZE (loongarch_builtins)); ++ d = &loongarch_builtins[fcode]; ++ avail = d->avail (); ++ gcc_assert (avail != 0); ++ switch (d->builtin_type) ++ { ++ case LARCH_BUILTIN_DIRECT: ++ case LARCH_BUILTIN_LSX: ++ case LARCH_BUILTIN_LASX: ++ return loongarch_expand_builtin_direct (d->icode, target, exp, true); ++ ++ case LARCH_BUILTIN_DIRECT_NO_TARGET: ++ return loongarch_expand_builtin_direct (d->icode, target, exp, false); ++ ++ case LARCH_BUILTIN_LSX_TEST_BRANCH: ++ case LARCH_BUILTIN_LASX_TEST_BRANCH: ++ return loongarch_expand_builtin_lsx_test_branch (d->icode, exp); ++ } ++ gcc_unreachable (); ++} ++/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. 
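++
++   As a rough sketch of the protocol this hook follows: the three
++   sequences built below support C11 atomic compound assignment on
++   floating-point operands, e.g.
++
++     _Atomic double d;
++     d += 1.0;   /* expanded as a compare-exchange loop  */
++
++   *HOLD saves the FCSR and masks the exception-enable bits, *CLEAR
++   resets the exception flags before each retry of the loop, and
++   *UPDATE restores the original FCSR and re-raises any exceptions
++   recorded while the operation ran.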
*/
++
++void
++loongarch_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
++{
++  if (!TARGET_HARD_FLOAT_ABI)
++    return;
++  tree exceptions_var = create_tmp_var_raw (LARCH_ATYPE_USI);
++  tree fcsr_orig_var = create_tmp_var_raw (LARCH_ATYPE_USI);
++  tree fcsr_mod_var = create_tmp_var_raw (LARCH_ATYPE_USI);
++  tree const0 = build_int_cst (LARCH_ATYPE_UQI, 0);
++  tree get_fcsr = loongarch_builtin_decls[LARCH_MOVFCSR2GR];
++  tree set_fcsr = loongarch_builtin_decls[LARCH_MOVGR2FCSR];
++  tree get_fcsr_hold_call = build_call_expr (get_fcsr, 1, const0);
++  tree hold_assign_orig = build4 (TARGET_EXPR, LARCH_ATYPE_USI,
++                                  fcsr_orig_var, get_fcsr_hold_call,
++                                  NULL, NULL);
++  tree hold_mod_val = build2 (BIT_AND_EXPR, LARCH_ATYPE_USI, fcsr_orig_var,
++                              build_int_cst (LARCH_ATYPE_USI, 0xffe0ffe0));
++  tree hold_assign_mod = build4 (TARGET_EXPR, LARCH_ATYPE_USI,
++                                 fcsr_mod_var, hold_mod_val, NULL, NULL);
++  tree set_fcsr_hold_call = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var);
++  tree hold_all = build2 (COMPOUND_EXPR, LARCH_ATYPE_USI,
++                          hold_assign_orig, hold_assign_mod);
++  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all,
++                  set_fcsr_hold_call);
++
++  *clear = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var);
++
++  tree get_fcsr_update_call = build_call_expr (get_fcsr, 1, const0);
++  *update = build4 (TARGET_EXPR, LARCH_ATYPE_USI, exceptions_var,
++                    get_fcsr_update_call, NULL, NULL);
++  tree set_fcsr_update_call = build_call_expr (set_fcsr, 2, const0, fcsr_orig_var);
++  *update = build2 (COMPOUND_EXPR, void_type_node, *update,
++                    set_fcsr_update_call);
++  tree atomic_feraiseexcept
++    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
++  tree int_exceptions_var = fold_convert (integer_type_node,
++                                          exceptions_var);
++  tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept,
++                                                    1, int_exceptions_var);
++  *update = build2 (COMPOUND_EXPR, void_type_node, *update,
++                    atomic_feraiseexcept_call);
++}
++
++/* Implement TARGET_BUILTIN_VA_LIST.  */
++
++tree
++loongarch_build_builtin_va_list (void)
++{
++  return ptr_type_node;
++}
++
+diff --git a/gcc/config/loongarch/loongarch-c.c b/gcc/config/loongarch/loongarch-c.c
+new file mode 100644
+index 000000000..6eac43bdf
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-c.c
+@@ -0,0 +1,135 @@
++/* LoongArch-specific code for C family languages.
++   Copyright (C) 2020-2021 Free Software Foundation, Inc.
++   Contributed by Andrew Waterman (zhouyingkun@mail.loongson.cn).
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GCC is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with GCC; see the file COPYING3.  If not see
++   <http://www.gnu.org/licenses/>.  */
++
++#define IN_TARGET_CODE 1
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "c-family/c-common.h"
++#include "cpplib.h"
++
++#define preprocessing_asm_p() (cpp_get_options (pfile)->lang == CLK_ASM)
++#define builtin_define(TXT) cpp_define (pfile, TXT)
++#define builtin_assert(TXT) cpp_assert (pfile, TXT)
++
++/* Implement TARGET_CPU_CPP_BUILTINS.
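++
++   Define the LoongArch-specific preprocessor macros for the C family
++   front ends.  For example (a sketch based on the definitions below),
++   code compiled with LSX enabled can test:
++
++     #if defined (__loongarch_sx) && __loongarch_sx_width == 128
++       /* 128-bit LSX code path.  */
++     #endif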
*/ ++ ++void ++loongarch_cpu_cpp_builtins (cpp_reader *pfile) ++{ ++ builtin_assert ("machine=loongarch"); ++ builtin_assert ("cpu=loongarch"); ++ builtin_define ("__loongarch__"); ++ ++ if (TARGET_FLOAT64) ++ builtin_define ("__loongarch_fpr=64"); ++ else ++ builtin_define ("__loongarch_fpr=32"); ++ ++ if (ISA_HAS_LSX) ++ { ++ builtin_define ("__loongarch_simd"); ++ builtin_define ("__loongarch_sx"); ++ builtin_define ("__loongarch_sx_width=128"); ++ ++ if (!ISA_HAS_LASX) ++ builtin_define ("__loongarch_simd_width=128"); ++ } ++ ++ if (ISA_HAS_LASX) ++ { ++ builtin_define ("__loongarch_asx"); ++ builtin_define ("__loongarch_asx_width=256"); ++ builtin_define ("__loongarch_simd_width=256"); ++ } ++ ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", loongarch_arch_info); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", loongarch_tune_info); ++ ++ ++ switch (loongarch_abi) ++ { ++ case ABILP32: ++ builtin_define ("_ABILP32=1"); ++ builtin_define ("_LOONGARCH_SIM=_ABILP32"); ++ builtin_define ("__loongarch32"); ++ break; ++ ++ case ABILPX32: ++ builtin_define ("_ABILPX32=2"); ++ builtin_define ("_LOONGARCH_SIM=_ABILPX32"); ++ break; ++ ++ case ABILP64: ++ builtin_define ("_ABILP64=3"); ++ builtin_define ("_LOONGARCH_SIM=_ABILP64"); ++ builtin_define ("__loongarch64"); ++ break; ++ } ++ ++ builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); ++ builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); ++ builtin_define_with_int_value ("_LOONGARCH_SZPTR", POINTER_SIZE); ++ builtin_define_with_int_value ("_LOONGARCH_FPSET", ++ 32 / MAX_FPRS_PER_FMT); ++ builtin_define_with_int_value ("_LOONGARCH_SPFPSET", ++ 32); ++ ++ /* These defines reflect the ABI in use, not whether the ++ FPU is directly accessible. */ ++ if (TARGET_NO_FLOAT) ++ builtin_define ("__loongarch_no_float"); ++ else if (TARGET_HARD_FLOAT_ABI) ++ builtin_define ("__loongarch_hard_float"); ++ else ++ builtin_define ("__loongarch_soft_float"); ++ ++ if (TARGET_SINGLE_FLOAT) ++ builtin_define ("__loongarch_single_float"); ++ ++ /* Macros dependent on the C dialect. */ ++ if (preprocessing_asm_p ()) ++ { ++ builtin_define_std ("LANGUAGE_ASSEMBLY"); ++ builtin_define ("_LANGUAGE_ASSEMBLY"); ++ } ++ else if (c_dialect_cxx ()) ++ { ++ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); ++ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); ++ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); ++ } ++ else ++ { ++ builtin_define_std ("LANGUAGE_C"); ++ builtin_define ("_LANGUAGE_C"); ++ } ++ ++ if (c_dialect_objc ()) ++ { ++ builtin_define ("_LANGUAGE_OBJECTIVE_C"); ++ builtin_define ("__LANGUAGE_OBJECTIVE_C"); ++ /* Bizarre, but retained for backwards compatibility. */ ++ builtin_define_std ("LANGUAGE_C"); ++ builtin_define ("_LANGUAGE_C"); ++ } ++} +diff --git a/gcc/config/loongarch/loongarch-cpus.def b/gcc/config/loongarch/loongarch-cpus.def +new file mode 100644 +index 000000000..7ce2508e3 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-cpus.def +@@ -0,0 +1,38 @@ ++/* LARCH CPU names. ++ Copyright (C) 1989-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. 
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++/* A table describing all the processors GCC knows about.  The first
++   mention of an ISA level is taken as the canonical name for that
++   ISA.
++
++   To ease comparison, please keep this table in the same order
++   as GAS's loongarch_cpu_info_table.  Please also make sure that
++   LARCH_ISA_LEVEL_SPEC and LARCH_ARCH_FLOAT_SPEC handle all -march
++   options correctly.
++
++   Before including this file, define a macro:
++
++   LARCH_CPU (NAME, CPU, ISA, FLAGS)
++
++   where the arguments are the fields of struct loongarch_cpu_info.  */
++
++/* Entries for generic ISAs.  */
++LARCH_CPU ("loongarch64", PROCESSOR_LOONGARCH64, 0, 0)
++LARCH_CPU ("la464", PROCESSOR_LA464, 0, 0)
++
+diff --git a/gcc/config/loongarch/loongarch-d.c b/gcc/config/loongarch/loongarch-d.c
+new file mode 100644
+index 000000000..971e5d33e
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-d.c
+@@ -0,0 +1,31 @@
++/* Subroutines for the D front end on the LARCH architecture.
++   Copyright (C) 2017 Free Software Foundation, Inc.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "d/d-target.h"
++#include "d/d-target-def.h"
++
++/* Implement TARGET_D_CPU_VERSIONS for LARCH targets.  */
++
++void
++loongarch_d_target_versions (void)
++{
++  /* TODO: add the LoongArch version identifiers for the D front end.  */
++}
+diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def
+new file mode 100644
+index 000000000..a10a025ba
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-ftypes.def
+@@ -0,0 +1,719 @@
++/* Definitions of prototypes for LARCH built-in functions.  -*- C -*-
++   Copyright (C) 2007-2018 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++/* Invoke DEF_LARCH_FTYPE (NARGS, LIST) for each prototype used by
++   LARCH built-in functions, where:
++
++   NARGS is the number of arguments.
++   LIST contains the return-type code followed by the codes for each
++     argument type.
++
++   Argument- and return-type codes are either modes or one of the following:
++
++   VOID for void_type_node
++   INT for integer_type_node
++   POINTER for ptr_type_node
++
++   (we don't use PTR because that's an ANSI-compatibility macro).
++
++   Please keep this list lexicographically sorted by the LIST argument.
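++
++   For example, the entry
++
++     DEF_LARCH_FTYPE (2, (SI, SI, SI))
++
++   describes a two-argument built-in whose return type and both argument
++   types are SImode -- roughly "int f (int, int)" at the C level.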
*/ ++DEF_LARCH_FTYPE (1, (DF, DF)) ++DEF_LARCH_FTYPE (2, (DF, DF, DF)) ++DEF_LARCH_FTYPE (1, (DF, V2DF)) ++DEF_LARCH_FTYPE (1, (DF, V4DF)) ++ ++DEF_LARCH_FTYPE (1, (DI, DI)) ++DEF_LARCH_FTYPE (1, (DI, SI)) ++DEF_LARCH_FTYPE (1, (DI, UQI)) ++DEF_LARCH_FTYPE (1, (UDI, USI)) ++DEF_LARCH_FTYPE (1, (UQI, USI)) ++DEF_LARCH_FTYPE (1, (USI, UQI)) ++DEF_LARCH_FTYPE (1, (UHI, USI)) ++DEF_LARCH_FTYPE (2, (DI, DI, DI)) ++DEF_LARCH_FTYPE (2, (DI, DI, SI)) ++DEF_LARCH_FTYPE (2, (DI, DI, UQI)) ++DEF_LARCH_FTYPE (2, (VOID, DI, UQI)) ++DEF_LARCH_FTYPE (2, (VOID, SI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, UDI, USI)) ++DEF_LARCH_FTYPE (3, (DI, DI, SI, SI)) ++DEF_LARCH_FTYPE (3, (DI, DI, USI, USI)) ++DEF_LARCH_FTYPE (3, (DI, DI, DI, QI)) ++DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI)) ++DEF_LARCH_FTYPE (3, (DI, DI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (3, (DI, DI, V4QI, V4QI)) ++DEF_LARCH_FTYPE (2, (DI, POINTER, SI)) ++DEF_LARCH_FTYPE (2, (DI, SI, SI)) ++DEF_LARCH_FTYPE (2, (DI, USI, USI)) ++DEF_LARCH_FTYPE (2, (DI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, V4DI, UQI)) ++ ++DEF_LARCH_FTYPE (2, (INT, DF, DF)) ++DEF_LARCH_FTYPE (2, (INT, SF, SF)) ++DEF_LARCH_FTYPE (2, (INT, V2SF, V2SF)) ++DEF_LARCH_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF)) ++ ++DEF_LARCH_FTYPE (1, (SF, SF)) ++DEF_LARCH_FTYPE (2, (SF, SF, SF)) ++DEF_LARCH_FTYPE (1, (SF, V2SF)) ++DEF_LARCH_FTYPE (1, (SF, V4SF)) ++ ++DEF_LARCH_FTYPE (2, (SI, DI, SI)) ++DEF_LARCH_FTYPE (2, (SI, POINTER, SI)) ++DEF_LARCH_FTYPE (1, (SI, SI)) ++DEF_LARCH_FTYPE (1, (USI, USI)) ++DEF_LARCH_FTYPE (1, (SI, UDI)) ++DEF_LARCH_FTYPE (2, (QI, QI, QI)) ++DEF_LARCH_FTYPE (2, (HI, HI, HI)) ++DEF_LARCH_FTYPE (2, (SI, QI, SI)) ++DEF_LARCH_FTYPE (2, (SI, HI, SI)) ++DEF_LARCH_FTYPE (2, (SI, SI, SI)) ++DEF_LARCH_FTYPE (2, (SI, SI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, USI, USI)) ++DEF_LARCH_FTYPE (3, (SI, SI, SI, SI)) ++DEF_LARCH_FTYPE (3, (SI, SI, SI, QI)) ++DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) ++DEF_LARCH_FTYPE (1, (SI, UQI)) ++DEF_LARCH_FTYPE (1, (SI, UV16QI)) ++DEF_LARCH_FTYPE (1, (SI, UV32QI)) ++DEF_LARCH_FTYPE (1, (SI, UV2DI)) ++DEF_LARCH_FTYPE (1, (SI, UV4DI)) ++DEF_LARCH_FTYPE (1, (SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (SI, UV8SI)) ++DEF_LARCH_FTYPE (1, (SI, UV8HI)) ++DEF_LARCH_FTYPE (1, (SI, UV16HI)) ++DEF_LARCH_FTYPE (2, (SI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, V32QI, UQI)) ++DEF_LARCH_FTYPE (1, (SI, V2HI)) ++DEF_LARCH_FTYPE (2, (SI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (1, (SI, V4QI)) ++DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI)) ++DEF_LARCH_FTYPE (2, (SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, V8HI, UQI)) ++DEF_LARCH_FTYPE (1, (SI, VOID)) ++ ++DEF_LARCH_FTYPE (2, (UDI, UDI, UDI)) ++DEF_LARCH_FTYPE (2, (USI, V32QI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI)) ++DEF_LARCH_FTYPE (2, (USI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V4DI, UQI)) ++ ++DEF_LARCH_FTYPE (2, (USI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (USI, V8HI, UQI)) ++DEF_LARCH_FTYPE (1, (USI, VOID)) ++ ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, USI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, USI)) ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, V16QI)) ++ ++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UQI)) 
++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V2DF)) ++ ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, USI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, USI)) ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, V32QI)) ++ ++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, V4DI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V4DF)) ++ ++DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UV2SI)) ++ ++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, USI)) ++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UV4HI)) ++DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, USI)) ++DEF_LARCH_FTYPE (1, (UV4HI, UV8QI)) ++DEF_LARCH_FTYPE (2, (UV4HI, UV8QI, UV8QI)) ++ ++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (1, (UV4SI, V4SF)) ++ ++DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (1, (UV8SI, V8SF)) ++ ++DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, V16HI)) ++ ++DEF_LARCH_FTYPE (2, (UV8QI, UV4HI, UV4HI)) ++DEF_LARCH_FTYPE (1, (UV8QI, UV8QI)) ++DEF_LARCH_FTYPE (2, (UV8QI, UV8QI, UV8QI)) ++ ++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (1, (V16QI, HI)) ++DEF_LARCH_FTYPE (1, (V16QI, SI)) ++DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (1, (V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, SI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, USI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, UQI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, V16QI)) ++DEF_LARCH_FTYPE 
(3, (V16QI, V16QI, V16QI, SI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, UQI)) ++DEF_LARCH_FTYPE (4, (V16QI, V16QI, V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, USI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, V16QI)) ++ ++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (1, (V32QI, HI)) ++DEF_LARCH_FTYPE (1, (V32QI, SI)) ++DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (1, (V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, QI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, SI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, USI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, V32QI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, SI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, UQI)) ++DEF_LARCH_FTYPE (4, (V32QI, V32QI, V32QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, USI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, V32QI)) ++ ++DEF_LARCH_FTYPE (1, (V2DF, DF)) ++DEF_LARCH_FTYPE (1, (V2DF, UV2DI)) ++DEF_LARCH_FTYPE (1, (V2DF, V2DF)) ++DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DF)) ++DEF_LARCH_FTYPE (3, (V2DF, V2DF, V2DF, V2DF)) ++DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DI)) ++DEF_LARCH_FTYPE (1, (V2DF, V2DI)) ++DEF_LARCH_FTYPE (1, (V2DF, V4SF)) ++DEF_LARCH_FTYPE (1, (V2DF, V4SI)) ++ ++DEF_LARCH_FTYPE (1, (V4DF, DF)) ++DEF_LARCH_FTYPE (1, (V4DF, UV4DI)) ++DEF_LARCH_FTYPE (1, (V4DF, V4DF)) ++DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DF)) ++DEF_LARCH_FTYPE (3, (V4DF, V4DF, V4DF, V4DF)) ++DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DI)) ++DEF_LARCH_FTYPE (1, (V4DF, V4DI)) ++DEF_LARCH_FTYPE (1, (V4DF, V8SF)) ++DEF_LARCH_FTYPE (1, (V4DF, V8SI)) ++ ++DEF_LARCH_FTYPE (2, (V2DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V2DI, DI)) ++DEF_LARCH_FTYPE (1, (V2DI, HI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (1, (V2DI, V2DF)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V2DI, V2DI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, QI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, SI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, USI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, SI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, USI)) ++DEF_LARCH_FTYPE (4, (V2DI, V2DI, V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V2DI, V4SI, V4SI)) ++ ++DEF_LARCH_FTYPE (2, (V4DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V4DI, DI)) ++DEF_LARCH_FTYPE (1, (V4DI, HI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (1, (V4DI, V4DF)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DF, V4DF)) ++DEF_LARCH_FTYPE (1, (V4DI, V4DI)) ++DEF_LARCH_FTYPE (1, (UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, QI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, SI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, USI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, DI, UQI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, 
V4DI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (V4DI, V4DI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, SI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, USI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, UQI)) ++DEF_LARCH_FTYPE (4, (V4DI, V4DI, V4DI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (V4DI, V8SI, V8SI)) ++ ++DEF_LARCH_FTYPE (1, (V2HI, SI)) ++DEF_LARCH_FTYPE (2, (V2HI, SI, SI)) ++DEF_LARCH_FTYPE (3, (V2HI, SI, SI, SI)) ++DEF_LARCH_FTYPE (1, (V2HI, V2HI)) ++DEF_LARCH_FTYPE (2, (V2HI, V2HI, SI)) ++DEF_LARCH_FTYPE (2, (V2HI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (1, (V2HI, V4QI)) ++DEF_LARCH_FTYPE (2, (V2HI, V4QI, V2HI)) ++ ++DEF_LARCH_FTYPE (2, (V2SF, SF, SF)) ++DEF_LARCH_FTYPE (1, (V2SF, V2SF)) ++DEF_LARCH_FTYPE (2, (V2SF, V2SF, V2SF)) ++DEF_LARCH_FTYPE (3, (V2SF, V2SF, V2SF, INT)) ++DEF_LARCH_FTYPE (4, (V2SF, V2SF, V2SF, V2SF, V2SF)) ++ ++DEF_LARCH_FTYPE (2, (V2SI, V2SI, UQI)) ++DEF_LARCH_FTYPE (2, (V2SI, V2SI, V2SI)) ++DEF_LARCH_FTYPE (2, (V2SI, V4HI, V4HI)) ++ ++DEF_LARCH_FTYPE (2, (V4HI, V2SI, V2SI)) ++DEF_LARCH_FTYPE (2, (V4HI, V4HI, UQI)) ++DEF_LARCH_FTYPE (2, (V4HI, V4HI, USI)) ++DEF_LARCH_FTYPE (2, (V4HI, V4HI, V4HI)) ++DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, UQI)) ++DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, USI)) ++ ++DEF_LARCH_FTYPE (1, (V4QI, SI)) ++DEF_LARCH_FTYPE (2, (V4QI, V2HI, V2HI)) ++DEF_LARCH_FTYPE (1, (V4QI, V4QI)) ++DEF_LARCH_FTYPE (2, (V4QI, V4QI, SI)) ++DEF_LARCH_FTYPE (2, (V4QI, V4QI, V4QI)) ++ ++DEF_LARCH_FTYPE (1, (V4SF, SF)) ++DEF_LARCH_FTYPE (1, (V4SF, UV4SI)) ++DEF_LARCH_FTYPE (2, (V4SF, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V4SF, V4SF)) ++DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SF)) ++DEF_LARCH_FTYPE (3, (V4SF, V4SF, V4SF, V4SF)) ++DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SI)) ++DEF_LARCH_FTYPE (1, (V4SF, V4SI)) ++DEF_LARCH_FTYPE (1, (V4SF, V8HI)) ++DEF_LARCH_FTYPE (1, (V8SF, V16HI)) ++ ++DEF_LARCH_FTYPE (1, (V8SF, SF)) ++DEF_LARCH_FTYPE (1, (V8SF, UV8SI)) ++DEF_LARCH_FTYPE (2, (V8SF, V4DF, V4DF)) ++DEF_LARCH_FTYPE (1, (V8SF, V8SF)) ++DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SF)) ++DEF_LARCH_FTYPE (3, (V8SF, V8SF, V8SF, V8SF)) ++DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SI)) ++DEF_LARCH_FTYPE (1, (V8SF, V8SI)) ++DEF_LARCH_FTYPE (1, (V8SF, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (V4SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V4SI, HI)) ++DEF_LARCH_FTYPE (1, (V4SI, SI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (2, (V8SI, V4DF, V4DF)) ++DEF_LARCH_FTYPE (1, (V4SI, V4SF)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SF, V4SF)) ++DEF_LARCH_FTYPE (1, (V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, QI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, USI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, V4SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, USI)) ++DEF_LARCH_FTYPE (4, (V4SI, V4SI, V4SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V8HI, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (V8SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V8SI, HI)) ++DEF_LARCH_FTYPE (1, (V8SI, SI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UQI)) 
++DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V8SI, V2DF, V2DF)) ++DEF_LARCH_FTYPE (1, (V8SI, V8SF)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SF, V8SF)) ++DEF_LARCH_FTYPE (1, (V8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, QI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, SI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, USI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V8SI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, USI)) ++DEF_LARCH_FTYPE (4, (V8SI, V8SI, V8SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V8SI, V16HI, V16HI)) ++ ++DEF_LARCH_FTYPE (2, (V8HI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V8HI, HI)) ++DEF_LARCH_FTYPE (1, (V8HI, SI)) ++DEF_LARCH_FTYPE (2, (V8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V8HI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V4SF, V4SF)) ++DEF_LARCH_FTYPE (1, (V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, SI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, USI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, V8HI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, SI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, UQI)) ++DEF_LARCH_FTYPE (4, (V8HI, V8HI, V8HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, USI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, V8HI)) ++ ++DEF_LARCH_FTYPE (2, (V16HI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (1, (V16HI, HI)) ++DEF_LARCH_FTYPE (1, (V16HI, SI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V16HI, V8SF, V8SF)) ++DEF_LARCH_FTYPE (1, (V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, QI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, SI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, SI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, USI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, SI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, V16HI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V16HI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, SI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, UQI)) ++DEF_LARCH_FTYPE (4, (V16HI, V16HI, V16HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, USI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, V16HI)) ++ ++DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI)) ++DEF_LARCH_FTYPE (1, (V8QI, V8QI)) ++DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI)) ++ ++DEF_LARCH_FTYPE (2, (VOID, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (VOID, SI, SI)) ++DEF_LARCH_FTYPE (2, (VOID, DI, DI)) ++DEF_LARCH_FTYPE (2, (VOID, UQI, SI)) ++DEF_LARCH_FTYPE (1, (VOID, USI)) ++DEF_LARCH_FTYPE (2, (VOID, USI, UQI)) ++DEF_LARCH_FTYPE (1, (VOID, UHI)) ++DEF_LARCH_FTYPE (2, (VOID, UQI, USI)) ++DEF_LARCH_FTYPE (2, (VOID, UHI, 
USI)) ++DEF_LARCH_FTYPE (2, (VOID, USI, USI)) ++DEF_LARCH_FTYPE (2, (VOID, UDI, USI)) ++DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) ++DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, DI)) ++DEF_LARCH_FTYPE (3, (VOID, V4DF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V2DF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V2DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V4DI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (2, (VOID, V2HI, V2HI)) ++DEF_LARCH_FTYPE (2, (VOID, V4QI, V4QI)) ++DEF_LARCH_FTYPE (3, (VOID, V4SF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V8SF, POINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V4SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V8SI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V8HI, CVPOINTER, SI)) ++DEF_LARCH_FTYPE (3, (VOID, V16HI, CVPOINTER, SI)) ++ ++DEF_LARCH_FTYPE (1, (V16HI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV16HI, UV32QI)) ++DEF_LARCH_FTYPE (1, (V8SI, V32QI)) ++DEF_LARCH_FTYPE (1, (V4DI, V32QI)) ++DEF_LARCH_FTYPE (1, (V8HI, V16QI)) ++DEF_LARCH_FTYPE (1, (V4SI, V16QI)) ++DEF_LARCH_FTYPE (1, (V2DI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV8SI, UV16HI)) ++DEF_LARCH_FTYPE (1, (V8SI, V16HI)) ++DEF_LARCH_FTYPE (1, (V4DI, V16HI)) ++DEF_LARCH_FTYPE (1, (V4SI, V8HI)) ++DEF_LARCH_FTYPE (1, (V2DI, V8HI)) ++DEF_LARCH_FTYPE (1, (V2DI, V4SI)) ++DEF_LARCH_FTYPE (1, (V4DI, V8SI)) ++DEF_LARCH_FTYPE (1, (UV4DI, UV8SI)) ++DEF_LARCH_FTYPE (1, (UV16HI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV8SI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V32QI)) ++DEF_LARCH_FTYPE (1, (UV8HI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V16QI)) ++DEF_LARCH_FTYPE (1, (UV8SI, V16HI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V16HI)) ++DEF_LARCH_FTYPE (1, (UV4SI, V8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, V4SI)) ++DEF_LARCH_FTYPE (1, (UV4DI, V8SI)) ++DEF_LARCH_FTYPE (1, (UV8HI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV16QI)) ++DEF_LARCH_FTYPE (1, (UV4DI, UV32QI)) ++DEF_LARCH_FTYPE (1, (UV4SI, UV8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV8HI)) ++DEF_LARCH_FTYPE (1, (UV2DI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV8HI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (UV4SI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (UV2DI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V16HI, V32QI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, V8SI, V8SI)) ++DEF_LARCH_FTYPE (2, (V8SI, V4DI, V4DI)) ++DEF_LARCH_FTYPE (2, (V16QI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V8HI, V4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UV4DI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (V32QI, V16HI, UQI)) ++DEF_LARCH_FTYPE (2, (V16HI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SI, V4DI, UQI)) 
++DEF_LARCH_FTYPE (2, (V16QI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (V8HI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (V4SI, V2DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UQI)) ++DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UQI)) ++DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UQI)) ++DEF_LARCH_FTYPE (2, (V32QI, V32QI, DI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, DI)) ++DEF_LARCH_FTYPE (2, (V32QI, UQI, UQI)) ++DEF_LARCH_FTYPE (2, (V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (2, (V8SF, V4DI, V4DI)) ++DEF_LARCH_FTYPE (2, (V4SF, V2DI, V2DI)) ++DEF_LARCH_FTYPE (1, (V4DI, V8SF)) ++DEF_LARCH_FTYPE (1, (V2DI, V4SF)) ++DEF_LARCH_FTYPE (2, (V4DI, UQI, USI)) ++DEF_LARCH_FTYPE (2, (V2DI, UQI, USI)) ++DEF_LARCH_FTYPE (2, (V4DI, UQI, UQI)) ++DEF_LARCH_FTYPE (2, (V2DI, UQI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V16QI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V8HI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V4SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V2DI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V16QI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V8HI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V4SI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V2DI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V32QI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V16HI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V8SI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (4, (VOID, V4DI, UQI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V32QI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V16HI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V8SI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V4DI, SI, CVPOINTER)) ++DEF_LARCH_FTYPE (1, (V32QI, POINTER)) ++DEF_LARCH_FTYPE (2, (VOID, V32QI, POINTER)) ++DEF_LARCH_FTYPE (2, (V8HI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, V16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (UV16QI, V16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (V8HI, V8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (UV8HI, V8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, V4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (UV4SI, V4SI, UV4SI)) ++DEF_LARCH_FTYPE (2, (V4SI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (2, (UV4SI, UV16QI, UV16QI)) ++DEF_LARCH_FTYPE (2, (V2DI, V2DI, UV2DI)) ++DEF_LARCH_FTYPE (2, (UV2DI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (2, (V4SI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, V2DI)) ++DEF_LARCH_FTYPE (2, (V2DI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (V2DI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (2, (UV2DI, V2DI, UV2DI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV2DI, V2DI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV8HI, V8HI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV8HI, UV8HI)) ++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI)) ++ ++ ++DEF_LARCH_FTYPE(2,(V4DI,V16HI,V16HI)) 
++DEF_LARCH_FTYPE(2,(V4DI,UV4SI,V4SI)) ++DEF_LARCH_FTYPE(2,(V8SI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(V16HI,UV32QI,V32QI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV8SI,V8SI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,V16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(UV32QI,V32QI,UV32QI)) ++DEF_LARCH_FTYPE(2,(UV16HI,V16HI,UV16HI)) ++DEF_LARCH_FTYPE(2,(UV8SI,V8SI,UV8SI)) ++DEF_LARCH_FTYPE(2,(UV4DI,V4DI,UV4DI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV4DI,V4DI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV8SI,V8SI)) ++DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(3,(V16HI,V16HI,UV32QI,V32QI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV4DI,V4DI)) ++DEF_LARCH_FTYPE(2,(V8SI,V32QI,V32QI)) ++DEF_LARCH_FTYPE(2,(UV4DI,UV16HI,UV16HI)) ++DEF_LARCH_FTYPE(2,(V4DI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(3,(V8SI,V8SI,V32QI,V32QI)) ++DEF_LARCH_FTYPE(3,(UV8SI,UV8SI,UV32QI,UV32QI)) ++DEF_LARCH_FTYPE(3,(UV4DI,UV4DI,UV16HI,UV16HI)) ++DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV32QI,V32QI)) ++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV16HI,V16HI)) ++DEF_LARCH_FTYPE(2,(UV8SI,UV32QI,UV32QI)) ++DEF_LARCH_FTYPE(2,(V8SI,UV32QI,V32QI)) ++ ++DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V2DI,CVPOINTER,SI,UQI)) ++ ++DEF_LARCH_FTYPE (2, (DI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, V4SI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V16QI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V8HI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V4SI, UQI)) ++ ++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, V16QI, USI)) ++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, V8HI, USI)) ++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, V4SI, USI)) ++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, V2DI, USI)) ++ ++DEF_LARCH_FTYPE (2, (DI, V8SI, UQI)) ++DEF_LARCH_FTYPE (2, (UDI, V8SI, UQI)) ++ ++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, V32QI, USI)) ++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, V16HI, USI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, V8SI, USI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, V4DI, USI)) ++ ++DEF_LARCH_FTYPE(4,(VOID,V32QI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V16HI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V8SI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE(4,(VOID,V4DI,CVPOINTER,SI,UQI)) ++ ++DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI)) ++DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE(2,(V32QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE(3,(VOID,V32QI,CVPOINTER,CVPOINTER)) ++ ++DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI)) ++DEF_LARCH_FTYPE (3, (V2DI, V2DI, DI, UQI)) ++DEF_LARCH_FTYPE (3, (V4SI, V4SI, SI, UQI)) ++ ++DEF_LARCH_FTYPE (2, (V8SF, V8SF, UQI)) ++DEF_LARCH_FTYPE (2, (V4DF, V4DF, UQI)) +diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def +new file mode 100644 +index 000000000..fe5bc38d9 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-modes.def +@@ -0,0 +1,64 @@ ++/* LARCH extra machine modes. ++ Copyright (C) 2003-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. 
++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++FLOAT_MODE (TF, 16, ieee_quad_format); ++ ++/* Vector modes. */ ++VECTOR_MODES (INT, 4); /* V4QI V2HI */ ++VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */ ++VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */ ++ ++/* For LARCH LSX 128 bits. */ ++VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */ ++VECTOR_MODES (FLOAT, 16); /* V4SF V2DF */ ++ ++/* For LARCH LASX 256 bits. */ ++VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI */ ++VECTOR_MODES (FLOAT, 32); /* V8SF V4DF */ ++ ++/* Double-sized vector modes for vec_concat. */ ++/* VECTOR_MODE (INT, QI, 32); V32QI */ ++/* VECTOR_MODE (INT, HI, 16); V16HI */ ++/* VECTOR_MODE (INT, SI, 8); V8SI */ ++/* VECTOR_MODE (INT, DI, 4); V4DI */ ++/* VECTOR_MODE (FLOAT, SF, 8); V8SF */ ++/* VECTOR_MODE (FLOAT, DF, 4); V4DF */ ++ ++VECTOR_MODE (INT, QI, 64); /* V64QI */ ++VECTOR_MODE (INT, HI, 32); /* V32HI */ ++VECTOR_MODE (INT, SI, 16); /* V16SI */ ++VECTOR_MODE (INT, DI, 8); /* V8DI */ ++VECTOR_MODE (FLOAT, SF, 16); /* V16SF */ ++VECTOR_MODE (FLOAT, DF, 8); /* V8DF */ ++ ++VECTOR_MODES (FRACT, 4); /* V4QQ V2HQ */ ++VECTOR_MODES (UFRACT, 4); /* V4UQQ V2UHQ */ ++VECTOR_MODES (ACCUM, 4); /* V2HA */ ++VECTOR_MODES (UACCUM, 4); /* V2UHA */ ++ ++/* For floating point conditions in FCC registers. */ ++CC_MODE (FCC); ++ ++INT_MODE (OI, 32); ++ ++/* Keep the OI modes from confusing the compiler into thinking ++ that these modes could actually be used for computation. They are ++ only holders for vectors during data movement. */ ++#define MAX_BITSIZE_MODE_ANY_INT (128) ++ +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +new file mode 100644 +index 000000000..21639fa74 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -0,0 +1,34 @@ ++/* Definitions for option handling for LARCH. ++ Copyright (C) 1989-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#ifndef LARCH_OPTS_H ++#define LARCH_OPTS_H ++ ++#define LARCH_ARCH_OPTION_NATIVE -1 ++ ++ ++enum loongarch_code_model { ++ LARCH_CMODEL_NORMAL, ++ LARCH_CMODEL_TINY, ++ LARCH_CMODEL_TINY_STATIC, ++ LARCH_CMODEL_LARGE, ++ LARCH_CMODEL_EXTREME ++}; ++ ++#endif +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +new file mode 100644 +index 000000000..c36fdd37d +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -0,0 +1,290 @@ ++/* Prototypes of target machine for GNU compiler. LARCH version. ++ Copyright (C) 1989-2018 Free Software Foundation, Inc. ++ Contributed by A. Lichnewsky (lich@inria.inria.fr). ++ Changed by Michael Meissner (meissner@osf.org). ++ 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and ++ Brendan Eich (brendan@microunity.com). ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#ifndef GCC_LARCH_PROTOS_H ++#define GCC_LARCH_PROTOS_H ++ ++/* Describes how a symbol is used. ++ ++ SYMBOL_CONTEXT_CALL ++ The symbol is used as the target of a call instruction. ++ ++ SYMBOL_CONTEXT_LEA ++ The symbol is used in a load-address operation. ++ ++ SYMBOL_CONTEXT_MEM ++ The symbol is used as the address in a MEM. */ ++enum loongarch_symbol_context { ++ SYMBOL_CONTEXT_CALL, ++ SYMBOL_CONTEXT_LEA, ++ SYMBOL_CONTEXT_MEM ++}; ++ ++/* Classifies a SYMBOL_REF, LABEL_REF or UNSPEC address. ++ ++ SYMBOL_GOT_DISP ++ The symbol's value will be loaded directly from the GOT. ++ ++ SYMBOL_TLS ++ A thread-local symbol. ++ ++ SYMBOL_TLSGD ++ SYMBOL_TLSLDM ++ UNSPEC wrappers around SYMBOL_TLS, corresponding to the ++ thread-local storage relocation operators. ++ */ ++enum loongarch_symbol_type { ++ SYMBOL_GOT_DISP, ++ SYMBOL_TLS, ++ SYMBOL_TLSGD, ++ SYMBOL_TLSLDM, ++}; ++#define NUM_SYMBOL_TYPES (SYMBOL_TLSLDM + 1) ++ ++/* Classifies a type of call. ++ ++ LARCH_CALL_NORMAL ++ A normal call or call_value pattern. ++ ++ LARCH_CALL_SIBCALL ++ A sibcall or sibcall_value pattern. ++ ++ LARCH_CALL_EPILOGUE ++ A call inserted in the epilogue. */ ++enum loongarch_call_type { ++ LARCH_CALL_NORMAL, ++ LARCH_CALL_SIBCALL, ++ LARCH_CALL_EPILOGUE ++}; ++ ++/* Controls the conditions under which certain instructions are split. ++ ++ SPLIT_IF_NECESSARY ++ Only perform splits that are necessary for correctness ++ (because no unsplit version exists). ++ ++ SPLIT_FOR_SPEED ++ Perform splits that are necessary for correctness or ++ beneficial for code speed. ++ ++ SPLIT_FOR_SIZE ++ Perform splits that are necessary for correctness or ++ beneficial for code size. 
*/ ++enum loongarch_split_type { ++ SPLIT_IF_NECESSARY, ++ SPLIT_FOR_SPEED, ++ SPLIT_FOR_SIZE ++}; ++extern const char *const loongarch_fp_conditions[16]; ++ ++extern const char *loongarch_output_gpr_save (unsigned); ++extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int); ++extern void loongarch_expand_prologue (void); ++extern void loongarch_expand_epilogue (bool); ++extern bool loongarch_can_use_return_insn (void); ++extern rtx loongarch_function_value (const_tree, const_tree, enum machine_mode); ++extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_context, ++ enum loongarch_symbol_type *); ++extern int loongarch_regno_mode_ok_for_base_p (int, machine_mode, bool); ++extern bool loongarch_stack_address_p (rtx, machine_mode); ++extern int loongarch_address_insns (rtx, machine_mode, bool); ++extern int loongarch_const_insns (rtx); ++extern int loongarch_split_const_insns (rtx); ++extern int loongarch_split_128bit_const_insns (rtx); ++extern int loongarch_load_store_insns (rtx, rtx_insn *); ++extern int loongarch_idiv_insns (machine_mode); ++extern rtx loongarch_emit_move (rtx, rtx); ++#ifdef RTX_CODE ++extern void loongarch_emit_binary (enum rtx_code, rtx, rtx, rtx); ++#endif ++extern rtx loongarch_pic_base_register (rtx); ++extern bool loongarch_split_symbol (rtx, rtx, machine_mode, rtx *); ++extern rtx loongarch_unspec_address (rtx, enum loongarch_symbol_type); ++extern rtx loongarch_strip_unspec_address (rtx); ++extern void loongarch_move_integer (rtx, rtx, unsigned HOST_WIDE_INT); ++extern bool loongarch_legitimize_move (machine_mode, rtx, rtx); ++extern rtx loongarch_legitimize_call_address (rtx); ++ ++extern rtx loongarch_subword (rtx, bool); ++extern bool loongarch_split_move_p (rtx, rtx, enum loongarch_split_type); ++extern void loongarch_split_move (rtx, rtx, enum loongarch_split_type, rtx); ++extern bool loongarch_split_move_insn_p (rtx, rtx, rtx); ++extern void loongarch_split_move_insn (rtx, rtx, rtx); ++extern void loongarch_split_128bit_move (rtx, rtx); ++extern bool loongarch_split_128bit_move_p (rtx, rtx); ++extern void loongarch_split_256bit_move (rtx, rtx); ++extern bool loongarch_split_256bit_move_p (rtx, rtx); ++extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx)); ++extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx); ++extern void loongarch_split_lsx_fill_d (rtx, rtx); ++extern const char *loongarch_output_move (rtx, rtx); ++extern bool loongarch_cfun_has_cprestore_slot_p (void); ++extern bool loongarch_cprestore_address_p (rtx, bool); ++#ifdef RTX_CODE ++extern void loongarch_expand_scc (rtx *); ++extern bool loongarch_expand_int_vec_cmp (rtx *); ++extern bool loongarch_expand_fp_vec_cmp (rtx *); ++extern void loongarch_expand_conditional_branch (rtx *); ++extern void loongarch_expand_conditional_move (rtx *); ++extern void loongarch_expand_conditional_trap (rtx); ++#endif ++extern bool loongarch_get_pic_call_symbol (rtx *, int); ++extern void loongarch_set_return_address (rtx, rtx); ++extern bool loongarch_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); ++extern bool loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); ++extern bool loongarch_expand_block_move (rtx, rtx, rtx); ++ ++extern void loongarch_init_cumulative_args (CUMULATIVE_ARGS *, tree); ++extern bool loongarch_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT, ++ HOST_WIDE_INT, bool); ++extern bool loongarch_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT, ++ HOST_WIDE_INT); ++extern bool 
loongarch_mem_fits_mode_p (machine_mode mode, rtx x); ++extern HOST_WIDE_INT loongarch_debugger_offset (rtx, HOST_WIDE_INT); ++ ++extern void loongarch_push_asm_switch (struct loongarch_asm_switch *); ++extern void loongarch_pop_asm_switch (struct loongarch_asm_switch *); ++extern void loongarch_output_external (FILE *, tree, const char *); ++extern void loongarch_output_ascii (FILE *, const char *, size_t); ++extern void loongarch_output_aligned_decl_common (FILE *, tree, const char *, ++ unsigned HOST_WIDE_INT, ++ unsigned int); ++extern void loongarch_declare_common_object (FILE *, const char *, ++ const char *, unsigned HOST_WIDE_INT, ++ unsigned int, bool); ++extern void loongarch_declare_object (FILE *, const char *, const char *, ++ const char *, ...) ATTRIBUTE_PRINTF_4; ++extern void loongarch_declare_object_name (FILE *, const char *, tree); ++extern void loongarch_finish_declare_object (FILE *, tree, int, int); ++extern void loongarch_set_text_contents_type (FILE *, const char *, ++ unsigned long, bool); ++ ++extern bool loongarch_small_data_pattern_p (rtx); ++extern rtx loongarch_rewrite_small_data (rtx); ++extern rtx loongarch_return_addr (int, rtx); ++extern bool loongarch_must_initialize_gp_p (void); ++ ++extern bool loongarch_const_vector_same_val_p (rtx, machine_mode); ++extern bool loongarch_const_vector_same_bytes_p (rtx, machine_mode); ++extern bool loongarch_const_vector_same_int_p (rtx, machine_mode, HOST_WIDE_INT, ++ HOST_WIDE_INT); ++extern bool loongarch_const_vector_shuffle_set_p (rtx, machine_mode); ++extern bool loongarch_const_vector_bitimm_set_p (rtx, machine_mode); ++extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode); ++extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool); ++extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT); ++extern enum reg_class loongarch_secondary_reload_class (enum reg_class, ++ machine_mode, ++ rtx, bool); ++extern int loongarch_class_max_nregs (enum reg_class, machine_mode); ++ ++extern machine_mode loongarch_hard_regno_caller_save_mode (unsigned int, ++ unsigned int, ++ machine_mode); ++extern int loongarch_adjust_insn_length (rtx_insn *, int); ++extern const char *loongarch_output_conditional_branch (rtx_insn *, rtx *, ++ const char *, const char *); ++extern const char *loongarch_output_order_conditional_branch (rtx_insn *, rtx *, ++ bool); ++extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, rtx *, ++ bool); ++extern const char *loongarch_output_division (const char *, rtx *); ++extern const char *loongarch_lsx_output_division (const char *, rtx *); ++extern const char *loongarch_output_probe_stack_range (rtx, rtx, rtx); ++extern bool loongarch_hard_regno_rename_ok (unsigned int, unsigned int); ++extern bool loongarch_linked_madd_p (rtx_insn *, rtx_insn *); ++extern bool loongarch_store_data_bypass_p (rtx_insn *, rtx_insn *); ++extern int loongarch_dspalu_bypass_p (rtx, rtx); ++extern rtx loongarch_prefetch_cookie (rtx, rtx); ++ ++extern bool loongarch_global_symbol_p (const_rtx); ++extern bool loongarch_global_symbol_noweak_p (const_rtx); ++extern bool loongarch_weak_symbol_p (const_rtx); ++extern bool loongarch_symbol_binds_local_p (const_rtx); ++ ++extern const char *current_section_name (void); ++extern unsigned int current_section_flags (void); ++extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT); ++ ++extern bool and_operands_ok (machine_mode, rtx, rtx); ++extern bool loongarch_fmadd_bypass (rtx_insn *, rtx_insn *); ++ ++union 
loongarch_gen_fn_ptrs ++{ ++ rtx (*fn_8) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx); ++ rtx (*fn_7) (rtx, rtx, rtx, rtx, rtx, rtx, rtx); ++ rtx (*fn_6) (rtx, rtx, rtx, rtx, rtx, rtx); ++ rtx (*fn_5) (rtx, rtx, rtx, rtx, rtx); ++ rtx (*fn_4) (rtx, rtx, rtx, rtx); ++}; ++ ++extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs, ++ rtx, rtx, rtx, rtx, rtx); ++ ++extern void loongarch_expand_vector_init (rtx, rtx); ++extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); ++ ++extern int loongarch_ldst_scaled_shift (machine_mode); ++extern bool loongarch_signed_immediate_p (unsigned HOST_WIDE_INT, int, int); ++extern bool loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int); ++extern bool loongarch_load_store_pair_p (bool, rtx *); ++extern bool loongarch_movep_target_p (rtx, rtx); ++extern bool loongarch_12bit_offset_address_p (rtx, machine_mode); ++extern bool loongarch_14bit_shifted_offset_address_p (rtx, machine_mode); ++extern bool loongarch_9bit_offset_address_p (rtx, machine_mode); ++extern bool lwsp_swsp_address_p (rtx, machine_mode); ++extern rtx loongarch_expand_thread_pointer (rtx); ++ ++extern bool loongarch_eh_uses (unsigned int); ++extern bool loongarch_epilogue_uses (unsigned int); ++extern int loongarch_trampoline_code_size (void); ++extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool); ++extern bool loongarch_la464_128_store_p (rtx[]); ++extern bool loongarch_la464_128_load_p (rtx[]); ++extern void loongarch_la464_emit_128bit_store (rtx[]); ++extern void loongarch_la464_emit_128bit_load (rtx[]); ++extern bool loongarch_split_symbol_type (enum loongarch_symbol_type); ++ ++typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); ++ ++extern void loongarch_register_frame_header_opt (void); ++extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); ++ ++extern void loongarch_declare_function_name(FILE *, const char *, tree); ++/* Routines implemented in loongarch-d.c */ ++extern void loongarch_d_target_versions (void); ++ ++/* Routines implemented in loongarch-c.c. */ ++void loongarch_cpu_cpp_builtins (cpp_reader *); ++ ++extern void loongarch_init_builtins (void); ++extern void loongarch_atomic_assign_expand_fenv (tree *, tree *, tree *); ++extern tree loongarch_builtin_decl (unsigned int, bool); ++extern rtx loongarch_expand_builtin (tree, rtx, rtx subtarget ATTRIBUTE_UNUSED, ++ machine_mode, int); ++extern tree loongarch_builtin_vectorized_function (unsigned int, tree, tree); ++extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int); ++extern tree loongarch_build_builtin_va_list (void); ++ ++extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool); ++#endif /* ! GCC_LARCH_PROTOS_H */ +diff --git a/gcc/config/loongarch/loongarch-tables.opt b/gcc/config/loongarch/loongarch-tables.opt +new file mode 100644 +index 000000000..80794b564 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-tables.opt +@@ -0,0 +1,34 @@ ++; -*- buffer-read-only: t -*- ++; Generated automatically by genopt.sh from loongarch-cpus.def. ++ ++; Copyright (C) 2011-2018 Free Software Foundation, Inc. ++; ++; This file is part of GCC. ++; ++; GCC is free software; you can redistribute it and/or modify it under ++; the terms of the GNU General Public License as published by the Free ++; Software Foundation; either version 3, or (at your option) any later ++; version. 
++; ++; GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++; WARRANTY; without even the implied warranty of MERCHANTABILITY or ++; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++; for more details. ++; ++; You should have received a copy of the GNU General Public License ++; along with GCC; see the file COPYING3. If not see ++; . ++ ++Enum ++Name(loongarch_arch_opt_value) Type(int) ++Known LARCH CPUs (for use with the -march= and -mtune= options): ++ ++EnumValue ++Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly ++ ++EnumValue ++Enum(loongarch_arch_opt_value) String(loongarch64) Value(0) Canonical ++ ++EnumValue ++Enum(loongarch_arch_opt_value) String(la464) Value(1) Canonical ++ +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +new file mode 100644 +index 000000000..e556f81e4 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch.c +@@ -0,0 +1,10465 @@ ++/* Subroutines used for LARCH code generation. ++ Copyright (C) 1989-2018 Free Software Foundation, Inc. ++ Contributed by A. Lichnewsky, lich@inria.inria.fr. ++ Changes by Michael Meissner, meissner@osf.org. ++ 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and ++ Brendan Eich, brendan@microunity.com. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "backend.h" ++#include "target.h" ++#include "rtl.h" ++#include "tree.h" ++#include "memmodel.h" ++#include "gimple.h" ++#include "cfghooks.h" ++#include "df.h" ++#include "tm_p.h" ++#include "stringpool.h" ++#include "attribs.h" ++#include "optabs.h" ++#include "regs.h" ++#include "emit-rtl.h" ++#include "recog.h" ++#include "cgraph.h" ++#include "diagnostic.h" ++#include "insn-attr.h" ++#include "output.h" ++#include "alias.h" ++#include "fold-const.h" ++#include "varasm.h" ++#include "stor-layout.h" ++#include "calls.h" ++#include "explow.h" ++#include "expr.h" ++#include "libfuncs.h" ++#include "reload.h" ++#include "common/common-target.h" ++#include "langhooks.h" ++#include "cfgrtl.h" ++#include "cfganal.h" ++#include "sched-int.h" ++#include "gimplify.h" ++#include "target-globals.h" ++#include "tree-pass.h" ++#include "context.h" ++#include "builtins.h" ++#include "rtl-iter.h" ++ ++/* This file should be included last. */ ++#include "target-def.h" ++ ++/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */ ++#define UNSPEC_ADDRESS_P(X) \ ++ (GET_CODE (X) == UNSPEC \ ++ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \ ++ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES) ++ ++/* Extract the symbol or label from UNSPEC wrapper X. */ ++#define UNSPEC_ADDRESS(X) \ ++ XVECEXP (X, 0, 0) ++ ++/* Extract the symbol type from UNSPEC wrapper X. 
*/ ++#define UNSPEC_ADDRESS_TYPE(X) \ ++ ((enum loongarch_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST)) ++ ++/* The maximum distance between the top of the stack frame and the ++ value $sp has when we save and restore registers. ++*/ ++#define LARCH_MAX_FIRST_STACK_STEP 0x7f0 ++ ++/* True if INSN is a loongarch.md pattern or asm statement. */ ++/* ??? This test exists through the compiler, perhaps it should be ++ moved to rtl.h. */ ++#define USEFUL_INSN_P(INSN) \ ++ (NONDEBUG_INSN_P (INSN) \ ++ && GET_CODE (PATTERN (INSN)) != USE \ ++ && GET_CODE (PATTERN (INSN)) != CLOBBER) ++ ++/* If INSN is a delayed branch sequence, return the first instruction ++ in the sequence, otherwise return INSN itself. */ ++#define SEQ_BEGIN(INSN) \ ++ (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ ++ ? as_a (XVECEXP (PATTERN (INSN), 0, 0)) \ ++ : (INSN)) ++ ++/* Likewise for the last instruction in a delayed branch sequence. */ ++#define SEQ_END(INSN) \ ++ (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ ++ ? as_a (XVECEXP (PATTERN (INSN), \ ++ 0, \ ++ XVECLEN (PATTERN (INSN), 0) - 1)) \ ++ : (INSN)) ++ ++/* Execute the following loop body with SUBINSN set to each instruction ++ between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */ ++#define FOR_EACH_SUBINSN(SUBINSN, INSN) \ ++ for ((SUBINSN) = SEQ_BEGIN (INSN); \ ++ (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \ ++ (SUBINSN) = NEXT_INSN (SUBINSN)) ++ ++/* True if bit BIT is set in VALUE. */ ++#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0) ++ ++/* Classifies an address. ++ ++ ADDRESS_REG ++ A natural register + offset address. The register satisfies ++ loongarch_valid_base_register_p and the offset is a const_arith_operand. ++ ++ ADDRESS_CONST_INT ++ A signed 16-bit constant address. ++ ++ ADDRESS_SYMBOLIC: ++ A constant symbolic address. */ ++enum loongarch_address_type { ++ ADDRESS_REG, ++ ADDRESS_CONST_INT, ++ ADDRESS_SYMBOLIC ++}; ++ ++ ++/* A class used to control a comdat-style stub that we output in each ++ translation unit that needs it. */ ++class loongarch_one_only_stub { ++public: ++ virtual ~loongarch_one_only_stub () {} ++ ++ /* Return the name of the stub. */ ++ virtual const char *get_name () = 0; ++ ++ /* Output the body of the function to asm_out_file. */ ++ virtual void output_body () = 0; ++}; ++ ++/* Tuning information that is automatically derived from other sources ++ (such as the scheduler). */ ++static struct { ++ /* The architecture and tuning settings that this structure describes. */ ++ enum processor arch; ++ enum processor tune; ++ ++ /* True if the structure has been initialized. */ ++ bool initialized_p; ++ ++} loongarch_tuning_info; ++ ++/* Information about an address described by loongarch_address_type. ++ ++ ADDRESS_CONST_INT ++ No fields are used. ++ ++ ADDRESS_REG ++ REG is the base register and OFFSET is the constant offset. ++ ++ ADDRESS_SYMBOLIC ++ SYMBOL_TYPE is the type of symbol that the address references. */ ++struct loongarch_address_info { ++ enum loongarch_address_type type; ++ rtx reg; ++ rtx offset; ++ enum loongarch_symbol_type symbol_type; ++}; ++ ++/* Method to load immediate number fields. ++ ++ METHOD_NORMAL: ++ load immediate number 0-31 bit ++ ++ METHOD_LU32I: ++ load imm 32-51 bit ++ ++ METHOD_LU52I: ++ load imm 52-63 bit ++ ++ METHOD_INSV: ++ imm 0xfff00000fffffxxx ++ */ ++enum loongarch_load_imm_method { ++ METHOD_NORMAL, ++ METHOD_LU32I, ++ METHOD_LU52I, ++ METHOD_INSV ++}; ++ ++/* One stage in a constant building sequence. 
These sequences have ++ the form: ++ ++ A = VALUE[0] ++ A = A CODE[1] VALUE[1] ++ A = A CODE[2] VALUE[2] ++ ... ++ ++ where A is an accumulator, each CODE[i] is a binary rtl operation ++ and each VALUE[i] is a constant integer. CODE[0] is undefined. */ ++struct loongarch_integer_op { ++ enum rtx_code code; ++ unsigned HOST_WIDE_INT value; ++ enum loongarch_load_imm_method method; ++}; ++ ++/* The largest number of operations needed to load an integer constant. ++ The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI. ++ When the lowest bit is clear, we can try, but reject a sequence with ++ an extra SLL at the end. */ ++#define LARCH_MAX_INTEGER_OPS 9 ++ ++/* Costs of various operations on the different architectures. */ ++ ++struct loongarch_rtx_cost_data ++{ ++ unsigned short fp_add; ++ unsigned short fp_mult_sf; ++ unsigned short fp_mult_df; ++ unsigned short fp_div_sf; ++ unsigned short fp_div_df; ++ unsigned short int_mult_si; ++ unsigned short int_mult_di; ++ unsigned short int_div_si; ++ unsigned short int_div_di; ++ unsigned short branch_cost; ++ unsigned short memory_latency; ++}; ++ ++/* Global variables for machine-dependent things. */ ++ ++/* The -G setting, or the configuration's default small-data limit if ++ no -G option is given. */ ++static unsigned int loongarch_small_data_threshold; ++ ++/* The number of file directives written by loongarch_output_filename. */ ++int num_source_filenames; ++ ++/* The name that appeared in the last .file directive written by ++ loongarch_output_filename, or "" if loongarch_output_filename hasn't ++ written anything yet. */ ++const char *current_function_file = ""; ++ ++/* Arrays that map GCC register numbers to debugger register numbers. */ ++int loongarch_dbx_regno[FIRST_PSEUDO_REGISTER]; ++int loongarch_dwarf_regno[FIRST_PSEUDO_REGISTER]; ++ ++/* The current instruction-set architecture. */ ++enum processor loongarch_arch; ++const struct loongarch_cpu_info *loongarch_arch_info; ++ ++/* The processor that we should tune the code for. */ ++enum processor loongarch_tune; ++const struct loongarch_cpu_info *loongarch_tune_info; ++ ++/* The ISA level associated with loongarch_arch. */ ++int loongarch_isa; ++ ++/* The ISA revision level. */ ++int loongarch_isa_rev; ++ ++/* Which cost information to use. */ ++static const struct loongarch_rtx_cost_data *loongarch_cost; ++ ++/* Index [M][R] is true if register R is allowed to hold a value of mode M. */ ++static bool loongarch_hard_regno_mode_ok_p[MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER]; ++ ++/* Index C is true if character C is a valid PRINT_OPERAND punctation ++ character. */ ++static bool loongarch_print_operand_punct[256]; ++ ++static GTY (()) int loongarch_output_filename_first_time = 1; ++ ++/* loongarch_use_pcrel_pool_p[X] is true if symbols of type X should be ++ forced into a PC-relative constant pool. */ ++bool loongarch_use_pcrel_pool_p[NUM_SYMBOL_TYPES]; ++ ++/* Cached value of can_issue_more. This is cached in loongarch_variable_issue hook ++ and returned from loongarch_sched_reorder2. */ ++static int cached_can_issue_more; ++ ++/* Index R is the smallest register class that contains register R. 
*/ ++const enum reg_class loongarch_regno_to_class[FIRST_PSEUDO_REGISTER] = { ++ GR_REGS, GR_REGS, GR_REGS, GR_REGS, ++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, ++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, ++ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, ++ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, ++ SIBCALL_REGS, GR_REGS, GR_REGS, JALR_REGS, ++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, ++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, ++ ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ ST_REGS, ST_REGS, ST_REGS, ST_REGS, ++ ST_REGS, ST_REGS, ST_REGS, ST_REGS, ++ FRAME_REGS, FRAME_REGS ++}; ++ ++static tree loongarch_handle_interrupt_attr (tree *, tree, tree, int, bool *); ++static tree loongarch_handle_use_shadow_register_set_attr (tree *, tree, tree, int, ++ bool *); ++ ++/* The value of TARGET_ATTRIBUTE_TABLE. */ ++static const struct attribute_spec loongarch_attribute_table[] = { ++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, ++ affects_type_identity, handler, exclude } */ ++ { "long_call", 0, 0, false, true, true, false, NULL, NULL }, ++ { "short_call", 0, 0, false, true, true, false, NULL, NULL }, ++ { "far", 0, 0, false, true, true, false, NULL, NULL }, ++ { "near", 0, 0, false, true, true, false, NULL, NULL }, ++ { "nocompression", 0, 0, true, false, false, false, NULL, NULL }, ++ /* Allow functions to be specified as interrupt handlers */ ++ { "interrupt", 0, 1, false, true, true, false, loongarch_handle_interrupt_attr, ++ NULL }, ++ { "use_shadow_register_set", 0, 1, false, true, true, false, ++ loongarch_handle_use_shadow_register_set_attr, NULL }, ++ { "keep_interrupts_masked", 0, 0, false, true, true, false, NULL, NULL }, ++ { "use_debug_exception_return", 0, 0, false, true, true, false, NULL, NULL }, ++ { NULL, 0, 0, false, false, false, false, NULL, NULL } ++}; ++ ++/* A table describing all the processors GCC knows about; see ++ loongarch-cpus.def for details. */ ++static const struct loongarch_cpu_info loongarch_cpu_info_table[] = { ++#define LARCH_CPU(NAME, CPU, ISA, FLAGS) \ ++ { NAME, CPU, ISA, FLAGS }, ++#include "loongarch-cpus.def" ++#undef LARCH_CPU ++}; ++ ++/* Default costs. If these are used for a processor we should look ++ up the actual costs. */ ++#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \ ++ COSTS_N_INSNS (7), /* fp_mult_sf */ \ ++ COSTS_N_INSNS (8), /* fp_mult_df */ \ ++ COSTS_N_INSNS (23), /* fp_div_sf */ \ ++ COSTS_N_INSNS (36), /* fp_div_df */ \ ++ COSTS_N_INSNS (10), /* int_mult_si */ \ ++ COSTS_N_INSNS (10), /* int_mult_di */ \ ++ COSTS_N_INSNS (69), /* int_div_si */ \ ++ COSTS_N_INSNS (69), /* int_div_di */ \ ++ 2, /* branch_cost */ \ ++ 4 /* memory_latency */ ++ ++/* Floating-point costs for processors without an FPU. Just assume that ++ all floating-point libcalls are very expensive. */ ++#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \ ++ COSTS_N_INSNS (256), /* fp_mult_sf */ \ ++ COSTS_N_INSNS (256), /* fp_mult_df */ \ ++ COSTS_N_INSNS (256), /* fp_div_sf */ \ ++ COSTS_N_INSNS (256) /* fp_div_df */ ++ ++/* Costs to use when optimizing for size. 
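++   Every entry below is a flat COSTS_N_INSNS (1), so under -Os the rtx
++   cost of an operation reduces to a plain instruction count; only
++   branch_cost and memory_latency keep distinct values.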
*/ ++static const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size = { ++ COSTS_N_INSNS (1), /* fp_add */ ++ COSTS_N_INSNS (1), /* fp_mult_sf */ ++ COSTS_N_INSNS (1), /* fp_mult_df */ ++ COSTS_N_INSNS (1), /* fp_div_sf */ ++ COSTS_N_INSNS (1), /* fp_div_df */ ++ COSTS_N_INSNS (1), /* int_mult_si */ ++ COSTS_N_INSNS (1), /* int_mult_di */ ++ COSTS_N_INSNS (1), /* int_div_si */ ++ COSTS_N_INSNS (1), /* int_div_di */ ++ 2, /* branch_cost */ ++ 4 /* memory_latency */ ++}; ++ ++/* Costs to use when optimizing for speed, indexed by processor. */ ++static const struct loongarch_rtx_cost_data ++ loongarch_rtx_cost_data[NUM_PROCESSOR_VALUES] = { ++ { /* loongarch */ ++ DEFAULT_COSTS ++ }, ++ { /* loongarch64 */ ++ DEFAULT_COSTS ++ }, ++ { /* la464 */ ++ DEFAULT_COSTS ++ } ++}; ++ ++/* Information about a single argument. */ ++struct loongarch_arg_info { ++ /* True if the argument is at least partially passed on the stack. */ ++ bool stack_p; ++ ++ /* The number of integer registers allocated to this argument. */ ++ unsigned int num_gprs; ++ ++ /* The offset of the first register used, provided num_gprs is nonzero. ++ If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS. */ ++ unsigned int gpr_offset; ++ ++ /* The number of floating-point registers allocated to this argument. */ ++ unsigned int num_fprs; ++ ++ /* The offset of the first register used, provided num_fprs is nonzero. */ ++ unsigned int fpr_offset; ++}; ++ ++ ++/* Emit a move from SRC to DEST. Assume that the move expanders can ++ handle all moves if !can_create_pseudo_p (). The distinction is ++ important because, unlike emit_move_insn, the move expanders know ++ how to force Pmode objects into the constant pool even when the ++ constant pool address is not itself legitimate. */ ++ ++rtx ++loongarch_emit_move (rtx dest, rtx src) ++{ ++ return (can_create_pseudo_p () ++ ? emit_move_insn (dest, src) ++ : emit_move_insn_1 (dest, src)); ++} ++ ++/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at ++ least PARM_BOUNDARY bits of alignment, but will be given anything up ++ to PREFERRED_STACK_BOUNDARY bits if the type requires it. */ ++ ++static unsigned int ++loongarch_function_arg_boundary (machine_mode mode, const_tree type) ++{ ++ unsigned int alignment; ++ ++ /* Use natural alignment if the type is not aggregate data. */ ++ if (type && !AGGREGATE_TYPE_P (type)) ++ alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type)); ++ else ++ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode); ++ ++ return MIN (PREFERRED_STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment)); ++} ++ ++/* If MODE represents an argument that can be passed or returned in ++ floating-point registers, return the number of registers, else 0. */ ++ ++static unsigned ++loongarch_pass_mode_in_fpr_p (machine_mode mode) ++{ ++ if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG) ++ { ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT) ++ return 1; ++ ++ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) ++ return 2; ++ } ++ ++ return 0; ++} ++ ++typedef struct { ++ const_tree type; ++ HOST_WIDE_INT offset; ++} loongarch_aggregate_field; ++ ++/* Identify subfields of aggregates that are candidates for passing in ++ floating-point registers. */ ++ ++static int ++loongarch_flatten_aggregate_field (const_tree type, ++ loongarch_aggregate_field fields[2], ++ int n, HOST_WIDE_INT offset, ++ const int use_vecarg_p) ++{ ++ switch (TREE_CODE (type)) ++ { ++ case RECORD_TYPE: ++ /* Can't handle incomplete types nor sizes that are not fixed. 
 */
++      if (!COMPLETE_TYPE_P (type)
++	  || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
++	  || !tree_fits_uhwi_p (TYPE_SIZE (type)))
++	return -1;
++
++      for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
++	if (TREE_CODE (f) == FIELD_DECL)
++	  {
++	    if (!TYPE_P (TREE_TYPE (f)))
++	      return -1;
++
++	    HOST_WIDE_INT pos = offset + int_byte_position (f);
++	    n = loongarch_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos, 0);
++	    if (n < 0)
++	      return -1;
++	  }
++      return n;
++
++    case ARRAY_TYPE:
++      {
++	HOST_WIDE_INT n_elts;
++	loongarch_aggregate_field subfields[2];
++	tree index = TYPE_DOMAIN (type);
++	tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
++	int n_subfields = loongarch_flatten_aggregate_field (TREE_TYPE (type),
++							     subfields, 0, offset, 0);
++
++	/* Can't handle incomplete types nor sizes that are not fixed.  */
++	if (n_subfields <= 0
++	    || !COMPLETE_TYPE_P (type)
++	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
++	    || !index
++	    || !TYPE_MAX_VALUE (index)
++	    || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
++	    || !TYPE_MIN_VALUE (index)
++	    || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
++	    || !tree_fits_uhwi_p (elt_size))
++	  return -1;
++
++	n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
++	  - tree_to_uhwi (TYPE_MIN_VALUE (index));
++	gcc_assert (n_elts >= 0);
++
++	for (HOST_WIDE_INT i = 0; i < n_elts; i++)
++	  for (int j = 0; j < n_subfields; j++)
++	    {
++	      if (n >= 2)
++		return -1;
++
++	      fields[n] = subfields[j];
++	      fields[n++].offset += i * tree_to_uhwi (elt_size);
++	    }
++
++	return n;
++      }
++
++    case COMPLEX_TYPE:
++      {
++	/* A complex type needs to consume two fields, so n must be 0.  */
++	if (n != 0)
++	  return -1;
++
++	HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
++
++	if (elt_size <= UNITS_PER_FP_ARG)
++	  {
++	    fields[0].type = TREE_TYPE (type);
++	    fields[0].offset = offset;
++	    fields[1].type = TREE_TYPE (type);
++	    fields[1].offset = offset + elt_size;
++
++	    return 2;
++	  }
++
++	return -1;
++      }
++
++    default:
++      if (n < 2
++	  && ((SCALAR_FLOAT_TYPE_P (type)
++	       && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
++	      || (INTEGRAL_TYPE_P (type)
++		  && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD))
++	  || (use_vecarg_p && VECTOR_TYPE_P (type)
++	      && ((ISA_HAS_LSX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LSX_REG)
++		  || (ISA_HAS_LASX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LASX_REG))))
++	{
++	  fields[n].type = type;
++	  fields[n].offset = offset;
++	  return n + 1;
++	}
++      else
++	return -1;
++    }
++}
++
++/* Identify candidate aggregates for passing in floating-point registers.
++   Candidates have at most two fields after flattening.  */
++
++static int
++loongarch_flatten_aggregate_argument (const_tree type,
++				      loongarch_aggregate_field fields[2],
++				      const int use_vecarg_p)
++{
++  if (!type || !((TREE_CODE (type) == RECORD_TYPE)
++	|| (use_vecarg_p && TREE_CODE (type) == VECTOR_TYPE)))
++    return -1;
++
++  return loongarch_flatten_aggregate_field (type, fields, 0, 0, use_vecarg_p);
++}
++
++/* See whether TYPE is a record whose fields should be returned in one or
++   two floating-point registers.  If so, populate FIELDS accordingly.  */
++
++static unsigned
++loongarch_pass_aggregate_in_fpr_pair_p (const_tree type,
++					loongarch_aggregate_field fields[2],
++					const int use_vecarg_p)
++{
++  int n = loongarch_flatten_aggregate_argument (type, fields, use_vecarg_p);
++
++  for (int i = 0; i < n; i++)
++    if (!SCALAR_FLOAT_TYPE_P (fields[i].type) && !VECTOR_TYPE_P (fields[i].type))
++      return 0;
++
++  return n > 0 ? n : 0;
++}
++
++/* See whether TYPE is a record whose fields should be returned in one
++   floating-point register and one integer register.  If so, populate
++   FIELDS accordingly.  */
++
++static bool
++loongarch_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
++					   loongarch_aggregate_field fields[2])
++{
++  unsigned num_int = 0, num_float = 0;
++  int n = loongarch_flatten_aggregate_argument (type, fields, 0);
++
++  for (int i = 0; i < n; i++)
++    {
++      num_float += SCALAR_FLOAT_TYPE_P (fields[i].type);
++      num_int += INTEGRAL_TYPE_P (fields[i].type);
++    }
++
++  return num_int == 1 && num_float == 1;
++}
++
++/* Return the representation of an argument passed or returned in an FPR
++   when the value has mode VALUE_MODE and the type has TYPE_MODE.  The
++   two modes may be different for structures like:
++
++     struct __attribute__((packed)) foo { float f; }
++
++   where the SFmode value "f" is passed in REGNO but the struct itself
++   has mode BLKmode.  */
++
++static rtx
++loongarch_pass_fpr_single (machine_mode type_mode, unsigned regno,
++			   machine_mode value_mode)
++{
++  rtx x = gen_rtx_REG (value_mode, regno);
++
++  if (type_mode != value_mode)
++    {
++      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
++      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
++    }
++  return x;
++}
++
++/* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
++   MODE is the mode of the composite.  MODE1 and OFFSET1 are the mode and
++   byte offset for the first value, likewise MODE2 and OFFSET2 for the
++   second value.  */
++
++static rtx
++loongarch_pass_fpr_pair (machine_mode mode, unsigned regno1,
++			 machine_mode mode1, HOST_WIDE_INT offset1,
++			 unsigned regno2, machine_mode mode2,
++			 HOST_WIDE_INT offset2)
++{
++  return gen_rtx_PARALLEL
++    (mode,
++     gen_rtvec (2,
++		gen_rtx_EXPR_LIST (VOIDmode,
++				   gen_rtx_REG (mode1, regno1),
++				   GEN_INT (offset1)),
++		gen_rtx_EXPR_LIST (VOIDmode,
++				   gen_rtx_REG (mode2, regno2),
++				   GEN_INT (offset2))));
++}
++
++/* Fill INFO with information about a single argument, and return an
++   RTL pattern to pass or return the argument.  CUM is the cumulative
++   state for earlier arguments.  MODE is the mode of this argument and
++   TYPE is its type (if known).  NAMED is true if this is a named
++   (fixed) argument rather than a variable one.  RETURN_P is true if
++   returning the argument, or false if passing the argument.  */
++
++static rtx
++loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS *cum,
++			machine_mode mode, const_tree type, bool named,
++			bool return_p)
++{
++  unsigned num_bytes, num_words;
++  unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
++  unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
++  unsigned alignment = loongarch_function_arg_boundary (mode, type);
++
++  int use_vecarg_p = TARGET_VECARG
++		     && (LSX_SUPPORTED_MODE_P (mode)
++			 || LASX_SUPPORTED_MODE_P (mode));
++
++  memset (info, 0, sizeof (*info));
++  info->gpr_offset = cum->num_gprs;
++  info->fpr_offset = cum->num_fprs;
++
++  if (named)
++    {
++      loongarch_aggregate_field fields[2];
++      unsigned fregno = fpr_base + info->fpr_offset;
++      unsigned gregno = gpr_base + info->gpr_offset;
++
++      /* Pass one- or two-element floating-point aggregates in FPRs.
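++	 For example (illustrative): struct pt { float x; float y; }
++	 flattens to two SFmode fields and, when two argument FPRs are
++	 still free, is passed in an FPR pair via loongarch_pass_fpr_pair.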
*/ ++ if ((info->num_fprs = loongarch_pass_aggregate_in_fpr_pair_p (type, fields, use_vecarg_p)) ++ && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS) ++ switch (info->num_fprs) ++ { ++ case 1: ++ return loongarch_pass_fpr_single (mode, fregno, ++ TYPE_MODE (fields[0].type)); ++ ++ case 2: ++ return loongarch_pass_fpr_pair (mode, fregno, ++ TYPE_MODE (fields[0].type), ++ fields[0].offset, ++ fregno + 1, ++ TYPE_MODE (fields[1].type), ++ fields[1].offset); ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ /* Pass real and complex floating-point numbers in FPRs. */ ++ if ((info->num_fprs = loongarch_pass_mode_in_fpr_p (mode)) ++ && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS) ++ switch (GET_MODE_CLASS (mode)) ++ { ++ case MODE_FLOAT: ++ return gen_rtx_REG (mode, fregno); ++ ++ case MODE_COMPLEX_FLOAT: ++ return loongarch_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0, ++ fregno + 1, GET_MODE_INNER (mode), ++ GET_MODE_UNIT_SIZE (mode)); ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ /* Pass structs with one float and one integer in an FPR and a GPR. */ ++ if (loongarch_pass_aggregate_in_fpr_and_gpr_p (type, fields) ++ && info->gpr_offset < MAX_ARGS_IN_REGISTERS ++ && info->fpr_offset < MAX_ARGS_IN_REGISTERS) ++ { ++ info->num_gprs = 1; ++ info->num_fprs = 1; ++ ++ if (!SCALAR_FLOAT_TYPE_P (fields[0].type)) ++ std::swap (fregno, gregno); ++ ++ return loongarch_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type), ++ fields[0].offset, ++ gregno, TYPE_MODE (fields[1].type), ++ fields[1].offset); ++ } ++ } ++ ++ /* Work out the size of the argument. */ ++ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode); ++ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; ++ ++ /* Doubleword-aligned varargs start on an even register boundary. */ ++ if (!named && num_bytes != 0 && alignment > BITS_PER_WORD) ++ info->gpr_offset += info->gpr_offset & 1; ++ ++ /* Partition the argument between registers and stack. */ ++ info->num_fprs = 0; ++ info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset); ++ info->stack_p = (num_words - info->num_gprs) != 0; ++ ++ if (info->num_gprs || return_p) ++ return gen_rtx_REG (mode, gpr_base + info->gpr_offset); ++ ++ return NULL_RTX; ++} ++ ++/* Implement TARGET_FUNCTION_ARG. */ ++ ++static rtx ++loongarch_function_arg (cumulative_args_t cum_v, machine_mode mode, ++ const_tree type, bool named) ++{ ++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); ++ struct loongarch_arg_info info; ++ ++ if (mode == VOIDmode) ++ return NULL; ++ ++ return loongarch_get_arg_info (&info, cum, mode, type, named, false); ++} ++ ++/* Implement TARGET_FUNCTION_ARG_ADVANCE. */ ++ ++static void ++loongarch_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, ++ const_tree type, bool named) ++{ ++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); ++ struct loongarch_arg_info info; ++ ++ loongarch_get_arg_info (&info, cum, mode, type, named, false); ++ ++ /* Advance the register count. This has the effect of setting ++ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned ++ argument required us to skip the final GPR and pass the whole ++ argument on the stack. */ ++ cum->num_fprs = info.fpr_offset + info.num_fprs; ++ cum->num_gprs = info.gpr_offset + info.num_gprs; ++} ++ ++/* Implement TARGET_ARG_PARTIAL_BYTES. 
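++   This reports how many bytes of a register/stack-split argument went
++   in registers.  A sketch with hypothetical numbers: one free GPR and
++   a two-word struct yield UNITS_PER_WORD partial bytes.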
 */
++
++static int
++loongarch_arg_partial_bytes (cumulative_args_t cum,
++			     machine_mode mode, tree type, bool named)
++{
++  struct loongarch_arg_info arg;
++
++  loongarch_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false);
++  return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
++}
++
++/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
++   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
++   VALTYPE is null and MODE is the mode of the return value.  */
++
++rtx
++loongarch_function_value (const_tree type, const_tree func, machine_mode mode)
++{
++  struct loongarch_arg_info info;
++  CUMULATIVE_ARGS args;
++
++  if (type)
++    {
++      int unsigned_p = TYPE_UNSIGNED (type);
++
++      mode = TYPE_MODE (type);
++
++      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
++	 return values, promote the mode here too.  */
++      mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
++    }
++
++  memset (&args, 0, sizeof args);
++  return loongarch_get_arg_info (&info, &args, mode, type, true, true);
++}
++
++/* Implement TARGET_PASS_BY_REFERENCE.  */
++
++static bool
++loongarch_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
++			     const_tree type, bool named)
++{
++  HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
++  struct loongarch_arg_info info;
++  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++
++  /* ??? std_gimplify_va_arg_expr passes NULL for cum.  Fortunately, we
++     never pass variadic arguments in floating-point registers, so we can
++     avoid the call to loongarch_get_arg_info in this case.  */
++  if (cum != NULL)
++    {
++      /* Don't pass by reference if we can use a floating-point register.  */
++      loongarch_get_arg_info (&info, cum, mode, type, named, false);
++      if (info.num_fprs)
++	return false;
++    }
++
++  /* Pass by reference if the data do not fit in two integer registers.  */
++  return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
++}
++
++/* Implement TARGET_RETURN_IN_MEMORY.  */
++
++static bool
++loongarch_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
++{
++  CUMULATIVE_ARGS args;
++  cumulative_args_t cum = pack_cumulative_args (&args);
++
++  /* The rules for returning in memory are the same as for passing the
++     first named argument by reference.  */
++  memset (&args, 0, sizeof args);
++  return loongarch_pass_by_reference (cum, TYPE_MODE (type), type, true);
++}
++
++/* Implement TARGET_SETUP_INCOMING_VARARGS.  */
++
++static void
++loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
++				  tree type, int *pretend_size ATTRIBUTE_UNUSED,
++				  int no_rtl)
++{
++  CUMULATIVE_ARGS local_cum;
++  int gp_saved;
++
++  /* The caller has advanced CUM up to, but not beyond, the last named
++     argument.  Advance a local copy of CUM past the last "real" named
++     argument, to find out how many registers are left over.  */
++  local_cum = *get_cumulative_args (cum);
++  loongarch_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
++
++  /* Find out how many registers we need to save.  */
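++  /* As an illustrative example (assuming MAX_ARGS_IN_REGISTERS is 8,
++     i.e. registers a0-a7): a variadic function with three named GPR
++     arguments leaves gp_saved = 5 registers to dump into the varargs
++     save area below the incoming stack pointer.  */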
++  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
++
++  if (!no_rtl && gp_saved > 0)
++    {
++      rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
++			       REG_PARM_STACK_SPACE (cfun->decl)
++			       - gp_saved * UNITS_PER_WORD);
++      rtx mem = gen_frame_mem (BLKmode, ptr);
++      set_mem_alias_set (mem, get_varargs_alias_set ());
++
++      move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
++			   mem, gp_saved);
++    }
++  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
++    cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
++}
++
++/* Make the last instruction frame-related and note that it performs
++   the operation described by FRAME_PATTERN.  */
++
++static void
++loongarch_set_frame_expr (rtx frame_pattern)
++{
++  rtx insn;
++
++  insn = get_last_insn ();
++  RTX_FRAME_RELATED_P (insn) = 1;
++  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++				      frame_pattern,
++				      REG_NOTES (insn));
++}
++
++/* Return a frame-related rtx that stores REG at MEM.
++   REG must be a single register.  */
++
++static rtx
++loongarch_frame_set (rtx mem, rtx reg)
++{
++  rtx set = gen_rtx_SET (mem, reg);
++  RTX_FRAME_RELATED_P (set) = 1;
++  return set;
++}
++
++/* Return true if the current function must save register REGNO.  */
++
++static bool
++loongarch_save_reg_p (unsigned int regno)
++{
++  bool call_saved = !global_regs[regno] && !call_used_regs[regno];
++  bool might_clobber = crtl->saves_all_registers
++		       || df_regs_ever_live_p (regno);
++
++  if (call_saved && might_clobber)
++    return true;
++
++  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
++    return true;
++
++  if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
++    return true;
++
++  return false;
++}
++
++/* Determine whether to call GPR save/restore routines.  */
++static bool
++loongarch_use_save_libcall (const struct loongarch_frame_info *frame)
++{
++  // FIXME: if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
++  return false;
++}
++
++/* Determine which GPR save/restore routine to call.  */
++
++static unsigned
++loongarch_save_libcall_count (unsigned mask)
++{
++  for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
++    if (BITSET_P (mask, n))
++      return CALLEE_SAVED_REG_NUMBER (n) + 1;
++  abort ();
++}
++
++/* Populate the current function's loongarch_frame_info structure.
++
++   LARCH stack frames grow downward.  High addresses are at the top.
++
++	+-------------------------------+
++	|				|
++	|  incoming stack arguments	|
++	|				|
++	+-------------------------------+ <-- incoming stack pointer
++	|				|
++	|  callee-allocated save area	|
++	|  for arguments that are	|
++	|  split between registers and	|
++	|  the stack			|
++	|				|
++	+-------------------------------+ <-- arg_pointer_rtx
++	|				|
++	|  callee-allocated save area	|
++	|  for register varargs		|
++	|				|
++	+-------------------------------+ <-- hard_frame_pointer_rtx;
++	|				|     stack_pointer_rtx + gp_sp_offset
++	|  GPR save area		|       + UNITS_PER_WORD
++	|				|
++	+-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
++	|				|       + UNITS_PER_HWVALUE
++	|  FPR save area		|
++	|				|
++	+-------------------------------+ <-- frame_pointer_rtx (virtual)
++	|				|
++	|  local variables		|
++	|				|
++      P +-------------------------------+
++	|				|
++	|  outgoing stack arguments	|
++	|				|
++	+-------------------------------+ <-- stack_pointer_rtx
++
++   Dynamic stack allocations such as alloca insert data at point P.
++   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
++   hard_frame_pointer_rtx unchanged.  */
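++/* A worked example for the layout above (illustrative, assuming 16-byte
++   stack alignment and 64-bit registers): with 16 bytes of locals, no
++   outgoing arguments and only $ra saved, loongarch_compute_frame_info
++   below sets frame_pointer_offset = 16, gp_sp_offset = 24 and
++   hard_frame_pointer_offset = total_size = 32.  */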
++
++static void
++loongarch_compute_frame_info (void)
++{
++  struct loongarch_frame_info *frame;
++  HOST_WIDE_INT offset;
++  unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
++
++  frame = &cfun->machine->frame;
++  memset (frame, 0, sizeof (*frame));
++
++  /* Find out which GPRs we need to save.  */
++  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++    if (loongarch_save_reg_p (regno))
++      frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++  /* If this function calls eh_return, we must also save and restore the
++     EH data registers.  */
++  if (crtl->calls_eh_return)
++    for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
++      frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++  /* Find out which FPRs we need to save.  This loop must iterate over
++     the same space as its companion in loongarch_for_each_saved_reg.  */
++  if (TARGET_HARD_FLOAT)
++    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++      if (loongarch_save_reg_p (regno))
++	frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
++
++  /* At the bottom of the frame are any outgoing stack arguments.  */
++  offset = LARCH_STACK_ALIGN (crtl->outgoing_args_size);
++  /* Next are local stack variables.  */
++  offset += LARCH_STACK_ALIGN (get_frame_size ());
++  /* The virtual frame pointer points above the local variables.  */
++  frame->frame_pointer_offset = offset;
++  /* Next are the callee-saved FPRs.  */
++  if (frame->fmask)
++    offset += LARCH_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
++  frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
++  /* Next are the callee-saved GPRs.  */
++  if (frame->mask)
++    {
++      unsigned x_save_size = LARCH_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
++      unsigned num_save_restore = 1 + loongarch_save_libcall_count (frame->mask);
++
++      /* Only use save/restore routines if they don't alter the stack size.  */
++      if (LARCH_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
++	frame->save_libcall_adjustment = x_save_size;
++
++      offset += x_save_size;
++    }
++  frame->gp_sp_offset = offset - UNITS_PER_WORD;
++  /* The hard frame pointer points above the callee-saved GPRs.  */
++  frame->hard_frame_pointer_offset = offset;
++  /* Above the hard frame pointer is the callee-allocated varargs save
++     area.  */
++  offset += LARCH_STACK_ALIGN (cfun->machine->varargs_size);
++  /* Next is the callee-allocated area for pretend stack arguments.  */
++  offset += LARCH_STACK_ALIGN (crtl->args.pretend_args_size);
++  /* The arg pointer must be below the pretend args, but above any
++     alignment padding.  */
++  frame->arg_pointer_offset = offset - crtl->args.pretend_args_size;
++  frame->total_size = offset;
++  /* The incoming stack pointer and any incoming arguments are above
++     this point.  */
++
++  /* Only use save/restore routines when the GPRs are atop the frame.  */
++  if (frame->hard_frame_pointer_offset != frame->total_size)
++    frame->save_libcall_adjustment = 0;
++}
++
++/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
++   or argument pointer.  TO is either the stack pointer or hard frame
++   pointer.  */
++
++HOST_WIDE_INT
++loongarch_initial_elimination_offset (int from, int to)
++{
++  HOST_WIDE_INT src, dest;
++
++  loongarch_compute_frame_info ();
++
++  if (to == HARD_FRAME_POINTER_REGNUM)
++    dest = cfun->machine->frame.hard_frame_pointer_offset;
++  else if (to == STACK_POINTER_REGNUM)
++    dest = 0;  /* The stack pointer is the base of all offsets, hence 0.
*/ ++ else ++ gcc_unreachable (); ++ ++ if (from == FRAME_POINTER_REGNUM) ++ src = cfun->machine->frame.frame_pointer_offset; ++ else if (from == ARG_POINTER_REGNUM) ++ src = cfun->machine->frame.arg_pointer_offset; ++ else ++ gcc_unreachable (); ++ ++ return src - dest; ++} ++ ++/* A function to save or store a register. The first argument is the ++ register and the second is the stack slot. */ ++typedef void (*loongarch_save_restore_fn) (rtx, rtx); ++ ++/* Use FN to save or restore register REGNO. MODE is the register's ++ mode and OFFSET is the offset of its save slot from the current ++ stack pointer. */ ++ ++static void ++loongarch_save_restore_reg (machine_mode mode, int regno, ++ HOST_WIDE_INT offset, loongarch_save_restore_fn fn) ++{ ++ rtx mem; ++ ++ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset)); ++ fn (gen_rtx_REG (mode, regno), mem); ++} ++ ++/* Call FN for each register that is saved by the current function. ++ SP_OFFSET is the offset of the current stack pointer from the start ++ of the frame. */ ++ ++static void ++loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn fn) ++{ ++ HOST_WIDE_INT offset; ++ ++ /* Save the link register and s-registers. */ ++ offset = cfun->machine->frame.gp_sp_offset - sp_offset; ++ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) ++ { ++ loongarch_save_restore_reg (word_mode, regno, offset, fn); ++ offset -= UNITS_PER_WORD; ++ } ++ ++ /* This loop must iterate over the same space as its companion in ++ loongarch_compute_frame_info. */ ++ offset = cfun->machine->frame.fp_sp_offset - sp_offset; ++ for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST)) ++ { ++ machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode; ++ ++ loongarch_save_restore_reg (mode, regno, offset, fn); ++ offset -= GET_MODE_SIZE (mode); ++ } ++} ++ ++/* Save register REG to MEM. Make the instruction frame-related. */ ++ ++static void ++loongarch_save_reg (rtx reg, rtx mem) ++{ ++ loongarch_emit_move (mem, reg); ++ loongarch_set_frame_expr (loongarch_frame_set (mem, reg)); ++} ++ ++/* Restore register REG from MEM. */ ++ ++static void ++loongarch_restore_reg (rtx reg, rtx mem) ++{ ++ rtx insn = loongarch_emit_move (reg, mem); ++ rtx dwarf = NULL_RTX; ++ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf); ++ REG_NOTES (insn) = dwarf; ++ ++ RTX_FRAME_RELATED_P (insn) = 1; ++} ++ ++/* Return the code to invoke the GPR save routine. */ ++ ++const char * ++loongarch_output_gpr_save (unsigned mask) ++{ ++ static char s[32]; ++ unsigned n = loongarch_save_libcall_count (mask); ++ ++ ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__loongarch_save_%u", n); ++ gcc_assert ((size_t) bytes < sizeof (s)); ++ ++ return s; ++} ++ ++#define IMM_BITS 12 ++ ++#define IMM_REACH (1LL << IMM_BITS) ++ ++/* For stack frames that can't be allocated with a single ADDI instruction, ++ compute the best value to initially allocate. It must at a minimum ++ allocate enough space to spill the callee-saved registers. If TARGET_RVC, ++ try to pick a value that will allow compression of the register saves ++ without adding extra instructions. 
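++   (TARGET_RVC appears to be inherited from the RISC-V code that this
++   routine follows.)
++
++   A sketch of the split (assuming SMALL_OPERAND accepts signed 12-bit
++   immediates, so IMM_REACH is 4096): a 4640-byte frame cannot be
++   allocated in one add, so the first step takes 4640 % 4096 = 544
++   bytes, provided that still covers the register save area, and the
++   remaining 4096 bytes become the LUI + ADD second step described in
++   the code below.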
*/ ++ ++static HOST_WIDE_INT ++loongarch_first_stack_step (struct loongarch_frame_info *frame) ++{ ++ if (SMALL_OPERAND (frame->total_size)) ++ return frame->total_size; ++ ++ HOST_WIDE_INT min_first_step = ++ LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset); ++ HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8; ++ HOST_WIDE_INT min_second_step = frame->total_size - max_first_step; ++ gcc_assert (min_first_step <= max_first_step); ++ ++ /* As an optimization, use the least-significant bits of the total frame ++ size, so that the second adjustment step is just LUI + ADD. */ ++ if (!SMALL_OPERAND (min_second_step) ++ && frame->total_size % IMM_REACH < IMM_REACH / 2 ++ && frame->total_size % IMM_REACH >= min_first_step) ++ return frame->total_size % IMM_REACH; ++ ++ return max_first_step; ++} ++ ++static rtx ++loongarch_adjust_libcall_cfi_prologue () ++{ ++ rtx dwarf = NULL_RTX; ++ rtx adjust_sp_rtx, reg, mem, insn; ++ int saved_size = cfun->machine->frame.save_libcall_adjustment; ++ int offset; ++ ++ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) ++ { ++ /* The save order is ra, s0 to s8. */ ++ if (regno == RETURN_ADDR_REGNUM) ++ offset = saved_size - UNITS_PER_WORD; ++ else ++ offset = saved_size - ((regno - S0_REGNUM + 2) * UNITS_PER_WORD); ++ ++ reg = gen_rtx_REG (SImode, regno); ++ mem = gen_frame_mem (SImode, plus_constant (Pmode, ++ stack_pointer_rtx, ++ offset)); ++ ++ insn = gen_rtx_SET (mem, reg); ++ dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf); ++ } ++ ++ /* Debug info for adjust sp. */ ++ adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, GEN_INT (-saved_size)); ++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, ++ dwarf); ++ return dwarf; ++} ++ ++static void ++loongarch_emit_stack_tie (void) ++{ ++ if (Pmode == SImode) ++ emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx)); ++ else ++ emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx)); ++} ++ ++/* Return nonzero if this function is known to have a null epilogue. ++ This allows the optimizer to omit jumps to jumps if no stack ++ was created. */ ++ ++bool ++loongarch_can_use_return_insn (void) ++{ ++ return reload_completed && cfun->machine->frame.total_size == 0; ++} ++ ++static rtx ++loongarch_adjust_libcall_cfi_epilogue () ++{ ++ rtx dwarf = NULL_RTX; ++ rtx adjust_sp_rtx, reg; ++ int saved_size = cfun->machine->frame.save_libcall_adjustment; ++ ++ /* Debug info for adjust sp. */ ++ adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, GEN_INT (saved_size)); ++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, ++ dwarf); ++ ++ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) ++ { ++ reg = gen_rtx_REG (SImode, regno); ++ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf); ++ } ++ ++ return dwarf; ++} ++ ++/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P ++ says which. */ ++ ++void ++loongarch_expand_epilogue (bool sibcall_p) ++{ ++ /* Split the frame into two. STEP1 is the amount of stack we should ++ deallocate before restoring the registers. STEP2 is the amount we ++ should deallocate afterwards. ++ ++ Start off by assuming that no registers need to be restored. 
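++     (In the common case, STEP2 is the loongarch_first_stack_step value
++     computed for the prologue and STEP1 is the remainder, so the
++     epilogue mirrors the prologue's two-step adjustment.)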
*/ ++ struct loongarch_frame_info *frame = &cfun->machine->frame; ++ unsigned mask = frame->mask; ++ HOST_WIDE_INT step1 = frame->total_size; ++ HOST_WIDE_INT step2 = 0; ++ bool use_restore_libcall = !sibcall_p && loongarch_use_save_libcall (frame); ++ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); ++ rtx insn; ++ ++ /* We need to add memory barrier to prevent read from deallocated stack. */ ++ bool need_barrier_p = (get_frame_size () ++ + cfun->machine->frame.arg_pointer_offset) != 0; ++ ++ if (!sibcall_p && loongarch_can_use_return_insn ()) ++ { ++ emit_jump_insn (gen_return ()); ++ return; ++ } ++ ++ /* Move past any dynamic stack allocations. */ ++ if (cfun->calls_alloca) ++ { ++ /* Emit a barrier to prevent loads from a deallocated stack. */ ++ loongarch_emit_stack_tie (); ++ need_barrier_p = false; ++ ++ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); ++ if (!SMALL_OPERAND (INTVAL (adjust))) ++ { ++ loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); ++ adjust = N_LARCH_PROLOGUE_TEMP (Pmode); ++ } ++ ++ insn = emit_insn ( ++ gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, ++ adjust)); ++ ++ rtx dwarf = NULL_RTX; ++ rtx cfa_adjust_value = gen_rtx_PLUS ( ++ Pmode, hard_frame_pointer_rtx, ++ GEN_INT (-frame->hard_frame_pointer_offset)); ++ rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value); ++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ ++ REG_NOTES (insn) = dwarf; ++ } ++ ++ /* If we need to restore registers, deallocate as much stack as ++ possible in the second step without going out of range. */ ++ if ((frame->mask | frame->fmask) != 0) ++ { ++ step2 = loongarch_first_stack_step (frame); ++ step1 -= step2; ++ } ++ ++ /* Set TARGET to BASE + STEP1. */ ++ if (step1 > 0) ++ { ++ /* Emit a barrier to prevent loads from a deallocated stack. */ ++ loongarch_emit_stack_tie (); ++ need_barrier_p = false; ++ ++ /* Get an rtx for STEP1 that we can add to BASE. */ ++ rtx adjust = GEN_INT (step1); ++ if (!SMALL_OPERAND (step1)) ++ { ++ loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); ++ adjust = N_LARCH_PROLOGUE_TEMP (Pmode); ++ } ++ ++ insn = emit_insn ( ++ gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust)); ++ ++ rtx dwarf = NULL_RTX; ++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, ++ GEN_INT (step2)); ++ ++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ ++ REG_NOTES (insn) = dwarf; ++ } ++ ++ if (use_restore_libcall) ++ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ ++ ++ /* Restore the registers. */ ++ loongarch_for_each_saved_reg (frame->total_size - step2, loongarch_restore_reg); ++ ++ if (use_restore_libcall) ++ { ++ frame->mask = mask; /* Undo the above fib. */ ++ gcc_assert (step2 >= frame->save_libcall_adjustment); ++ step2 -= frame->save_libcall_adjustment; ++ } ++ ++ if (need_barrier_p) ++ loongarch_emit_stack_tie (); ++ ++ /* Deallocate the final bit of the frame. 
*/ ++ if (step2 > 0) ++ { ++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (step2))); ++ ++ rtx dwarf = NULL_RTX; ++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, ++ const0_rtx); ++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ ++ REG_NOTES (insn) = dwarf; ++ } ++ ++ if (use_restore_libcall) ++ { ++ rtx dwarf = loongarch_adjust_libcall_cfi_epilogue (); ++ insn = emit_insn (gen_gpr_restore (GEN_INT (loongarch_save_libcall_count (mask)))); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ REG_NOTES (insn) = dwarf; ++ ++ emit_jump_insn (gen_gpr_restore_return (ra)); ++ return; ++ } ++ ++ /* Add in the __builtin_eh_return stack adjustment. */ ++ if (crtl->calls_eh_return) ++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ EH_RETURN_STACKADJ_RTX)); ++ ++ if (!sibcall_p) ++ emit_jump_insn (gen_simple_return_internal (ra)); ++} ++ ++ ++static rtx loongarch_find_pic_call_symbol (rtx_insn *, rtx, bool); ++static int loongarch_register_move_cost (machine_mode, reg_class_t, ++ reg_class_t); ++ ++/* Predicates to test for presence of "near"/"short_call" and "far"/"long_call" ++ attributes on the given TYPE. */ ++ ++static bool ++loongarch_near_type_p (const_tree type) ++{ ++ return (lookup_attribute ("short_call", TYPE_ATTRIBUTES (type)) != NULL ++ || lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL); ++} ++ ++static bool ++loongarch_far_type_p (const_tree type) ++{ ++ return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL ++ || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL); ++} ++ ++ ++/* Check if the interrupt attribute is set for a function. */ ++ ++static bool ++loongarch_interrupt_type_p (tree type) ++{ ++ return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL; ++} ++ ++/* Implement TARGET_COMP_TYPE_ATTRIBUTES. */ ++ ++static int ++loongarch_comp_type_attributes (const_tree type1, const_tree type2) ++{ ++ /* Disallow mixed near/far attributes. */ ++ if (loongarch_far_type_p (type1) && loongarch_near_type_p (type2)) ++ return 0; ++ if (loongarch_near_type_p (type1) && loongarch_far_type_p (type2)) ++ return 0; ++ return 1; ++} ++ ++/* Implement TARGET_INSERT_ATTRIBUTES. */ ++ ++static void ++loongarch_insert_attributes (tree decl, tree *attributes) ++{ ++} ++ ++/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */ ++ ++static tree ++loongarch_merge_decl_attributes (tree olddecl, tree newdecl) ++{ ++ return merge_attributes (DECL_ATTRIBUTES (olddecl), ++ DECL_ATTRIBUTES (newdecl)); ++} ++ ++/* Implement TARGET_CAN_INLINE_P. */ ++ ++static bool ++loongarch_can_inline_p (tree caller, tree callee) ++{ ++ return default_target_can_inline_p (caller, callee); ++} ++ ++/* Handle an "interrupt" attribute with an optional argument. */ ++ ++static tree ++loongarch_handle_interrupt_attr (tree *node ATTRIBUTE_UNUSED, tree name, tree args, ++ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) ++{ ++ /* Check for an argument. 
 */
++  if (is_attribute_p ("interrupt", name) && args != NULL)
++    {
++      tree cst;
++
++      cst = TREE_VALUE (args);
++      if (TREE_CODE (cst) != STRING_CST)
++	{
++	  warning (OPT_Wattributes,
++		   "%qE attribute requires a string argument",
++		   name);
++	  *no_add_attrs = true;
++	}
++      else if (strcmp (TREE_STRING_POINTER (cst), "eic") != 0
++	       && strncmp (TREE_STRING_POINTER (cst), "vector=", 7) != 0)
++	{
++	  warning (OPT_Wattributes,
++		   "argument to %qE attribute is neither eic, nor "
++		   "vector=", name);
++	  *no_add_attrs = true;
++	}
++      else if (strncmp (TREE_STRING_POINTER (cst), "vector=", 7) == 0)
++	{
++	  const char *arg = TREE_STRING_POINTER (cst) + 7;
++
++	  /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5.  */
++	  if (strlen (arg) != 3
++	      || (arg[0] != 's' && arg[0] != 'h')
++	      || arg[1] != 'w'
++	      || (arg[0] == 's' && arg[2] != '0' && arg[2] != '1')
++	      || (arg[0] == 'h' && (arg[2] < '0' || arg[2] > '5')))
++	    {
++	      warning (OPT_Wattributes,
++		       "interrupt vector to %qE attribute is not "
++		       "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)",
++		       name);
++	      *no_add_attrs = true;
++	    }
++	}
++
++      return NULL_TREE;
++    }
++
++  return NULL_TREE;
++}
++
++/* Handle a "use_shadow_register_set" attribute with an optional argument.  */
++
++static tree
++loongarch_handle_use_shadow_register_set_attr (tree *node ATTRIBUTE_UNUSED,
++					       tree name, tree args,
++					       int flags ATTRIBUTE_UNUSED,
++					       bool *no_add_attrs)
++{
++  /* Check for an argument.  */
++  if (is_attribute_p ("use_shadow_register_set", name) && args != NULL)
++    {
++      tree cst;
++
++      cst = TREE_VALUE (args);
++      if (TREE_CODE (cst) != STRING_CST)
++	{
++	  warning (OPT_Wattributes,
++		   "%qE attribute requires a string argument",
++		   name);
++	  *no_add_attrs = true;
++	}
++      else if (strcmp (TREE_STRING_POINTER (cst), "intstack") != 0)
++	{
++	  warning (OPT_Wattributes,
++		   "argument to %qE attribute is not intstack", name);
++	  *no_add_attrs = true;
++	}
++
++      return NULL_TREE;
++    }
++
++  return NULL_TREE;
++}
++
++/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
++   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */
++
++static void
++loongarch_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
++{
++  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
++    {
++      *base_ptr = XEXP (x, 0);
++      *offset_ptr = INTVAL (XEXP (x, 1));
++    }
++  else
++    {
++      *base_ptr = x;
++      *offset_ptr = 0;
++    }
++}
++
++static unsigned int loongarch_build_integer (struct loongarch_integer_op *,
++					     unsigned HOST_WIDE_INT);
++
++/* Fill CODES with a sequence of rtl operations to load VALUE.
++   Return the number of operations needed.  Integers that need more
++   than one operation are split in loongarch_output_move.  */
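++/* A worked example (illustrative): for 0x1234567890abc the routine
++   below emits METHOD_NORMAL twice (a lu12i.w for bits 12-31 plus an
++   ori for bits 0-11) and then METHOD_LU32I for bits 32-51; bits 52-63
++   are already the sign extension of bit 51, so no METHOD_LU52I step is
++   needed and the total cost is 3.  */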
++
++static unsigned int
++loongarch_build_integer (struct loongarch_integer_op *codes,
++			 unsigned HOST_WIDE_INT value)
++{
++  uint32_t hi32, lo32;
++  char all0_bit_vec, sign_bit_vec, allf_bit_vec, paritial_is_sext_of_prev;
++  unsigned int cost = 0;
++
++  lo32 = value & 0xffffffff;
++  hi32 = value >> 32;
++
++  all0_bit_vec = (((hi32 & 0xfff00000) == 0) << 3)
++		 | (((hi32 & 0x000fffff) == 0) << 2)
++		 | (((lo32 & 0xfffff000) == 0) << 1)
++		 | ((lo32 & 0x00000fff) == 0);
++  sign_bit_vec = (((hi32 & 0x80000000) != 0) << 3)
++		 | (((hi32 & 0x00080000) != 0) << 2)
++		 | (((lo32 & 0x80000000) != 0) << 1)
++		 | ((lo32 & 0x00000800) != 0);
++  allf_bit_vec = (((hi32 & 0xfff00000) == 0xfff00000) << 3)
++		 | (((hi32 & 0x000fffff) == 0x000fffff) << 2)
++		 | (((lo32 & 0xfffff000) == 0xfffff000) << 1)
++		 | ((lo32 & 0x00000fff) == 0x00000fff);
++  paritial_is_sext_of_prev = (all0_bit_vec ^ allf_bit_vec)
++			     & (all0_bit_vec ^ (sign_bit_vec << 1));
++
++  do
++    {
++      if (paritial_is_sext_of_prev == 0x7)
++	{
++	  codes[0].code = UNKNOWN;
++	  codes[0].method = METHOD_LU52I;
++	  codes[0].value = value & 0xfff0000000000000;
++	  cost++;
++	  break;
++	}
++      if ((all0_bit_vec & 0x3) == 0x2)
++	{
++	  codes[cost].code = UNKNOWN;
++	  codes[cost].method = METHOD_NORMAL;
++	  codes[cost].value = value & 0xfff;
++	  cost++;
++	}
++      else
++	{
++	  switch (paritial_is_sext_of_prev & 0x3)
++	    {
++	    case 0:
++	      codes[cost].code = UNKNOWN;
++	      codes[cost].method = METHOD_NORMAL;
++	      codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000;
++	      cost++;
++	      codes[cost].code = IOR;
++	      codes[cost].method = METHOD_NORMAL;
++	      codes[cost].value = value & 0xfff;
++	      cost++;
++	      break;
++	    case 1:
++	      codes[cost].code = UNKNOWN;
++	      codes[cost].method = METHOD_NORMAL;
++	      codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000;
++	      cost++;
++	      break;
++	    case 2:
++	      codes[cost].code = UNKNOWN;
++	      codes[cost].method = METHOD_NORMAL;
++	      codes[cost].value = (HOST_WIDE_INT)value << 52 >> 52;
++	      cost++;
++	      break;
++	    case 3:
++	      codes[cost].code = UNKNOWN;
++	      codes[cost].method = METHOD_NORMAL;
++	      codes[cost].value = 0;
++	      cost++;
++	      break;
++	    default:
++	      gcc_unreachable ();
++	    }
++	}
++
++      if (((value & 0xfffffffffffff800) ^ 0xfff00000fffff800) == 0)
++	{
++	  codes[cost].method = METHOD_INSV;
++	  cost++;
++	  break;
++	}
++
++      switch (paritial_is_sext_of_prev >> 2)
++	{
++	case 0:
++	  codes[cost].method = METHOD_LU32I;
++	  codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000;
++	  cost++;
++	  /* Fall through: both LU32I and LU52I are needed here.  */
++	case 1:
++	  codes[cost].method = METHOD_LU52I;
++	  codes[cost].value = value & 0xfff0000000000000;
++	  cost++;
++	  break;
++	case 2:
++	  codes[cost].method = METHOD_LU32I;
++	  codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000;
++	  cost++;
++	  break;
++	case 3:
++	  break;
++	default:
++	  gcc_unreachable ();
++	}
++    }
++  while (0);
++
++  return cost;
++}
++
++/* Return the number of operations needed to load VALUE, using the same
++   decomposition as loongarch_build_integer above.  */
++
++static unsigned int
++loongarch_integer_cost (HOST_WIDE_INT value)
++{
++  struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS];
++  return loongarch_build_integer (codes, value);
++}
++
++/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
++
++static bool
++loongarch_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++  return loongarch_const_insns (x) > 0;
++}
++
++
++/* Return true if X is a thread-local symbol.
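++   (For example -- illustrative -- a variable declared with __thread,
++   whose SYMBOL_REF then carries a nonzero SYMBOL_REF_TLS_MODEL.)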
*/ ++ ++static bool ++loongarch_tls_symbol_p (rtx x) ++{ ++ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0; ++} ++ ++/* Return true if SYMBOL_REF X is associated with a global symbol ++ (in the STB_GLOBAL sense). */ ++ ++bool ++loongarch_global_symbol_p (const_rtx x) ++{ ++ if (GET_CODE (x) == LABEL_REF) ++ return false; ++ ++ const_tree decl = SYMBOL_REF_DECL (x); ++ ++ if (!decl) ++ return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); ++ ++ /* Weakref symbols are not TREE_PUBLIC, but their targets are global ++ or weak symbols. Relocations in the object file will be against ++ the target symbol, so it's that symbol's binding that matters here. */ ++ return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl)); ++} ++ ++bool ++loongarch_global_symbol_noweak_p (const_rtx x) ++{ ++ if (GET_CODE (x) == LABEL_REF) ++ return false; ++ ++ const_tree decl = SYMBOL_REF_DECL (x); ++ ++ if (!decl) ++ return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); ++ ++ /* Weakref symbols are not TREE_PUBLIC, but their targets are global ++ or weak symbols. Relocations in the object file will be against ++ the target symbol, so it's that symbol's binding that matters here. */ ++ return DECL_P (decl) && TREE_PUBLIC (decl); ++} ++ ++bool ++loongarch_weak_symbol_p (const_rtx x) ++{ ++ const_tree decl; ++ if (GET_CODE (x) == LABEL_REF || !(decl = SYMBOL_REF_DECL (x))) ++ return false; ++ return DECL_P (decl) && DECL_WEAK (decl); ++} ++ ++ ++/* Return true if SYMBOL_REF X binds locally. */ ++ ++bool ++loongarch_symbol_binds_local_p (const_rtx x) ++{ ++ if (GET_CODE (x) == LABEL_REF) ++ return false; ++ ++ return (SYMBOL_REF_DECL (x) ++ ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) ++ : SYMBOL_REF_LOCAL_P (x)); ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same bit set. */ ++ ++bool ++loongarch_const_vector_bitimm_set_p (rtx op, machine_mode mode) ++{ ++ if (GET_CODE (op) == CONST_VECTOR && op != CONST0_RTX (mode)) ++ { ++ unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0)); ++ int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode))); ++ ++ if (vlog2 != -1) ++ { ++ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT); ++ gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1); ++ return loongarch_const_vector_same_val_p (op, mode); ++ } ++ } ++ ++ return false; ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same bit clear. */ ++ ++bool ++loongarch_const_vector_bitimm_clr_p (rtx op, machine_mode mode) ++{ ++ if (GET_CODE (op) == CONST_VECTOR && op != CONSTM1_RTX (mode)) ++ { ++ unsigned HOST_WIDE_INT val = ~UINTVAL (CONST_VECTOR_ELT (op, 0)); ++ int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode))); ++ ++ if (vlog2 != -1) ++ { ++ gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT); ++ gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1); ++ return loongarch_const_vector_same_val_p (op, mode); ++ } ++ } ++ ++ return false; ++} ++ ++/* Return true if OP is a constant vector with the number of units in MODE, ++ and each unit has the same value. 
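++   For example (illustrative), (const_vector:V4SI [(const_int 5)
++   (const_int 5) (const_int 5) (const_int 5)]) satisfies this test.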
*/
++
++bool
++loongarch_const_vector_same_val_p (rtx op, machine_mode mode)
++{
++  int i, nunits = GET_MODE_NUNITS (mode);
++  rtx first;
++
++  if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode)
++    return false;
++
++  first = CONST_VECTOR_ELT (op, 0);
++  for (i = 1; i < nunits; i++)
++    if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i)))
++      return false;
++
++  return true;
++}
++
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same value, that value consisting of a single
++   replicated byte.  */
++
++bool
++loongarch_const_vector_same_bytes_p (rtx op, machine_mode mode)
++{
++  int i, bytes;
++  HOST_WIDE_INT val, first_byte;
++  rtx first;
++
++  if (!loongarch_const_vector_same_val_p (op, mode))
++    return false;
++
++  first = CONST_VECTOR_ELT (op, 0);
++  bytes = GET_MODE_UNIT_SIZE (mode);
++  val = INTVAL (first);
++  first_byte = val & 0xff;
++  for (i = 1; i < bytes; i++)
++    {
++      val >>= 8;
++      if ((val & 0xff) != first_byte)
++	return false;
++    }
++
++  return true;
++}
++
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same integer value in the range [LOW, HIGH].  */
++
++bool
++loongarch_const_vector_same_int_p (rtx op, machine_mode mode, HOST_WIDE_INT low,
++				   HOST_WIDE_INT high)
++{
++  HOST_WIDE_INT value;
++  rtx elem0;
++
++  if (!loongarch_const_vector_same_val_p (op, mode))
++    return false;
++
++  elem0 = CONST_VECTOR_ELT (op, 0);
++  if (!CONST_INT_P (elem0))
++    return false;
++
++  value = INTVAL (elem0);
++  return (value >= low && value <= high);
++}
++
++/* Return true if OP is a constant vector with repeated 4-element sets
++   in mode MODE.  */
++
++bool
++loongarch_const_vector_shuffle_set_p (rtx op, machine_mode mode)
++{
++  int nunits = GET_MODE_NUNITS (mode);
++  int nsets = nunits / 4;
++  int set = 0;
++  int i, j;
++
++  /* Check if we have the same 4-element sets.  */
++  for (j = 0; j < nsets; j++, set = 4 * j)
++    for (i = 0; i < 4; i++)
++      if ((INTVAL (XVECEXP (op, 0, i))
++	   != (INTVAL (XVECEXP (op, 0, set + i)) - set))
++	  || !IN_RANGE (INTVAL (XVECEXP (op, 0, set + i)), 0, set + 3))
++	return false;
++  return true;
++}
++
++/* Return true if rtx constants of mode MODE should be put into a small
++   data section.  */
++
++static bool
++loongarch_rtx_constant_in_small_data_p (machine_mode mode)
++{
++  return (GET_MODE_SIZE (mode) <= loongarch_small_data_threshold);
++}
++
++/* Return the method that should be used to access SYMBOL_REF or
++   LABEL_REF X in context CONTEXT.  */
++
++static enum loongarch_symbol_type
++loongarch_classify_symbol (const_rtx x, enum loongarch_symbol_context context)
++{
++  if (TARGET_RTP_PIC)
++    return SYMBOL_GOT_DISP;
++
++  if (GET_CODE (x) == LABEL_REF)
++    return SYMBOL_GOT_DISP;
++
++  gcc_assert (GET_CODE (x) == SYMBOL_REF);
++
++  if (SYMBOL_REF_TLS_MODEL (x))
++    return SYMBOL_TLS;
++
++  return SYMBOL_GOT_DISP;
++}
++
++/* Return true if X is a symbolic constant that can be used in context
++   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE. 
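++   For example, (const (plus (symbol_ref "x") (const_int 4))) is accepted
++   only if a 4-byte offset is valid for the relocations of x's symbol type.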
*/
++
++bool
++loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context,
++			       enum loongarch_symbol_type *symbol_type)
++{
++  rtx offset;
++
++  split_const (x, &x, &offset);
++  if (UNSPEC_ADDRESS_P (x))
++    {
++      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
++      x = UNSPEC_ADDRESS (x);
++    }
++  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
++    {
++      *symbol_type = loongarch_classify_symbol (x, context);
++      if (*symbol_type == SYMBOL_TLS)
++	return true;
++    }
++  else
++    return false;
++
++  if (offset == const0_rtx)
++    return true;
++
++  /* Check whether a nonzero offset is valid for the underlying
++     relocations.  */
++  switch (*symbol_type)
++    {
++    case SYMBOL_GOT_DISP:
++    case SYMBOL_TLSGD:
++    case SYMBOL_TLSLDM:
++    case SYMBOL_TLS:
++      return false;
++    }
++  gcc_unreachable ();
++}
++
++/* Worker for loongarch_symbol_insns: handle the case where the mode
++   check has already been done.  */
++
++static int
++loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode)
++{
++  if (loongarch_use_pcrel_pool_p[(int) type])
++    {
++      /* The constant must be loaded and then dereferenced.  */
++      return 0;
++    }
++
++  switch (type)
++    {
++    case SYMBOL_GOT_DISP:
++      /* The constant will have to be loaded from the GOT before it
++	 is used in an address.  */
++      if (mode != MAX_MACHINE_MODE)
++	return 0;
++
++      return 3;
++
++    case SYMBOL_TLSGD:
++    case SYMBOL_TLSLDM:
++      return 1;
++
++    case SYMBOL_TLS:
++      /* We don't treat a bare TLS symbol as a constant.  */
++      return 0;
++    }
++  gcc_unreachable ();
++}
++
++/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
++   to load symbols of type TYPE into a register.  Return 0 if the given
++   type of symbol cannot be used as an immediate operand.
++
++   Otherwise, return the number of instructions needed to load or store
++   values of mode MODE to or from addresses of type TYPE.  Return 0 if
++   the given type of symbol is not valid in addresses.
++
++   In both cases, instruction counts are based off BASE_INSN_LENGTH.  */
++
++static int
++loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
++{
++  /* LSX LD.* and ST.* cannot support loading symbols via an immediate
++     operand.  */
++  if (LSX_SUPPORTED_MODE_P (mode))
++    return 0;
++
++  if (LASX_SUPPORTED_MODE_P (mode))
++    return 0;
++
++  return loongarch_symbol_insns_1 (type, mode);
++}
++
++/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
++
++static bool
++loongarch_cannot_force_const_mem (machine_mode mode, rtx x)
++{
++  enum loongarch_symbol_type type;
++  rtx base, offset;
++
++  /* There is no assembler syntax for expressing an address-sized
++     high part.  */
++  if (GET_CODE (x) == HIGH)
++    return true;
++
++  /* As an optimization, reject constants that loongarch_legitimize_move
++     can expand inline.
++
++     Suppose we have a multi-instruction sequence that loads constant C
++     into register R.  If R does not get allocated a hard register, and
++     R is used in an operand that allows both registers and memory
++     references, reload will consider forcing C into memory and using
++     one of the instruction's memory alternatives.  Returning false
++     here will force it to use an input reload instead.  */
++  if (CONST_INT_P (x) && loongarch_legitimate_constant_p (mode, x))
++    return true;
++
++  split_const (x, &base, &offset);
++  if (loongarch_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type))
++    {
++      /* See whether we explicitly want these symbols in the pool. 
*/
++      if (loongarch_use_pcrel_pool_p[(int) type])
++	return false;
++
++      /* The same optimization as for CONST_INT.  */
++      if (SMALL_INT (offset) && loongarch_symbol_insns (type, MAX_MACHINE_MODE) > 0)
++	return true;
++    }
++
++  /* TLS symbols must be computed by loongarch_legitimize_move.  */
++  if (tls_referenced_p (x))
++    return true;
++
++  return false;
++}
++
++/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */
++
++static bool
++loongarch_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
++				     const_rtx x ATTRIBUTE_UNUSED)
++{
++  return true;
++}
++
++/* Return true if register REGNO is a valid base register for mode MODE.
++   STRICT_P is true if REG_OK_STRICT is in effect.  */
++
++int
++loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode,
++				    bool strict_p)
++{
++  if (!HARD_REGISTER_NUM_P (regno))
++    {
++      if (!strict_p)
++	return true;
++      regno = reg_renumber[regno];
++    }
++
++  /* These fake registers will be eliminated to either the stack or
++     hard frame pointer, both of which are usually valid base registers.
++     Reload deals with the cases where the eliminated form isn't valid.  */
++  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
++    return true;
++
++  return GP_REG_P (regno);
++}
++
++/* Return true if X is a valid base register for mode MODE.
++   STRICT_P is true if REG_OK_STRICT is in effect.  */
++
++static bool
++loongarch_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
++{
++  if (!strict_p && GET_CODE (x) == SUBREG)
++    x = SUBREG_REG (x);
++
++  return (REG_P (x)
++	  && loongarch_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
++}
++
++/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
++   can address a value of mode MODE.  */
++
++static bool
++loongarch_valid_offset_p (rtx x, machine_mode mode)
++{
++  /* Check that X is either a signed 12-bit number, or a signed 16-bit
++     number whose low two bits are zero (a 4-byte-aligned offset).  */
++  if (!(const_arith_operand (x, Pmode)
++	|| ((mode == E_SImode || mode == E_DImode)
++	    && const_imm16_operand (x, Pmode)
++	    && (loongarch_signed_immediate_p (INTVAL (x), 14, 2)))))
++    return false;
++
++  /* We may need to split multiword moves, so make sure that every word
++     is accessible.  */
++  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
++    return false;
++
++  /* LSX LD.* and ST.* support 10-bit signed offsets.  */
++  if (LSX_SUPPORTED_MODE_P (mode)
++      && !loongarch_signed_immediate_p (INTVAL (x), 10,
++					loongarch_ldst_scaled_shift (mode)))
++    return false;
++
++  /* LASX XVLD.B and XVST.B support 10-bit signed offsets without shift.  */
++  if (LASX_SUPPORTED_MODE_P (mode)
++      && !loongarch_signed_immediate_p (INTVAL (x), 10, 0))
++    return false;
++
++  return true;
++}
++
++/* Return true if X is a valid address for machine mode MODE.  If it is,
++   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
++   effect. 
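++   Only (reg) and (plus reg const_int) forms are accepted here; e.g.
++   (plus (reg sp) (const_int 16)) classifies as ADDRESS_REG with an
++   offset of 16.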
*/ ++ ++static bool ++loongarch_classify_address (struct loongarch_address_info *info, rtx x, ++ machine_mode mode, bool strict_p) ++{ ++ switch (GET_CODE (x)) ++ { ++ case REG: ++ case SUBREG: ++ info->type = ADDRESS_REG; ++ info->reg = x; ++ info->offset = const0_rtx; ++ return loongarch_valid_base_register_p (info->reg, mode, strict_p); ++ ++ case PLUS: ++ info->type = ADDRESS_REG; ++ info->reg = XEXP (x, 0); ++ info->offset = XEXP (x, 1); ++ return (loongarch_valid_base_register_p (info->reg, mode, strict_p) ++ && loongarch_valid_offset_p (info->offset, mode)); ++ #if 0 ++ case LABEL_REF: ++ case SYMBOL_REF: ++ info->type = ADDRESS_SYMBOLIC; ++ return (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM, ++ &info->symbol_type) ++ && loongarch_symbol_insns (info->symbol_type, mode) > 0 ++ && !loongarch_split_p[info->symbol_type]); ++ ++ #endif ++ default: ++ return false; ++ } ++} ++ ++/* Implement TARGET_LEGITIMATE_ADDRESS_P. */ ++ ++static bool ++loongarch_legitimate_address_p (machine_mode mode, rtx x, bool strict_p) ++{ ++ struct loongarch_address_info addr; ++ ++ return loongarch_classify_address (&addr, x, mode, strict_p); ++} ++ ++/* Return true if X is a legitimate $sp-based address for mode MODE. */ ++ ++bool ++loongarch_stack_address_p (rtx x, machine_mode mode) ++{ ++ struct loongarch_address_info addr; ++ ++ return (loongarch_classify_address (&addr, x, mode, false) ++ && addr.type == ADDRESS_REG ++ && addr.reg == stack_pointer_rtx); ++} ++ ++/* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load ++ indexed address instruction. Note that such addresses are ++ not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P ++ sense, because their use is so restricted. */ ++ ++static bool ++loongarch_lx_address_p (rtx addr, machine_mode mode) ++{ ++ if (GET_CODE (addr) != PLUS ++ || !REG_P (XEXP (addr, 0)) ++ || !REG_P (XEXP (addr, 1))) ++ return false; ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ return true; ++ return false; ++} ++ ++ ++/* Return the number of instructions needed to load or store a value ++ of mode MODE at address X, assuming that BASE_INSN_LENGTH is the ++ length of one instruction. Return 0 if X isn't valid for MODE. ++ Assume that multiword moves may need to be split into word moves ++ if MIGHT_SPLIT_P, otherwise assume that a single load or store is ++ enough. */ ++ ++int ++loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) ++{ ++ struct loongarch_address_info addr; ++ int factor; ++ bool lsx_p = (!might_split_p && (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))); ++ ++ if (!loongarch_classify_address (&addr, x, mode, false)) ++ return 0; ++ ++ /* BLKmode is used for single unaligned loads and stores and should ++ not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty ++ meaningless, so we have to single it out as a special case one way ++ or the other.) */ ++ if (mode != BLKmode && might_split_p) ++ factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; ++ else ++ factor = 1; ++ ++ if (loongarch_classify_address (&addr, x, mode, false)) ++ switch (addr.type) ++ { ++ case ADDRESS_REG: ++ if (lsx_p) ++ { ++ /* LSX LD.* and ST.* supports 10-bit signed offsets. */ ++ if (loongarch_signed_immediate_p (INTVAL (addr.offset), 10, ++ loongarch_ldst_scaled_shift (mode))) ++ return 1; ++ else ++ return 0; ++ } ++ return factor; ++ ++ case ADDRESS_CONST_INT: ++ return lsx_p ? 0 : factor; ++ ++ case ADDRESS_SYMBOLIC: ++ return lsx_p ? 
0 : factor * loongarch_symbol_insns (addr.symbol_type, mode); ++ } ++ return 0; ++} ++ ++/* Return true if X fits within an unsigned field of BITS bits that is ++ shifted left SHIFT bits before being used. */ ++ ++bool ++loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) ++{ ++ return (x & ((1 << shift) - 1)) == 0 && x < ((unsigned) 1 << (shift + bits)); ++} ++ ++/* Return true if X fits within a signed field of BITS bits that is ++ shifted left SHIFT bits before being used. */ ++ ++bool ++loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) ++{ ++ x += 1 << (bits + shift - 1); ++ return loongarch_unsigned_immediate_p (x, bits, shift); ++} ++ ++/* Return the scale shift that applied to LSX LD/ST address offset. */ ++ ++int ++loongarch_ldst_scaled_shift (machine_mode mode) ++{ ++ int shift = exact_log2 (GET_MODE_UNIT_SIZE (mode)); ++ ++ if (shift < 0 || shift > 8) ++ gcc_unreachable (); ++ ++ return shift; ++} ++ ++/* Return true if X is a legitimate address that conforms to the requirements ++ for a microLARCH LWSP or SWSP insn. */ ++ ++bool ++lwsp_swsp_address_p (rtx x, machine_mode mode) ++{ ++ struct loongarch_address_info addr; ++ ++ return (loongarch_classify_address (&addr, x, mode, false) ++ && addr.type == ADDRESS_REG ++ && REGNO (addr.reg) == STACK_POINTER_REGNUM ++ && uw5_operand (addr.offset, mode)); ++} ++ ++/* Return true if X is a legitimate address with a 12-bit offset. ++ MODE is the mode of the value being accessed. */ ++ ++bool ++loongarch_12bit_offset_address_p (rtx x, machine_mode mode) ++{ ++ struct loongarch_address_info addr; ++ ++ return (loongarch_classify_address (&addr, x, mode, false) ++ && addr.type == ADDRESS_REG ++ && CONST_INT_P (addr.offset) ++ && ULARCH_12BIT_OFFSET_P (INTVAL (addr.offset))); ++} ++ ++/* Return true if X is a legitimate address with a 9-bit offset. ++ MODE is the mode of the value being accessed. */ ++ ++bool ++loongarch_9bit_offset_address_p (rtx x, machine_mode mode) ++{ ++ struct loongarch_address_info addr; ++ ++ return (loongarch_classify_address (&addr, x, mode, false) ++ && addr.type == ADDRESS_REG ++ && CONST_INT_P (addr.offset) ++ && LARCH_9BIT_OFFSET_P (INTVAL (addr.offset))); ++} ++ ++/* Return true if X is a legitimate address with a 14-bit offset shifted 2. ++ MODE is the mode of the value being accessed. */ ++ ++bool ++loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) ++{ ++ struct loongarch_address_info addr; ++ ++ return (loongarch_classify_address (&addr, x, mode, false) ++ && addr.type == ADDRESS_REG ++ && CONST_INT_P (addr.offset) ++ && LISA_16BIT_OFFSET_P (INTVAL (addr.offset)) ++ && LISA_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); ++} ++ ++ ++/* Return the number of instructions needed to load constant X, ++ assuming that BASE_INSN_LENGTH is the length of one instruction. ++ Return 0 if X isn't a valid constant. */ ++ ++int ++loongarch_const_insns (rtx x) ++{ ++ struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; ++ enum loongarch_symbol_type symbol_type; ++ rtx offset; ++ ++ switch (GET_CODE (x)) ++ { ++ case CONST_INT: ++ return loongarch_build_integer (codes, INTVAL (x)); ++ ++ case CONST_VECTOR: ++ if ((ISA_HAS_LSX || ISA_HAS_LASX) ++ && loongarch_const_vector_same_int_p (x, GET_MODE (x), -512, 511)) ++ return 1; ++ /* Fall through. */ ++ case CONST_DOUBLE: ++ /* Allow zeros for normal mode, where we can use $0. */ ++ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0; ++ ++ case CONST: ++ /* See if we can refer to X directly. 
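++   (i.e. whether the relocations can express the symbol together with
++   its offset, in which case no extra synthesis is needed).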
*/ ++ if (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type)) ++ return loongarch_symbol_insns (symbol_type, MAX_MACHINE_MODE); ++ ++ /* Otherwise try splitting the constant into a base and offset. ++ If the offset is a 16-bit value, we can load the base address ++ into a register and then use (D)ADDIU to add in the offset. ++ If the offset is larger, we can load the base and offset ++ into separate registers and add them together with (D)ADDU. ++ However, the latter is only possible before reload; during ++ and after reload, we must have the option of forcing the ++ constant into the pool instead. */ ++ split_const (x, &x, &offset); ++ if (offset != 0) ++ { ++ int n = loongarch_const_insns (x); ++ if (n != 0) ++ { ++ if (SMALL_INT (offset)) ++ return n + 1; ++ else if (!targetm.cannot_force_const_mem (GET_MODE (x), x)) ++ return n + 1 + loongarch_build_integer (codes, INTVAL (offset)); ++ } ++ } ++ return 0; ++ ++ case SYMBOL_REF: ++ case LABEL_REF: ++ return loongarch_symbol_insns (loongarch_classify_symbol (x, SYMBOL_CONTEXT_LEA), ++ MAX_MACHINE_MODE); ++ ++ default: ++ return 0; ++ } ++} ++ ++/* X is a doubleword constant that can be handled by splitting it into ++ two words and loading each word separately. Return the number of ++ instructions required to do this, assuming that BASE_INSN_LENGTH ++ is the length of one instruction. */ ++ ++int ++loongarch_split_const_insns (rtx x) ++{ ++ unsigned int low, high; ++ ++ low = loongarch_const_insns (loongarch_subword (x, false)); ++ high = loongarch_const_insns (loongarch_subword (x, true)); ++ gcc_assert (low > 0 && high > 0); ++ return low + high; ++} ++ ++/* Return one word of 128-bit value OP, taking into account the fixed ++ endianness of certain registers. BYTE selects from the byte address. */ ++ ++rtx ++loongarch_subword_at_byte (rtx op, unsigned int byte) ++{ ++ machine_mode mode; ++ ++ mode = GET_MODE (op); ++ if (mode == VOIDmode) ++ mode = TImode; ++ ++ gcc_assert (!FP_REG_RTX_P (op)); ++ ++ if (MEM_P (op)) ++ return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); ++ ++ return simplify_gen_subreg (word_mode, op, mode, byte); ++} ++ ++/* Return the number of instructions needed to implement INSN, ++ given that it loads from or stores to MEM. Assume that ++ BASE_INSN_LENGTH is the length of one instruction. */ ++ ++int ++loongarch_load_store_insns (rtx mem, rtx_insn *insn) ++{ ++ machine_mode mode; ++ bool might_split_p; ++ rtx set; ++ ++ gcc_assert (MEM_P (mem)); ++ mode = GET_MODE (mem); ++ ++ /* Try to prove that INSN does not need to be split. */ ++ might_split_p = GET_MODE_SIZE (mode) > UNITS_PER_WORD; ++ if (might_split_p) ++ { ++ set = single_set (insn); ++ if (set && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn)) ++ might_split_p = false; ++ } ++ ++ return loongarch_address_insns (XEXP (mem, 0), mode, might_split_p); ++} ++ ++/* Return the number of instructions needed for an integer division, ++ assuming that BASE_INSN_LENGTH is the length of one instruction. */ ++ ++int ++loongarch_idiv_insns (machine_mode mode) ++{ ++ int count; ++ ++ count = 1; ++ if (TARGET_CHECK_ZERO_DIV) ++ count += 2; ++ ++ return count; ++} ++ ++ ++/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */ ++ ++void ++loongarch_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1) ++{ ++ emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (code, GET_MODE (target), ++ op0, op1))); ++} ++ ++/* Compute (CODE OP0 OP1) and store the result in a new register ++ of mode MODE. 
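++   (For example, loongarch_force_binary (SImode, PLUS, a, b) emits an
++   addition of a and b into a fresh pseudo.)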
Return that new register.  */
++
++static rtx
++loongarch_force_binary (machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
++{
++  rtx reg;
++
++  reg = gen_reg_rtx (mode);
++  loongarch_emit_binary (code, reg, op0, op1);
++  return reg;
++}
++
++/* Copy VALUE to a register and return that register.  If new pseudos
++   are allowed, copy it into a new register, otherwise use DEST.  */
++
++static rtx
++loongarch_force_temporary (rtx dest, rtx value)
++{
++  if (can_create_pseudo_p ())
++    return force_reg (Pmode, value);
++  else
++    {
++      loongarch_emit_move (dest, value);
++      return dest;
++    }
++}
++
++
++/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
++   then add CONST_INT OFFSET to the result.  */
++
++static rtx
++loongarch_unspec_address_offset (rtx base, rtx offset,
++				 enum loongarch_symbol_type symbol_type)
++{
++  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
++			 UNSPEC_ADDRESS_FIRST + symbol_type);
++  if (offset != const0_rtx)
++    base = gen_rtx_PLUS (Pmode, base, offset);
++  return gen_rtx_CONST (Pmode, base);
++}
++
++/* Return an UNSPEC address with underlying address ADDRESS and symbol
++   type SYMBOL_TYPE.  */
++
++rtx
++loongarch_unspec_address (rtx address, enum loongarch_symbol_type symbol_type)
++{
++  rtx base, offset;
++
++  split_const (address, &base, &offset);
++  return loongarch_unspec_address_offset (base, offset, symbol_type);
++}
++
++/* If OP is an UNSPEC address, return the address to which it refers,
++   otherwise return OP itself.  */
++
++rtx
++loongarch_strip_unspec_address (rtx op)
++{
++  rtx base, offset;
++
++  split_const (op, &base, &offset);
++  if (UNSPEC_ADDRESS_P (base))
++    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
++  return op;
++}
++
++
++/* Return a base register that holds pic_offset_table_rtx.
++   TEMP, if nonnull, is a scratch Pmode base register.  */
++
++rtx
++loongarch_pic_base_register (rtx temp)
++{
++  return pic_offset_table_rtx;
++}
++
++/* If SRC is the RHS of a load_call insn, return the underlying symbol
++   reference.  Return NULL_RTX otherwise.  */
++
++static rtx
++loongarch_strip_unspec_call (rtx src)
++{
++  if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
++    return loongarch_strip_unspec_address (XVECEXP (src, 0, 1));
++  return NULL_RTX;
++}
++
++/* Return a legitimate address for REG + OFFSET.  TEMP is as for
++   loongarch_force_temporary; it is only needed when OFFSET is not a
++   SMALL_OPERAND.  */
++
++static rtx
++loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
++{
++  if (!SMALL_OPERAND (offset))
++    {
++      rtx high;
++
++      /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
++	 The addition inside the macro CONST_HIGH_PART may cause an
++	 overflow, so we need to force a sign-extension check.  */
++      high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
++      offset = CONST_LOW_PART (offset);
++      high = loongarch_force_temporary (temp, high);
++      reg = loongarch_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
++    }
++  return plus_constant (Pmode, reg, offset);
++}
++
++/* The __tls_get_addr symbol.  */
++static GTY(()) rtx loongarch_tls_symbol;
++
++/* Load an entry from the GOT for a TLS GD access.  */
++
++static rtx loongarch_got_load_tls_gd (rtx dest, rtx sym)
++{
++  if (Pmode == DImode)
++    return gen_got_load_tls_gddi (dest, sym);
++  else
++    return gen_got_load_tls_gdsi (dest, sym);
++}
++
++/* Load an entry from the GOT for a TLS LD access. 
*/ ++ ++static rtx loongarch_got_load_tls_ld (rtx dest, rtx sym) ++{ ++ if (Pmode == DImode) ++ return gen_got_load_tls_lddi (dest, sym); ++ else ++ return gen_got_load_tls_ldsi (dest, sym); ++} ++ ++ ++/* Load an entry from the GOT for a TLS IE access. */ ++ ++static rtx loongarch_got_load_tls_ie (rtx dest, rtx sym) ++{ ++ if (Pmode == DImode) ++ return gen_got_load_tls_iedi (dest, sym); ++ else ++ return gen_got_load_tls_iesi (dest, sym); ++} ++ ++/* Add in the thread pointer for a TLS LE access. */ ++ ++static rtx loongarch_got_load_tls_le (rtx dest, rtx sym) ++{ ++ if (Pmode == DImode) ++ return gen_got_load_tls_ledi (dest, sym); ++ else ++ return gen_got_load_tls_lesi (dest, sym); ++} ++ ++/* Return an instruction sequence that calls __tls_get_addr. SYM is ++ the TLS symbol we are referencing and TYPE is the symbol type to use ++ (either global dynamic or local dynamic). V0 is an RTX for the ++ return value location. */ ++ ++static rtx_insn * ++loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) ++{ ++ rtx loc, a0; ++ rtx_insn *insn; ++ ++ a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST); ++ ++ if (!loongarch_tls_symbol) ++ loongarch_tls_symbol = init_one_libfunc ("__tls_get_addr"); ++ ++ loc = loongarch_unspec_address (sym, type); ++ ++ start_sequence (); ++ ++ if (type == SYMBOL_TLSLDM) ++ emit_insn (loongarch_got_load_tls_ld (a0, loc)); ++ else if (type == SYMBOL_TLSGD) ++ emit_insn (loongarch_got_load_tls_gd (a0, loc)); ++ else ++ gcc_unreachable (); ++ ++ insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, const0_rtx)); ++ RTL_CONST_CALL_P (insn) = 1; ++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); ++ insn = get_insns (); ++ ++ end_sequence (); ++ ++ return insn; ++} ++ ++/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return ++ its address. The return value will be both a valid address and a valid ++ SET_SRC (either a REG or a LO_SUM). */ ++ ++static rtx ++loongarch_legitimize_tls_address (rtx loc) ++{ ++ rtx dest, tp, tmp; ++ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc); ++ rtx_insn *insn; ++ ++ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. 
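++   The switch below still handles all four models: GD and LD go through
++   __tls_get_addr, IE loads the tp-relative offset from the GOT and adds
++   it to $tp, and LE adds a link-time constant to $tp directly.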
*/ ++ #if 0 ++ if (!flag_pic) ++ model = TLS_MODEL_LOCAL_EXEC; ++ #endif ++ ++ switch (model) ++ { ++ case TLS_MODEL_LOCAL_DYNAMIC: ++ tmp = gen_rtx_REG (Pmode, GP_RETURN); ++ dest = gen_reg_rtx (Pmode); ++ insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSLDM, tmp); ++ emit_libcall_block (insn, dest, tmp, loc); ++ break; ++ ++ case TLS_MODEL_GLOBAL_DYNAMIC: ++ tmp = gen_rtx_REG (Pmode, GP_RETURN); ++ dest = gen_reg_rtx (Pmode); ++ insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSGD, tmp); ++ emit_libcall_block (insn, dest, tmp, loc); ++ break; ++ ++ case TLS_MODEL_INITIAL_EXEC: ++ /* la.tls.ie; tp-relative add */ ++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); ++ tmp = gen_reg_rtx (Pmode); ++ emit_insn (loongarch_got_load_tls_ie (tmp, loc)); ++ dest = gen_reg_rtx (Pmode); ++ emit_insn (gen_add3_insn (dest, tmp, tp)); ++ break; ++ ++ case TLS_MODEL_LOCAL_EXEC: ++ /* la.tls.le; tp-relative add */ ++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); ++ tmp = gen_reg_rtx (Pmode); ++ emit_insn (loongarch_got_load_tls_le (tmp, loc)); ++ dest = gen_reg_rtx (Pmode); ++ emit_insn (gen_add3_insn (dest, tmp, tp)); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ return dest; ++} ++ ++rtx ++loongarch_legitimize_call_address (rtx addr) ++{ ++ if (!call_insn_operand (addr, VOIDmode)) ++ { ++ rtx reg = gen_reg_rtx (Pmode); ++ loongarch_emit_move (reg, addr); ++ return reg; ++ } ++ return addr; ++} ++ ++/* If X is not a valid address for mode MODE, force it into a register. */ ++ ++static rtx ++loongarch_force_address (rtx x, machine_mode mode) ++{ ++ if (!loongarch_legitimate_address_p (mode, x, false)) ++ x = force_reg (Pmode, x); ++ return x; ++} ++ ++/* This function is used to implement LEGITIMIZE_ADDRESS. If X can ++ be legitimized in a way that the generic machinery might not expect, ++ return a new address, otherwise return NULL. MODE is the mode of ++ the memory being accessed. */ ++ ++static rtx ++loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, ++ machine_mode mode) ++{ ++ rtx base, addr; ++ HOST_WIDE_INT offset; ++ ++ if (loongarch_tls_symbol_p (x)) ++ return loongarch_legitimize_tls_address (x); ++ ++ /* Handle BASE + OFFSET using loongarch_add_offset. */ ++ loongarch_split_plus (x, &base, &offset); ++ if (offset != 0) ++ { ++ if (!loongarch_valid_base_register_p (base, mode, false)) ++ base = copy_to_mode_reg (Pmode, base); ++ addr = loongarch_add_offset (NULL, base, offset); ++ return loongarch_force_address (addr, mode); ++ } ++ ++ return x; ++} ++ ++/* Load VALUE into DEST. TEMP is as for loongarch_force_temporary. */ ++ ++void ++loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value) ++{ ++ struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; ++ machine_mode mode; ++ unsigned int i, num_ops; ++ rtx x; ++ ++ mode = GET_MODE (dest); ++ num_ops = loongarch_build_integer (codes, value); ++ ++ /* Apply each binary operation to X. Invariant: X is a legitimate ++ source operand for a SET pattern. 
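++   For example, a 32-bit constant such as 0x12345678 typically becomes a
++   LU12I.W of the upper bits (the initial SET below) followed by an IOR
++   of the low 12 bits.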
*/ ++ x = GEN_INT (codes[0].value); ++ for (i = 1; i < num_ops; i++) ++ { ++ if (!can_create_pseudo_p ()) ++ { ++ emit_insn (gen_rtx_SET (temp, x)); ++ x = temp; ++ } ++ else ++ x = force_reg (mode, x); ++ switch (codes[i].method) ++ { ++ case METHOD_NORMAL: ++ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value)); ++ break; ++ case METHOD_LU32I: ++ emit_insn (gen_rtx_SET (x, gen_rtx_IOR (DImode, ++ gen_rtx_ZERO_EXTEND (DImode, ++ gen_rtx_SUBREG (SImode, x, 0)), ++ GEN_INT (codes[i].value)))); ++ break; ++ case METHOD_LU52I: ++ emit_insn (gen_lu52i_d (x, x, ++ GEN_INT (0xfffffffffffff), ++ GEN_INT (codes[i].value))); ++ break; ++ case METHOD_INSV: ++ emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, ++ x, ++ GEN_INT (20), ++ GEN_INT (32)), ++ gen_rtx_REG (DImode, 0))); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ emit_insn (gen_rtx_SET (dest, x)); ++} ++ ++/* Subroutine of loongarch_legitimize_move. Move constant SRC into register ++ DEST given that SRC satisfies immediate_operand but doesn't satisfy ++ move_operand. */ ++ ++static void ++loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) ++{ ++ rtx base, offset; ++ ++ /* Split moves of big integers into smaller pieces. */ ++ if (splittable_const_int_operand (src, mode)) ++ { ++ loongarch_move_integer (dest, dest, INTVAL (src)); ++ return; ++ } ++ ++ /* Generate the appropriate access sequences for TLS symbols. */ ++ if (loongarch_tls_symbol_p (src)) ++ { ++ loongarch_emit_move (dest, loongarch_legitimize_tls_address (src)); ++ return; ++ } ++ ++ /* If we have (const (plus symbol offset)), and that expression cannot ++ be forced into memory, load the symbol first and add in the offset. ++ prefer to do this even if the constant _can_ be forced into memory, ++ as it usually produces better code. */ ++ split_const (src, &base, &offset); ++ if (offset != const0_rtx ++ && (targetm.cannot_force_const_mem (mode, src) ++ || (can_create_pseudo_p ()))) ++ { ++ base = loongarch_force_temporary (dest, base); ++ loongarch_emit_move (dest, loongarch_add_offset (NULL, base, INTVAL (offset))); ++ return; ++ } ++ ++ src = force_const_mem (mode, src); ++ ++ loongarch_emit_move (dest, src); ++} ++ ++/* If (set DEST SRC) is not a valid move instruction, emit an equivalent ++ sequence that is valid. */ ++ ++bool ++loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) ++{ ++ ++ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode)) ++ { ++ loongarch_emit_move (dest, force_reg (mode, src)); ++ return true; ++ } ++ ++ /* Both src and dest are non-registers; one special case is supported where ++ the source is (const_int 0) and the store can source the zero register. ++ LSX and lasx are never able to source the zero register directly in ++ memory operations. */ ++ if (!register_operand (dest, mode) ++ && !register_operand (src, mode) ++ && (!const_0_operand (src, mode) ++ || LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))) ++ { ++ loongarch_emit_move (dest, force_reg (mode, src)); ++ return true; ++ } ++ ++ /* We need to deal with constants that would be legitimate ++ immediate_operands but aren't legitimate move_operands. 
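++   (A multi-instruction CONST_INT, for instance, satisfies
++   immediate_operand yet has to be synthesized piecewise by
++   loongarch_legitimize_const_move.)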
*/
++  if (CONSTANT_P (src) && !move_operand (src, mode))
++    {
++      loongarch_legitimize_const_move (mode, dest, src);
++      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
++      return true;
++    }
++
++  if ((GET_CODE (src) == SYMBOL_REF || GET_CODE (src) == LABEL_REF)
++      && symbolic_operand (src, VOIDmode)
++      && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME))
++    {
++      rtx temp = gen_reg_rtx (GET_MODE (dest));
++      rtx x = gen_rtx_UNSPEC_VOLATILE (GET_MODE (dest), gen_rtvec (1, src),
++				       UNSPECV_MOVE_EXTREME);
++      temp = gen_rtx_USE (VOIDmode, temp);
++      temp = gen_rtx_PARALLEL (VOIDmode,
++			       gen_rtvec (2, gen_rtx_SET (dest, x), temp));
++      emit_insn (temp);
++      return true;
++    }
++
++  return false;
++}
++
++/* Return true if OP refers to small data symbols directly, not through
++   a LO_SUM.  CONTEXT is the context in which X appears.  */
++
++static int
++loongarch_small_data_pattern_1 (rtx x, enum loongarch_symbol_context context)
++{
++  subrtx_var_iterator::array_type array;
++  FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
++    {
++      rtx x = *iter;
++
++      /* Ignore things like "g" constraints in asms.  We make no particular
++	 guarantee about which symbolic constants are acceptable as asm
++	 operands versus which must be forced into a GPR.  */
++      if (GET_CODE (x) == ASM_OPERANDS)
++	iter.skip_subrtxes ();
++      else if (MEM_P (x))
++	{
++	  if (loongarch_small_data_pattern_1 (XEXP (x, 0), SYMBOL_CONTEXT_MEM))
++	    return true;
++	  iter.skip_subrtxes ();
++	}
++    }
++  return false;
++}
++
++/* Return true if OP refers to small data symbols directly, not through
++   a LO_SUM.  */
++
++bool
++loongarch_small_data_pattern_p (rtx op)
++{
++  return loongarch_small_data_pattern_1 (op, SYMBOL_CONTEXT_LEA);
++}
++
++/* Rewrite *LOC so that it refers to small data using explicit
++   relocations.  CONTEXT is the context in which *LOC appears.  */
++
++static void
++loongarch_rewrite_small_data_1 (rtx *loc, enum loongarch_symbol_context context)
++{
++  subrtx_ptr_iterator::array_type array;
++  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
++    {
++      rtx *loc = *iter;
++      if (MEM_P (*loc))
++	{
++	  loongarch_rewrite_small_data_1 (&XEXP (*loc, 0), SYMBOL_CONTEXT_MEM);
++	  iter.skip_subrtxes ();
++	}
++    }
++}
++
++/* Rewrite instruction pattern PATTERN so that it refers to small data
++   using explicit relocations.  */
++
++rtx
++loongarch_rewrite_small_data (rtx pattern)
++{
++  pattern = copy_insn (pattern);
++  loongarch_rewrite_small_data_1 (&pattern, SYMBOL_CONTEXT_LEA);
++  return pattern;
++}
++
++/* The cost of loading values from the constant pool.  It should be
++   larger than the cost of any constant we want to synthesize inline.  */
++#define CONSTANT_POOL_COST COSTS_N_INSNS (8)
++
++/* Return true if there is an instruction that implements CODE
++   and if that instruction accepts X as an immediate operand.  */
++
++static int
++loongarch_immediate_operand_p (int code, HOST_WIDE_INT x)
++{
++  switch (code)
++    {
++    case ASHIFT:
++    case ASHIFTRT:
++    case LSHIFTRT:
++      /* All shift counts are truncated to a valid constant.  */
++      return true;
++
++    case ROTATE:
++    case ROTATERT:
++      /* Likewise rotates, if the target supports rotates at all.  */
++      return true;
++
++    case AND:
++    case IOR:
++    case XOR:
++      /* These instructions take 12-bit unsigned immediates.  */
++      return SMALL_OPERAND_UNSIGNED (x);
++
++    case PLUS:
++    case LT:
++    case LTU:
++      /* These instructions take 12-bit signed immediates. 
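++   That is, the range [-2048, 2047]: an addend of 2047 is encodable
++   while 2048 must first be loaded into a register.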
*/ ++ return SMALL_OPERAND (x); ++ ++ case EQ: ++ case NE: ++ case GT: ++ case GTU: ++ /* The "immediate" forms of these instructions are really ++ implemented as comparisons with register 0. */ ++ return x == 0; ++ ++ case GE: ++ case GEU: ++ /* Likewise, meaning that the only valid immediate operand is 1. */ ++ return x == 1; ++ ++ case LE: ++ /* We add 1 to the immediate and use SLT. */ ++ return SMALL_OPERAND (x + 1); ++ ++ case LEU: ++ /* Likewise SLTU, but reject the always-true case. */ ++ return SMALL_OPERAND (x + 1) && x + 1 != 0; ++ ++ case SIGN_EXTRACT: ++ case ZERO_EXTRACT: ++ /* The bit position and size are immediate operands. */ ++ return 1; ++ ++ default: ++ /* By default assume that $0 can be used for 0. */ ++ return x == 0; ++ } ++} ++ ++/* Return the cost of binary operation X, given that the instruction ++ sequence for a word-sized or smaller operation has cost SINGLE_COST ++ and that the sequence of a double-word operation has cost DOUBLE_COST. ++ If SPEED is true, optimize for speed otherwise optimize for size. */ ++ ++static int ++loongarch_binary_cost (rtx x, int single_cost, int double_cost, bool speed) ++{ ++ int cost; ++ ++ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2) ++ cost = double_cost; ++ else ++ cost = single_cost; ++ return (cost ++ + set_src_cost (XEXP (x, 0), GET_MODE (x), speed) ++ + rtx_cost (XEXP (x, 1), GET_MODE (x), GET_CODE (x), 1, speed)); ++} ++ ++/* Return the cost of floating-point multiplications of mode MODE. */ ++ ++static int ++loongarch_fp_mult_cost (machine_mode mode) ++{ ++ return mode == DFmode ? loongarch_cost->fp_mult_df : loongarch_cost->fp_mult_sf; ++} ++ ++/* Return the cost of floating-point divisions of mode MODE. */ ++ ++static int ++loongarch_fp_div_cost (machine_mode mode) ++{ ++ return mode == DFmode ? loongarch_cost->fp_div_df : loongarch_cost->fp_div_sf; ++} ++ ++/* Return the cost of sign-extending OP to mode MODE, not including the ++ cost of OP itself. */ ++ ++static int ++loongarch_sign_extend_cost (machine_mode mode, rtx op) ++{ ++ if (MEM_P (op)) ++ /* Extended loads are as cheap as unextended ones. */ ++ return 0; ++ ++ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ++ /* A sign extension from SImode to DImode in 64-bit mode is free. */ ++ return 0; ++ ++ return COSTS_N_INSNS (1); ++} ++ ++/* Return the cost of zero-extending OP to mode MODE, not including the ++ cost of OP itself. */ ++ ++static int ++loongarch_zero_extend_cost (machine_mode mode, rtx op) ++{ ++ if (MEM_P (op)) ++ /* Extended loads are as cheap as unextended ones. */ ++ return 0; ++ ++ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ++ /* We need a shift left by 32 bits and a shift right by 32 bits. */ ++ return COSTS_N_INSNS (2); ++ ++ /* We can use ANDI. */ ++ return COSTS_N_INSNS (1); ++} ++ ++/* Return the cost of moving between two registers of mode MODE, ++ assuming that the move will be in pieces of at most UNITS bytes. */ ++ ++static int ++loongarch_set_reg_reg_piece_cost (machine_mode mode, unsigned int units) ++{ ++ return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units); ++} ++ ++/* Return the cost of moving between two registers of mode MODE. 
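++   For example, on a 64-bit target a TImode GPR-to-GPR move counts as
++   two word moves.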
*/ ++ ++static int ++loongarch_set_reg_reg_cost (machine_mode mode) ++{ ++ switch (GET_MODE_CLASS (mode)) ++ { ++ case MODE_FCC: ++ return loongarch_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (FCCmode)); ++ ++ case MODE_FLOAT: ++ case MODE_COMPLEX_FLOAT: ++ case MODE_VECTOR_FLOAT: ++ if (TARGET_HARD_FLOAT) ++ return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE); ++ /* Fall through */ ++ ++ default: ++ return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_WORD); ++ } ++} ++ ++/* Implement TARGET_RTX_COSTS. */ ++ ++static bool ++loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, ++ int opno ATTRIBUTE_UNUSED, int *total, bool speed) ++{ ++ int code = GET_CODE (x); ++ bool float_mode_p = FLOAT_MODE_P (mode); ++ int cost; ++ rtx addr; ++ ++ /* The cost of a COMPARE is hard to define for LARCH. COMPAREs don't ++ appear in the instruction stream, and the cost of a comparison is ++ really the cost of the branch or scc condition. At the time of ++ writing, GCC only uses an explicit outer COMPARE code when optabs ++ is testing whether a constant is expensive enough to force into a ++ register. We want optabs to pass such constants through the LARCH ++ expanders instead, so make all constants very cheap here. */ ++ if (outer_code == COMPARE) ++ { ++ gcc_assert (CONSTANT_P (x)); ++ *total = 0; ++ return true; ++ } ++ ++ switch (code) ++ { ++ case CONST_INT: ++ /* Treat *clear_upper32-style ANDs as having zero cost in the ++ second operand. The cost is entirely in the first operand. ++ ++ ??? This is needed because we would otherwise try to CSE ++ the constant operand. Although that's the right thing for ++ instructions that continue to be a register operation throughout ++ compilation, it is disastrous for instructions that could ++ later be converted into a memory operation. */ ++ if (TARGET_64BIT ++ && outer_code == AND ++ && UINTVAL (x) == 0xffffffff) ++ { ++ *total = 0; ++ return true; ++ } ++ ++ /* When not optimizing for size, we care more about the cost ++ of hot code, and hot code is often in a loop. If a constant ++ operand needs to be forced into a register, we will often be ++ able to hoist the constant load out of the loop, so the load ++ should not contribute to the cost. */ ++ if (speed || loongarch_immediate_operand_p (outer_code, INTVAL (x))) ++ { ++ *total = 0; ++ return true; ++ } ++ /* Fall through. */ ++ ++ case CONST: ++ case SYMBOL_REF: ++ case LABEL_REF: ++ case CONST_DOUBLE: ++ if (force_to_mem_operand (x, VOIDmode)) ++ { ++ *total = COSTS_N_INSNS (1); ++ return true; ++ } ++ cost = loongarch_const_insns (x); ++ if (cost > 0) ++ { ++ /* If the constant is likely to be stored in a GPR, SETs of ++ single-insn constants are as cheap as register sets; we ++ never want to CSE them. ++ ++ Don't reduce the cost of storing a floating-point zero in ++ FPRs. If we have a zero in an FPR for other reasons, we ++ can get better cfg-cleanup and delayed-branch results by ++ using it consistently, rather than using $0 sometimes and ++ an FPR at other times. Also, moves between floating-point ++ registers are sometimes cheaper than MOVGR2FR.W/MOVGR2FR.D $0. */ ++ if (cost == 1 ++ && outer_code == SET ++ && !(float_mode_p && TARGET_HARD_FLOAT)) ++ cost = 0; ++ /* When code loads a constant N>1 times, we rarely ++ want to CSE the constant itself. It is usually better to ++ have N copies of the last operation in the sequence and one ++ shared copy of the other operations. 
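++   (With a two-insn LU12I.W + IOR constant used N times, say, it is
++   usually better to share the LU12I.W result and keep N copies of
++   the IOR.)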
++ ++ Also, if we have a CONST_INT, we don't know whether it is ++ for a word or doubleword operation, so we cannot rely on ++ the result of loongarch_build_integer. */ ++ else if ((outer_code == SET || GET_MODE (x) == VOIDmode)) ++ cost = 1; ++ *total = COSTS_N_INSNS (cost); ++ return true; ++ } ++ /* The value will need to be fetched from the constant pool. */ ++ *total = CONSTANT_POOL_COST; ++ return true; ++ ++ case MEM: ++ /* If the address is legitimate, return the number of ++ instructions it needs. */ ++ addr = XEXP (x, 0); ++ cost = loongarch_address_insns (addr, mode, true); ++ if (cost > 0) ++ { ++ *total = COSTS_N_INSNS (cost + 1); ++ return true; ++ } ++ /* Check for a scaled indexed address. */ ++ if (loongarch_lx_address_p (addr, mode)) ++ { ++ *total = COSTS_N_INSNS (2); ++ return true; ++ } ++ /* Otherwise use the default handling. */ ++ return false; ++ ++ case FFS: ++ *total = COSTS_N_INSNS (6); ++ return false; ++ ++ case NOT: ++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1); ++ return false; ++ ++ case AND: ++ /* Check for a *clear_upper32 pattern and treat it like a zero ++ extension. See the pattern's comment for details. */ ++ if (TARGET_64BIT ++ && mode == DImode ++ && CONST_INT_P (XEXP (x, 1)) ++ && UINTVAL (XEXP (x, 1)) == 0xffffffff) ++ { ++ *total = (loongarch_zero_extend_cost (mode, XEXP (x, 0)) ++ + set_src_cost (XEXP (x, 0), mode, speed)); ++ return true; ++ } ++ /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in ++ a single instruction. */ ++ if (GET_CODE (XEXP (x, 0)) == NOT ++ && GET_CODE (XEXP (x, 1)) == NOT) ++ { ++ cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1; ++ *total = (COSTS_N_INSNS (cost) ++ + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed) ++ + set_src_cost (XEXP (XEXP (x, 1), 0), mode, speed)); ++ return true; ++ } ++ ++ /* Fall through. */ ++ ++ case IOR: ++ case XOR: ++ /* Double-word operations use two single-word operations. */ ++ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2), ++ speed); ++ return true; ++ ++ case ASHIFT: ++ case ASHIFTRT: ++ case LSHIFTRT: ++ case ROTATE: ++ case ROTATERT: ++ if (CONSTANT_P (XEXP (x, 1))) ++ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), ++ speed); ++ else ++ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12), ++ speed); ++ return true; ++ ++ case ABS: ++ if (float_mode_p) ++ *total = loongarch_cost->fp_add; ++ else ++ *total = COSTS_N_INSNS (4); ++ return false; ++ ++ case LT: ++ case LTU: ++ case LE: ++ case LEU: ++ case GT: ++ case GTU: ++ case GE: ++ case GEU: ++ case EQ: ++ case NE: ++ case UNORDERED: ++ case LTGT: ++ case UNGE: ++ case UNGT: ++ case UNLE: ++ case UNLT: ++ /* Branch comparisons have VOIDmode, so use the first operand's ++ mode instead. */ ++ mode = GET_MODE (XEXP (x, 0)); ++ if (FLOAT_MODE_P (mode)) ++ { ++ *total = loongarch_cost->fp_add; ++ return false; ++ } ++ *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), ++ speed); ++ return true; ++ ++ case MINUS: ++ case PLUS: ++ if (float_mode_p) ++ { ++ *total = loongarch_cost->fp_add; ++ return false; ++ } ++ ++ /* If it's an add + mult (which is equivalent to shift left) and ++ it's immediate operand satisfies const_immlsa_operand predicate. 
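++   Such a (plus (mult r 4) s) form maps to a single LSA-style
++   shifted-add instruction.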
*/
++      if (((ISA_HAS_LSA && mode == SImode)
++	   || (ISA_HAS_DLSA && mode == DImode))
++	  && GET_CODE (XEXP (x, 0)) == MULT)
++	{
++	  rtx op2 = XEXP (XEXP (x, 0), 1);
++	  if (const_immlsa_operand (op2, mode))
++	    {
++	      *total = (COSTS_N_INSNS (1)
++			+ set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed)
++			+ set_src_cost (XEXP (x, 1), mode, speed));
++	      return true;
++	    }
++	}
++
++      /* Double-word operations require three single-word operations and
++	 an SLTU.  */
++      *total = loongarch_binary_cost (x, COSTS_N_INSNS (1),
++				      COSTS_N_INSNS (4),
++				      speed);
++      return true;
++
++    case NEG:
++      if (float_mode_p)
++	*total = loongarch_cost->fp_add;
++      else
++	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
++      return false;
++
++    case FMA:
++      *total = loongarch_fp_mult_cost (mode);
++      return false;
++
++    case MULT:
++      if (float_mode_p)
++	*total = loongarch_fp_mult_cost (mode);
++      else if (mode == DImode && !TARGET_64BIT)
++	/* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
++	   where the mulsidi3 always includes an MFHI and an MFLO.
++	   FIXME: is this still accurate for this target?  */
++	*total = (speed
++		  ? loongarch_cost->int_mult_si * 3 + 6
++		  : COSTS_N_INSNS (7));
++      else if (!speed)
++	*total = COSTS_N_INSNS (1) + 1;
++      else if (mode == DImode)
++	*total = loongarch_cost->int_mult_di;
++      else
++	*total = loongarch_cost->int_mult_si;
++      return false;
++
++    case DIV:
++      /* Check for a reciprocal.  */
++      if (float_mode_p
++	  && ISA_HAS_FP_RECIP_RSQRT (mode)
++	  && flag_unsafe_math_optimizations
++	  && XEXP (x, 0) == CONST1_RTX (mode))
++	{
++	  if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
++	    /* An rsqrta or rsqrtb pattern.  Count the
++	       division as being free.  */
++	    *total = set_src_cost (XEXP (x, 1), mode, speed);
++	  else
++	    *total = (loongarch_fp_div_cost (mode)
++		      + set_src_cost (XEXP (x, 1), mode, speed));
++	  return true;
++	}
++      /* Fall through.  */
++
++    case SQRT:
++    case MOD:
++      if (float_mode_p)
++	{
++	  *total = loongarch_fp_div_cost (mode);
++	  return false;
++	}
++      /* Fall through.  */
++
++    case UDIV:
++    case UMOD:
++      if (!speed)
++	{
++	  *total = COSTS_N_INSNS (loongarch_idiv_insns (mode));
++	}
++      else if (mode == DImode)
++	*total = loongarch_cost->int_div_di;
++      else
++	*total = loongarch_cost->int_div_si;
++      return false;
++
++    case SIGN_EXTEND:
++      *total = loongarch_sign_extend_cost (mode, XEXP (x, 0));
++      return false;
++
++    case ZERO_EXTEND:
++      *total = loongarch_zero_extend_cost (mode, XEXP (x, 0));
++      return false;
++
++    case TRUNCATE:
++      /* Costings for highpart multiplies.  Matching patterns of the form:
++
++	 (lshiftrt:DI (mult:DI (sign_extend:DI (...))
++			       (sign_extend:DI (...)))
++		      (const_int 32))  */
++      if ((GET_CODE (XEXP (x, 0)) == ASHIFTRT
++	   || GET_CODE (XEXP (x, 0)) == LSHIFTRT)
++	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
++	  && ((INTVAL (XEXP (XEXP (x, 0), 1)) == 32
++	       && GET_MODE (XEXP (x, 0)) == DImode)
++	      || (ISA_HAS_DMUL
++		  && INTVAL (XEXP (XEXP (x, 0), 1)) == 64
++		  && GET_MODE (XEXP (x, 0)) == TImode))
++	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
++	  && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND
++	       && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND)
++	      || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
++		  && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
++		      == ZERO_EXTEND))))
++	{
++	  if (!speed)
++	    *total = COSTS_N_INSNS (1) + 1;
++	  else if (mode == DImode)
++	    *total = loongarch_cost->int_mult_di;
++	  else
++	    *total = loongarch_cost->int_mult_si;
++
++	  /* Sign extension is free, zero extension costs for DImode when
++	     on a 64-bit core / when DMUL is present.  */
++	  for (int i = 0; i < 2; ++i)
++	    {
++	      rtx op = XEXP (XEXP (XEXP (x, 0), 0), i);
++	      if (ISA_HAS_DMUL
++		  && GET_CODE (op) == ZERO_EXTEND
++		  && GET_MODE (op) == DImode)
++		*total += rtx_cost (op, DImode, MULT, i, speed);
++	      else
++		*total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op),
++				    0, speed);
++	    }
++
++	  return true;
++	}
++      return false;
++
++    case FLOAT:
++    case UNSIGNED_FLOAT:
++    case FIX:
++    case FLOAT_EXTEND:
++    case FLOAT_TRUNCATE:
++      *total = loongarch_cost->fp_add;
++      return false;
++
++    case SET:
++      if (register_operand (SET_DEST (x), VOIDmode)
++	  && reg_or_0_operand (SET_SRC (x), VOIDmode))
++	{
++	  *total = loongarch_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
++	  return true;
++	}
++      return false;
++
++    default:
++      return false;
++    }
++}
++
++/* Vectorizer cost model implementation.  */
++
++/* Implement targetm.vectorize.builtin_vectorization_cost.  */
++
++static int
++loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
++				      tree vectype,
++				      int misalign ATTRIBUTE_UNUSED)
++{
++  unsigned elements;
++
++  switch (type_of_cost)
++    {
++    case scalar_stmt:
++    case scalar_load:
++    case vector_stmt:
++    case vector_load:
++    case vec_to_scalar:
++    case scalar_to_vec:
++    case cond_branch_not_taken:
++    case vec_perm:
++    case vec_promote_demote:
++    case scalar_store:
++    case vector_store:
++      return 1;
++
++    case unaligned_load:
++    case vector_gather_load:
++      return 2;
++
++    case unaligned_store:
++    case vector_scatter_store:
++      return 10;
++
++    case cond_branch_taken:
++      return 3;
++
++    case vec_construct:
++      elements = TYPE_VECTOR_SUBPARTS (vectype);
++      return elements / 2 + 1;
++
++    default:
++      gcc_unreachable ();
++    }
++}
++
++
++/* Implement TARGET_ADDRESS_COST.  */
++
++static int
++loongarch_address_cost (rtx addr, machine_mode mode,
++			addr_space_t as ATTRIBUTE_UNUSED,
++			bool speed ATTRIBUTE_UNUSED)
++{
++  return loongarch_address_insns (addr, mode, false);
++}
++
++
++/* Return one word of double-word value OP, taking into account the fixed
++   endianness of certain registers.  HIGH_P is true to select the high part,
++   false to select the low part.  */
++
++rtx
++loongarch_subword (rtx op, bool high_p)
++{
++  unsigned int byte, offset;
++  machine_mode mode;
++
++  mode = GET_MODE (op);
++  if (mode == VOIDmode)
++    mode = TARGET_64BIT ? TImode : DImode;
++
++  if (high_p)
++    byte = UNITS_PER_WORD;
++  else
++    byte = 0;
++
++  if (FP_REG_RTX_P (op))
++    {
++      /* Paired FPRs are always ordered little-endian.  */
++      offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? 
high_p : byte != 0); ++ return gen_rtx_REG (word_mode, REGNO (op) + offset); ++ } ++ ++ if (MEM_P (op)) ++ return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); ++ ++ return simplify_gen_subreg (word_mode, op, mode, byte); ++} ++ ++/* Return true if a move from SRC to DEST should be split into two. ++ SPLIT_TYPE describes the split condition. */ ++ ++bool ++loongarch_split_move_p (rtx dest, rtx src, enum loongarch_split_type split_type) ++{ ++ /* FPR-to-FPR moves can be done in a single instruction, if they're ++ allowed at all. */ ++ unsigned int size = GET_MODE_SIZE (GET_MODE (dest)); ++ if (size == 8 && FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) ++ return false; ++ ++ /* Check for floating-point loads and stores. */ ++ if (size == 8) ++ { ++ if (FP_REG_RTX_P (dest) && MEM_P (src)) ++ return false; ++ if (FP_REG_RTX_P (src) && MEM_P (dest)) ++ return false; ++ } ++ ++ /* Check if LSX moves need splitting. */ ++ if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ return loongarch_split_128bit_move_p (dest, src); ++ ++ /* Check if LASX moves need splitting. */ ++ if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ return loongarch_split_256bit_move_p (dest, src); ++ ++ /* Otherwise split all multiword moves. */ ++ return size > UNITS_PER_WORD; ++} ++ ++/* Split a move from SRC to DEST, given that loongarch_split_move_p holds. ++ SPLIT_TYPE describes the split condition. */ ++ ++void ++loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, rtx insn_) ++{ ++ rtx low_dest; ++ ++ gcc_checking_assert (loongarch_split_move_p (dest, src, split_type)); ++ if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ loongarch_split_128bit_move (dest, src); ++ else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) ++ loongarch_split_256bit_move (dest, src); ++ else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) ++ { ++ if (!TARGET_64BIT && GET_MODE (dest) == DImode) ++ emit_insn (gen_move_doubleword_fprdi (dest, src)); ++ else if (!TARGET_64BIT && GET_MODE (dest) == DFmode) ++ emit_insn (gen_move_doubleword_fprdf (dest, src)); ++ else if (TARGET_64BIT && GET_MODE (dest) == TFmode) ++ emit_insn (gen_move_doubleword_fprtf (dest, src)); ++ else ++ gcc_unreachable (); ++ } ++ else ++ { ++ /* The operation can be split into two normal moves. Decide in ++ which order to do them. */ ++ low_dest = loongarch_subword (dest, false); ++ if (REG_P (low_dest) ++ && reg_overlap_mentioned_p (low_dest, src)) ++ { ++ loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); ++ loongarch_emit_move (low_dest, loongarch_subword (src, false)); ++ } ++ else ++ { ++ loongarch_emit_move (low_dest, loongarch_subword (src, false)); ++ loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); ++ } ++ } ++ ++ /* This is a hack. See if the next insn uses DEST and if so, see if we ++ can forward SRC for DEST. This is most useful if the next insn is a ++ simple store. 
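++   (E.g. when the doubleword move is immediately followed by a store of
++   DEST, the store can often read SRC directly and the intermediate copy
++   becomes dead.)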
*/ ++ rtx_insn *insn = (rtx_insn *)insn_; ++ struct loongarch_address_info addr = {}; ++ if (insn) ++ { ++ rtx_insn *next = next_nonnote_nondebug_insn_bb (insn); ++ if (next) ++ { ++ rtx set = single_set (next); ++ if (set && SET_SRC (set) == dest) ++ { ++ if (MEM_P (src)) ++ { ++ rtx tmp = XEXP (src, 0); ++ loongarch_classify_address (&addr, tmp, GET_MODE (tmp), true); ++ if (addr.reg && !reg_overlap_mentioned_p (dest, addr.reg)) ++ validate_change (next, &SET_SRC (set), src, false); ++ } ++ else ++ validate_change (next, &SET_SRC (set), src, false); ++ } ++ } ++ } ++} ++ ++/* Return the split type for instruction INSN. */ ++ ++static enum loongarch_split_type ++loongarch_insn_split_type (rtx insn) ++{ ++ basic_block bb = BLOCK_FOR_INSN (insn); ++ if (bb) ++ { ++ if (optimize_bb_for_speed_p (bb)) ++ return SPLIT_FOR_SPEED; ++ else ++ return SPLIT_FOR_SIZE; ++ } ++ /* Once CFG information has been removed, we should trust the optimization ++ decisions made by previous passes and only split where necessary. */ ++ return SPLIT_IF_NECESSARY; ++} ++ ++/* Return true if a 128-bit move from SRC to DEST should be split. */ ++ ++bool ++loongarch_split_128bit_move_p (rtx dest, rtx src) ++{ ++ /* LSX-to-LSX moves can be done in a single instruction. */ ++ if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) ++ return false; ++ ++ /* Check for LSX loads and stores. */ ++ if (FP_REG_RTX_P (dest) && MEM_P (src)) ++ return false; ++ if (FP_REG_RTX_P (src) && MEM_P (dest)) ++ return false; ++ ++ /* Check for LSX set to an immediate const vector with valid replicated ++ element. */ ++ if (FP_REG_RTX_P (dest) ++ && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511)) ++ return false; ++ ++ /* Check for LSX load zero immediate. */ ++ if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))) ++ return false; ++ ++ return true; ++} ++ ++/* Return true if a 256-bit move from SRC to DEST should be split. */ ++ ++bool ++loongarch_split_256bit_move_p (rtx dest, rtx src) ++{ ++ /* LSX-to-LSX moves can be done in a single instruction. */ ++ if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) ++ return false; ++ ++ /* Check for LSX loads and stores. */ ++ if (FP_REG_RTX_P (dest) && MEM_P (src)) ++ return false; ++ if (FP_REG_RTX_P (src) && MEM_P (dest)) ++ return false; ++ ++ /* Check for LSX set to an immediate const vector with valid replicated ++ element. */ ++ if (FP_REG_RTX_P (dest) ++ && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511)) ++ return false; ++ ++ /* Check for LSX load zero immediate. */ ++ if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))) ++ return false; ++ ++ return true; ++} ++ ++/* Split a 128-bit move from SRC to DEST. 
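++   A GPR-to-LSX move becomes one vinsgr2vr.w/.d insert per word, and the
++   LSX-to-GPR direction uses vpickve2gr extracts, as below.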
*/ ++ ++void ++loongarch_split_128bit_move (rtx dest, rtx src) ++{ ++ int byte, index; ++ rtx low_dest, low_src, d, s; ++ ++ if (FP_REG_RTX_P (dest)) ++ { ++ gcc_assert (!MEM_P (src)); ++ ++ rtx new_dest = dest; ++ if (!TARGET_64BIT) ++ { ++ if (GET_MODE (dest) != V4SImode) ++ new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); ++ } ++ else ++ { ++ if (GET_MODE (dest) != V2DImode) ++ new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0); ++ } ++ ++ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD, index++) ++ { ++ s = loongarch_subword_at_byte (src, byte); ++ if (!TARGET_64BIT) ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest, ++ GEN_INT (1 << index))); ++ else ++ emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest, ++ GEN_INT (1 << index))); ++ } ++ } ++ else if (FP_REG_RTX_P (src)) ++ { ++ gcc_assert (!MEM_P (dest)); ++ ++ rtx new_src = src; ++ if (!TARGET_64BIT) ++ { ++ if (GET_MODE (src) != V4SImode) ++ new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0); ++ } ++ else ++ { ++ if (GET_MODE (src) != V2DImode) ++ new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0); ++ } ++ ++ for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD, index++) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ if (!TARGET_64BIT) ++ emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index))); ++ else ++ emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index))); ++ } ++ } ++ else ++ { ++ low_dest = loongarch_subword_at_byte (dest, 0); ++ low_src = loongarch_subword_at_byte (src, 0); ++ gcc_assert (REG_P (low_dest) && REG_P (low_src)); ++ /* Make sure the source register is not written before reading. */ ++ if (REGNO (low_dest) <= REGNO (low_src)) ++ { ++ for (byte = 0; byte < GET_MODE_SIZE (TImode); ++ byte += UNITS_PER_WORD) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ s = loongarch_subword_at_byte (src, byte); ++ loongarch_emit_move (d, s); ++ } ++ } ++ else ++ { ++ for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0; ++ byte -= UNITS_PER_WORD) ++ { ++ d = loongarch_subword_at_byte (dest, byte); ++ s = loongarch_subword_at_byte (src, byte); ++ loongarch_emit_move (d, s); ++ } ++ } ++ } ++} ++ ++/* Split a 256-bit move from SRC to DEST. 
++   */
++
++void
++loongarch_split_256bit_move (rtx dest, rtx src)
++{
++  int byte, index;
++  rtx low_dest, low_src, d, s;
++
++  if (FP_REG_RTX_P (dest))
++    {
++      gcc_assert (!MEM_P (src));
++
++      rtx new_dest = dest;
++      if (!TARGET_64BIT)
++        {
++          if (GET_MODE (dest) != V8SImode)
++            new_dest = simplify_gen_subreg (V8SImode, dest, GET_MODE (dest), 0);
++        }
++      else
++        {
++          if (GET_MODE (dest) != V4DImode)
++            new_dest = simplify_gen_subreg (V4DImode, dest, GET_MODE (dest), 0);
++        }
++
++      for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (dest));
++           byte += UNITS_PER_WORD, index++)
++        {
++          s = loongarch_subword_at_byte (src, byte);
++          if (!TARGET_64BIT)
++            emit_insn (gen_lasx_xvinsgr2vr_w (new_dest, s, new_dest,
++                                              GEN_INT (1 << index)));
++          else
++            emit_insn (gen_lasx_xvinsgr2vr_d (new_dest, s, new_dest,
++                                              GEN_INT (1 << index)));
++        }
++    }
++  else if (FP_REG_RTX_P (src))
++    {
++      gcc_assert (!MEM_P (dest));
++
++      rtx new_src = src;
++      if (!TARGET_64BIT)
++        {
++          if (GET_MODE (src) != V8SImode)
++            new_src = simplify_gen_subreg (V8SImode, src, GET_MODE (src), 0);
++        }
++      else
++        {
++          if (GET_MODE (src) != V4DImode)
++            new_src = simplify_gen_subreg (V4DImode, src, GET_MODE (src), 0);
++        }
++
++      for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (src));
++           byte += UNITS_PER_WORD, index++)
++        {
++          d = loongarch_subword_at_byte (dest, byte);
++          /* The element index runs over the full 256-bit mode, so the LASX
++             extract patterns must be used here, not the LSX ones.  */
++          if (!TARGET_64BIT)
++            emit_insn (gen_lasx_xvpickve2gr_w (d, new_src, GEN_INT (index)));
++          else
++            emit_insn (gen_lasx_xvpickve2gr_d (d, new_src, GEN_INT (index)));
++        }
++    }
++  else
++    {
++      low_dest = loongarch_subword_at_byte (dest, 0);
++      low_src = loongarch_subword_at_byte (src, 0);
++      gcc_assert (REG_P (low_dest) && REG_P (low_src));
++      /* Make sure the source register is not written before reading.  */
++      if (REGNO (low_dest) <= REGNO (low_src))
++        {
++          for (byte = 0; byte < GET_MODE_SIZE (GET_MODE (dest));
++               byte += UNITS_PER_WORD)
++            {
++              d = loongarch_subword_at_byte (dest, byte);
++              s = loongarch_subword_at_byte (src, byte);
++              loongarch_emit_move (d, s);
++            }
++        }
++      else
++        {
++          for (byte = GET_MODE_SIZE (GET_MODE (dest)) - UNITS_PER_WORD;
++               byte >= 0; byte -= UNITS_PER_WORD)
++            {
++              d = loongarch_subword_at_byte (dest, byte);
++              s = loongarch_subword_at_byte (src, byte);
++              loongarch_emit_move (d, s);
++            }
++        }
++    }
++}
++
++
++/* Split a COPY_S.D with operands DEST, SRC and INDEX.  GEN_FN is a
++   function used to generate subregs.  */
++
++void
++loongarch_split_lsx_copy_d (rtx dest, rtx src, rtx index,
++                            rtx (*gen_fn)(rtx, rtx, rtx))
++{
++  gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode)
++              || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode));
++
++  /* Note that low is always from the lower index, and high is always
++     from the higher index.  */
++  rtx low = loongarch_subword (dest, false);
++  rtx high = loongarch_subword (dest, true);
++  rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
++
++  emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2)));
++  emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1)));
++}
++
++/* Split an INSERT.D with operands DEST, SRC1, INDEX and SRC2.  */
++
++void
++loongarch_split_lsx_insert_d (rtx dest, rtx src1, rtx index, rtx src2)
++{
++  int i;
++  gcc_assert (GET_MODE (dest) == GET_MODE (src1));
++  gcc_assert ((GET_MODE (dest) == V2DImode
++               && (GET_MODE (src2) == DImode || src2 == const0_rtx))
++              || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode));
++
++  /* Note that low is always from the lower index, and high is always
++     from the higher index.
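++     For example, with INDEX == (1 << 1) (selecting the high DImode
++     element), i is 1 below and the two V4SI word inserts use the masks
++     1 << 2 and 1 << 3.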
*/ ++ rtx low = loongarch_subword (src2, false); ++ rtx high = loongarch_subword (src2, true); ++ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); ++ rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0); ++ i = exact_log2 (INTVAL (index)); ++ gcc_assert (i != -1); ++ ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, low, new_src1, ++ GEN_INT (1 << (i * 2)))); ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, ++ GEN_INT (1 << (i * 2 + 1)))); ++} ++ ++/* Split FILL.D. */ ++ ++void ++loongarch_split_lsx_fill_d (rtx dest, rtx src) ++{ ++ gcc_assert ((GET_MODE (dest) == V2DImode ++ && (GET_MODE (src) == DImode || src == const0_rtx)) ++ || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode)); ++ ++ /* Note that low is always from the lower index, and high is always ++ from the higher index. */ ++ rtx low, high; ++ if (src == const0_rtx) ++ { ++ low = src; ++ high = src; ++ } ++ else ++ { ++ low = loongarch_subword (src, false); ++ high = loongarch_subword (src, true); ++ } ++ rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0); ++ emit_insn (gen_lsx_vreplgr2vr_w (new_dest, low)); ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1))); ++ emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3))); ++} ++ ++/* Return true if a move from SRC to DEST in INSN should be split. */ ++ ++bool ++loongarch_split_move_insn_p (rtx dest, rtx src, rtx insn) ++{ ++ return loongarch_split_move_p (dest, src, loongarch_insn_split_type (insn)); ++} ++ ++/* Split a move from SRC to DEST in INSN, given that loongarch_split_move_insn_p ++ holds. */ ++ ++void ++loongarch_split_move_insn (rtx dest, rtx src, rtx insn) ++{ ++ loongarch_split_move (dest, src, loongarch_insn_split_type (insn), insn); ++} ++ ++ ++/* Forward declaration. Used below */ ++static HOST_WIDE_INT ++loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align); ++ ++/* Return the appropriate instructions to move SRC into DEST. Assume ++ that SRC is operand 1 and DEST is operand 0. */ ++ ++const char * ++loongarch_output_move (rtx dest, rtx src) ++{ ++ enum rtx_code dest_code = GET_CODE (dest); ++ enum rtx_code src_code = GET_CODE (src); ++ machine_mode mode = GET_MODE (dest); ++ bool dbl_p = (GET_MODE_SIZE (mode) == 8); ++ bool lsx_p = LSX_SUPPORTED_MODE_P (mode); ++ bool lasx_p = LASX_SUPPORTED_MODE_P (mode); ++ enum loongarch_symbol_type symbol_type; ++ ++ if (loongarch_split_move_p (dest, src, SPLIT_IF_NECESSARY)) ++ return "#"; ++ ++ if ((lsx_p || lasx_p) ++ && dest_code == REG && FP_REG_P (REGNO (dest)) ++ && src_code == CONST_VECTOR ++ && CONST_INT_P (CONST_VECTOR_ELT (src, 0))) ++ { ++ gcc_assert (loongarch_const_vector_same_int_p (src, mode, -512, 511)); ++ if(lsx_p || lasx_p) ++ { ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vrepli.%v0\t%w0,%E1"; ++ case 32: ++ return "xvrepli.%v0\t%u0,%E1"; ++ default: gcc_unreachable (); ++ } ++ } ++ } ++ ++ if ((src_code == REG && GP_REG_P (REGNO (src))) ++ || (src == CONST0_RTX (mode))) ++ { ++ if (dest_code == REG) ++ { ++ if (GP_REG_P (REGNO (dest))) ++ return "or\t%0,%z1,$r0"; ++ ++ if (FP_REG_P (REGNO (dest))) ++ { ++ if (lsx_p || lasx_p) ++ { ++ gcc_assert (src == CONST0_RTX (GET_MODE (src))); ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vrepli.b\t%w0,0"; ++ case 32: ++ return "xvrepli.b\t%u0,0"; ++ default: gcc_unreachable (); ++ } ++ } ++ ++ return dbl_p ? 
"movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1"; ++ } ++ } ++ if (dest_code == MEM) ++ { ++ rtx offset = XEXP (dest, 0); ++ if (GET_CODE(offset) == PLUS) ++ offset = XEXP(offset, 1); ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 1: return "st.b\t%z1,%0"; ++ case 2: return "st.h\t%z1,%0"; ++ case 4: ++ if (const_arith_operand (offset, Pmode)) ++ return "st.w\t%z1,%0"; ++ else ++ return "stptr.w\t%z1,%0"; ++ case 8: ++ if (const_arith_operand (offset, Pmode)) ++ return "st.d\t%z1,%0"; ++ else ++ return "stptr.d\t%z1,%0"; ++ default: gcc_unreachable (); ++ } ++ } ++ } ++ if (dest_code == REG && GP_REG_P (REGNO (dest))) ++ { ++ if (src_code == REG) ++ { ++ if (FP_REG_P (REGNO (src))) ++ { ++ gcc_assert (!lsx_p); ++ return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; ++ } ++ } ++ ++ if (src_code == MEM) ++ { ++ rtx offset = XEXP (src, 0); ++ if (GET_CODE(offset) == PLUS) ++ offset = XEXP(offset, 1); ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 1: return "ld.bu\t%0,%1"; ++ case 2: return "ld.hu\t%0,%1"; ++ case 4: ++ if (const_arith_operand (offset, Pmode)) ++ return "ld.w\t%0,%1"; ++ else ++ return "ldptr.w\t%0,%1"; ++ case 8: ++ if (const_arith_operand (offset, Pmode)) ++ return "ld.d\t%0,%1"; ++ else ++ return "ldptr.d\t%0,%1"; ++ default: gcc_unreachable (); ++ } ++ } ++ ++ if (src_code == CONST_INT) ++ { ++ if (LUI_INT (src)) ++ return "lu12i.w\t%0,%1>>12\t\t\t# %X1"; ++ else if (SMALL_INT (src)) ++ return "addi.w\t%0,$r0,%1\t\t\t# %X1"; ++ else if (SMALL_INT_UNSIGNED (src)) ++ return "ori\t%0,$r0,%1\t\t\t# %X1"; ++ else if (LU52I_INT (src)) ++ return "lu52i.d\t%0,$r0,%X1>>52\t\t\t# %1"; ++ else ++ gcc_unreachable (); ++ } ++ ++ if (symbolic_operand (src, VOIDmode)) ++ { ++ ++ switch (loongarch_cmodel_var) ++ { ++ case LARCH_CMODEL_TINY: ++ do ++ { ++ if (loongarch_global_symbol_p (src) ++ && !loongarch_symbol_binds_local_p (src)) ++ break; ++ case LARCH_CMODEL_TINY_STATIC: ++ if (loongarch_weak_symbol_p (src)) ++ break; ++ ++ /* The symbol must be aligned to 4 byte. */ ++ unsigned int align; ++ ++ if (GET_CODE (src) == LABEL_REF) ++ align = 128 /* whatever */; ++ /* copy from aarch64 */ ++ else if (CONSTANT_POOL_ADDRESS_P (src)) ++ align = GET_MODE_ALIGNMENT (get_pool_mode (src)); ++ else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) ++ { ++ tree exp = SYMBOL_REF_DECL (src); ++ align = TYPE_ALIGN (TREE_TYPE (exp)); ++ align = loongarch_constant_alignment (exp, align); ++ } ++ else if (SYMBOL_REF_DECL (src)) ++ align = DECL_ALIGN (SYMBOL_REF_DECL (src)); ++ else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) ++ && SYMBOL_REF_BLOCK (src) != NULL) ++ align = SYMBOL_REF_BLOCK (src)->alignment; ++ else ++ align = BITS_PER_UNIT; ++ ++ if (align % (4 * 8) == 0) ++ return "pcaddi\t%0,%%pcrel(%1)>>2"; ++ } ++ while (0); ++ case LARCH_CMODEL_NORMAL: ++ case LARCH_CMODEL_LARGE: ++ if (!loongarch_global_symbol_p (src) ++ || loongarch_symbol_binds_local_p (src)) ++ return "la.local\t%0,%1"; ++ else ++ return "la.global\t%0,%1"; ++ case LARCH_CMODEL_EXTREME: ++ default: ++ gcc_unreachable (); ++ } ++ } ++ } ++ if (src_code == REG && FP_REG_P (REGNO (src))) ++ { ++ if (dest_code == REG && FP_REG_P (REGNO (dest))) ++ { ++ if (lsx_p || lasx_p) ++ { ++ ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vori.b\t%w0,%w1,0"; ++ case 32: ++ return "xvori.b\t%u0,%u1,0"; ++ default: gcc_unreachable (); ++ } ++ } ++ else ++ return dbl_p ? 
"fmov.d\t%0,%1" : "fmov.s\t%0,%1"; ++ } ++ ++ if (dest_code == MEM) ++ { ++ if (lsx_p || lasx_p) ++ { ++ ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vst\t%w1,%0"; ++ case 32: ++ return "xvst\t%u1,%0"; ++ default: gcc_unreachable (); ++ } ++ } ++ ++ return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0"; ++ } ++ } ++ if (dest_code == REG && FP_REG_P (REGNO (dest))) ++ { ++ if (src_code == MEM) ++ { ++ if (lsx_p || lasx_p) ++ { ++ switch (GET_MODE_SIZE (mode)) ++ { ++ case 16: ++ return "vld\t%w0,%1"; ++ case 32: ++ return "xvld\t%u0,%1"; ++ default: gcc_unreachable (); ++ } ++ } ++ return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1"; ++ } ++ } ++ gcc_unreachable (); ++} ++ ++/* Return true if CMP1 is a suitable second operand for integer ordering ++ test CODE. See also the *sCC patterns in loongarch.md. */ ++ ++static bool ++loongarch_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) ++{ ++ switch (code) ++ { ++ case GT: ++ case GTU: ++ return reg_or_0_operand (cmp1, VOIDmode); ++ ++ case GE: ++ case GEU: ++ return cmp1 == const1_rtx; ++ ++ case LT: ++ case LTU: ++ return arith_operand (cmp1, VOIDmode); ++ ++ case LE: ++ return sle_operand (cmp1, VOIDmode); ++ ++ case LEU: ++ return sleu_operand (cmp1, VOIDmode); ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* Return true if *CMP1 (of mode MODE) is a valid second operand for ++ integer ordering test *CODE, or if an equivalent combination can ++ be formed by adjusting *CODE and *CMP1. When returning true, update ++ *CODE and *CMP1 with the chosen code and operand, otherwise leave ++ them alone. */ ++ ++static bool ++loongarch_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, ++ machine_mode mode) ++{ ++ HOST_WIDE_INT plus_one; ++ ++ if (loongarch_int_order_operand_ok_p (*code, *cmp1)) ++ return true; ++ ++ if (CONST_INT_P (*cmp1)) ++ switch (*code) ++ { ++ case LE: ++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode); ++ if (INTVAL (*cmp1) < plus_one) ++ { ++ *code = LT; ++ *cmp1 = force_reg (mode, GEN_INT (plus_one)); ++ return true; ++ } ++ break; ++ ++ case LEU: ++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode); ++ if (plus_one != 0) ++ { ++ *code = LTU; ++ *cmp1 = force_reg (mode, GEN_INT (plus_one)); ++ return true; ++ } ++ break; ++ ++ default: ++ break; ++ } ++ return false; ++} ++ ++/* Compare CMP0 and CMP1 using ordering test CODE and store the result ++ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR ++ is nonnull, it's OK to set TARGET to the inverse of the result and ++ flip *INVERT_PTR instead. */ ++ ++static void ++loongarch_emit_int_order_test (enum rtx_code code, bool *invert_ptr, ++ rtx target, rtx cmp0, rtx cmp1) ++{ ++ machine_mode mode; ++ ++ /* First see if there is a LARCH instruction that can do this operation. ++ If not, try doing the same for the inverse operation. If that also ++ fails, force CMP1 into a register and try again. 
++  mode = GET_MODE (cmp0);
++  if (loongarch_canonicalize_int_order_test (&code, &cmp1, mode))
++    loongarch_emit_binary (code, target, cmp0, cmp1);
++  else
++    {
++      enum rtx_code inv_code = reverse_condition (code);
++      if (!loongarch_canonicalize_int_order_test (&inv_code, &cmp1, mode))
++        {
++          cmp1 = force_reg (mode, cmp1);
++          loongarch_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
++        }
++      else if (invert_ptr == 0)
++        {
++          rtx inv_target;
++
++          inv_target = loongarch_force_binary (GET_MODE (target),
++                                               inv_code, cmp0, cmp1);
++          loongarch_emit_binary (XOR, target, inv_target, const1_rtx);
++        }
++      else
++        {
++          *invert_ptr = !*invert_ptr;
++          loongarch_emit_binary (inv_code, target, cmp0, cmp1);
++        }
++    }
++}
++
++/* Return a register that is zero if CMP0 and CMP1 are equal.
++   The register will have the same mode as CMP0.  */
++
++static rtx
++loongarch_zero_if_equal (rtx cmp0, rtx cmp1)
++{
++  if (cmp1 == const0_rtx)
++    return cmp0;
++
++  if (uns_arith_operand (cmp1, VOIDmode))
++    return expand_binop (GET_MODE (cmp0), xor_optab,
++                         cmp0, cmp1, 0, 0, OPTAB_DIRECT);
++
++  return expand_binop (GET_MODE (cmp0), sub_optab,
++                       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
++}
++
++/* Allocate a floating-point condition-code register of mode MODE.
++
++   These condition code registers are used for certain kinds
++   of compound operation, such as compare and branches, vconds,
++   and built-in functions.  At expand time, their use is entirely
++   controlled by LARCH-specific code and is entirely internal
++   to these compound operations.
++
++   We could (and did in the past) expose condition-code values
++   as pseudo registers and leave the register allocator to pick
++   appropriate registers.  The problem is that it is not practically
++   possible for the rtl optimizers to guarantee that no spills will
++   be needed, even when AVOID_CCMODE_COPIES is defined.  We would
++   therefore need spill and reload sequences to handle the worst case.
++
++   Although such sequences do exist, they are very expensive and are
++   not something we'd want to use.
++
++   The main benefit of having more than one condition-code register
++   is to allow the pipelining of operations, especially those involving
++   comparisons and conditional moves.  We don't really expect the
++   registers to be live for long periods, and certainly never want
++   them to be live across calls.
++
++   Also, there should be no penalty attached to using all the available
++   registers.  They are simply bits in the same underlying FPU control
++   register.
++
++   We therefore expose the hardware registers from the outset and use
++   a simple round-robin allocation scheme.  */
++
++static rtx
++loongarch_allocate_fcc (machine_mode mode)
++{
++  unsigned int regno, count;
++
++  gcc_assert (TARGET_HARD_FLOAT);
++
++  if (mode == FCCmode)
++    count = 1;
++  else
++    gcc_unreachable ();
++
++  cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1);
++  if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST)
++    cfun->machine->next_fcc = 0;
++
++  regno = ST_REG_FIRST + cfun->machine->next_fcc;
++  cfun->machine->next_fcc += count;
++  return gen_rtx_REG (mode, regno);
++}
++
++
++/* Sign- or zero-extend OP0 and OP1 for integer comparisons.  */
++
++static void
++loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
++{
++  /* Comparisons consider all XLEN bits, so extend sub-XLEN values.  */
++  if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
++    {
++      /* TODO: double-check that zero-extending QImode values is the more
++         profitable choice.
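++         For example, a uint8_t holding 200 sign-extends to -56;
++         zero-extending both operands instead preserves the literal
++         value 200, which every QImode quantity can represent.  */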
++      if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode)
++        {
++          *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
++          if (CONST_INT_P (*op1))
++            *op1 = GEN_INT ((uint8_t) INTVAL (*op1));
++          else
++            *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1);
++        }
++      else
++        {
++          *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0);
++          if (*op1 != const0_rtx)
++            *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1);
++        }
++    }
++}
++
++/* Convert a comparison into something that can be used in a branch.  On
++   entry, *OP0 and *OP1 are the values being compared and *CODE is the code
++   used to compare them.  Update them to describe the final comparison.  */
++
++static void
++loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++  if (splittable_const_int_operand (*op1, VOIDmode))
++    {
++      HOST_WIDE_INT rhs = INTVAL (*op1);
++
++      if (*code == EQ || *code == NE)
++        {
++          /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0.  */
++          if (SMALL_OPERAND (-rhs))
++            {
++              *op0 = loongarch_force_binary (GET_MODE (*op0), PLUS, *op0,
++                                             GEN_INT (-rhs));
++              *op1 = const0_rtx;
++            }
++        }
++      else
++        {
++          static const enum rtx_code mag_comparisons[][2] = {
++            {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}
++          };
++
++          /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000).  */
++          for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++)
++            {
++              HOST_WIDE_INT new_rhs;
++              bool increment = *code == mag_comparisons[i][0];
++              bool decrement = *code == mag_comparisons[i][1];
++              if (!increment && !decrement)
++                continue;
++
++              new_rhs = rhs + (increment ? 1 : -1);
++              if (loongarch_integer_cost (new_rhs)
++                    < loongarch_integer_cost (rhs)
++                  && (rhs < 0) == (new_rhs < 0))
++                {
++                  *op1 = GEN_INT (new_rhs);
++                  *code = mag_comparisons[i][increment];
++                }
++              break;
++            }
++        }
++    }
++
++  *op0 = force_reg (GET_MODE (*op0), *op0);
++  if (*op1 != const0_rtx)
++    *op1 = force_reg (GET_MODE (*op0), *op1);
++}
++
++/* Like loongarch_emit_int_compare, but for floating-point comparisons.  */
++
++static void
++loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++  rtx cmp_op0 = *op0;
++  rtx cmp_op1 = *op1;
++
++  /* Floating-point tests use a separate FCMP.cond.fmt
++     comparison to set a register.  The branch or conditional move will
++     then compare that register against zero.
++
++     Set CMP_CODE to the code of the comparison instruction and
++     *CODE to the code that the branch or move should use.  */
++  enum rtx_code cmp_code = *code;
++  /* Three FP conditions cannot be implemented by reversing the
++     operands for FCMP.cond.fmt; instead a reversed condition code is
++     required and a test for false.  */
++  *code = NE;
++  *op0 = loongarch_allocate_fcc (FCCmode);
++
++  *op1 = const0_rtx;
++  loongarch_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
++}
++
++/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
++   and OPERANDS[3].  Store the result in OPERANDS[0].
++
++   On 64-bit targets, the mode of the comparison and target will always be
++   SImode, thus possibly narrower than that of the comparison's operands.
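++
++   A plain-C sketch of the constant rewrites performed by
++   loongarch_emit_int_compare above:  */
++
++#if 0
++/* Illustrative only (not compiled); the constants are example values.  */
++#include <stdint.h>
++
++/* "x == 2048": 2048 is not a signed 12-bit immediate, but -2048 is, so
++   the test becomes "(x - 2048) == 0" (an ADDI plus a compare with zero).  */
++static int eq_rewritten (int64_t x) { return (x - 2048) == 0; }
++
++/* "x <= 0x1FFF": 0x1FFF takes LU12I.W plus ORI to materialize, while
++   0x2000 is a single LU12I.W, so the test becomes "x < 0x2000".  */
++static int le_rewritten (int64_t x) { return x < 0x2000; }
++#endif
++
++/* Try performing the comparison in OPERANDS[1]; see the comment above.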
*/ ++ ++void ++loongarch_expand_scc (rtx operands[]) ++{ ++ rtx target = operands[0]; ++ enum rtx_code code = GET_CODE (operands[1]); ++ rtx op0 = operands[2]; ++ rtx op1 = operands[3]; ++ ++ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT); ++ ++ if (code == EQ || code == NE) ++ { ++ { ++ rtx zie = loongarch_zero_if_equal (op0, op1); ++ loongarch_emit_binary (code, target, zie, const0_rtx); ++ } ++ } ++ else ++ loongarch_emit_int_order_test (code, 0, target, op0, op1); ++} ++ ++/* Compare OPERANDS[1] with OPERANDS[2] using comparison code ++ CODE and jump to OPERANDS[3] if the condition holds. */ ++ ++void ++loongarch_expand_conditional_branch (rtx *operands) ++{ ++ enum rtx_code code = GET_CODE (operands[0]); ++ rtx op0 = operands[1]; ++ rtx op1 = operands[2]; ++ rtx condition; ++ ++ if (FLOAT_MODE_P (GET_MODE (op1))) ++ loongarch_emit_float_compare (&code, &op0, &op1); ++ else ++ loongarch_emit_int_compare (&code, &op0, &op1); ++ ++ condition = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); ++ emit_jump_insn (gen_condjump (condition, operands[3])); ++} ++ ++/* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0] ++ if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */ ++ ++void ++loongarch_expand_conditional_move (rtx *operands) ++{ ++ enum rtx_code code = GET_CODE (operands[1]); ++ rtx op0 = XEXP (operands[1], 0); ++ rtx op1 = XEXP (operands[1], 1); ++ ++ if (FLOAT_MODE_P (GET_MODE (op1))) ++ loongarch_emit_float_compare (&code, &op0, &op1); ++ else ++ { ++ if (code == EQ || code == NE) /*see test-mask-1.c && test-mask-5.c*/ ++ { ++ op0 = loongarch_zero_if_equal(op0, op1); ++ op1 = const0_rtx; ++ } ++ else /*see test-mask-2.c*/ ++ { ++ /* The comparison needs a separate scc instruction. Store the ++ result of the scc in *OP0 and compare it against zero. */ ++ bool invert = false; ++ rtx target = gen_reg_rtx (GET_MODE (op0)); ++ loongarch_emit_int_order_test (code, &invert, target, op0, op1); ++ code = invert ? EQ: NE; ++ op0 = target; ++ op1 = const0_rtx; ++ } ++ } ++ ++ rtx cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); ++ /* There is no direct support for general conditional GP move involving ++ two registers using SEL. see test-mask-3.c */ ++ if (INTEGRAL_MODE_P (GET_MODE (operands[2])) ++ && register_operand (operands[2], VOIDmode) ++ && register_operand (operands[3], VOIDmode)) ++ { ++ machine_mode mode = GET_MODE (operands[0]); ++ rtx temp = gen_reg_rtx (mode); ++ rtx temp2 = gen_reg_rtx (mode); ++ ++ emit_insn (gen_rtx_SET (temp, ++ gen_rtx_IF_THEN_ELSE (mode, cond, ++ operands[2], const0_rtx))); ++ ++ /* Flip the test for the second operand. */ ++ cond = gen_rtx_fmt_ee ((code == EQ) ? NE : EQ, GET_MODE (op0), op0, op1); ++ ++ emit_insn (gen_rtx_SET (temp2, ++ gen_rtx_IF_THEN_ELSE (mode, cond, ++ operands[3], const0_rtx))); ++ ++ /* Merge the two results, at least one is guaranteed to be zero. */ ++ emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2))); ++ } ++ else ++ emit_insn (gen_rtx_SET (operands[0], ++ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond, ++ operands[2], operands[3]))); ++} ++ ++ ++/* Initialize *CUM for a call to a function of type FNTYPE. */ ++ ++void ++loongarch_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype) ++{ ++ memset (cum, 0, sizeof (*cum)); ++ cum->prototype = (fntype && prototype_p (fntype)); ++ cum->gp_reg_found = (cum->prototype && stdarg_p (fntype)); ++} ++ ++ ++ ++/* Implement TARGET_EXPAND_BUILTIN_VA_START. 
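++   (A sketch of the masked select used by the conditional-move expansion
++   above follows.)  */
++
++#if 0
++/* Illustrative only (not compiled): the zero-or-value halves merged with
++   IOR by loongarch_expand_conditional_move, which presumably map onto
++   MASKEQZ/MASKNEZ-style instructions.  */
++#include <stdint.h>
++
++static uint64_t
++select_branchless (int cond, uint64_t a, uint64_t b)
++{
++  uint64_t t1 = cond ? a : 0;   /* first IF_THEN_ELSE set */
++  uint64_t t2 = cond ? 0 : b;   /* second set, with the test flipped */
++  return t1 | t2;               /* at least one half is zero */
++}
++#endif
++
++/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */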
++
++static void
++loongarch_va_start (tree valist, rtx nextarg)
++{
++  nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
++  std_expand_builtin_va_start (valist, nextarg);
++}
++
++
++/* Start a definition of function NAME.  */
++
++static void
++loongarch_start_function_definition (const char *name)
++{
++  ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
++
++  /* Start the definition proper.  */
++  assemble_name (asm_out_file, name);
++  fputs (":\n", asm_out_file);
++}
++
++/* End a function definition started by
++   loongarch_start_function_definition.  */
++
++static void
++loongarch_end_function_definition (const char *name)
++{
++}
++
++/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */
++
++static bool
++loongarch_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
++{
++  if (!TARGET_SIBCALLS)
++    return false;
++
++  /* Interrupt handlers need special epilogue code and therefore can't
++     use sibcalls.  */
++  if (loongarch_interrupt_type_p (TREE_TYPE (current_function_decl)))
++    return false;
++
++  /* Otherwise OK.  */
++  return true;
++}
++
++/* Implement a handler for STORE_BY_PIECES operations
++   for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P.  */
++
++bool
++loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
++{
++  /* Storing by pieces involves moving constants into registers
++     of size MIN (ALIGN, BITS_PER_WORD), then storing them.
++     We need to decide whether it is cheaper to load the address of
++     constant data into a register and use a block move instead.  */
++
++  /* If the data is only byte aligned, then:
++
++     (a1) A block move of less than 4 bytes would involve 3 LD.Bs and
++          3 ST.Bs.  We might as well use 3 single-instruction LIs and
++          3 ST.Bs instead.
++
++     (a2) A block move of 4 bytes from aligned source data can use an
++          LD.W/ST.W sequence.  This is often better than the 4 LIs and
++          4 ST.Bs that we would generate when storing by pieces.  */
++  if (align <= BITS_PER_UNIT)
++    return size < 4;
++
++  /* If the data is 2-byte aligned, then:
++
++     (b1) A block move of less than 4 bytes would use a combination of
++          LD.Bs, LD.Hs, ST.Bs and ST.Hs.  We get better code by using
++          single-instruction LIs, ST.Bs and ST.Hs instead.
++
++     (b2) A block move of 4 bytes from aligned source data would again use
++          an LD.W/ST.W sequence.  In most cases, loading the address of
++          the source data would require at least one extra instruction.
++          It is often more efficient to use 2 single-instruction LIs and
++          2 ST.Hs instead.
++
++     (b3) A block move of up to 3 additional bytes would be like (b1).
++
++     (b4) A block move of 8 bytes from aligned source data can use two
++          LD.W/ST.W sequences.  Both sequences are better than the 4 LIs
++          and 4 ST.Hs that we'd generate when storing by pieces.
++
++     The reasoning for higher alignments is similar:
++
++     (c1) A block move of less than 4 bytes would be the same as (b1).
++
++     (c2) A block move of 4 bytes would use an LD.W/ST.W sequence.  Again,
++          loading the address of the source data would typically require
++          at least one extra instruction.  It is generally better to use
++          LU12I.W/ORI/ST.W instead.
++
++     (c3) A block move of up to 3 additional bytes would be like (b1).
++
++     (c4) A block move of 8 bytes can use two LD.W/ST.W sequences or a single
++          LD.D/ST.D sequence, and in these cases we've traditionally preferred
++          the memory copy over the more bulky constant moves.  */
++  return size < 8;
++}
++
++/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
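++   A plain-C sketch of the strategy (load every chunk into a temporary,
++   then store them all) is:  */
++
++#if 0
++/* Illustrative only (not compiled).  The fixed chunk count and type are
++   hypothetical; the real code derives the chunk mode from the operands'
++   alignment, or uses 256-bit vector chunks under LASX.  */
++#include <stddef.h>
++#include <stdint.h>
++
++static void
++block_copy_straight (uint64_t *dest, const uint64_t *src, size_t nchunks)
++{
++  uint64_t tmp[8];                       /* one temporary per chunk;
++                                            assumes nchunks <= 8 */
++  for (size_t i = 0; i < nchunks; i++)   /* load phase */
++    tmp[i] = src[i];
++  for (size_t i = 0; i < nchunks; i++)   /* store phase */
++    dest[i] = tmp[i];
++}
++#endif
++
++/* Emit straight-line code to move LENGTH bytes from SRC to DEST.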
++   Assume that the areas do not overlap.  */
++
++static void
++loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
++{
++  HOST_WIDE_INT offset, delta;
++  unsigned HOST_WIDE_INT bits;
++  int i;
++  machine_mode mode;
++  rtx *regs;
++
++  /* Work out how many bits to move at a time.  Picking the minimum of
++     the common alignment and BITS_PER_WORD keeps every access aligned;
++     when LASX is available we move 256-bit vector chunks instead.  */
++  bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest)));
++
++  if (TARGET_LASX)
++    {
++      bits = BITS_PER_WORD * 4;
++      mode = V4DImode;
++      delta = bits / BITS_PER_UNIT;
++    }
++  else
++    {
++      mode = int_mode_for_size (bits, 0).require ();
++      delta = bits / BITS_PER_UNIT;
++    }
++
++  /* Allocate a buffer for the temporary registers.  */
++  regs = XALLOCAVEC (rtx, length / delta);
++
++  /* Load as many BITS-sized chunks as possible into temporary
++     registers.  */
++  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++    {
++      regs[i] = gen_reg_rtx (mode);
++      loongarch_emit_move (regs[i], adjust_address (src, mode, offset));
++    }
++
++  /* Copy the chunks to the destination.  */
++  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++    loongarch_emit_move (adjust_address (dest, mode, offset), regs[i]);
++
++  /* Mop up any left-over bytes.
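++     E.g. with LASX a 39-byte copy emits one 32-byte chunk above, and the
++     remaining 7 bytes are moved below as a 4-byte, a 2-byte and a
++     1-byte copy.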
*/ ++ if (offset < length) ++ { ++ if (TARGET_LASX) ++ { ++ if(length - offset >= 16) ++ { ++ rtx *regs_tmp = XALLOCAVEC (rtx, 1); ++ regs_tmp[0] = gen_reg_rtx (V2DImode); ++ loongarch_emit_move (regs_tmp[0], adjust_address (src, V2DImode, offset)); ++ loongarch_emit_move (adjust_address (dest, V2DImode, offset), regs_tmp[0]); ++ offset += 16; ++ } ++ if(length - offset >= 8) ++ { ++ rtx *regs_tmp = XALLOCAVEC (rtx, 1); ++ regs_tmp[0] = gen_reg_rtx (DImode); ++ loongarch_emit_move (regs_tmp[0], adjust_address (src, DImode, offset)); ++ loongarch_emit_move (adjust_address (dest, DImode, offset), regs_tmp[0]); ++ offset += 8; ++ } ++ if(length - offset >= 4) ++ { ++ rtx *regs_tmp = XALLOCAVEC (rtx, 1); ++ regs_tmp[0] = gen_reg_rtx (SImode); ++ loongarch_emit_move (regs_tmp[0], adjust_address (src, SImode, offset)); ++ loongarch_emit_move (adjust_address (dest, SImode, offset), regs_tmp[0]); ++ offset += 4; ++ } ++ if(length - offset >= 2) ++ { ++ rtx *regs_tmp = XALLOCAVEC (rtx, 1); ++ regs_tmp[0] = gen_reg_rtx (HImode); ++ loongarch_emit_move (regs_tmp[0], adjust_address (src, HImode, offset)); ++ loongarch_emit_move (adjust_address (dest, HImode, offset), regs_tmp[0]); ++ offset += 2; ++ } ++ if(length - offset >= 1) ++ { ++ rtx *regs_tmp = XALLOCAVEC (rtx, 1); ++ regs_tmp[0] = gen_reg_rtx (QImode); ++ loongarch_emit_move (regs_tmp[0], adjust_address (src, QImode, offset)); ++ loongarch_emit_move (adjust_address (dest, QImode, offset), regs_tmp[0]); ++ offset += 1; ++ } ++ ++ if(length - offset != 0) ++ gcc_unreachable (); ++ } ++ else ++ { ++ src = adjust_address (src, BLKmode, offset); ++ dest = adjust_address (dest, BLKmode, offset); ++ move_by_pieces (dest, src, length - offset, ++ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0); ++ ++ } ++ } ++} ++ ++/* Helper function for doing a loop-based block operation on memory ++ reference MEM. Each iteration of the loop will operate on LENGTH ++ bytes of MEM. ++ ++ Create a new base register for use within the loop and point it to ++ the start of MEM. Create a new memory reference that uses this ++ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */ ++ ++static void ++loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, ++ rtx *loop_reg, rtx *loop_mem) ++{ ++ *loop_reg = copy_addr_to_reg (XEXP (mem, 0)); ++ ++ /* Although the new mem does not refer to a known location, ++ it does keep up to LENGTH bytes of alignment. */ ++ *loop_mem = change_address (mem, BLKmode, *loop_reg); ++ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT)); ++} ++ ++/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER ++ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that ++ the memory regions do not overlap. */ ++ ++static void ++loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, ++ HOST_WIDE_INT bytes_per_iter) ++{ ++ rtx_code_label *label; ++ rtx src_reg, dest_reg, final_src, test; ++ HOST_WIDE_INT leftover; ++ ++ leftover = length % bytes_per_iter; ++ length -= leftover; ++ ++ /* Create registers and memory references for use within the loop. */ ++ loongarch_adjust_block_mem (src, bytes_per_iter, &src_reg, &src); ++ loongarch_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest); ++ ++ /* Calculate the value that SRC_REG should have after the last iteration ++ of the loop. */ ++ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), ++ 0, 0, OPTAB_WIDEN); ++ ++ /* Emit the start of the loop. 
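++     The emitted code has the shape
++
++       do { copy BYTES_PER_ITER bytes; src += BYTES_PER_ITER;
++            dest += BYTES_PER_ITER; } while (src != final_src);
++
++     followed by a straight-line copy of any LEFTOVER bytes.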
*/ ++ label = gen_label_rtx (); ++ emit_label (label); ++ ++ /* Emit the loop body. */ ++ loongarch_block_move_straight (dest, src, bytes_per_iter); ++ ++ /* Move on to the next block. */ ++ loongarch_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter)); ++ loongarch_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter)); ++ ++ /* Emit the loop condition. */ ++ test = gen_rtx_NE (VOIDmode, src_reg, final_src); ++ if (Pmode == DImode) ++ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label)); ++ else ++ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label)); ++ ++ /* Mop up any left-over bytes. */ ++ if (leftover) ++ loongarch_block_move_straight (dest, src, leftover); ++ else ++ /* Temporary fix for PR79150. */ ++ emit_insn (gen_nop ()); ++} ++ ++/* Expand a movmemsi instruction, which copies LENGTH bytes from ++ memory reference SRC to memory reference DEST. */ ++ ++bool ++loongarch_expand_block_move (rtx dest, rtx src, rtx length) ++{ ++ ++ int max_move_bytes = (TARGET_LASX ? \ ++ LARCH_MAX_MOVE_BYTES_STRAIGHT * 8 \ ++ : LARCH_MAX_MOVE_BYTES_STRAIGHT); ++ ++ if (CONST_INT_P (length) && INTVAL (length) <= loongarch_max_inline_memcpy_size) ++ { ++ if (INTVAL (length) <= max_move_bytes) ++ { ++ loongarch_block_move_straight (dest, src, INTVAL (length)); ++ return true; ++ } ++ else if (optimize) ++ { ++ loongarch_block_move_loop (dest, src, INTVAL (length), ++ LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); ++ return true; ++ } ++ } ++ return false; ++} ++ ++ ++/* Expand a QI or HI mode atomic memory operation. ++ ++ GENERATOR contains a pointer to the gen_* function that generates ++ the SI mode underlying atomic operation using masks that we ++ calculate. ++ ++ RESULT is the return register for the operation. Its value is NULL ++ if unused. ++ ++ MEM is the location of the atomic access. ++ ++ OLDVAL is the first operand for the operation. ++ ++ NEWVAL is the optional second operand for the operation. Its value ++ is NULL if unused. */ ++ ++void ++loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, ++ rtx result, rtx mem, rtx oldval, ++ rtx newval, rtx model) ++{ ++ rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask; ++ rtx unshifted_mask_reg, mask, inverted_mask, si_op; ++ rtx res = NULL; ++ rtx tmp = NULL; ++ machine_mode mode; ++ ++ mode = GET_MODE (mem); ++ ++ /* Compute the address of the containing SImode value. */ ++ orig_addr = force_reg (Pmode, XEXP (mem, 0)); ++ memsi_addr = loongarch_force_binary (Pmode, AND, orig_addr, ++ force_reg (Pmode, GEN_INT (-4))); ++ ++ /* Create a memory reference for it. */ ++ memsi = gen_rtx_MEM (SImode, memsi_addr); ++ set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER); ++ MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem); ++ ++ /* Work out the byte offset of the QImode or HImode value, ++ counting from the least significant byte. */ ++ shift = loongarch_force_binary (Pmode, AND, orig_addr, GEN_INT (3)); ++ ++ /* Multiply by eight to convert the shift value from bytes to bits. */ ++ loongarch_emit_binary (ASHIFT, shift, shift, GEN_INT (3)); ++ ++ /* Make the final shift an SImode value, so that it can be used in ++ SImode operations. */ ++ shiftsi = force_reg (SImode, gen_lowpart (SImode, shift)); ++ ++ /* Set MASK to an inclusive mask of the QImode or HImode value. 
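++     For a QImode access at address A (LoongArch is little-endian), the
++     containing word is at A & -4, the shift is (A & 3) * 8, MASK is
++     0xff << shift, and the exclusive mask below is ~MASK.  */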
++  unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
++  unshifted_mask_reg = force_reg (SImode, unshifted_mask);
++  mask = loongarch_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
++
++  /* Compute the equivalent exclusive mask.  */
++  inverted_mask = gen_reg_rtx (SImode);
++  emit_insn (gen_rtx_SET (inverted_mask, gen_rtx_NOT (SImode, mask)));
++
++  /* Shift the old value into place.  */
++  if (oldval != const0_rtx)
++    {
++      oldval = convert_modes (SImode, mode, oldval, true);
++      oldval = force_reg (SImode, oldval);
++      oldval = loongarch_force_binary (SImode, ASHIFT, oldval, shiftsi);
++    }
++
++  /* Do the same for the new value.  */
++  if (newval && newval != const0_rtx)
++    {
++      newval = convert_modes (SImode, mode, newval, true);
++      newval = force_reg (SImode, newval);
++      newval = loongarch_force_binary (SImode, ASHIFT, newval, shiftsi);
++    }
++
++  /* Do the SImode atomic access.  */
++  if (result)
++    res = gen_reg_rtx (SImode);
++
++  if (newval)
++    si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval,
++                            model);
++  else if (result)
++    si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, model);
++  else
++    si_op = generator.fn_5 (memsi, mask, inverted_mask, oldval, model);
++
++  emit_insn (si_op);
++
++  if (result)
++    {
++      /* Shift and convert the result.  */
++      loongarch_emit_binary (AND, res, res, mask);
++      loongarch_emit_binary (LSHIFTRT, res, res, shiftsi);
++      loongarch_emit_move (result, gen_lowpart (GET_MODE (result), res));
++    }
++}
++
++/* Return true if X is a MEM with the same size as MODE.  */
++
++bool
++loongarch_mem_fits_mode_p (machine_mode mode, rtx x)
++{
++  return (MEM_P (x)
++          && MEM_SIZE_KNOWN_P (x)
++          && MEM_SIZE (x) == GET_MODE_SIZE (mode));
++}
++
++/* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
++   source of an "ext" instruction or the destination of an "ins"
++   instruction.  OP must be a register operand and the following
++   conditions must hold:
++
++     0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
++     0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
++     0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
++
++   Also reject lengths equal to a word as they are better handled
++   by the move patterns.  */
++
++bool
++loongarch_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
++{
++  if (!register_operand (op, VOIDmode)
++      || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
++    return false;
++
++  if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
++    return false;
++
++  if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
++    return false;
++
++  return true;
++}
++
++/* Return true iff OP1 and OP2 are valid operands together for the
++   *and<mode>3 patterns.  For the cases to consider, see the table in
++   the comment before the pattern.  */
++
++bool
++and_operands_ok (machine_mode mode, rtx op1, rtx op2)
++{
++  if (memory_operand (op1, mode))
++    return and_load_operand (op2, mode);
++  else
++    return and_reg_operand (op2, mode);
++}
++
++/* Print the text for PRINT_OPERAND punctuation character CH to FILE.
++   The punctuation characters are:
++
++   '.'  Print the name of the register with a hard-wired zero (zero or $r0).
++   '$'  Print the name of the stack pointer register (sp or $r3).
++   ':'  Print "c" to use the compact version if the delay slot is a nop.
++
++   See also loongarch_init_print_operand_punct.  */
++
++static void
++loongarch_print_operand_punctuation (FILE *file, int ch)
++{
++  switch (ch)
++    {
++    case '.':
++      fputs (reg_names[GP_REG_FIRST + 0], file);
++      break;
++
++    case '$':
++      fputs (reg_names[STACK_POINTER_REGNUM], file);
++      break;
++
++    case ':':
++      /* When final_sequence is 0, the delay slot will be a nop.  We can
++         use the compact version where available.  The %: formatter will
++         only be present if a compact form of the branch is available.  */
++      if (final_sequence == 0)
++        putc ('c', file);
++      break;
++
++    default:
++      gcc_unreachable ();
++      break;
++    }
++}
++
++/* Initialize loongarch_print_operand_punct.  */
++
++static void
++loongarch_init_print_operand_punct (void)
++{
++  const char *p;
++
++  for (p = ".$:"; *p; p++)
++    loongarch_print_operand_punct[(unsigned char) *p] = true;
++}
++
++/* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
++   associated with condition CODE.  Print the condition part of the
++   opcode to FILE.  */
++
++static void
++loongarch_print_int_branch_condition (FILE *file, enum rtx_code code,
++                                      int letter)
++{
++  switch (code)
++    {
++    case EQ:
++    case NE:
++    case GT:
++    case GE:
++    case LT:
++    case LE:
++    case GTU:
++    case GEU:
++    case LTU:
++    case LEU:
++      /* Conveniently, the LARCH names for these conditions are the same
++         as their RTL equivalents.  */
++      fputs (GET_RTX_NAME (code), file);
++      break;
++
++    default:
++      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
++      break;
++    }
++}
++
++/* Likewise floating-point branches.  */
++
++static void
++loongarch_print_float_branch_condition (FILE *file, enum rtx_code code,
++                                        int letter)
++{
++  switch (code)
++    {
++    case EQ:
++      fputs ("ceqz", file);
++      break;
++
++    case NE:
++      fputs ("cnez", file);
++      break;
++
++    default:
++      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
++      break;
++    }
++}
++
++/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */
++
++static bool
++loongarch_print_operand_punct_valid_p (unsigned char code)
++{
++  return loongarch_print_operand_punct[code];
++}
++
++/* Return true if a FENCE should be emitted before a memory access to
++   implement the acquire and/or release semantics of memory model
++   MODEL.  */
++
++static bool
++loongarch_memmodel_needs_rel_and_acq_fence (enum memmodel model)
++{
++  switch (model)
++    {
++    case MEMMODEL_ACQ_REL:
++    case MEMMODEL_SEQ_CST:
++    case MEMMODEL_SYNC_SEQ_CST:
++    case MEMMODEL_RELEASE:
++    case MEMMODEL_SYNC_RELEASE:
++    case MEMMODEL_ACQUIRE:
++    case MEMMODEL_CONSUME:
++    case MEMMODEL_SYNC_ACQUIRE:
++      return true;
++
++    case MEMMODEL_RELAXED:
++      return false;
++
++    default:
++      gcc_unreachable ();
++    }
++}
++
++/* Return true if a FENCE should be emitted before a memory access to
++   implement the release portion of memory model MODEL.  */
++
++static bool
++loongarch_memmodel_needs_release_fence (enum memmodel model)
++{
++  switch (model)
++    {
++    case MEMMODEL_ACQ_REL:
++    case MEMMODEL_SEQ_CST:
++    case MEMMODEL_SYNC_SEQ_CST:
++    case MEMMODEL_RELEASE:
++    case MEMMODEL_SYNC_RELEASE:
++      return true;
++
++    case MEMMODEL_ACQUIRE:
++    case MEMMODEL_CONSUME:
++    case MEMMODEL_SYNC_ACQUIRE:
++    case MEMMODEL_RELAXED:
++      return false;
++
++    default:
++      gcc_unreachable ();
++    }
++}
++
++/* Implement TARGET_PRINT_OPERAND.  The LARCH-specific operand codes are:
++
++   'E'  Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal.
++   'X'  Print CONST_INT OP in hexadecimal format.
++   'x'  Print the low 16 bits of CONST_INT OP in hexadecimal format.
++ 'd' Print CONST_INT OP in decimal. ++ 'B' Print CONST_INT OP element 0 of a replicated CONST_VECTOR ++ as an unsigned byte [0..255]. ++ 'm' Print one less than CONST_INT OP in decimal. ++ 'y' Print exact log2 of CONST_INT OP in decimal. ++ 'h' Print the high-part relocation associated with OP, after stripping ++ any outermost HIGH. ++ 'R' Print the low-part relocation associated with OP. ++ 'C' Print the integer branch condition for comparison OP. ++ 'N' Print the inverse of the integer branch condition for comparison OP. ++ 'F' Print the FPU branch condition for comparison OP. ++ 'W' Print the inverse of the FPU branch condition for comparison OP. ++ 'w' Print a LSX register. ++ 'u' Print a LASX register. ++ 'T' Print 'f' for (eq:FCC ...), 't' for (ne:FCC ...), ++ 'z' for (eq:?I ...), 'n' for (ne:?I ...). ++ 't' Like 'T', but with the EQ/NE cases reversed ++ 'Y' Print loongarch_fp_conditions[INTVAL (OP)] ++ 'Z' Print OP and a comma for 8CC, otherwise print nothing. ++ 'D' Print the second part of a double-word register or memory operand. ++ 'L' Print the low-order register in a double-word register operand. ++ 'M' Print high-order register in a double-word register operand. ++ 'z' Print $0 if OP is zero, otherwise print OP normally. ++ 'b' Print the address of a memory operand, without offset. ++ 'v' Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI, ++ V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively. ++ 'V' Print exact log2 of CONST_INT OP element 0 of a replicated ++ CONST_VECTOR in decimal. ++ 'A' Print a _DB suffix if the memory model requires a release. ++ 'G' Print a DBAR insn if the memory model requires a release. ++ 'i' Print i if the operand is not a register. */ ++ ++static void ++loongarch_print_operand (FILE *file, rtx op, int letter) ++{ ++ enum rtx_code code; ++ ++ if (loongarch_print_operand_punct_valid_p (letter)) ++ { ++ loongarch_print_operand_punctuation (file, letter); ++ return; ++ } ++ ++ gcc_assert (op); ++ code = GET_CODE (op); ++ ++ switch (letter) ++ { ++ case 'E': ++ if (GET_CODE (op) == CONST_VECTOR) ++ { ++ gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op))); ++ op = CONST_VECTOR_ELT (op, 0); ++ gcc_assert (CONST_INT_P (op)); ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); ++ } ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'X': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op)); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'x': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'd': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'B': ++ if (GET_CODE (op) == CONST_VECTOR) ++ { ++ gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op))); ++ op = CONST_VECTOR_ELT (op, 0); ++ gcc_assert (CONST_INT_P (op)); ++ unsigned HOST_WIDE_INT val8 = UINTVAL (op) & GET_MODE_MASK (QImode); ++ fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, val8); ++ } ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'm': ++ if (CONST_INT_P (op)) ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1); ++ else ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ break; ++ ++ case 'y': ++ if (CONST_INT_P 
(op))
++        {
++          int val = exact_log2 (INTVAL (op));
++          if (val != -1)
++            fprintf (file, "%d", val);
++          else
++            output_operand_lossage ("invalid use of '%%%c'", letter);
++        }
++      else
++        output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
++    case 'V':
++      if (GET_CODE (op) == CONST_VECTOR)
++        {
++          machine_mode mode = GET_MODE_INNER (GET_MODE (op));
++          unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
++          int vlog2 = exact_log2 (val & GET_MODE_MASK (mode));
++          if (vlog2 != -1)
++            fprintf (file, "%d", vlog2);
++          else
++            output_operand_lossage ("invalid use of '%%%c'", letter);
++        }
++      else
++        output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
++    case 'C':
++      loongarch_print_int_branch_condition (file, code, letter);
++      break;
++
++    case 'N':
++      loongarch_print_int_branch_condition (file, reverse_condition (code),
++                                            letter);
++      break;
++
++    case 'F':
++      loongarch_print_float_branch_condition (file, code, letter);
++      break;
++
++    case 'W':
++      loongarch_print_float_branch_condition (file, reverse_condition (code),
++                                              letter);
++      break;
++
++    case 'T':
++    case 't':
++      {
++        int truth = (code == NE) == (letter == 'T');
++        fputc ("zfnt"[truth * 2 + ST_REG_P (REGNO (XEXP (op, 0)))], file);
++      }
++      break;
++
++    case 'Y':
++      if (code == CONST_INT
++          && UINTVAL (op) < ARRAY_SIZE (loongarch_fp_conditions))
++        fputs (loongarch_fp_conditions[UINTVAL (op)], file);
++      else
++        output_operand_lossage ("'%%%c' is not a valid operand prefix",
++                                letter);
++      break;
++
++    case 'Z':
++      loongarch_print_operand (file, op, 0);
++      fputc (',', file);
++      break;
++
++    case 'w':
++      if (code == REG && LSX_REG_P (REGNO (op)))
++        fprintf (file, "$vr%s", &reg_names[REGNO (op)][2]);
++      else
++        output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
++    case 'u':
++      if (code == REG && LASX_REG_P (REGNO (op)))
++        fprintf (file, "$xr%s", &reg_names[REGNO (op)][2]);
++      else
++        output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
++    case 'v':
++      switch (GET_MODE (op))
++        {
++        case E_V16QImode:
++        case E_V32QImode:
++          fprintf (file, "b");
++          break;
++        case E_V8HImode:
++        case E_V16HImode:
++          fprintf (file, "h");
++          break;
++        case E_V4SImode:
++        case E_V4SFmode:
++        case E_V8SImode:
++        case E_V8SFmode:
++          fprintf (file, "w");
++          break;
++        case E_V2DImode:
++        case E_V2DFmode:
++        case E_V4DImode:
++        case E_V4DFmode:
++          fprintf (file, "d");
++          break;
++        default:
++          output_operand_lossage ("invalid use of '%%%c'", letter);
++        }
++      break;
++
++    case 'A':
++      if (loongarch_memmodel_needs_rel_and_acq_fence
++            ((enum memmodel) INTVAL (op)))
++        fputs ("_db", file);
++      break;
++
++    case 'G':
++      if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
++        fputs ("dbar\t0", file);
++      break;
++
++    case 'i':
++      if (code != REG)
++        fputs ("i", file);
++      break;
++
++    default:
++      switch (code)
++        {
++        case REG:
++          {
++            unsigned int regno = REGNO (op);
++            if (letter == 'M' || letter == 'D')
++              regno++;
++            else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
++              output_operand_lossage ("invalid use of '%%%c'", letter);
++            fprintf (file, "%s", reg_names[regno]);
++          }
++          break;
++
++        case MEM:
++          if (letter == 'D')
++            output_address (GET_MODE (op), plus_constant (Pmode,
++                                                          XEXP (op, 0), 4));
++          else if (letter == 'b')
++            {
++              gcc_assert (REG_P (XEXP (op, 0)));
++              loongarch_print_operand (file, XEXP (op, 0), 0);
++            }
++          else if (letter && letter != 'z')
++            output_operand_lossage ("invalid use of '%%%c'", letter);
++          else
++            output_address (GET_MODE (op),
XEXP (op, 0)); ++ break; ++ ++ default: ++ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op))) ++ fputs (reg_names[GP_REG_FIRST], file); ++ else if (letter && letter != 'z') ++ output_operand_lossage ("invalid use of '%%%c'", letter); ++ else ++ output_addr_const (file, loongarch_strip_unspec_address (op)); ++ break; ++ } ++ } ++} ++ ++/* Implement TARGET_PRINT_OPERAND_ADDRESS. */ ++ ++static void ++loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x) ++{ ++ struct loongarch_address_info addr; ++ ++ if (loongarch_classify_address (&addr, x, word_mode, true)) ++ switch (addr.type) ++ { ++ case ADDRESS_REG: ++ fprintf (file, "%s,", reg_names[REGNO (addr.reg)]); ++ loongarch_print_operand (file, addr.offset, 0); ++ return; ++ ++ case ADDRESS_CONST_INT: ++ fprintf (file, "%s,", reg_names[GP_REG_FIRST]); ++ output_addr_const (file, x); ++ return; ++ ++ case ADDRESS_SYMBOLIC: ++ output_addr_const (file, loongarch_strip_unspec_address (x)); ++ return; ++ } ++ if (GET_CODE (x) == CONST_INT) ++ output_addr_const (file, x); ++ else ++ gcc_unreachable (); ++} ++ ++ ++/* Implement TARGET_ENCODE_SECTION_INFO. */ ++ ++static void ++loongarch_encode_section_info (tree decl, rtx rtl, int first) ++{ ++ default_encode_section_info (decl, rtl, first); ++ ++ if (TREE_CODE (decl) == FUNCTION_DECL) ++ { ++ rtx symbol = XEXP (rtl, 0); ++ tree type = TREE_TYPE (decl); ++ ++ /* Encode whether the symbol is short or long. */ ++ if ((TARGET_LONG_CALLS && !loongarch_near_type_p (type)) ++ || loongarch_far_type_p (type)) ++ SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL; ++ } ++} ++ ++/* Implement TARGET_SELECT_RTX_SECTION. */ ++ ++static section * ++loongarch_select_rtx_section (machine_mode mode, rtx x, ++ unsigned HOST_WIDE_INT align) ++{ ++ /* ??? Consider using mergeable small data sections. */ ++ if (loongarch_rtx_constant_in_small_data_p (mode)) ++ return get_named_section (NULL, ".sdata", 0); ++ ++ return default_elf_select_rtx_section (mode, x, align); ++} ++ ++/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION. ++ ++ The complication here is that, with the combination ++ !TARGET_ABSOLUTE_ABICALLS , jump tables will use ++ absolute addresses, and should therefore not be included in the ++ read-only part of a DSO. Handle such cases by selecting a normal ++ data section instead of a read-only one. The logic apes that in ++ default_function_rodata_section. */ ++ ++static section * ++loongarch_function_rodata_section (tree decl) ++{ ++ return default_function_rodata_section (decl); ++} ++ ++/* Implement TARGET_IN_SMALL_DATA_P. */ ++ ++static bool ++loongarch_in_small_data_p (const_tree decl) ++{ ++ unsigned HOST_WIDE_INT size; ++ ++ if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL) ++ return false; ++ ++ /* We don't yet generate small-data references for ++ VxWorks RTP code. See the related -G handling in ++ loongarch_option_override. */ ++ if (TARGET_VXWORKS_RTP) ++ return false; ++ ++ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0) ++ { ++ const char *name; ++ ++ /* Reject anything that isn't in a known small-data section. */ ++ name = DECL_SECTION_NAME (decl); ++ if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0) ++ return false; ++ ++ /* If a symbol is defined externally, the assembler will use the ++ usual -G rules when deciding how to implement macros. */ ++ if (!DECL_EXTERNAL (decl)) ++ return true; ++ } ++ ++ /* We have traditionally not treated zero-sized objects as small data, ++ so this is now effectively part of the ABI. 
*/ ++ size = int_size_in_bytes (TREE_TYPE (decl)); ++ return size > 0 && size <= loongarch_small_data_threshold; ++} ++ ++/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use ++ anchors for small data: the GP register acts as an anchor in that ++ case. We also don't want to use them for PC-relative accesses, ++ where the PC acts as an anchor. */ ++ ++static bool ++loongarch_use_anchors_for_symbol_p (const_rtx symbol) ++{ ++ return default_use_anchors_for_symbol_p (symbol); ++} ++ ++/* The LARCH debug format wants all automatic variables and arguments ++ to be in terms of the virtual frame pointer (stack pointer before ++ any adjustment in the function), while the LARCH 3.0 linker wants ++ the frame pointer to be the stack pointer after the initial ++ adjustment. So, we do the adjustment here. The arg pointer (which ++ is eliminated) points to the virtual frame pointer, while the frame ++ pointer (which may be eliminated) points to the stack pointer after ++ the initial adjustments. */ ++ ++HOST_WIDE_INT ++loongarch_debugger_offset (rtx addr, HOST_WIDE_INT offset) ++{ ++ rtx offset2 = const0_rtx; ++ rtx reg = eliminate_constant_term (addr, &offset2); ++ ++ if (offset == 0) ++ offset = INTVAL (offset2); ++ ++ if (reg == stack_pointer_rtx ++ || reg == frame_pointer_rtx ++ || reg == hard_frame_pointer_rtx) ++ { ++ offset -= cfun->machine->frame.total_size; ++ if (reg == hard_frame_pointer_rtx) ++ offset += cfun->machine->frame.hard_frame_pointer_offset; ++ } ++ ++ return offset; ++} ++ ++/* Implement ASM_OUTPUT_EXTERNAL. */ ++ ++void ++loongarch_output_external (FILE *file, tree decl, const char *name) ++{ ++ default_elf_asm_output_external (file, decl, name); ++ ++ /* We output the name if and only if TREE_SYMBOL_REFERENCED is ++ set in order to avoid putting out names that are never really ++ used. */ ++ if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) ++ { ++ if (loongarch_in_small_data_p (decl)) ++ { ++ /* When using assembler macros, emit .extern directives for ++ all small-data externs so that the assembler knows how ++ big they are. ++ ++ In most cases it would be safe (though pointless) to emit ++ .externs for other symbols too. One exception is when an ++ object is within the -G limit but declared by the user to ++ be in a section other than .sbss or .sdata. */ ++ fputs ("\t.extern\t", file); ++ assemble_name (file, name); ++ fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n", ++ int_size_in_bytes (TREE_TYPE (decl))); ++ } ++ } ++} ++ ++/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */ ++ ++static void ATTRIBUTE_UNUSED ++loongarch_output_dwarf_dtprel (FILE *file, int size, rtx x) ++{ ++ switch (size) ++ { ++ case 4: ++ fputs ("\t.dtprelword\t", file); ++ break; ++ ++ case 8: ++ fputs ("\t.dtpreldword\t", file); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ output_addr_const (file, x); ++ fputs ("+0x8000", file); ++} ++ ++/* Implement TARGET_DWARF_REGISTER_SPAN. */ ++ ++static rtx ++loongarch_dwarf_register_span (rtx reg) ++{ ++ rtx high, low; ++ machine_mode mode; ++ ++ mode = GET_MODE (reg); ++ ++ return NULL_RTX; ++} ++ ++/* Implement TARGET_DWARF_FRAME_REG_MODE. */ ++ ++static machine_mode ++loongarch_dwarf_frame_reg_mode (int regno) ++{ ++ machine_mode mode = default_dwarf_frame_reg_mode (regno); ++ ++ if (FP_REG_P (regno) && loongarch_abi == ABILP32 && TARGET_FLOAT64) ++ mode = SImode; ++ ++ return mode; ++} ++ ++ ++/* Implement ASM_OUTPUT_ASCII. 
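++   A double quote or backslash is given a backslash escape and any
++   non-printable byte a three-digit octal escape, so the bytes
++   'H' 'i' '"' '\n' come out as
++
++       .ascii "Hi\"\012"
++
++   with the directive restarted roughly every 72 output columns.  */
++
++#if 0
++/* Illustrative only (not compiled): the per-character rule used below,
++   with the standard isprint standing in for GCC's ISPRINT.  */
++#include <ctype.h>
++#include <stdio.h>
++
++static void
++ascii_escape_char (FILE *f, unsigned char c)
++{
++  if (isprint (c))
++    {
++      if (c == '\\' || c == '"')
++        putc ('\\', f);
++      putc (c, f);
++    }
++  else
++    fprintf (f, "\\%03o", c);
++}
++#endif
++
++/* Implement ASM_OUTPUT_ASCII.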
*/ ++ ++void ++loongarch_output_ascii (FILE *stream, const char *string, size_t len) ++{ ++ size_t i; ++ int cur_pos; ++ ++ cur_pos = 17; ++ fprintf (stream, "\t.ascii\t\""); ++ for (i = 0; i < len; i++) ++ { ++ int c; ++ ++ c = (unsigned char) string[i]; ++ if (ISPRINT (c)) ++ { ++ if (c == '\\' || c == '\"') ++ { ++ putc ('\\', stream); ++ cur_pos++; ++ } ++ putc (c, stream); ++ cur_pos++; ++ } ++ else ++ { ++ fprintf (stream, "\\%03o", c); ++ cur_pos += 4; ++ } ++ ++ if (cur_pos > 72 && i+1 < len) ++ { ++ cur_pos = 17; ++ fprintf (stream, "\"\n\t.ascii\t\""); ++ } ++ } ++ fprintf (stream, "\"\n"); ++} ++ ++/* Emit either a label, .comm, or .lcomm directive. When using assembler ++ macros, mark the symbol as written so that loongarch_asm_output_external ++ won't emit an .extern for it. STREAM is the output file, NAME is the ++ name of the symbol, INIT_STRING is the string that should be written ++ before the symbol and FINAL_STRING is the string that should be ++ written after it. FINAL_STRING is a printf format that consumes the ++ remaining arguments. */ ++ ++void ++loongarch_declare_object (FILE *stream, const char *name, const char *init_string, ++ const char *final_string, ...) ++{ ++ va_list ap; ++ ++ fputs (init_string, stream); ++ assemble_name (stream, name); ++ va_start (ap, final_string); ++ vfprintf (stream, final_string, ap); ++ va_end (ap); ++ ++ tree name_tree = get_identifier (name); ++ TREE_ASM_WRITTEN (name_tree) = 1; ++} ++ ++/* Declare a common object of SIZE bytes using asm directive INIT_STRING. ++ NAME is the name of the object and ALIGN is the required alignment ++ in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third ++ alignment argument. */ ++ ++void ++loongarch_declare_common_object (FILE *stream, const char *name, ++ const char *init_string, ++ unsigned HOST_WIDE_INT size, ++ unsigned int align, bool takes_alignment_p) ++{ ++ if (!takes_alignment_p) ++ { ++ size += (align / BITS_PER_UNIT) - 1; ++ size -= size % (align / BITS_PER_UNIT); ++ loongarch_declare_object (stream, name, init_string, ++ "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size); ++ } ++ else ++ loongarch_declare_object (stream, name, init_string, ++ "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", ++ size, align / BITS_PER_UNIT); ++} ++ ++/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the ++ elfos.h version, but we also need to handle -muninit-const-in-rodata. */ ++ ++void ++loongarch_output_aligned_decl_common (FILE *stream, tree decl, const char *name, ++ unsigned HOST_WIDE_INT size, ++ unsigned int align) ++{ ++ loongarch_declare_common_object (stream, name, "\n\t.comm\t", ++ size, align, true); ++} ++ ++#ifdef ASM_OUTPUT_SIZE_DIRECTIVE ++extern int size_directive_output; ++ ++/* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF ++ definitions except that it uses loongarch_declare_object to emit the label. */ ++ ++void ++loongarch_declare_object_name (FILE *stream, const char *name, ++ tree decl ATTRIBUTE_UNUSED) ++{ ++#ifdef ASM_OUTPUT_TYPE_DIRECTIVE ++#ifdef USE_GNU_UNIQUE_OBJECT ++ /* As in elfos.h. 
*/ ++ if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (decl) ++ && (!DECL_ARTIFICIAL (decl) || !TREE_READONLY (decl))) ++ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "gnu_unique_object"); ++ else ++#endif ++ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); ++#endif ++ ++ size_directive_output = 0; ++ if (!flag_inhibit_size_directive && DECL_SIZE (decl)) ++ { ++ HOST_WIDE_INT size; ++ ++ size_directive_output = 1; ++ size = int_size_in_bytes (TREE_TYPE (decl)); ++ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); ++ } ++ ++ loongarch_declare_object (stream, name, "", ":\n"); ++} ++ ++/* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */ ++ ++void ++loongarch_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end) ++{ ++ const char *name; ++ ++ name = XSTR (XEXP (DECL_RTL (decl), 0), 0); ++ if (!flag_inhibit_size_directive ++ && DECL_SIZE (decl) != 0 ++ && !at_end ++ && top_level ++ && DECL_INITIAL (decl) == error_mark_node ++ && !size_directive_output) ++ { ++ HOST_WIDE_INT size; ++ ++ size_directive_output = 1; ++ size = int_size_in_bytes (TREE_TYPE (decl)); ++ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); ++ } ++} ++#endif ++ ++/* Mark text contents as code or data, mainly for the purpose of correct ++ disassembly. Emit a local symbol and set its type appropriately for ++ that purpose. Also emit `.insn' if marking contents as code so that ++ the ISA mode is recorded and any padding that follows is disassembled ++ as correct instructions. */ ++ ++void ++loongarch_set_text_contents_type (FILE *file ATTRIBUTE_UNUSED, ++ const char *prefix ATTRIBUTE_UNUSED, ++ unsigned long num ATTRIBUTE_UNUSED, ++ bool function_p ATTRIBUTE_UNUSED) ++{ ++#ifdef ASM_OUTPUT_TYPE_DIRECTIVE ++ char buf[(sizeof (num) * 10) / 4 + 2]; ++ const char *fnname; ++ char *sname; ++ rtx symbol; ++ ++ sprintf (buf, "%lu", num); ++ symbol = XEXP (DECL_RTL (current_function_decl), 0); ++ fnname = targetm.strip_name_encoding (XSTR (symbol, 0)); ++ sname = ACONCAT ((prefix, fnname, "_", buf, NULL)); ++ ++ ASM_OUTPUT_TYPE_DIRECTIVE (file, sname, function_p ? "function" : "object"); ++ assemble_name (file, sname); ++ fputs (":\n", file); ++// if (function_p) ++// fputs ("\t.insn\n", file); ++#endif ++} ++ ++ ++/* Implement TARGET_ASM_FILE_START. */ ++ ++static void ++loongarch_file_start (void) ++{ ++ default_file_start (); ++ ++ /* Generate a special section to describe the ABI switches used to ++ produce the resultant binary. */ ++} ++ ++ ++/* Return true if REGNO is a register that is ordinarily call-clobbered ++ but must nevertheless be preserved by an interrupt handler. */ ++ ++static bool ++loongarch_interrupt_extra_call_saved_reg_p (unsigned int regno) ++{ ++ if (GP_REG_P (regno) ++ && cfun->machine->use_shadow_register_set == SHADOW_SET_NO) ++ { ++ /* $0 is hard-wired. */ ++ if (regno == GP_REG_FIRST) ++ return false; ++ ++ /* The function will return the stack pointer to its original value ++ anyway. */ ++ if (regno == STACK_POINTER_REGNUM) ++ return false; ++ ++ /* Otherwise, return true for registers that aren't ordinarily ++ call-clobbered. */ ++ return call_used_regs[regno]; ++ } ++ ++ return false; ++} ++ ++/* Implement TARGET_FRAME_POINTER_REQUIRED. */ ++ ++static bool ++loongarch_frame_pointer_required (void) ++{ ++ /* If the function contains dynamic stack allocations, we need to ++ use the frame pointer to access the static parts of the frame. 
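++
++   For instance (an illustrative case), a function containing
++   `char *p = alloca (n);' has a variable-sized frame, so
++   cfun->calls_alloca is set and the check below requests a frame
++   pointer.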
*/ ++ if (cfun->calls_alloca) ++ return true; ++ ++ return false; ++} ++ ++/* Make sure that we're not trying to eliminate to the wrong hard frame ++ pointer. */ ++ ++static bool ++loongarch_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) ++{ ++ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM); ++} ++ ++ ++ ++/* Implement RETURN_ADDR_RTX. We do not support moving back to a ++ previous frame. */ ++ ++rtx ++loongarch_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) ++{ ++ if (count != 0) ++ return const0_rtx; ++ ++ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); ++} ++ ++/* Emit code to change the current function's return address to ++ ADDRESS. SCRATCH is available as a scratch register, if needed. ++ ADDRESS and SCRATCH are both word-mode GPRs. */ ++ ++void ++loongarch_set_return_address (rtx address, rtx scratch) ++{ ++ rtx slot_address; ++ ++ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM)); ++ if (frame_pointer_needed) ++ slot_address = loongarch_add_offset (scratch, hard_frame_pointer_rtx, ++ -UNITS_PER_WORD); ++ else ++ slot_address = loongarch_add_offset (scratch, stack_pointer_rtx, ++ cfun->machine->frame.gp_sp_offset); ++ loongarch_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address); ++} ++ ++ ++/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the ++ cprestore slot. LOAD_P is true if the caller wants to load from ++ the cprestore slot; it is false if the caller wants to store to ++ the slot. */ ++ ++static void ++loongarch_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset, ++ bool load_p) ++{ ++ const struct loongarch_frame_info *frame; ++ ++ frame = &cfun->machine->frame; ++ /* .cprestore always uses the stack pointer instead of the frame pointer. ++ We have a free choice for direct stores. ++ Using the stack pointer would sometimes give more ++ (early) scheduling freedom, but using the frame pointer would ++ sometimes give more (late) scheduling freedom. It's hard to ++ predict which applies to a given function, so let's keep things ++ simple. ++ ++ Loads must always use the frame pointer in functions that call ++ alloca, and there's little benefit to using the stack pointer ++ otherwise. */ ++ if (frame_pointer_needed) ++ { ++ *base = hard_frame_pointer_rtx; ++ *offset = frame->args_size - frame->hard_frame_pointer_offset; ++ } ++ else ++ { ++ *base = stack_pointer_rtx; ++ *offset = frame->args_size; ++ } ++} ++ ++/* Return true if X is the load or store address of the cprestore slot; ++ LOAD_P says which. */ ++ ++bool ++loongarch_cprestore_address_p (rtx x, bool load_p) ++{ ++ rtx given_base, required_base; ++ HOST_WIDE_INT given_offset, required_offset; ++ ++ loongarch_split_plus (x, &given_base, &given_offset); ++ loongarch_get_cprestore_base_and_offset (&required_base, &required_offset, load_p); ++ return given_base == required_base && given_offset == required_offset; ++} ++ ++ ++/* A function to save or store a register. The first argument is the ++ register and the second is the stack slot. 
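++
++   A minimal sketch of a callback with this signature (hypothetical,
++   for illustration only):
++
++     static void
++     example_save_reg (rtx reg, rtx mem)
++     {
++       loongarch_emit_move (mem, reg);
++     }
++
++   loongarch_expand_prologue below passes loongarch_save_reg, a
++   callback of this shape, to loongarch_for_each_saved_reg.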
*/ ++typedef void (*loongarch_save_restore_fn) (rtx, rtx); ++ ++/* LOONGSON LA464 Emit insn pattern for gssq and gslq*/ ++void ++loongarch_la464_emit_128bit_load(rtx operands[]) ++{ ++ rtx op0; ++ rtx op1; ++ rtx op2; ++ rtx op3; ++ ++#if 0 /*for debug*/ ++ printf("464po: emit 128 PO LOAD!\n"); ++ printf("reg num of op0 is: %d\n",REGNO(operands[0])); ++ printf("reg num of op2 is: %d\n",REGNO(operands[2])); ++#endif ++ op0 = gen_rtx_REG (GET_MODE (operands[0]), REGNO (operands[0])); ++ op1 = operands[1]; ++ op2 = gen_rtx_REG (GET_MODE (operands[2]), REGNO (operands[2])); ++ op3 = operands[3]; ++ emit (gen_rtx_PARALLEL (VOIDmode, ++ gen_rtvec (2, ++ gen_rtx_SET (op0,op1), ++ gen_rtx_SET (op2,op3)))); ++} ++ ++void ++loongarch_la464_emit_128bit_store(rtx operands[]) ++{ ++ rtx op0; ++ rtx op1; ++ rtx op2; ++ rtx op3; ++ ++#if 0 /*for debug*/ ++ printf("464po: emit 128 PO STORE!\n"); ++ printf("reg num of op1 is: %d\n",REGNO(operands[1])); ++ printf("reg num of op3 is: %d\n",REGNO(operands[3])); ++#endif ++ op0 = operands[0]; ++ op1 = gen_rtx_REG (GET_MODE (operands[1]), REGNO (operands[1])); ++ op2 = operands[2]; ++ op3 = gen_rtx_REG (GET_MODE (operands[3]), REGNO (operands[3])); ++ emit (gen_rtx_PARALLEL (VOIDmode, ++ gen_rtvec (2, ++ gen_rtx_SET (op0,op1), ++ gen_rtx_SET (op2,op3)))); ++ ++} ++ ++ ++ ++ ++/* Implement ASM_DECLARE_FUNCTION_NAME. */ ++ ++void loongarch_declare_function_name(FILE *stream ATTRIBUTE_UNUSED, ++ const char *name, tree fndecl ATTRIBUTE_UNUSED) ++{ ++ loongarch_start_function_definition (name); ++} ++ ++/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */ ++ ++static void ++loongarch_output_function_prologue (FILE *file) ++{ ++} ++ ++/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */ ++ ++static void ++loongarch_output_function_epilogue (FILE *) ++{ ++ const char *fnname; ++ ++ /* Get the function name the same way that toplev.c does before calling ++ assemble_start_function. This is needed so that the name used here ++ exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */ ++ fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); ++ loongarch_end_function_definition (fnname); ++} ++ ++ ++#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) ++ ++#if PROBE_INTERVAL > 16384 ++#error Cannot use indexed addressing mode for stack probing ++#endif ++ ++/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE, ++ inclusive. These are offsets from the current stack pointer. */ ++ ++static void ++loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size) ++{ ++ ++ /* See if we have a constant small number of probes to generate. If so, ++ that's the easy case. */ ++ if ((TARGET_64BIT && (first + size <= 8 * PROBE_INTERVAL)) ++ || (!TARGET_64BIT && (first + size <= 2048))) ++ { ++ HOST_WIDE_INT i; ++ ++ /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until ++ it exceeds SIZE. If only one probe is needed, this will not ++ generate any code. Then probe at FIRST + SIZE. */ ++ for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL) ++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, ++ -(first + i))); ++ ++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, ++ -(first + size))); ++ } ++ ++ /* Otherwise, do the same as above, but in a loop. 
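++   (Worked example of the straight-line case above, for illustration
++   only: with a 4 KiB PROBE_INTERVAL, FIRST = 1024 and SIZE = 10000,
++   probes land at sp - 5120, sp - 9216 and finally sp - 11024.)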
Note that we must be ++ extra careful with variables wrapping around because we might be at ++ the very top (or the very bottom) of the address space and we have ++ to be able to handle this case properly; in particular, we use an ++ equality test for the loop condition. */ ++ else ++ { ++ HOST_WIDE_INT rounded_size; ++ rtx r13 = LARCH_PROLOGUE_TEMP (Pmode); ++ rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); ++ rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); ++ ++ /* Sanity check for the addressing mode we're going to use. */ ++ gcc_assert (first <= 16384); ++ ++ ++ /* Step 1: round SIZE to the previous multiple of the interval. */ ++ ++ rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); ++ /* TEST_ADDR = SP + FIRST */ ++ if (first != 0) ++ { ++ emit_move_insn (r14, GEN_INT (first)); ++ emit_insn (gen_rtx_SET (r13, gen_rtx_MINUS (Pmode, stack_pointer_rtx, r14))); ++ } ++ else ++ emit_move_insn (r13, stack_pointer_rtx); ++ ++ /* Step 2: compute initial and final value of the loop counter. */ ++ ++ emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); ++ if (rounded_size == 0) ++ emit_move_insn (r12, r13); ++ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ ++ else ++ { ++ emit_move_insn (r12, GEN_INT (rounded_size)); ++ emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r13, r12))); ++ /* Step 3: the loop ++ ++ do ++ { ++ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL ++ probe at TEST_ADDR ++ } ++ while (TEST_ADDR != LAST_ADDR) ++ ++ probes at FIRST + N * PROBE_INTERVAL for values of N from 1 ++ until it is equal to ROUNDED_SIZE. */ ++ ++ emit_insn (PMODE_INSN (gen_probe_stack_range, (r13, r13, r12, r14))); ++ } ++ ++ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time ++ that SIZE is equal to ROUNDED_SIZE. */ ++ ++ if (size != rounded_size) ++ { ++ if (TARGET_64BIT) ++ emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size)); ++ else ++ { ++ HOST_WIDE_INT i; ++ for (i = 2048; i < (size - rounded_size); i += 2048 ) ++ { ++ emit_stack_probe (plus_constant (Pmode, r12, -i)); ++ emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, r12, -2048))); ++ } ++ emit_stack_probe (plus_constant (Pmode, r12, -(size - rounded_size - i + 2048))); ++ } ++ } ++ } ++ ++ /* Make sure nothing is scheduled before we are done. */ ++ emit_insn (gen_blockage ()); ++} ++ ++/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are ++ absolute addresses. */ ++ ++const char * ++loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) ++{ ++ static int labelno = 0; ++ char loop_lab[32], tmp[64]; ++ rtx xops[3]; ++ ++ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++); ++ ++ /* Loop. */ ++ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab); ++ ++ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ ++ xops[0] = reg1; ++ xops[1] = GEN_INT (-PROBE_INTERVAL); ++ xops[2] = reg3; ++ if (TARGET_64BIT) ++ output_asm_insn ("sub.d\t%0,%0,%2", xops); ++ else ++ output_asm_insn ("sub.w\t%0,%0,%2", xops); ++ ++ /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */ ++ xops[1] = reg2; ++ strcpy (tmp, "bne\t%0,%1,"); ++ if (TARGET_64BIT) ++ output_asm_insn ("st.d\t$r0,%0,0", xops); ++ else ++ output_asm_insn ("st.w\t$r0,%0,0", xops); ++ output_asm_insn (strcat (tmp, &loop_lab[1]), xops); ++ ++ return ""; ++} ++ ++/* Expand the "prologue" pattern. 
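++
++   In outline, the expansion below (1) emits stack probes when stack
++   checking or stack-clash protection is active, (2) saves the GPRs
++   and FPRs recorded in frame->mask and frame->fmask, (3) sets up the
++   hard frame pointer when one is needed, and (4) allocates any
++   remaining frame space, falling back to N_LARCH_PROLOGUE_TEMP when
++   the adjustment is not a SMALL_OPERAND.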
*/ ++ ++void ++loongarch_expand_prologue (void) ++{ ++ struct loongarch_frame_info *frame = &cfun->machine->frame; ++ HOST_WIDE_INT size = frame->total_size; ++ unsigned mask = frame->mask; ++ rtx insn; ++ ++ if (flag_stack_usage_info) ++ current_function_static_stack_size = size; ++ ++ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK ++ || flag_stack_clash_protection) ++ { ++ if (crtl->is_leaf && !cfun->calls_alloca) ++ { ++ if (size > PROBE_INTERVAL && size > get_stack_check_protect ()) ++ loongarch_emit_probe_stack_range (get_stack_check_protect (), ++ size - get_stack_check_protect ()); ++ } ++ else if (size > 0) ++ loongarch_emit_probe_stack_range (get_stack_check_protect (), size); ++ } ++ ++ /* When optimizing for size, call a subroutine to save the registers. */ ++ if (loongarch_use_save_libcall (frame)) ++ { ++ rtx dwarf = NULL_RTX; ++ dwarf = loongarch_adjust_libcall_cfi_prologue (); ++ ++ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ ++ size -= frame->save_libcall_adjustment; ++ insn = emit_insn (gen_gpr_save (GEN_INT (mask))); ++ ++ RTX_FRAME_RELATED_P (insn) = 1; ++ REG_NOTES (insn) = dwarf; ++ } ++ ++ /* Save the registers. */ ++ if ((frame->mask | frame->fmask) != 0) ++ { ++ HOST_WIDE_INT step1 = MIN (size, loongarch_first_stack_step (frame)); ++ ++ insn = gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, ++ GEN_INT (-step1)); ++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; ++ size -= step1; ++ loongarch_for_each_saved_reg (size, loongarch_save_reg); ++ } ++ ++ frame->mask = mask; /* Undo the above fib. */ ++ ++ /* Set up the frame pointer, if we're using one. */ ++ if (frame_pointer_needed) ++ { ++ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (frame->hard_frame_pointer_offset - size)); ++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; ++ ++ loongarch_emit_stack_tie (); ++ } ++ ++ /* Allocate the rest of the frame. */ ++ if (size > 0) ++ { ++ if (SMALL_OPERAND (-size)) ++ { ++ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-size)); ++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; ++ } ++ else ++ { ++ loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); ++ emit_insn (gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, ++ N_LARCH_PROLOGUE_TEMP (Pmode))); ++ ++ /* Describe the effect of the previous instructions. */ ++ insn = plus_constant (Pmode, stack_pointer_rtx, -size); ++ insn = gen_rtx_SET (stack_pointer_rtx, insn); ++ loongarch_set_frame_expr (insn); ++ } ++ } ++} ++ ++ ++/* Return true if register REGNO can store a value of mode MODE. ++ The result of this function is cached in loongarch_hard_regno_mode_ok. */ ++ ++static bool ++loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) ++{ ++ unsigned int size; ++ enum mode_class mclass; ++ ++ if (mode == FCCmode) ++ return ST_REG_P (regno); ++ ++ size = GET_MODE_SIZE (mode); ++ mclass = GET_MODE_CLASS (mode); ++ ++ if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) && !LASX_SUPPORTED_MODE_P (mode)) ++ return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; ++ ++ /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ ++ if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) ++ return true; ++ ++ /* For LASX, allow TImode and 256-bit vector modes in all FPR. 
FIXME: */ ++ if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) ++ return true; ++ ++ if (FP_REG_P (regno) ++ && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0 ++ || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG))) ++ { ++ if (mclass == MODE_FLOAT ++ || mclass == MODE_COMPLEX_FLOAT ++ || mclass == MODE_VECTOR_FLOAT) ++ return size <= UNITS_PER_FPVALUE; ++ ++ /* Allow integer modes that fit into a single register. We need ++ to put integers into FPRs when using instructions like CVT ++ and TRUNC. There's no point allowing sizes smaller than a word, ++ because the FPU has no appropriate load/store instructions. */ ++ if (mclass == MODE_INT) ++ return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; ++ } ++ ++ return false; ++} ++ ++/* Implement TARGET_HARD_REGNO_MODE_OK. */ ++ ++static bool ++loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) ++{ ++ return loongarch_hard_regno_mode_ok_p[mode][regno]; ++} ++ ++/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */ ++ ++bool ++loongarch_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED, ++ unsigned int new_reg) ++{ ++ /* Interrupt functions can only use registers that have already been ++ saved by the prologue, even if they would normally be call-clobbered. */ ++ if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (new_reg)) ++ return false; ++ ++ return true; ++} ++ ++/* Return nonzero if register REGNO can be used as a scratch register ++ in peephole2. */ ++ ++bool ++loongarch_hard_regno_scratch_ok (unsigned int regno) ++{ ++ /* See loongarch_hard_regno_rename_ok. */ ++ if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (regno)) ++ return false; ++ ++ return true; ++} ++ ++static bool ++loongarch_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode) ++{ ++ if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) ++ return true; ++ ++ return false; ++} ++ ++/* Implement TARGET_HARD_REGNO_NREGS. */ ++ ++static unsigned int ++loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) ++{ ++ if (ST_REG_P (regno)) ++ /* The size of FP status registers is always 4, because they only hold ++ FCCmode values, and FCCmode is always considered to be 4 bytes wide. */ ++ return (GET_MODE_SIZE (mode) + 3) / 4; ++ ++ if (FP_REG_P (regno)) ++ { ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ return 1; ++ ++ if (LASX_SUPPORTED_MODE_P (mode)) ++ return 1; ++ ++ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; ++ } ++ ++ /* All other registers are word-sized. */ ++ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; ++} ++ ++/* Implement CLASS_MAX_NREGS, taking the maximum of the cases ++ in loongarch_hard_regno_nregs. 
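++
++   Worked example (illustrative): for FP_REGS and a 16-byte LSX mode
++   such as V4SImode, SIZE is clamped to UNITS_PER_LSX_REG (16), giving
++   (16 + 16 - 1) / 16 = 1 register; for GR_REGS and DImode on a
++   64-bit target, SIZE is UNITS_PER_WORD (8), likewise giving one
++   register.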
*/ ++ ++int ++loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) ++{ ++ int size; ++ HARD_REG_SET left; ++ ++ size = 0x8000; ++ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); ++ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS])) ++ { ++ if (loongarch_hard_regno_mode_ok (ST_REG_FIRST, mode)) ++ size = MIN (size, 4); ++ ++ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]); ++ } ++ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS])) ++ { ++ if (loongarch_hard_regno_mode_ok (FP_REG_FIRST, mode)) ++ { ++ if (LASX_SUPPORTED_MODE_P (mode)) //Fix me ++ size = MIN (size, UNITS_PER_LASX_REG); ++ else if (LSX_SUPPORTED_MODE_P (mode)) ++ size = MIN (size, UNITS_PER_LSX_REG); ++ else ++ size = MIN (size, UNITS_PER_FPREG); ++ } ++ ++ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]); ++ } ++ if (!hard_reg_set_empty_p (left)) ++ size = MIN (size, UNITS_PER_WORD); ++ return (GET_MODE_SIZE (mode) + size - 1) / size; ++} ++ ++/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */ ++ ++static bool ++loongarch_can_change_mode_class (machine_mode from, ++ machine_mode to, reg_class_t rclass) ++{ ++ /* Allow conversions between different Loongson integer vectors, ++ and between those vectors and DImode. */ ++ if (GET_MODE_SIZE (from) == 8 && GET_MODE_SIZE (to) == 8 ++ && INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to)) ++ return true; ++ ++ /* Allow conversions between different LSX/LASX vector modes. */ ++ if (LASX_SUPPORTED_MODE_P (from) && LASX_SUPPORTED_MODE_P (to)) ++ return true; ++ ++ /* Allow conversions between different LSX vector modes. */ ++ if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) ++ return true; ++ ++ /* Otherwise, there are several problems with changing the modes of ++ values in floating-point registers: ++ ++ - When a multi-word value is stored in paired floating-point ++ registers, the first register always holds the low word. We ++ therefore can't allow FPRs to change between single-word and ++ multi-word modes on big-endian targets. ++ ++ - GCC assumes that each word of a multiword register can be ++ accessed individually using SUBREGs. This is not true for ++ floating-point registers if they are bigger than a word. ++ ++ - Loading a 32-bit value into a 64-bit floating-point register ++ will not sign-extend the value, despite what LOAD_EXTEND_OP ++ says. We can't allow FPRs to change from SImode to a wider ++ mode on 64-bit targets. ++ ++ - If the FPU has already interpreted a value in one format, we ++ must not ask it to treat the value as having a different ++ format. ++ ++ We therefore disallow all mode changes involving FPRs. */ ++ ++ return !reg_classes_intersect_p (FP_REGS, rclass); ++} ++ ++/* Implement target hook small_register_classes_for_mode_p. */ ++ ++static bool ++loongarch_small_register_classes_for_mode_p (machine_mode mode ++ ATTRIBUTE_UNUSED) ++{ ++ return 0; ++} ++ ++/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction, ++ or use the LSX's move.v instruction. */ ++ ++static bool ++loongarch_mode_ok_for_mov_fmt_p (machine_mode mode) ++{ ++ switch (mode) ++ { ++ case E_SFmode: ++ return TARGET_HARD_FLOAT; ++ ++ case E_DFmode: ++ return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT; ++ ++ case E_V2SFmode: ++ return 0; ++ ++ default: ++ return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode) : LSX_SUPPORTED_MODE_P (mode); ++ } ++} ++ ++/* Implement TARGET_MODES_TIEABLE_P. 
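++
++   For example (illustration only): SImode and DImode are tieable
++   here because neither satisfies loongarch_mode_ok_for_mov_fmt_p,
++   whereas on a hard-float configuration SFmode and SImode are not,
++   since SFmode values are preferred in FPRs.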
*/ ++ ++static bool ++loongarch_modes_tieable_p (machine_mode mode1, machine_mode mode2) ++{ ++ /* FPRs allow no mode punning, so it's not worth tying modes if we'd ++ prefer to put one of them in FPRs. */ ++ return (mode1 == mode2 ++ || (!loongarch_mode_ok_for_mov_fmt_p (mode1) ++ && !loongarch_mode_ok_for_mov_fmt_p (mode2))); ++} ++ ++/* Implement TARGET_PREFERRED_RELOAD_CLASS. */ ++ ++static reg_class_t ++loongarch_preferred_reload_class (rtx x, reg_class_t rclass) ++{ ++ if (reg_class_subset_p (FP_REGS, rclass) ++ && loongarch_mode_ok_for_mov_fmt_p (GET_MODE (x))) ++ return FP_REGS; ++ ++ if (reg_class_subset_p (GR_REGS, rclass)) ++ rclass = GR_REGS; ++ ++ return rclass; ++} ++ ++/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation. ++ Return a "canonical" class to represent it in later calculations. */ ++ ++static reg_class_t ++loongarch_canonicalize_move_class (reg_class_t rclass) ++{ ++ if (reg_class_subset_p (rclass, GENERAL_REGS)) ++ rclass = GENERAL_REGS; ++ ++ return rclass; ++} ++ ++/* Return the cost of moving a value from a register of class FROM to a GPR. ++ Return 0 for classes that are unions of other classes handled by this ++ function. */ ++ ++static int ++loongarch_move_to_gpr_cost (reg_class_t from) ++{ ++ switch (from) ++ { ++ case GENERAL_REGS: ++ /* MOVE macro. */ ++ return 2; ++ ++ case FP_REGS: ++ /* MFC1, etc. */ ++ return 4; ++ ++ default: ++ return 0; ++ } ++} ++ ++/* Return the cost of moving a value from a GPR to a register of class TO. ++ Return 0 for classes that are unions of other classes handled by this ++ function. */ ++ ++static int ++loongarch_move_from_gpr_cost (reg_class_t to) ++{ ++ switch (to) ++ { ++ case GENERAL_REGS: ++ /*MOVE macro. */ ++ return 2; ++ ++ case FP_REGS: ++ /* MTC1, etc. */ ++ return 4; ++ ++ default: ++ return 0; ++ } ++} ++ ++/* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the ++ maximum of the move costs for subclasses; regclass will work out ++ the maximum for us. */ ++ ++static int ++loongarch_register_move_cost (machine_mode mode, ++ reg_class_t from, reg_class_t to) ++{ ++ reg_class_t dregs; ++ int cost1, cost2; ++ ++ from = loongarch_canonicalize_move_class (from); ++ to = loongarch_canonicalize_move_class (to); ++ ++ /* Handle moves that can be done without using general-purpose registers. */ ++ if (from == FP_REGS) ++ { ++ if (to == FP_REGS && loongarch_mode_ok_for_mov_fmt_p (mode)) ++ /* MOV.FMT. */ ++ return 4; ++ } ++ ++ /* Handle cases in which only one class deviates from the ideal. */ ++ dregs = GENERAL_REGS; ++ if (from == dregs) ++ return loongarch_move_from_gpr_cost (to); ++ if (to == dregs) ++ return loongarch_move_to_gpr_cost (from); ++ ++ /* Handles cases that require a GPR temporary. */ ++ cost1 = loongarch_move_to_gpr_cost (from); ++ if (cost1 != 0) ++ { ++ cost2 = loongarch_move_from_gpr_cost (to); ++ if (cost2 != 0) ++ return cost1 + cost2; ++ } ++ ++ return 0; ++} ++ ++/* Implement TARGET_MEMORY_MOVE_COST. */ ++ ++static int ++loongarch_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in) ++{ ++ return (loongarch_cost->memory_latency ++ + memory_move_secondary_cost (mode, rclass, in)); ++} ++ ++/* Implement TARGET_SECONDARY_MEMORY_NEEDED. ++ ++ When targeting the o32 FPXX ABI, all moves with a length of doubleword ++ or greater must be performed by FR-mode-aware instructions. ++ This can be achieved using MOVFRH2GR.S/MOVGR2FRH.W when these instructions are ++ available but otherwise moves must go via memory. 
++ Using MOVGR2FR/MOVFR2GR to access the lower-half of these registers would require ++ a forbidden single-precision access. We require all double-word moves to use ++ memory because adding even and odd floating-point registers classes ++ would have a significant impact on the backend. */ ++ ++static bool ++loongarch_secondary_memory_needed (machine_mode mode, reg_class_t class1, ++ reg_class_t class2) ++{ ++ /* Ignore spilled pseudos. */ ++ if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS)) ++ return false; ++ ++ return false; ++} ++ ++/* Return the register class required for a secondary register when ++ copying between one of the registers in RCLASS and value X, which ++ has mode MODE. X is the source of the move if IN_P, otherwise it ++ is the destination. Return NO_REGS if no secondary register is ++ needed. */ ++ ++enum reg_class ++loongarch_secondary_reload_class (enum reg_class rclass, ++ machine_mode mode, rtx x, bool) ++{ ++ int regno; ++ ++ regno = true_regnum (x); ++ ++ /* Copying from accumulator registers to anywhere other than a general ++ register requires a temporary general register. */ ++// if (reg_class_subset_p (rclass, ACC_REGS)) ?????? ++// return GP_REG_P (regno) ? NO_REGS : GR_REGS; ++ if (reg_class_subset_p (rclass, FP_REGS)) ++ { ++ if (regno < 0 ++ || (MEM_P (x) ++ && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))) ++ /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use ++ pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */ ++ return NO_REGS; ++ ++ if (MEM_P (x) && LSX_SUPPORTED_MODE_P (mode)) ++ /* In this case we can use LSX LD.* and ST.*. */ ++ return NO_REGS; ++ ++ if (GP_REG_P (regno) || x == CONST0_RTX (mode)) ++ /* In this case we can use movgr2fr.s, movfr2gr.s, movgr2fr.d or movfr2gr.d. */ ++ return NO_REGS; ++ ++ if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x)) ++ /* We can force the constant to memory and use lwc1 ++ and ldc1. As above, we will use pairs of lwc1s if ++ ldc1 is not supported. */ ++ return NO_REGS; ++ ++ if (FP_REG_P (regno) && loongarch_mode_ok_for_mov_fmt_p (mode)) ++ /* In this case we can use mov.fmt. */ ++ return NO_REGS; ++ ++ /* Otherwise, we need to reload through an integer register. */ ++ return GR_REGS; ++ } ++ if (FP_REG_P (regno)) ++ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS; ++ ++ return NO_REGS; ++} ++ ++ ++/* Implement TARGET_VALID_POINTER_MODE. */ ++ ++static bool ++loongarch_valid_pointer_mode (scalar_int_mode mode) ++{ ++ return mode == SImode || (TARGET_64BIT && mode == DImode); ++} ++ ++/* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */ ++ ++static bool ++loongarch_vector_mode_supported_p (machine_mode mode) ++{ ++ return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode) : LSX_SUPPORTED_MODE_P (mode); ++} ++ ++/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */ ++ ++static bool ++loongarch_scalar_mode_supported_p (scalar_mode mode) ++{ ++ if (ALL_FIXED_POINT_MODE_P (mode) ++ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD) ++ return true; ++ ++ return default_scalar_mode_supported_p (mode); ++} ++ ++/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */ ++ ++static machine_mode ++loongarch_preferred_simd_mode (scalar_mode mode) ++{ ++ if (!ISA_HAS_LSX) ++ return word_mode; ++ ++ switch (mode) ++ { ++ case E_QImode: ++ return ISA_HAS_LASX ? E_V32QImode : E_V16QImode; ++ case E_HImode: ++ return ISA_HAS_LASX ? E_V16HImode : E_V8HImode; ++ case E_SImode: ++ return ISA_HAS_LASX ? E_V8SImode : E_V4SImode; ++ case E_DImode: ++ return ISA_HAS_LASX ? 
E_V4DImode : E_V2DImode; ++ ++ case E_SFmode: ++ return ISA_HAS_LASX ? E_V8SFmode : E_V4SFmode; ++ ++ case E_DFmode: ++ return ISA_HAS_LASX ? E_V4DFmode : E_V2DFmode; ++ ++ default: ++ break; ++ } ++ return word_mode; ++} ++ ++/* Implement TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES. */ ++ ++static void ++loongarch_autovectorize_vector_sizes (vector_sizes *sizes) ++{ ++ if (ISA_HAS_LASX) ++ { ++ sizes->safe_push (32); ++ sizes->safe_push (16); ++ } ++ else if (ISA_HAS_LSX) ++ sizes->safe_push (16); ++} ++ ++/* Return the length of INSN. LENGTH is the initial length computed by ++ attributes in the machine-description file. */ ++ ++int ++loongarch_adjust_insn_length (rtx_insn *insn, int length) ++{ ++ /* loongarch.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length ++ of a PIC long-branch sequence. Substitute the correct value. */ ++ if (length == MAX_PIC_BRANCH_LENGTH ++ && JUMP_P (insn) ++ && INSN_CODE (insn) >= 0 ++ && get_attr_type (insn) == TYPE_BRANCH) ++ { ++ /* Add the branch-over instruction and its delay slot, if this ++ is a conditional branch. */ ++ length = simplejump_p (insn) ? 0 : 8; ++ ++ /* Add the length of an indirect jump, ignoring the delay slot. */ ++ length += 4; ++ } ++ ++ /* An unconditional jump has an unfilled delay slot if it is not part ++ of a sequence. A conditional jump normally has a delay slot. */ ++ if (CALL_P (insn) || (JUMP_P (insn))) ++ length += 4; ++ ++ /* See how many nops might be needed to avoid hardware hazards. */ ++ if (!cfun->machine->ignore_hazard_length_p ++ && INSN_P (insn) ++ && INSN_CODE (insn) >= 0) ++ switch (get_attr_hazard (insn)) ++ { ++ case HAZARD_NONE: ++ break; ++ ++ case HAZARD_DELAY: ++ case HAZARD_FORBIDDEN_SLOT: ++ length += NOP_INSN_LENGTH; ++ break; ++ } ++ ++ return length; ++} ++ ++/* Return the assembly code for INSN, which has the operands given by ++ OPERANDS, and which branches to OPERANDS[0] if some condition is true. ++ BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0] ++ is in range of a direct branch. BRANCH_IF_FALSE is an inverted ++ version of BRANCH_IF_TRUE. */ ++ ++const char * ++loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, ++ const char *branch_if_true, ++ const char *branch_if_false) ++{ ++ unsigned int length; ++ rtx taken; ++ ++ gcc_assert (LABEL_P (operands[0])); ++ ++ length = get_attr_length (insn); ++ if (length <= 12) ++ { ++ return branch_if_true; ++ } ++ ++ /* Generate a reversed branch around a direct jump. This fallback does ++ not use branch-likely instructions. */ ++ rtx_code_label *not_taken = gen_label_rtx (); ++ taken = operands[0]; ++ ++ /* Generate the reversed branch to NOT_TAKEN. */ ++ operands[0] = not_taken; ++ output_asm_insn (branch_if_false, operands); ++ ++ /* If INSN has a delay slot, we must provide delay slots for both the ++ branch to NOT_TAKEN and the conditional jump. We must also ensure ++ that INSN's delay slot is executed in the appropriate cases. */ ++ if (final_sequence) ++ { ++ /* This first delay slot will always be executed, so use INSN's ++ delay slot if it is not annulled. */ ++ if (!INSN_ANNULLED_BRANCH_P (insn)) ++ { ++ final_scan_insn (final_sequence->insn (1), ++ asm_out_file, optimize, 1, NULL); ++ final_sequence->insn (1)->set_deleted (); ++ } ++ fprintf (asm_out_file, "\n"); ++ } ++ ++ output_asm_insn (LARCH_ABSOLUTE_JUMP ("b\t%0"), &taken); ++ ++ /* Now deal with its delay slot; see above. */ ++ if (final_sequence) ++ { ++ /* This delay slot will only be executed if the branch is taken. 
Use INSN's delay slot if it is annulled. */ ++ if (INSN_ANNULLED_BRANCH_P (insn)) ++ { ++ final_scan_insn (final_sequence->insn (1), ++ asm_out_file, optimize, 1, NULL); ++ final_sequence->insn (1)->set_deleted (); ++ } ++ fprintf (asm_out_file, "\n"); ++ } ++ ++ /* Output NOT_TAKEN. */ ++ targetm.asm_out.internal_label (asm_out_file, "L", ++ CODE_LABEL_NUMBER (not_taken)); ++ return ""; ++} ++ ++/* Return the assembly code for INSN, which branches to OPERANDS[0] ++ if some equality condition is true. The condition is given by ++ OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of ++ OPERANDS[1]. OPERANDS[2] is the comparison's first operand; ++ OPERANDS[3] is the second operand and may be zero or a register. */ ++ ++const char * ++loongarch_output_equal_conditional_branch (rtx_insn *insn, rtx *operands, ++ bool inverted_p) ++{ ++ const char *branch[2]; ++ if (operands[3] == const0_rtx) ++ { ++ branch[!inverted_p] = LARCH_BRANCH ("b%C1z", "%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("b%N1z", "%2,%0"); ++ } ++ else ++ { ++ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%z3,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%z3,%0"); ++ } ++ ++ return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); ++} ++ ++/* Return the assembly code for INSN, which branches to OPERANDS[0] ++ if some ordering condition is true. The condition is given by ++ OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of ++ OPERANDS[1]. OPERANDS[2] is the comparison's first operand; ++ OPERANDS[3] is the second operand and may be zero or a register. */ ++ ++const char * ++loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, ++ bool inverted_p) ++{ ++ const char *branch[2]; ++ ++ /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true. ++ Make BRANCH[0] branch on the inverse condition. */ ++ if (operands[3] != const0_rtx) ++ { ++ /* Handle degenerate cases that should not, but do, occur. */ ++ if (REGNO (operands[2]) == REGNO (operands[3])) ++ { ++ switch (GET_CODE (operands[1])) ++ { ++ case LT: ++ case LTU: ++ case GT: ++ case GTU: ++ inverted_p = !inverted_p; ++ /* Fall through. */ ++ case LE: ++ case LEU: ++ case GE: ++ case GEU: ++ branch[!inverted_p] = LARCH_BRANCH ("b", "%0"); ++ branch[inverted_p] = "\t# branch never"; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ switch (GET_CODE (operands[1])) ++ { ++ case LE: ++ branch[!inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); ++ break; ++ case LEU: ++ branch[!inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); ++ break; ++ case GT: ++ branch[!inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); ++ break; ++ case GTU: ++ branch[!inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); ++ break; ++ case LT: ++ case LTU: ++ case GE: ++ case GEU: ++ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%3,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%3,%0"); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ } ++ else ++ { ++ switch (GET_CODE (operands[1])) ++ { ++ /* These cases are equivalent to comparisons against zero. */ ++ case LEU: ++ inverted_p = !inverted_p; ++ /* Fall through. 
*/ ++ case GTU: ++ branch[!inverted_p] = LARCH_BRANCH ("bne", "%2,%.,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("beq", "%2,%.,%0"); ++ break; ++ ++ /* These cases are always true or always false. */ ++ case LTU: ++ inverted_p = !inverted_p; ++ /* Fall through. */ ++ case GEU: ++ branch[!inverted_p] = LARCH_BRANCH ("beq", "%.,%.,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("bne", "%.,%.,%0"); ++ break; ++ ++ case LE: ++ branch[!inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); ++ break; ++ case GT: ++ branch[!inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); ++ break; ++ case LT: ++ case GE: ++ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,$r0,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,$r0,%0"); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); ++} ++ ++/* Return the assembly code for DIV or DDIV instruction DIVISION, which has ++ the operands given by OPERANDS. Add in a divide-by-zero check if needed. ++ ++ When working around R4000 and R4400 errata, we need to make sure that ++ the division is not immediately followed by a shift[1][2]. We also ++ need to stop the division from being put into a branch delay slot[3]. ++ The easiest way to avoid both problems is to add a nop after the ++ division. When a divide-by-zero check is needed, this nop can be ++ used to fill the branch delay slot. ++ ++ [1] If a double-word or a variable shift executes immediately ++ after starting an integer division, the shift may give an ++ incorrect result. See quotations of errata #16 and #28 from ++ "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" ++ in loongarch.md for details. ++ ++ [2] A similar bug to [1] exists for all revisions of the ++ R4000 and the R4400 when run in an MC configuration. ++ From "LARCH R4000MC Errata, Processor Revision 2.2 and 3.0": ++ ++ "19. In this following sequence: ++ ++ ddiv (or ddivu or div or divu) ++ dsll32 (or dsrl32, dsra32) ++ ++ if an MPT stall occurs, while the divide is slipping the cpu ++ pipeline, then the following double shift would end up with an ++ incorrect result. ++ ++ Workaround: The compiler needs to avoid generating any ++ sequence with divide followed by extended double shift." ++ ++ This erratum is also present in "LARCH R4400MC Errata, Processor ++ Revision 1.0" and "LARCH R4400MC Errata, Processor Revision 2.0 ++ & 3.0" as errata #10 and #4, respectively. ++ ++ [3] From "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" ++ (also valid for LARCH R4000MC processors): ++ ++ "52. R4000SC: This bug does not apply for the R4000PC. ++ ++ There are two flavors of this bug: ++ ++ 1) If the instruction just after divide takes an RF exception ++ (tlb-refill, tlb-invalid) and gets an instruction cache ++ miss (both primary and secondary) and the line which is ++ currently in secondary cache at this index had the first ++ data word, where the bits 5..2 are set, then R4000 would ++ get a wrong result for the div. ++ ++ ##1 ++ nop ++ div r8, r9 ++ ------------------- # end-of page. -tlb-refill ++ nop ++ ##2 ++ nop ++ div r8, r9 ++ ------------------- # end-of page. 
-tlb-invalid ++ nop ++ ++ 2) If the divide is in the taken branch delay slot, where the ++ target takes RF exception and gets an I-cache miss for the ++ exception vector or where I-cache miss occurs for the ++ target address, under the above mentioned scenarios, the ++ div would get wrong results. ++ ++ ##1 ++ j r2 # to next page mapped or unmapped ++ div r8,r9 # this bug would be there as long ++ # as there is an ICache miss and ++ nop # the "data pattern" is present ++ ++ ##2 ++ beq r0, r0, NextPage # to Next page ++ div r8,r9 ++ nop ++ ++ This bug is present for div, divu, ddiv, and ddivu ++ instructions. ++ ++ Workaround: For item 1), OS could make sure that the next page ++ after the divide instruction is also mapped. For item 2), the ++ compiler could make sure that the divide instruction is not in ++ the branch delay slot." ++ ++ These processors have PRId values of 0x00004220 and 0x00004300 for ++ the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */ ++ ++const char * ++loongarch_output_division (const char *division, rtx *operands) ++{ ++ const char *s; ++ ++ s = division; ++ if (TARGET_CHECK_ZERO_DIV) ++ { ++ output_asm_insn (s, operands); ++ s = "bne\t%2,%.,1f\n\tbreak\t7\n1:"; ++ } ++ return s; ++} ++ ++/* Return the assembly code for LSX DIV_{S,U}.DF or MOD_{S,U}.DF instructions, ++ which has the operands given by OPERANDS. Add in a divide-by-zero check ++ if needed. */ ++ ++const char * ++loongarch_lsx_output_division (const char *division, rtx *operands) ++{ ++ const char *s; ++ machine_mode mode = GET_MODE (*operands); ++ ++ s = division; ++ if (TARGET_CHECK_ZERO_DIV) ++ { ++ if(ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) ++ { ++ output_asm_insn ("xvsetallnez.%v0\t$fcc7,%u2",operands); ++ output_asm_insn (s, operands); ++ output_asm_insn ("bcnez\t$fcc7,1f", operands); ++ } ++ else if(ISA_HAS_LSX) ++ { ++ output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands); ++ output_asm_insn (s, operands); ++ output_asm_insn ("bcnez\t$fcc7,1f", operands); ++ } ++ s = "break\t7\n1:"; ++ } ++ return s; ++} ++ ++/* Return true if destination of IN_INSN is used as add source in ++ OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example: ++ madd.s dst, x, y, z ++ madd.s a, dst, b, c */ ++ ++bool ++loongarch_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn) ++{ ++ int dst_reg, src_reg; ++ ++ gcc_assert (get_attr_type (in_insn) == TYPE_FMADD); ++ gcc_assert (get_attr_type (out_insn) == TYPE_FMADD); ++ ++ extract_insn (in_insn); ++ dst_reg = REG_P (recog_data.operand[0]); ++ ++ extract_insn (out_insn); ++ src_reg = REG_P (recog_data.operand[1]); ++ ++ if (dst_reg == src_reg) ++ return true; ++ ++ return false; ++} ++ ++/* Return true if IN_INSN is a multiply-add or multiply-subtract ++ instruction and if OUT_INSN assigns to the accumulator operand. */ ++ ++bool ++loongarch_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn) ++{ ++ enum attr_accum_in accum_in; ++ int accum_in_opnum; ++ rtx accum_in_op; ++ ++ if (recog_memoized (in_insn) < 0) ++ return false; ++ ++ accum_in = get_attr_accum_in (in_insn); ++ if (accum_in == ACCUM_IN_NONE) ++ return false; ++ ++ accum_in_opnum = accum_in - ACCUM_IN_0; ++ ++ extract_insn (in_insn); ++ gcc_assert (accum_in_opnum < recog_data.n_operands); ++ accum_in_op = recog_data.operand[accum_in_opnum]; ++ ++ return reg_set_p (accum_in_op, out_insn); ++} ++ ++/* True if the dependency between OUT_INSN and IN_INSN is on the store ++ data rather than the address. 
We need this because the cprestore ++ pattern is type "store", but is defined using an UNSPEC_VOLATILE, ++ which causes the default routine to abort. We just return false ++ for that case. */ ++ ++bool ++loongarch_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn) ++{ ++ if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE) ++ return false; ++ ++ return store_data_bypass_p (out_insn, in_insn); ++} ++ ++ ++/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output ++ dependencies have no cost, except on the 20Kc where output-dependence ++ is treated like input-dependence. */ ++ ++static int ++loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned int) ++{ ++ if (dep_type != 0 && (dep_type != REG_DEP_OUTPUT)) ++ return 0; ++ return cost; ++} ++ ++/* Return the number of instructions that can be issued per cycle. */ ++ ++static int ++loongarch_issue_rate (void) ++{ ++ switch (loongarch_tune) ++ { ++ case PROCESSOR_LOONGARCH64: ++ case PROCESSOR_LA464: ++ return 4; ++ ++ default: ++ return 1; ++ } ++} ++ ++/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should ++ be as wide as the scheduling freedom in the DFA. */ ++ ++static int ++loongarch_multipass_dfa_lookahead (void) ++{ ++ if (TUNE_LOONGARCH64 || TUNE_LA464) ++ return 4; ++ ++ return 0; ++} ++ ++ ++static void ++loongarch_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, ++ int max_ready ATTRIBUTE_UNUSED) ++{ ++} ++ ++/* Implement TARGET_SCHED_REORDER. */ ++ ++static int ++loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, ++ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED) ++{ ++ return loongarch_issue_rate (); ++} ++ ++/* Implement TARGET_SCHED_REORDER2. */ ++ ++static int ++loongarch_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, ++ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED) ++{ ++ return cached_can_issue_more; ++} ++ ++/* Implement TARGET_SCHED_VARIABLE_ISSUE. */ ++ ++static int ++loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED, ++ rtx_insn *insn, int more) ++{ ++ /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */ ++ if (USEFUL_INSN_P (insn)) ++ { ++ if (get_attr_type (insn) != TYPE_GHOST) ++ more--; ++ } ++ ++ /* Instructions of type 'multi' should all be split before ++ the second scheduling pass. */ ++ gcc_assert (!reload_completed ++ || recog_memoized (insn) < 0 ++ || get_attr_type (insn) != TYPE_MULTI); ++ ++ cached_can_issue_more = more; ++ return more; ++} ++ ++/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY), ++ return the first operand of the associated PREF or PREFX insn. */ ++ ++rtx ++loongarch_prefetch_cookie (rtx write, rtx locality) ++{ ++ /* store_streamed / load_streamed. */ ++ if (INTVAL (locality) <= 0) ++ return GEN_INT (INTVAL (write) + 4); ++ ++ /* store / load. */ ++ if (INTVAL (locality) <= 2) ++ return write; ++ ++ /* store_retained / load_retained. */ ++ return GEN_INT (INTVAL (write) + 6); ++} ++ ++ ++/* Return whether CFG is used in loongarch_reorg. */ ++ ++static bool ++loongarch_cfg_in_reorg (void) ++{ ++ return (TARGET_RELAX_PIC_CALLS); ++} ++ ++/* If INSN is a call, return the underlying CALL expr. Return NULL_RTX ++ otherwise. If INSN has two call rtx, then store the second one in ++ SECOND_CALL. 
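++
++   Schematically (an illustration, not literal compiler output), a
++   call returning a complex value has the shape
++
++     (parallel [(set (reg ...) (call ...))
++		(set (reg ...) (call ...))])
++
++   and it is the second SET's CALL that ends up in SECOND_CALL.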
*/ ++ ++static rtx ++loongarch_call_expr_from_insn (rtx_insn *insn, rtx *second_call) ++{ ++ rtx x; ++ rtx x2; ++ ++ if (!CALL_P (insn)) ++ return NULL_RTX; ++ ++ x = PATTERN (insn); ++ if (GET_CODE (x) == PARALLEL) ++ { ++ /* Calls returning complex values have two CALL rtx. Look for the second ++ one here, and return it via the SECOND_CALL arg. */ ++ x2 = XVECEXP (x, 0, 1); ++ if (GET_CODE (x2) == SET) ++ x2 = XEXP (x2, 1); ++ if (GET_CODE (x2) == CALL) ++ *second_call = x2; ++ ++ x = XVECEXP (x, 0, 0); ++ } ++ if (GET_CODE (x) == SET) ++ x = XEXP (x, 1); ++ gcc_assert (GET_CODE (x) == CALL); ++ ++ return x; ++} ++ ++/* REG is set in DEF. See if the definition is one of the ways we load a ++ register with a symbol address for a loongarch_use_pic_fn_addr_reg_p call. ++ If it is, return the symbol reference of the function, otherwise return ++ NULL_RTX. ++ ++ If RECURSE_P is true, use loongarch_find_pic_call_symbol to interpret ++ the values of source registers, otherwise treat such registers as ++ having an unknown value. */ ++ ++static rtx ++loongarch_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p) ++{ ++ rtx_insn *def_insn; ++ rtx set; ++ ++ if (DF_REF_IS_ARTIFICIAL (def)) ++ return NULL_RTX; ++ ++ def_insn = DF_REF_INSN (def); ++ set = single_set (def_insn); ++ if (set && rtx_equal_p (SET_DEST (set), reg)) ++ { ++ rtx note, src, symbol; ++ ++ /* First see whether the source is a plain symbol. This is used ++ when calling symbols that are not lazily bound. */ ++ src = SET_SRC (set); ++ if (GET_CODE (src) == SYMBOL_REF) ++ return src; ++ ++ /* Handle %call16 references. */ ++ symbol = loongarch_strip_unspec_call (src); ++ if (symbol) ++ { ++ gcc_assert (GET_CODE (symbol) == SYMBOL_REF); ++ return symbol; ++ } ++ ++ /* If we have something more complicated, look for a ++ REG_EQUAL or REG_EQUIV note. */ ++ note = find_reg_equal_equiv_note (def_insn); ++ if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF) ++ return XEXP (note, 0); ++ ++ /* Follow at most one simple register copy. Such copies are ++ interesting in cases like: ++ ++ for (...) ++ { ++ locally_binding_fn (...); ++ } ++ ++ and: ++ ++ locally_binding_fn (...); ++ ... ++ locally_binding_fn (...); ++ ++ where the load of locally_binding_fn can legitimately be ++ hoisted or shared. However, we do not expect to see complex ++ chains of copies, so a full worklist solution to the problem ++ would probably be overkill. */ ++ if (recurse_p && REG_P (src)) ++ return loongarch_find_pic_call_symbol (def_insn, src, false); ++ } ++ ++ return NULL_RTX; ++} ++ ++/* Find the definition of the use of REG in INSN. See if the definition ++ is one of the ways we load a register with a symbol address for a ++ loongarch_use_pic_fn_addr_reg_p call. If it is return the symbol reference ++ of the function, otherwise return NULL_RTX. RECURSE_P is as for ++ loongarch_pic_call_symbol_from_set. */ ++ ++static rtx ++loongarch_find_pic_call_symbol (rtx_insn *insn, rtx reg, bool recurse_p) ++{ ++ df_ref use; ++ struct df_link *defs; ++ rtx symbol; ++ ++ use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]); ++ if (!use) ++ return NULL_RTX; ++ defs = DF_REF_CHAIN (use); ++ if (!defs) ++ return NULL_RTX; ++ symbol = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); ++ if (!symbol) ++ return NULL_RTX; ++ ++ /* If we have more than one definition, they need to be identical. 
*/ ++ for (defs = defs->next; defs; defs = defs->next) ++ { ++ rtx other; ++ ++ other = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); ++ if (!rtx_equal_p (symbol, other)) ++ return NULL_RTX; ++ } ++ ++ return symbol; ++} ++ ++/* Replace the args_size operand of the call expression CALL with the ++ call-attribute UNSPEC and fill in SYMBOL as the function symbol. */ ++ ++static void ++loongarch_annotate_pic_call_expr (rtx call, rtx symbol) ++{ ++ rtx args_size; ++ ++ args_size = XEXP (call, 1); ++ XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size), ++ gen_rtvec (2, args_size, symbol), ++ UNSPEC_CALL_ATTR); ++} ++ ++/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See ++ if instead of the arg_size argument it contains the call attributes. If ++ yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function ++ symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is ++ -1. */ ++ ++bool ++loongarch_get_pic_call_symbol (rtx *operands, int args_size_opno) ++{ ++ rtx args_size, symbol; ++ ++ if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1) ++ return false; ++ ++ args_size = operands[args_size_opno]; ++ if (GET_CODE (args_size) != UNSPEC) ++ return false; ++ gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR); ++ ++ symbol = XVECEXP (args_size, 0, 1); ++ gcc_assert (GET_CODE (symbol) == SYMBOL_REF); ++ ++ operands[args_size_opno] = symbol; ++ return true; ++} ++ ++/* Use DF to annotate PIC indirect calls with the function symbol they ++ dispatch to. */ ++ ++static void ++loongarch_annotate_pic_calls (void) ++{ ++ basic_block bb; ++ rtx_insn *insn; ++ ++ FOR_EACH_BB_FN (bb, cfun) ++ FOR_BB_INSNS (bb, insn) ++ { ++ rtx call, reg, symbol, second_call; ++ ++ second_call = 0; ++ call = loongarch_call_expr_from_insn (insn, &second_call); ++ if (!call) ++ continue; ++ gcc_assert (MEM_P (XEXP (call, 0))); ++ reg = XEXP (XEXP (call, 0), 0); ++ if (!REG_P (reg)) ++ continue; ++ ++ symbol = loongarch_find_pic_call_symbol (insn, reg, true); ++ if (symbol) ++ { ++ loongarch_annotate_pic_call_expr (call, symbol); ++ if (second_call) ++ loongarch_annotate_pic_call_expr (second_call, symbol); ++ } ++ } ++} ++ ++ ++/* A structure representing the state of the processor pipeline. ++ Used by the loongarch_sim_* family of functions. */ ++struct loongarch_sim { ++ /* The maximum number of instructions that can be issued in a cycle. ++ (Caches loongarch_issue_rate.) */ ++ unsigned int issue_rate; ++ ++ /* The current simulation time. */ ++ unsigned int time; ++ ++ /* How many more instructions can be issued in the current cycle. */ ++ unsigned int insns_left; ++ ++ /* LAST_SET[X].INSN is the last instruction to set register X. ++ LAST_SET[X].TIME is the time at which that instruction was issued. ++ INSN is null if no instruction has yet set register X. */ ++ struct { ++ rtx_insn *insn; ++ unsigned int time; ++ } last_set[FIRST_PSEUDO_REGISTER]; ++ ++ /* The pipeline's current DFA state. */ ++ state_t dfa_state; ++}; ++ ++/* Reset STATE to the initial simulation state. */ ++ ++static void ++loongarch_sim_reset (struct loongarch_sim *state) ++{ ++ curr_state = state->dfa_state; ++ ++ state->time = 0; ++ state->insns_left = state->issue_rate; ++ memset (&state->last_set, 0, sizeof (state->last_set)); ++ state_reset (curr_state); ++ ++ targetm.sched.init (0, false, 0); ++ advance_state (curr_state); ++} ++ ++/* Initialize STATE before its first use. DFA_STATE points to an ++ allocated but uninitialized DFA state. 
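++
++   The caller provides the DFA state buffer itself; for example,
++   loongarch_set_tuning_info below does
++
++     struct loongarch_sim state;
++     loongarch_sim_init (&state, alloca (state_size ()));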
*/ ++ ++static void ++loongarch_sim_init (struct loongarch_sim *state, state_t dfa_state) ++{ ++ if (targetm.sched.init_dfa_pre_cycle_insn) ++ targetm.sched.init_dfa_pre_cycle_insn (); ++ ++ if (targetm.sched.init_dfa_post_cycle_insn) ++ targetm.sched.init_dfa_post_cycle_insn (); ++ ++ state->issue_rate = loongarch_issue_rate (); ++ state->dfa_state = dfa_state; ++ loongarch_sim_reset (state); ++} ++ ++ ++ ++/* Set up costs based on the current architecture and tuning settings. */ ++ ++static void ++loongarch_set_tuning_info (void) ++{ ++ ++ loongarch_tuning_info.arch = loongarch_arch; ++ loongarch_tuning_info.tune = loongarch_tune; ++ loongarch_tuning_info.initialized_p = true; ++ ++ dfa_start (); ++ ++ struct loongarch_sim state; ++ loongarch_sim_init (&state, alloca (state_size ())); ++ ++ dfa_finish (); ++} ++ ++/* Implement TARGET_EXPAND_TO_RTL_HOOK. */ ++ ++static void ++loongarch_expand_to_rtl_hook (void) ++{ ++ /* We need to call this at a point where we can safely create sequences ++ of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also ++ need to call it at a point where the DFA infrastructure is not ++ already in use, so we can't just call it lazily on demand. ++ ++ At present, loongarch_tuning_info is only needed during post-expand ++ RTL passes such as split_insns, so this hook should be early enough. ++ We may need to move the call elsewhere if loongarch_tuning_info starts ++ to be used for other things (such as rtx_costs, or expanders that ++ could be called during gimple optimization). */ ++ loongarch_set_tuning_info (); ++} ++ ++/* This structure records that the current function has a LO_SUM ++ involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is ++ the largest offset applied to BASE by all such LO_SUMs. */ ++struct loongarch_lo_sum_offset { ++ rtx base; ++ HOST_WIDE_INT offset; ++}; ++ ++/* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */ ++ ++static hashval_t ++loongarch_hash_base (rtx base) ++{ ++ int do_not_record_p; ++ ++ return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false); ++} ++ ++/* Hashtable helpers. */ ++ ++struct loongarch_lo_sum_offset_hasher : free_ptr_hash <loongarch_lo_sum_offset> ++{ ++ typedef rtx_def *compare_type; ++ static inline hashval_t hash (const loongarch_lo_sum_offset *); ++ static inline bool equal (const loongarch_lo_sum_offset *, const rtx_def *); ++}; ++ ++/* Hash-table callbacks for loongarch_lo_sum_offsets. */ ++ ++inline hashval_t ++loongarch_lo_sum_offset_hasher::hash (const loongarch_lo_sum_offset *entry) ++{ ++ return loongarch_hash_base (entry->base); ++} ++ ++inline bool ++loongarch_lo_sum_offset_hasher::equal (const loongarch_lo_sum_offset *entry, ++ const rtx_def *value) ++{ ++ return rtx_equal_p (entry->base, value); ++} ++ ++typedef hash_table <loongarch_lo_sum_offset_hasher> loongarch_offset_table; ++ ++ ++/* Subroutine of loongarch_reorg to manage passes that require DF. */ ++ ++static void ++loongarch_df_reorg (void) ++{ ++ /* Create def-use chains. */ ++ df_set_flags (DF_EQ_NOTES); ++ df_chain_add_problem (DF_UD_CHAIN); ++ df_analyze (); ++ ++ if (TARGET_RELAX_PIC_CALLS) ++ loongarch_annotate_pic_calls (); ++ ++ df_finish_pass (false); ++} ++ ++ ++/* Implement TARGET_MACHINE_DEPENDENT_REORG. */ ++ ++static void ++loongarch_reorg (void) ++{ ++ /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. DF insn ++ info is only kept up to date if the CFG is available. 
*/ ++ if (loongarch_cfg_in_reorg ()) ++ compute_bb_for_insn (); ++ if (loongarch_cfg_in_reorg ()) ++ { ++ loongarch_df_reorg (); ++ free_bb_for_insn (); ++ } ++} ++ ++/* We use a machine specific pass to do a second machine dependent reorg ++ pass after delay branch scheduling. */ ++ ++static unsigned int ++loongarch_machine_reorg2 (void) ++{ ++// loongarch_insert_insn_pseudos (); ++ return 0; ++} ++ ++namespace { ++ ++const pass_data pass_data_loongarch_machine_reorg2 = ++{ ++ RTL_PASS, /* type */ ++ "mach2", /* name */ ++ OPTGROUP_NONE, /* optinfo_flags */ ++ TV_MACH_DEP, /* tv_id */ ++ 0, /* properties_required */ ++ 0, /* properties_provided */ ++ 0, /* properties_destroyed */ ++ 0, /* todo_flags_start */ ++ 0, /* todo_flags_finish */ ++}; ++ ++class pass_loongarch_machine_reorg2 : public rtl_opt_pass ++{ ++public: ++ pass_loongarch_machine_reorg2(gcc::context *ctxt) ++ : rtl_opt_pass(pass_data_loongarch_machine_reorg2, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual unsigned int execute (function *) { return loongarch_machine_reorg2 (); } ++ ++}; // class pass_loongarch_machine_reorg2 ++ ++} // anon namespace ++ ++rtl_opt_pass * ++make_pass_loongarch_machine_reorg2 (gcc::context *ctxt) ++{ ++ return new pass_loongarch_machine_reorg2 (ctxt); ++} ++ ++ ++/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text ++ in order to avoid duplicating too much logic from elsewhere. */ ++ ++static void ++loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, ++ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, ++ tree function) ++{ ++ rtx this_rtx, temp1, temp2, fnaddr; ++ rtx_insn *insn; ++ bool use_sibcall_p; ++ ++ /* Pretend to be a post-reload pass while generating rtl. */ ++ reload_completed = 1; ++ ++ /* Mark the end of the (empty) prologue. */ ++ emit_note (NOTE_INSN_PROLOGUE_END); ++ ++ /* Determine if we can use a sibcall to call FUNCTION directly. */ ++ fnaddr = XEXP (DECL_RTL (function), 0); ++ use_sibcall_p = (loongarch_function_ok_for_sibcall (function, NULL) ++ && const_call_insn_operand (fnaddr, Pmode)); ++ ++// /* Determine if we need to load FNADDR from the GOT. */ ++// if (!use_sibcall_p ++// && (loongarch_got_symbol_type_p ++// (loongarch_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA)))) ++// { ++// /* Pick a global pointer. Use a call-clobbered register if ++// TARGET_CALL_SAVED_GP. */ ++// cfun->machine->global_pointer ++// = GLOBAL_POINTER_REGNUM; ++// cfun->machine->must_initialize_gp_p = true; ++// SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer); ++// ++// /* Set up the global pointer for n32 or n64 abicalls. */ ++// loongarch_emit_loadgp (); ++// } ++ ++ /* We need two temporary registers in some cases. */ ++ temp1 = gen_rtx_REG (Pmode, 12); ++ temp2 = gen_rtx_REG (Pmode, 13); ++ ++ /* Find out which register contains the "this" pointer. */ ++ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) ++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); ++ else ++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); ++ ++ /* Add DELTA to THIS_RTX. */ ++ if (delta != 0) ++ { ++ rtx offset = GEN_INT (delta); ++ if (!SMALL_OPERAND (delta)) ++ { ++ loongarch_emit_move (temp1, offset); ++ offset = temp1; ++ } ++ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); ++ } ++ ++ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */ ++ if (vcall_offset != 0) ++ { ++ rtx addr; ++ ++ /* Set TEMP1 to *THIS_RTX. 
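++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   What the emitted thunk computes, written as plain C; the demo_* names
++   are invented.  The RTL below performs exactly these pointer
++   adjustments before jumping to the real function.  */
++#if 0
++static void *
++demo_thunk_adjust (void *this_ptr, long delta, long vcall_offset)
++{
++  char *p = (char *) this_ptr + delta;          /* add DELTA to THIS */
++  if (vcall_offset != 0)
++    {
++      char *vptr = *(char **) p;                /* TEMP1 = *THIS */
++      p += *(long *) (vptr + vcall_offset);     /* THIS += *(TEMP1 + off) */
++    }
++  return p;   /* ...then tail-call FUNCTION with the adjusted pointer.  */
++}
++#endif
++
++      /* Set TEMP1 to *THIS_RTX.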
*/ ++ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); ++ ++ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */ ++ addr = loongarch_add_offset (temp2, temp1, vcall_offset); ++ ++ /* Load the offset and add it to THIS_RTX. */ ++ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); ++ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); ++ } ++ ++ /* Jump to the target function. Use a sibcall if direct jumps are ++ allowed, otherwise load the address into a register first. */ ++ if (use_sibcall_p) ++ { ++ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); ++ SIBLING_CALL_P (insn) = 1; ++ } ++ else ++ { ++ loongarch_emit_move (temp1, fnaddr); ++ emit_jump_insn (gen_indirect_jump (temp1)); ++ } ++ ++ /* Run just enough of rest_of_compilation. This sequence was ++ "borrowed" from alpha.c. */ ++ insn = get_insns (); ++ split_all_insns_noflow (); ++ shorten_branches (insn); ++ final_start_function (insn, file, 1); ++ final (insn, file, 1); ++ final_end_function (); ++ ++ /* Clean up the vars set above. Note that final_end_function resets ++ the global pointer for us. */ ++ reload_completed = 0; ++} ++ ++ ++/* Allocate a chunk of memory for per-function machine-dependent data. */ ++ ++static struct machine_function * ++loongarch_init_machine_status (void) ++{ ++ return ggc_cleared_alloc <machine_function> (); ++} ++ ++/* Return the processor associated with the given ISA level, or null ++ if the ISA isn't valid. */ ++ ++static const struct loongarch_cpu_info * ++loongarch_cpu_info_from_isa (int isa) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) ++ if (loongarch_cpu_info_table[i].isa == isa) ++ return loongarch_cpu_info_table + i; ++ ++ return NULL; ++} ++ ++/* Return a loongarch_cpu_info entry determined by an option valued ++ OPT. */ ++ ++static const struct loongarch_cpu_info * ++loongarch_cpu_info_from_opt (int opt) ++{ ++ switch (opt) ++ { ++ case LARCH_ARCH_OPTION_NATIVE: ++ gcc_unreachable (); ++ ++ default: ++ return &loongarch_cpu_info_table[opt]; ++ } ++} ++ ++/* Return a default loongarch_cpu_info entry, given that no -march= option ++ was explicitly specified. */ ++ ++static const struct loongarch_cpu_info * ++loongarch_default_arch (void) ++{ ++#if defined (LARCH_CPU_STRING_DEFAULT) ++ unsigned int i; ++ for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) ++ if (strcmp (loongarch_cpu_info_table[i].name, LARCH_CPU_STRING_DEFAULT) == 0) ++ return loongarch_cpu_info_table + i; ++ gcc_unreachable (); ++#elif defined (LARCH_ISA_DEFAULT) ++ return loongarch_cpu_info_from_isa (LARCH_ISA_DEFAULT); ++#else ++ gcc_unreachable (); ++#endif ++} ++ ++/* Set up globals to generate code for the ISA or processor ++ described by INFO. */ ++ ++static void ++loongarch_set_architecture (const struct loongarch_cpu_info *info) ++{ ++ if (info != 0) ++ { ++ loongarch_arch_info = info; ++ loongarch_arch = info->cpu; ++ loongarch_isa = info->isa; ++ if (loongarch_isa < 32) ++ loongarch_isa_rev = 0; ++ else ++ loongarch_isa_rev = (loongarch_isa & 31) + 1; ++ } ++} ++ ++/* Likewise for tuning. */ ++ ++static void ++loongarch_set_tune (const struct loongarch_cpu_info *info) ++{ ++ if (info != 0) ++ { ++ loongarch_tune_info = info; ++ loongarch_tune = info->cpu; ++ } ++} ++ ++/* Implement TARGET_OPTION_OVERRIDE. 
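++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   The -march=/-mtune= handling above is a pair of linear table scans.
++   A self-contained model; the demo_* names and the ISA numbers in the
++   table entries are invented for illustration.  */
++#if 0
++#include <stddef.h>
++#include <string.h>
++
++struct demo_cpu_info { const char *name; int isa; };
++
++static const struct demo_cpu_info demo_cpu_table[] = {
++  { "loongarch64", 64 },   /* hypothetical entries */
++  { "la464",       64 },
++};
++
++/* Mirrors the strcmp scan in loongarch_default_arch.  */
++static const struct demo_cpu_info *
++demo_cpu_from_name (const char *name)
++{
++  for (size_t i = 0; i < sizeof demo_cpu_table / sizeof demo_cpu_table[0]; i++)
++    if (strcmp (demo_cpu_table[i].name, name) == 0)
++      return &demo_cpu_table[i];
++  return NULL;
++}
++#endif
++
++/* Implement TARGET_OPTION_OVERRIDE.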
*/ ++ ++static void ++loongarch_option_override (void) ++{ ++ int i, start, regno, mode; ++ ++#ifdef SUBTARGET_OVERRIDE_OPTIONS ++ SUBTARGET_OVERRIDE_OPTIONS; ++#endif ++ ++ ++ /* -mno-float overrides -mhard-float and -msoft-float. */ ++ if (TARGET_NO_FLOAT) ++ { ++ target_flags |= MASK_SOFT_FLOAT_ABI; ++ target_flags_explicit |= MASK_SOFT_FLOAT_ABI; ++ } ++ ++ ++ /* Set the small data limit. */ ++ loongarch_small_data_threshold = (global_options_set.x_g_switch_value ++ ? g_switch_value ++ : LARCH_DEFAULT_GVALUE); ++ ++ /* The following code determines the architecture and register size. ++ Similar code was added to GAS 2.14 (see tc-loongarch.c:md_after_parse_args()). ++ The GAS and GCC code should be kept in sync as much as possible. */ ++ ++ if (global_options_set.x_loongarch_arch_option) ++ loongarch_set_architecture (loongarch_cpu_info_from_opt (loongarch_arch_option)); ++ ++ if (loongarch_arch_info == 0) ++ loongarch_set_architecture (loongarch_default_arch ()); ++ ++ /* Optimize for loongarch_arch, unless -mtune selects a different processor. */ ++ if (global_options_set.x_loongarch_tune_option) ++ loongarch_set_tune (loongarch_cpu_info_from_opt (loongarch_tune_option)); ++ ++ if (loongarch_tune_info == 0) ++ loongarch_set_tune (loongarch_arch_info); ++ ++ if ((target_flags_explicit & MASK_64BIT) == 0) ++ { ++ /* Infer the integer register size from the ABI and processor. ++ Restrict ourselves to 32-bit registers if that's all the ++ processor has, or if the ABI cannot handle 64-bit registers. */ ++ if (loongarch_abi == ABILP32) ++ target_flags &= ~MASK_64BIT; ++ else ++ target_flags |= MASK_64BIT; ++ } ++ ++ if ((target_flags_explicit & MASK_FLOAT64) != 0) ++ { ++ if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64) ++ error ("unsupported combination: %s", "-mfp64 -msingle-float"); ++ } ++ else ++ { ++ /* -msingle-float selects 32-bit float registers. On r6 and later, ++ -mdouble-float selects 64-bit float registers, since the old paired ++ register model is not supported. In other cases the float registers ++ should be the same size as the integer ones. */ ++ if (TARGET_64BIT && TARGET_DOUBLE_FLOAT) ++ target_flags |= MASK_FLOAT64; ++ else if (loongarch_abi == ABILP32 && ISA_HAS_LSX) ++ target_flags |= MASK_FLOAT64; ++ else ++ target_flags &= ~MASK_FLOAT64; ++ } ++ ++ /* End of code shared with GAS. */ ++ ++ if (!TARGET_OLDABI) ++ flag_pcc_struct_return = 0; ++ ++ /* Decide which rtx_costs structure to use. */ ++ if (optimize_size) ++ loongarch_cost = &loongarch_rtx_cost_optimize_size; ++ else ++ loongarch_cost = &loongarch_rtx_cost_data[loongarch_tune]; ++ ++ /* If the user hasn't specified a branch cost, use the processor's ++ default. */ ++ if (loongarch_branch_cost == 0) ++ loongarch_branch_cost = loongarch_cost->branch_cost; ++ ++ /* Prefer a call to memcpy over inline code when optimizing for size, ++ though see MOVE_RATIO in loongarch.h. */ ++ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0) ++ target_flags |= MASK_MEMCPY; ++ ++ /* If we have a nonzero small-data limit, check that the -mgpopt ++ setting is consistent with the other target flags. */ ++ if (loongarch_small_data_threshold > 0) ++ { ++ if (TARGET_VXWORKS_RTP) ++ warning (0, "cannot use small-data accesses for %qs", "-mrtp"); ++ } ++ ++ /* Make sure that when ISA_HAS_LSX is true, TARGET_FLOAT64 and ++ TARGET_HARD_FLOAT_ABI and both true. 
*/ ++ if (ISA_HAS_LSX && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI)) ++ error ("%<-mlsx%> must be used with %<-mfp64%> and %<-mhard-float%>"); ++ ++ /* If TARGET_LASX, enable TARGET_LSX. */ ++ if (TARGET_LASX) ++ target_flags |= MASK_LSX; ++ ++ /* .cfi_* directives generate a read-only section, so fall back on ++ manual .eh_frame creation if we need the section to be writable. */ ++ if (TARGET_WRITABLE_EH_FRAME) ++ flag_dwarf2_cfi_asm = 0; ++ ++ loongarch_init_print_operand_punct (); ++ ++ /* Set up array to map GCC register number to debug register number. ++ Ignore the special purpose register numbers. */ ++ ++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ { ++ loongarch_dbx_regno[i] = IGNORED_DWARF_REGNUM; ++ if (GP_REG_P (i) || FP_REG_P (i)) ++ loongarch_dwarf_regno[i] = i; ++ else ++ loongarch_dwarf_regno[i] = INVALID_REGNUM; ++ } ++ ++ start = GP_DBX_FIRST - GP_REG_FIRST; ++ for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++) ++ loongarch_dbx_regno[i] = i + start; ++ ++ start = FP_DBX_FIRST - FP_REG_FIRST; ++ for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++) ++ loongarch_dbx_regno[i] = i + start; ++ ++ /* Set up loongarch_hard_regno_mode_ok. */ ++ for (mode = 0; mode < MAX_MACHINE_MODE; mode++) ++ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) ++ loongarch_hard_regno_mode_ok_p[mode][regno] ++ = loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode); ++ ++ /* Function to allocate machine-dependent function status. */ ++ init_machine_status = &loongarch_init_machine_status; ++ target_flags &= ~MASK_RELAX_PIC_CALLS; ++ ++ /* We register a second machine specific reorg pass after delay slot ++ filling. Registering the pass must be done at start up. It's ++ convenient to do it here. */ ++ opt_pass *new_pass = make_pass_loongarch_machine_reorg2 (g); ++ struct register_pass_info insert_pass_loongarch_machine_reorg2 = ++ { ++ new_pass, /* pass */ ++ "dbr", /* reference_pass_name */ ++ 1, /* ref_pass_instance_number */ ++ PASS_POS_INSERT_AFTER /* po_op */ ++ }; ++ register_pass (&insert_pass_loongarch_machine_reorg2); ++ ++ loongarch_register_frame_header_opt (); ++} ++ ++ ++/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ ++ ++static void ++loongarch_conditional_register_usage (void) ++{ ++ if (!TARGET_HARD_FLOAT) ++ { ++ AND_COMPL_HARD_REG_SET (accessible_reg_set, ++ reg_class_contents[(int) FP_REGS]); ++ AND_COMPL_HARD_REG_SET (accessible_reg_set, ++ reg_class_contents[(int) ST_REGS]); ++ } ++} ++ ++/* Implement EH_USES. */ ++ ++bool ++loongarch_eh_uses (unsigned int regno) ++{ ++ return false; ++} ++ ++/* Implement EPILOGUE_USES. */ ++ ++bool ++loongarch_epilogue_uses (unsigned int regno) ++{ ++ /* Say that the epilogue uses the return address register. Note that ++ in the case of sibcalls, the values "used by the epilogue" are ++ considered live at the start of the called function. */ ++ if (regno == RETURN_ADDR_REGNUM) ++ return true; ++ ++ /* An interrupt handler must preserve some registers that are ++ ordinarily call-clobbered. */ ++ if (cfun->machine->interrupt_handler_p ++ && loongarch_interrupt_extra_call_saved_reg_p (regno)) ++ return true; ++ ++ return false; ++} ++ ++/* Return true if MEM1 and MEM2 use the same base register, and the ++ offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the ++ register into (from) which the contents of MEM1 will be loaded ++ (stored), depending on the value of LOAD_P. ++ SWAP_P is true when the 1st and 2nd instructions are swapped. 
*/ ++ ++static bool ++loongarch_load_store_pair_p_1 (bool load_p, bool swap_p, ++ rtx first_reg, rtx mem1, rtx mem2) ++{ ++ rtx base1, base2; ++ HOST_WIDE_INT offset1, offset2; ++ ++ if (!MEM_P (mem1) || !MEM_P (mem2)) ++ return false; ++ ++ loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); ++ loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); ++ ++ if (!REG_P (base1) || !rtx_equal_p (base1, base2)) ++ return false; ++ ++ /* Avoid invalid load pair instructions. */ ++ if (load_p && REGNO (first_reg) == REGNO (base1)) ++ return false; ++ ++ /* We must avoid this case for anti-dependence. ++ Ex: lw $3, 4($3) ++ lw $2, 0($3) ++ first_reg is $2, but the base is $3. */ ++ if (load_p ++ && swap_p ++ && REGNO (first_reg) + 1 == REGNO (base1)) ++ return false; ++ ++ if (offset2 != offset1 + 4) ++ return false; ++ ++ if (!ULARCH_12BIT_OFFSET_P (offset1)) ++ return false; ++ ++ return true; ++} ++ ++bool ++loongarch_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p) ++{ ++ rtx reg1, reg2, mem1, mem2, base1, base2; ++ enum reg_class rc1, rc2; ++ HOST_WIDE_INT offset1, offset2; ++ ++ if (load_p) ++ { ++ reg1 = operands[0]; ++ reg2 = operands[2]; ++ mem1 = operands[1]; ++ mem2 = operands[3]; ++ } ++ else ++ { ++ reg1 = operands[1]; ++ reg2 = operands[3]; ++ mem1 = operands[0]; ++ mem2 = operands[2]; ++ } ++ ++ if (loongarch_address_insns (XEXP (mem1, 0), mode, false) == 0 ++ || loongarch_address_insns (XEXP (mem2, 0), mode, false) == 0) ++ return false; ++ ++ loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); ++ loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); ++ ++ /* Base regs do not match. */ ++ if (!REG_P (base1) || !rtx_equal_p (base1, base2)) ++ return false; ++ ++ /* Either of the loads is clobbering base register. It is legitimate to bond ++ loads if second load clobbers base register. However, hardware does not ++ support such bonding. */ ++ if (load_p ++ && (REGNO (reg1) == REGNO (base1) ++ || (REGNO (reg2) == REGNO (base1)))) ++ return false; ++ ++ /* Loading in same registers. */ ++ if (load_p ++ && REGNO (reg1) == REGNO (reg2)) ++ return false; ++ ++ /* The loads/stores are not of same type. */ ++ rc1 = REGNO_REG_CLASS (REGNO (reg1)); ++ rc2 = REGNO_REG_CLASS (REGNO (reg2)); ++ if (rc1 != rc2 ++ && !reg_class_subset_p (rc1, rc2) ++ && !reg_class_subset_p (rc2, rc1)) ++ return false; ++ ++ if (abs (offset1 - offset2) != GET_MODE_SIZE (mode)) ++ return false; ++ ++ return true; ++} ++ ++/* OPERANDS describes the operands to a pair of SETs, in the order ++ dest1, src1, dest2, src2. Return true if the operands can be used ++ in an LWP or SWP instruction; LOAD_P says which. */ ++ ++bool ++loongarch_load_store_pair_p (bool load_p, rtx *operands) ++{ ++ rtx reg1, reg2, mem1, mem2; ++ ++ if (load_p) ++ { ++ reg1 = operands[0]; ++ reg2 = operands[2]; ++ mem1 = operands[1]; ++ mem2 = operands[3]; ++ } ++ else ++ { ++ reg1 = operands[1]; ++ reg2 = operands[3]; ++ mem1 = operands[0]; ++ mem2 = operands[2]; ++ } ++ ++ if (REGNO (reg2) == REGNO (reg1) + 1) ++ return loongarch_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2); ++ ++ if (REGNO (reg1) == REGNO (reg2) + 1) ++ return loongarch_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1); ++ ++ return false; ++} ++ ++/* Return true if REG1 and REG2 match the criteria for a movep insn. 
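++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   The pairing test above reduces to simple offset arithmetic.  A scalar
++   model, assuming ULARCH_12BIT_OFFSET_P accepts unsigned 12-bit offsets
++   (an assumption; the demo_* names are invented).  */
++#if 0
++#include <stdbool.h>
++
++static bool
++demo_pairable_p (int base1, int base2, long off1, long off2)
++{
++  if (base1 != base2)          /* must share one base register */
++    return false;
++  if (off2 != off1 + 4)        /* second word directly follows the first */
++    return false;
++  return off1 >= 0 && off1 < 4096;   /* cf. ULARCH_12BIT_OFFSET_P */
++}
++#endif
++
++/* Return true if REG1 and REG2 match the criteria for a movep insn.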
*/ ++ ++bool ++loongarch_movep_target_p (rtx reg1, rtx reg2) ++{ ++ int regno1, regno2, pair; ++ unsigned int i; ++ static const int match[8] = { ++ 0x00000060, /* 5, 6 */ ++ 0x000000a0, /* 5, 7 */ ++ 0x000000c0, /* 6, 7 */ ++ 0x00200010, /* 4, 21 */ ++ 0x00400010, /* 4, 22 */ ++ 0x00000030, /* 4, 5 */ ++ 0x00000050, /* 4, 6 */ ++ 0x00000090 /* 4, 7 */ ++ }; ++ ++ if (!REG_P (reg1) || !REG_P (reg2)) ++ return false; ++ ++ regno1 = REGNO (reg1); ++ regno2 = REGNO (reg2); ++ ++ if (!GP_REG_P (regno1) || !GP_REG_P (regno2)) ++ return false; ++ ++ pair = (1 << regno1) | (1 << regno2); ++ ++ for (i = 0; i < ARRAY_SIZE (match); i++) ++ if (pair == match[i]) ++ return true; ++ ++ return false; ++} ++ ++/* Return the size in bytes of the trampoline code, padded to ++ TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target ++ function address immediately follow. */ ++ ++int ++loongarch_trampoline_code_size (void) ++{ ++ return 4 * 4; ++} ++ ++/* Implement TARGET_TRAMPOLINE_INIT. */ ++ ++static void ++loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) ++{ ++ rtx addr, end_addr, high, low, opcode, mem; ++ rtx trampoline[8]; ++ unsigned int i, j; ++ HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset; ++ ++ /* Work out the offsets of the pointers from the start of the ++ trampoline code. */ ++ end_addr_offset = loongarch_trampoline_code_size (); ++ static_chain_offset = end_addr_offset; ++ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); ++ ++ /* Get pointers to the beginning and end of the code block. */ ++ addr = force_reg (Pmode, XEXP (m_tramp, 0)); ++ end_addr = loongarch_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset)); ++ ++#define OP(X) gen_int_mode (X, SImode) ++ ++ /* Build up the code in TRAMPOLINE. */ ++ i = 0; ++ /* ++ pcaddi $static_chain,0 ++ ld.[dw] $tmp,$static_chain,target_function_offset ++ ld.[dw] $static_chain,$static_chain,static_chain_offset ++ jirl $r0,$tmp,0 ++ */ ++ trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST)); ++ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) ++ | 19 /* $t7 */ ++ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) ++ | ((target_function_offset & 0xfff) << 10)); ++ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) ++ | (STATIC_CHAIN_REGNUM - GP_REG_FIRST) ++ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) ++ | ((static_chain_offset & 0xfff) << 10)); ++ trampoline[i++] = OP (0x4c000000 | (19 << 5)); ++#undef OP ++ ++ for (j = 0; j < i; j++) ++ { ++ mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); ++ loongarch_emit_move (mem, trampoline[j]); ++ } ++ ++ /* Set up the static chain pointer field. */ ++ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); ++ loongarch_emit_move (mem, chain_value); ++ ++ /* Set up the target function field. */ ++ mem = adjust_address (m_tramp, ptr_mode, target_function_offset); ++ loongarch_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); ++ ++ /* Flush the code part of the trampoline. */ ++ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); ++ emit_insn (gen_clear_cache (addr, end_addr)); ++} ++ ++ ++/* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default ++ behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even ++ when TARGET_LOONGSON_MMI is true. 
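++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   The trampoline built above is four 4-byte instructions followed by two
++   pointer-sized data slots; the offsets fall out of the arithmetic in
++   loongarch_trampoline_init.  The demo_* name is invented.  */
++#if 0
++#include <stdio.h>
++
++static void
++demo_trampoline_layout (int ptr_size)
++{
++  int code_size  = 4 * 4;                 /* loongarch_trampoline_code_size */
++  int chain_off  = code_size;             /* static_chain_offset */
++  int target_off = chain_off + ptr_size;  /* target_function_offset */
++  printf ("code bytes 0..%d, chain at %d, target at %d\n",
++          code_size - 1, chain_off, target_off);
++}
++/* demo_trampoline_layout (8) prints: code bytes 0..15, chain at 16,
++   target at 24 -- the 64-bit layout.  */
++#endif
++
++/* Implement TARGET_SHIFT_TRUNCATION_MASK.  We want to keep the default
++   behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
++   when TARGET_LOONGSON_MMI is true.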
*/ ++ ++static unsigned HOST_WIDE_INT ++loongarch_shift_truncation_mask (machine_mode mode) ++{ ++ return GET_MODE_BITSIZE (mode) - 1; ++} ++ ++ ++/* Generate or test for an insn that supports a constant permutation. */ ++ ++#define MAX_VECT_LEN 32 ++ ++struct expand_vec_perm_d ++{ ++ rtx target, op0, op1; ++ unsigned char perm[MAX_VECT_LEN]; ++ machine_mode vmode; ++ unsigned char nelt; ++ bool one_vector_p; ++ bool testing_p; ++}; ++ ++/* Construct (set target (vec_select op0 (parallel perm))) and ++ return true if that's a valid instruction in the active ISA. */ ++ ++static bool ++loongarch_expand_vselect (rtx target, rtx op0, ++ const unsigned char *perm, unsigned nelt) ++{ ++ rtx rperm[MAX_VECT_LEN], x; ++ rtx_insn *insn; ++ unsigned i; ++ ++ for (i = 0; i < nelt; ++i) ++ rperm[i] = GEN_INT (perm[i]); ++ ++ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm)); ++ x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x); ++ x = gen_rtx_SET (target, x); ++ ++ insn = emit_insn (x); ++ if (recog_memoized (insn) < 0) ++ { ++ remove_insn (insn); ++ return false; ++ } ++ return true; ++} ++ ++/* Similar, but generate a vec_concat from op0 and op1 as well. */ ++ ++static bool ++loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1, ++ const unsigned char *perm, unsigned nelt) ++{ ++ machine_mode v2mode; ++ rtx x; ++ ++ if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode)) ++ return false; ++ x = gen_rtx_VEC_CONCAT (v2mode, op0, op1); ++ return loongarch_expand_vselect (target, x, perm, nelt); ++} ++ ++/* Construct (set target (vec_select op0 (parallel selector))) and ++ return true if that's a valid instruction in the active ISA. */ ++ ++static bool ++loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) ++{ ++ rtx x, elts[MAX_VECT_LEN]; ++ rtvec v; ++ rtx_insn *insn; ++ unsigned i; ++ ++ if (!ISA_HAS_LSX && !ISA_HAS_LASX) ++ return false; ++ ++ for (i = 0; i < d->nelt; i++) ++ elts[i] = GEN_INT (d->perm[i]); ++ ++ v = gen_rtvec_v (d->nelt, elts); ++ x = gen_rtx_PARALLEL (VOIDmode, v); ++ ++ if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) ++ return false; ++ ++ x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); ++ x = gen_rtx_SET (d->target, x); ++ ++ insn = emit_insn (x); ++ if (recog_memoized (insn) < 0) ++ { ++ remove_insn (insn); ++ return false; ++ } ++ return true; ++} ++ ++static bool ++loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) ++{ ++ unsigned int i, nelt = d->nelt; ++ unsigned char perm2[MAX_VECT_LEN]; ++ ++ if (d->one_vector_p) ++ { ++ /* Try interleave with alternating operands. */ ++ memcpy (perm2, d->perm, sizeof(perm2)); ++ for (i = 1; i < nelt; i += 2) ++ perm2[i] += nelt; ++ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt)) ++ return true; ++ } ++ else ++ { ++ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, ++ d->perm, nelt)) ++ return true; ++ ++ /* Try again with swapped operands. */ ++ for (i = 0; i < nelt; ++i) ++ perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); ++ if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt)) ++ return true; ++ } ++ ++ if (loongarch_expand_lsx_shuffle (d)) ++ return true; ++ return false; ++} ++ ++/* Implement TARGET_VECTORIZE_VEC_PERM_CONST. 
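++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   The fallback strategy above rewrites the permutation indices; the
++   sketch below mirrors the "try again with swapped operands" loop.  For
++   nelt == 4, {0,5,2,7} becomes {4,1,6,3}.  The demo_* name is
++   invented.  */
++#if 0
++static void
++demo_swap_operands (unsigned char *perm2, const unsigned char *perm,
++                    unsigned int nelt)
++{
++  for (unsigned int i = 0; i < nelt; i++)
++    /* Indices < nelt select op0, indices >= nelt select op1; adding
++       nelt modulo 2*nelt flips each reference to the other operand.  */
++    perm2[i] = (perm[i] + nelt) & (2 * nelt - 1);
++}
++#endif
++
++/* Implement TARGET_VECTORIZE_VEC_PERM_CONST.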
*/ ++ ++static bool ++loongarch_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, ++ rtx op1, const vec_perm_indices &sel) ++{ ++ struct expand_vec_perm_d d; ++ int i, nelt, which; ++ unsigned char orig_perm[MAX_VECT_LEN]; ++ bool ok; ++ ++ d.target = target; ++ d.op0 = op0; ++ d.op1 = op1; ++ ++ d.vmode = vmode; ++ gcc_assert (VECTOR_MODE_P (vmode)); ++ d.nelt = nelt = GET_MODE_NUNITS (vmode); ++ d.testing_p = !target; ++ ++ /* This is overly conservative, but ensures we don't get an ++ uninitialized warning on ORIG_PERM. */ ++ memset (orig_perm, 0, MAX_VECT_LEN); ++ for (i = which = 0; i < nelt; ++i) ++ { ++ int ei = sel[i] & (2 * nelt - 1); ++ which |= (ei < nelt ? 1 : 2); ++ orig_perm[i] = ei; ++ } ++ memcpy (d.perm, orig_perm, MAX_VECT_LEN); ++ ++ switch (which) ++ { ++ default: ++ gcc_unreachable(); ++ ++ case 3: ++ d.one_vector_p = false; ++ if (d.testing_p || !rtx_equal_p (d.op0, d.op1)) ++ break; ++ /* FALLTHRU */ ++ ++ case 2: ++ for (i = 0; i < nelt; ++i) ++ d.perm[i] &= nelt - 1; ++ d.op0 = d.op1; ++ d.one_vector_p = true; ++ break; ++ ++ case 1: ++ d.op1 = d.op0; ++ d.one_vector_p = true; ++ break; ++ } ++ ++ if (d.testing_p) ++ { ++ d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1); ++ d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2); ++ if (!d.one_vector_p) ++ d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); ++ ++ start_sequence (); ++ ok = loongarch_expand_vec_perm_const_1 (&d); ++ end_sequence (); ++ return ok; ++ } ++ ++ ok = loongarch_expand_vec_perm_const_1 (&d); ++ ++ /* If we were given a two-vector permutation which just happened to ++ have both input vectors equal, we folded this into a one-vector ++ permutation. There are several loongson patterns that are matched ++ via direct vec_select+vec_concat expansion, but we do not have ++ support in loongarch_expand_vec_perm_const_1 to guess the adjustment ++ that should be made for a single operand. Just try again with ++ the original permutation. */ ++ if (!ok && which == 3) ++ { ++ d.op0 = op0; ++ d.op1 = op1; ++ d.one_vector_p = false; ++ memcpy (d.perm, orig_perm, MAX_VECT_LEN); ++ ok = loongarch_expand_vec_perm_const_1 (&d); ++ } ++ ++ return ok; ++} ++ ++/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. */ ++ ++static int ++loongarch_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED, ++ machine_mode mode) ++{ ++ switch (loongarch_tune) ++ { ++ case PROCESSOR_LOONGARCH64: ++ case PROCESSOR_LA464: ++ /* Vector part. */ ++ if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)) ++ { ++ /* Integer vector instructions execute in FP unit. ++ The width of integer/float-point vector instructions is 3. */ ++ return 3; ++ } ++ ++ /* Scalar part. */ ++ else if (INTEGRAL_MODE_P (mode)) ++ return 1; ++ else if (FLOAT_MODE_P (mode)) ++ return 4; ++ break; ++ default: ++ break; ++ } ++ return 1; ++} ++ ++/* Expand an integral vector unpack operation. 
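++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   In the LSX path below, signed widening works by building a "sign
++   vector" (compare each element against zero, giving 0 or all-ones) and
++   interleaving it with the data, so every result element is the widened
++   input.  A scalar model for the low half of 4 x int16 -> 2 x int32;
++   the demo_* name is invented.  */
++#if 0
++#include <stdint.h>
++
++static void
++demo_unpack_lo_signed (int32_t out[2], const int16_t in[4])
++{
++  for (int i = 0; i < 2; i++)
++    {
++      uint16_t sign = in[i] < 0 ? 0xffff : 0;         /* vslt.h against 0 */
++      out[i] = (int32_t) ((uint32_t) (uint16_t) in[i]
++                          | ((uint32_t) sign << 16)); /* vilvl.h */
++    }
++}
++/* e.g. in = {-5, 7, ...} gives out = {-5, 7}.  */
++#endif
++
++/* Expand an integral vector unpack operation.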
*/ ++ ++void ++loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) ++{ ++ machine_mode imode = GET_MODE (operands[1]); ++ rtx (*unpack) (rtx, rtx, rtx); ++ rtx (*extend) (rtx, rtx); ++ rtx (*cmpFunc) (rtx, rtx, rtx); ++ rtx (*swap_hi_lo) (rtx, rtx, rtx, rtx); ++ rtx tmp, dest, zero; ++ machine_mode halfmode = BLKmode; ++ ++ if (ISA_HAS_LASX && GET_MODE_SIZE (imode) == 32) ++ { ++ switch (imode) ++ { ++ ++ case E_V8SImode: ++ if (unsigned_p) ++ extend = gen_lasx_vext2xv_du_wu; ++ else ++ extend = gen_lasx_vext2xv_d_w; ++ swap_hi_lo = gen_lasx_xvpermi_q_v8si; ++ break; ++ ++ case E_V16HImode: ++ if (unsigned_p) ++ extend = gen_lasx_vext2xv_wu_hu; ++ else ++ extend = gen_lasx_vext2xv_w_h; ++ swap_hi_lo = gen_lasx_xvpermi_q_v16hi; ++ break; ++ ++ case E_V32QImode: ++ if (unsigned_p) ++ extend = gen_lasx_vext2xv_hu_bu; ++ else ++ extend = gen_lasx_vext2xv_h_b; ++ swap_hi_lo = gen_lasx_xvpermi_q_v32qi; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ if (high_p) ++ { ++ tmp = gen_reg_rtx (imode); ++ emit_insn (swap_hi_lo (tmp, tmp, operands[1], const1_rtx)); ++ emit_insn(extend (operands[0], tmp)); ++ return; ++ } ++ ++ emit_insn(extend (operands[0], operands[1])); ++ return; ++ ++ } ++ else if (ISA_HAS_LSX) ++ { ++ switch (imode) ++ { ++ case E_V4SImode: ++ if (high_p != 0) ++ unpack = gen_lsx_vilvh_w; ++ else ++ unpack = gen_lsx_vilvl_w; ++ ++ cmpFunc = gen_lsx_vslt_w; ++ break; ++ ++ case E_V8HImode: ++ if (high_p != 0) ++ unpack = gen_lsx_vilvh_h; ++ else ++ unpack = gen_lsx_vilvl_h; ++ ++ cmpFunc = gen_lsx_vslt_h; ++ break; ++ ++ case E_V16QImode: ++ if (high_p != 0) ++ unpack = gen_lsx_vilvh_b; ++ else ++ unpack = gen_lsx_vilvl_b; ++ ++ cmpFunc = gen_lsx_vslt_b; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ if (!unsigned_p) ++ { ++ /* Extract sign extention for each element comparing each element ++ with immediate zero. */ ++ tmp = gen_reg_rtx (imode); ++ emit_insn (cmpFunc (tmp, operands[1], CONST0_RTX (imode))); ++ } ++ else ++ tmp = force_reg (imode, CONST0_RTX (imode)); ++ ++ dest = gen_reg_rtx (imode); ++ ++ emit_insn (unpack (dest, operands[1], tmp)); ++ emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest)); ++ return; ++ } ++ gcc_unreachable (); ++} ++ ++/* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE) ++ or LOW (high_p == FALSE) half of a vector for mode MODE. */ ++ ++rtx ++loongarch_lsx_vec_parallel_const_half (machine_mode mode, bool high_p) ++{ ++ int nunits = GET_MODE_NUNITS (mode); ++ rtvec v = rtvec_alloc (nunits / 2); ++ int base; ++ int i; ++ ++ base = high_p ? nunits / 2 : 0; ++ ++ for (i = 0; i < nunits / 2; i++) ++ RTVEC_ELT (v, i) = GEN_INT (base + i); ++ ++ return gen_rtx_PARALLEL (VOIDmode, v); ++} ++ ++/* A subroutine of loongarch_expand_vec_init, match constant vector elements. */ ++ ++static inline bool ++loongarch_constant_elt_p (rtx x) ++{ ++ return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE; ++} ++ ++rtx ++loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val) ++{ ++ int nunits = GET_MODE_NUNITS (mode); ++ int nsets = nunits / 4; ++ rtx elts[MAX_VECT_LEN]; ++ int set = 0; ++ int i, j; ++ ++ /* Generate a const_int vector replicating the same 4-element set ++ from an immediate. */ ++ for (j = 0; j < nsets; j++, set = 4 * j) ++ for (i = 0; i < 4; i++) ++ elts[set + i] = GEN_INT (set + ((val >> (2 * i)) & 0x3)); ++ ++ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, elts)); ++} ++ ++ ++/* Expand a vector initialization. 
*/ ++ ++void ++loongarch_expand_vector_init (rtx target, rtx vals) ++{ ++ machine_mode vmode = GET_MODE (target); ++ machine_mode imode = GET_MODE_INNER (vmode); ++ unsigned i, nelt = GET_MODE_NUNITS (vmode); ++ unsigned nvar = 0, one_var = -1u; ++ bool all_same = true; ++ rtx x; ++ ++ for (i = 0; i < nelt; ++i) ++ { ++ x = XVECEXP (vals, 0, i); ++ if (!loongarch_constant_elt_p (x)) ++ nvar++, one_var = i; ++ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) ++ all_same = false; ++ } ++ ++ if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) ++ { ++ if (all_same) ++ { ++ rtx same = XVECEXP (vals, 0, 0); ++ rtx temp, temp2; ++ ++ if (CONST_INT_P (same) && nvar == 0 ++ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) ++ { ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); ++ emit_move_insn (target, temp); ++ return; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ temp = gen_reg_rtx (imode); ++ if (imode == GET_MODE (same)) ++ temp2 = same; ++ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) ++ { ++ if(GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); ++ } ++ else ++ temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); ++ } ++ else ++ { ++ if(GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); ++ } ++ else ++ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); ++ } ++ emit_move_insn (temp, temp2); ++ ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); ++ break; ++ ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); ++ break; ++ ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ rtvec vec = shallow_copy_rtvec (XVEC (vals, 0)); ++ ++ for (i = 0; i < nelt; ++i) ++ RTVEC_ELT (vec, i) = CONST0_RTX (imode); ++ ++ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec)); ++ ++ machine_mode half_mode = VOIDmode; ++ rtx target_hi, target_lo; ++ ++ switch (vmode) ++ { ++ case E_V32QImode: ++ half_mode=E_V16QImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ emit_insn (gen_vec_setv16qi (target_hi, temp_hi, GEN_INT (i))); ++ emit_insn (gen_vec_setv16qi (target_lo, temp_lo, GEN_INT (i))); ++ } ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); ++ break; ++ ++ case E_V16HImode: ++ half_mode=E_V8HImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ emit_insn (gen_vec_setv8hi (target_hi, temp_hi, GEN_INT (i))); ++ emit_insn (gen_vec_setv8hi (target_lo, temp_lo, GEN_INT (i))); ++ } ++ emit_insn (gen_rtx_SET 
(target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); ++ break; ++ ++ case E_V8SImode: ++ half_mode=V4SImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ emit_insn (gen_vec_setv4si (target_hi, temp_hi, GEN_INT (i))); ++ emit_insn (gen_vec_setv4si (target_lo, temp_lo, GEN_INT (i))); ++ } ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); ++ break; ++ ++ case E_V4DImode: ++ half_mode=E_V2DImode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ emit_insn (gen_vec_setv2di (target_hi, temp_hi, GEN_INT (i))); ++ emit_insn (gen_vec_setv2di (target_lo, temp_lo, GEN_INT (i))); ++ } ++ /* PUT_MODE(target_hi, GET_MODE (target)); */ ++ /* PUT_MODE(target_lo, GET_MODE (target)); */ ++ /* emit_insn ( gen_lasx_shufi_q_v4di (target_hi, target_lo, GEN_INT(1))); */ ++ /* emit_move_insn (target, target_hi); */ ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); ++ break; ++ ++ case E_V8SFmode: ++ half_mode=E_V4SFmode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ emit_insn (gen_vec_setv4sf (target_hi, temp_hi, GEN_INT (i))); ++ emit_insn (gen_vec_setv4sf (target_lo, temp_lo, GEN_INT (i))); ++ } ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); ++ break; ++ ++ case E_V4DFmode: ++ half_mode=E_V2DFmode; ++ target_hi = gen_reg_rtx (half_mode); ++ target_lo = gen_reg_rtx (half_mode); ++ for (i = 0; i < nelt/2; ++i) ++ { ++ rtx temp_hi = gen_reg_rtx (imode); ++ rtx temp_lo = gen_reg_rtx (imode); ++ emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); ++ emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ emit_insn (gen_vec_setv2df (target_hi, temp_hi, GEN_INT (i))); ++ emit_insn (gen_vec_setv2df (target_lo, temp_lo, GEN_INT (i))); ++ } ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); ++ break; ++ ++ default: ++ gcc_unreachable(); ++ } ++ ++ } ++ return; ++ } ++ ++ if (ISA_HAS_LSX) ++ { ++ if (all_same) ++ { ++ rtx same = XVECEXP (vals, 0, 0); ++ rtx temp, temp2; ++ ++ if (CONST_INT_P (same) && nvar == 0 ++ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) ++ { ++ switch (vmode) ++ { ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); ++ emit_move_insn (target, temp); ++ return; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ temp = gen_reg_rtx (imode); ++ if (imode == GET_MODE (same)) ++ temp2 = same; ++ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) ++ { ++ if(GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); ++ } ++ else ++ temp2 = simplify_gen_subreg (imode, same, 
GET_MODE (same), 0); ++ } ++ else ++ { ++ if(GET_CODE (same) == MEM) ++ { ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); ++ } ++ else ++ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); ++ } ++ emit_move_insn (temp, temp2); ++ ++ switch (vmode) ++ { ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); ++ break; ++ ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); ++ break; ++ ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ emit_move_insn (target, CONST0_RTX (vmode)); ++ ++ for (i = 0; i < nelt; ++i) ++ { ++ rtx temp = gen_reg_rtx (imode); ++ emit_move_insn (temp, XVECEXP (vals, 0, i)); ++ switch (vmode) ++ { ++ case E_V16QImode: ++ emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V8HImode: ++ emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V4SImode: ++ emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V2DImode: ++ emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V4SFmode: ++ emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i))); ++ break; ++ ++ case E_V2DFmode: ++ emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i))); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ } ++ return; ++ } ++ ++ /* Load constants from the pool, or whatever's handy. */ ++ if (nvar == 0) ++ { ++ emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0))); ++ return; ++ } ++ ++ /* For two-part initialization, always use CONCAT. */ ++ if (nelt == 2) ++ { ++ rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0)); ++ rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1)); ++ x = gen_rtx_VEC_CONCAT (vmode, op0, op1); ++ emit_insn (gen_rtx_SET (target, x)); ++ return; ++ } ++ ++ /* Loongson is the only cpu with vectors with more elements. */ ++ gcc_assert (0); ++} ++ ++/* Implement HARD_REGNO_CALLER_SAVE_MODE. */ ++ ++machine_mode ++loongarch_hard_regno_caller_save_mode (unsigned int regno, ++ unsigned int nregs, ++ machine_mode mode) ++{ ++ /* For performance, avoid saving/restoring upper parts of a register ++ by returning MODE as save mode when the mode is known. */ ++ if (mode == VOIDmode) ++ return choose_hard_reg_mode (regno, nregs, false); ++ else ++ return mode; ++} ++ ++/* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and ++ store the result -1 or 0 in DEST. 
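++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   The integer canonicalisation below keeps only EQ/LT/LE (and their
++   unsigned forms) as machine compares: GT/GE swap their operands and NE
++   becomes an inverted EQ.  Scalar model producing all-ones/zero masks;
++   the demo_* names are invented.  */
++#if 0
++#include <stdint.h>
++
++static int32_t
++demo_cmp_gt (int32_t a, int32_t b)
++{
++  return (b < a) ? -1 : 0;     /* GT a,b == LT b,a (operands swapped) */
++}
++
++static int32_t
++demo_cmp_ne (int32_t a, int32_t b)
++{
++  int32_t eq = (a == b) ? -1 : 0;
++  return ~eq;                  /* NE == NOT (EQ), the "negate" path */
++}
++#endif
++
++/* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and
++   store the result -1 or 0 in DEST.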
*/ ++ ++static void ++loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1) ++{ ++ machine_mode cmp_mode = GET_MODE (op0); ++ int unspec = -1; ++ bool negate = false; ++ ++ switch (cmp_mode) ++ { ++ case E_V16QImode: ++ case E_V32QImode: ++ case E_V8HImode: ++ case E_V16HImode: ++ case E_V4SImode: ++ case E_V8SImode: ++ case E_V2DImode: ++ case E_V4DImode: ++ switch (cond) ++ { ++ case NE: ++ cond = reverse_condition (cond); ++ negate = true; ++ break; ++ case EQ: ++ case LT: ++ case LE: ++ case LTU: ++ case LEU: ++ break; ++ case GE: ++ case GT: ++ case GEU: ++ case GTU: ++ std::swap (op0, op1); ++ cond = swap_condition (cond); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ loongarch_emit_binary (cond, dest, op0, op1); ++ if (negate) ++ emit_move_insn (dest, gen_rtx_NOT (GET_MODE (dest), dest)); ++ break; ++ ++ case E_V4SFmode: ++ case E_V2DFmode: ++ switch (cond) ++ { ++ case UNORDERED: ++ case ORDERED: ++ case EQ: ++ case NE: ++ case UNEQ: ++ case UNLE: ++ case UNLT: ++ break; ++ case LTGT: cond = NE; break; ++ case UNGE: cond = UNLE; std::swap (op0, op1); break; ++ case UNGT: cond = UNLT; std::swap (op0, op1); break; ++ case LE: unspec = UNSPEC_LSX_VFCMP_SLE; break; ++ case LT: unspec = UNSPEC_LSX_VFCMP_SLT; break; ++ case GE: unspec = UNSPEC_LSX_VFCMP_SLE; std::swap (op0, op1); break; ++ case GT: unspec = UNSPEC_LSX_VFCMP_SLT; std::swap (op0, op1); break; ++ default: ++ gcc_unreachable (); ++ } ++ if (unspec < 0) ++ loongarch_emit_binary (cond, dest, op0, op1); ++ else ++ { ++ rtx x = gen_rtx_UNSPEC (GET_MODE (dest), ++ gen_rtvec (2, op0, op1), unspec); ++ emit_insn (gen_rtx_SET (dest, x)); ++ } ++ break; ++ ++ case E_V8SFmode: ++ case E_V4DFmode: ++ switch (cond) ++ { ++ case UNORDERED: ++ case ORDERED: ++ case EQ: ++ case NE: ++ case UNEQ: ++ case UNLE: ++ case UNLT: ++ break; ++ case LTGT: cond = NE; break; ++ case UNGE: cond = UNLE; std::swap (op0, op1); break; ++ case UNGT: cond = UNLT; std::swap (op0, op1); break; ++ case LE: unspec = UNSPEC_LASX_XVFCMP_SLE; break; ++ case LT: unspec = UNSPEC_LASX_XVFCMP_SLT; break; ++ case GE: unspec = UNSPEC_LASX_XVFCMP_SLE; std::swap (op0, op1); break; ++ case GT: unspec = UNSPEC_LASX_XVFCMP_SLT; std::swap (op0, op1); break; ++ default: ++ gcc_unreachable (); ++ } ++ if (unspec < 0) ++ loongarch_emit_binary (cond, dest, op0, op1); ++ else ++ { ++ rtx x = gen_rtx_UNSPEC (GET_MODE (dest), ++ gen_rtvec (2, op0, op1), unspec); ++ emit_insn (gen_rtx_SET (dest, x)); ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ break; ++ } ++} ++ ++/* Expand VEC_COND_EXPR, where: ++ MODE is mode of the result ++ VIMODE equivalent integer mode ++ OPERANDS operands of VEC_COND_EXPR. */ ++ ++void ++loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, ++ rtx *operands) ++{ ++ rtx cond = operands[3]; ++ rtx cmp_op0 = operands[4]; ++ rtx cmp_op1 = operands[5]; ++ rtx cmp_res = gen_reg_rtx (vimode); ++ ++ loongarch_expand_lsx_cmp (cmp_res, GET_CODE (cond), cmp_op0, cmp_op1); ++ ++ /* We handle the following cases: ++ 1) r = a CMP b ? -1 : 0 ++ 2) r = a CMP b ? -1 : v ++ 3) r = a CMP b ? v : 0 ++ 4) r = a CMP b ? v1 : v2 */ ++ ++ /* Case (1) above. We only move the results. */ ++ if (operands[1] == CONSTM1_RTX (vimode) ++ && operands[2] == CONST0_RTX (vimode)) ++ emit_move_insn (operands[0], cmp_res); ++ else ++ { ++ rtx src1 = gen_reg_rtx (vimode); ++ rtx src2 = gen_reg_rtx (vimode); ++ rtx mask = gen_reg_rtx (vimode); ++ rtx bsel; ++ ++ /* Move the vector result to use it as a mask. 
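++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   The mask-based copy built a few lines below is the classic bitwise
++   select: result = (mask & src1) | (~mask & src2), i.e. take src1 where
++   the comparison was true and src2 elsewhere.  demo_bsel is an invented
++   name.  */
++#if 0
++#include <stdint.h>
++
++static uint32_t
++demo_bsel (uint32_t mask, uint32_t src1, uint32_t src2)
++{
++  return (mask & src1) | (~mask & src2);
++}
++#endif
++
++      /* Move the vector result to use it as a mask.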
*/ ++ emit_move_insn (mask, cmp_res); ++ ++ if (register_operand (operands[1], mode)) ++ { ++ rtx xop1 = operands[1]; ++ if (mode != vimode) ++ { ++ xop1 = gen_reg_rtx (vimode); ++ emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); ++ } ++ emit_move_insn (src1, xop1); ++ } ++ else ++ { ++ gcc_assert (operands[1] == CONSTM1_RTX (vimode)); ++ /* Case (2) if the below doesn't move the mask to src2. */ ++ emit_move_insn (src1, mask); ++ } ++ ++ if (register_operand (operands[2], mode)) ++ { ++ rtx xop2 = operands[2]; ++ if (mode != vimode) ++ { ++ xop2 = gen_reg_rtx (vimode); ++ emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); ++ } ++ emit_move_insn (src2, xop2); ++ } ++ else ++ { ++ gcc_assert (operands[2] == CONST0_RTX (mode)); ++ /* Case (3) if the above didn't move the mask to src1. */ ++ emit_move_insn (src2, mask); ++ } ++ ++ /* We deal with case (4) if the mask wasn't moved to either src1 or src2. ++ In any case, we eventually do vector mask-based copy. */ ++ bsel = gen_rtx_IOR (vimode, ++ gen_rtx_AND (vimode, ++ gen_rtx_NOT (vimode, mask), src2), ++ gen_rtx_AND (vimode, mask, src1)); ++ /* The result is placed back to a register with the mask. */ ++ emit_insn (gen_rtx_SET (mask, bsel)); ++ emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); ++ } ++} ++ ++/* Expand integer vector comparison */ ++bool ++loongarch_expand_int_vec_cmp(rtx operands[]) ++{ ++ ++ rtx_code code = GET_CODE (operands[1]); ++ loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); ++ return true; ++} ++ ++/* Expand integer vector comparison */ ++bool ++loongarch_expand_fp_vec_cmp(rtx operands[]) ++{ ++ rtx_code code = GET_CODE (operands[1]); ++ loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); ++ return true; ++} ++ ++ ++/* Implement TARGET_CASE_VALUES_THRESHOLD. */ ++ ++unsigned int ++loongarch_case_values_threshold (void) ++{ ++ return default_case_values_threshold (); ++} ++ ++ ++/* Implement TARGET_SPILL_CLASS. */ ++ ++static reg_class_t ++loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED, ++ machine_mode mode ATTRIBUTE_UNUSED) ++{ ++ return NO_REGS; ++} ++ ++/* Implement TARGET_LRA_P. */ ++ ++static bool ++loongarch_lra_p (void) ++{ ++ return loongarch_lra_flag; ++} ++ ++/* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS. */ ++ ++static reg_class_t ++loongarch_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class, ++ reg_class_t best_class ATTRIBUTE_UNUSED) ++{ ++ /* LRA will allocate an FPR for an integer mode pseudo instead of spilling ++ to memory if an FPR is present in the allocno class. It is rare that ++ we actually need to place an integer mode value in an FPR so where ++ possible limit the allocation to GR_REGS. This will slightly pessimize ++ code that involves integer to/from float conversions as these will have ++ to reload into FPRs in LRA. Such reloads are sometimes eliminated and ++ sometimes only partially eliminated. We choose to take this penalty ++ in order to eliminate usage of FPRs in code that does not use floating ++ point data. ++ ++ This change has a similar effect to increasing the cost of FPR->GPR ++ register moves for integer modes so that they are higher than the cost ++ of memory but changing the allocno class is more reliable. ++ ++ This is also similar to forbidding integer mode values in FPRs entirely ++ but this would lead to an inconsistency in the integer to/from float ++ instructions that say integer mode values must be placed in FPRs. 
*/ ++ if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno)) && allocno_class == ALL_REGS) ++ return GR_REGS; ++ return allocno_class; ++} ++ ++/* Implement TARGET_PROMOTE_FUNCTION_MODE. */ ++ ++/* This function is equivalent to default_promote_function_mode_always_promote ++ except that it returns a promoted mode even if type is NULL_TREE. This is ++ needed by libcalls which have no type (only a mode) such as fixed conversion ++ routines that take a signed or unsigned char/short argument and convert it ++ to a fixed type. */ ++ ++static machine_mode ++loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, ++ machine_mode mode, ++ int *punsignedp ATTRIBUTE_UNUSED, ++ const_tree fntype ATTRIBUTE_UNUSED, ++ int for_return ATTRIBUTE_UNUSED) ++{ ++ int unsignedp; ++ ++ if (type != NULL_TREE) ++ return promote_mode (type, mode, punsignedp); ++ ++ unsignedp = *punsignedp; ++ PROMOTE_MODE (mode, unsignedp, type); ++ *punsignedp = unsignedp; ++ return mode; ++} ++ ++/* Implement TARGET_TRULY_NOOP_TRUNCATION. */ ++ ++static bool ++loongarch_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec) ++{ ++ return !TARGET_64BIT || inprec <= 32 || outprec > 32; ++} ++ ++/* Implement TARGET_CONSTANT_ALIGNMENT. */ ++ ++static HOST_WIDE_INT ++loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align) ++{ ++ if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR) ++ return MAX (align, BITS_PER_WORD); ++ return align; ++} ++ ++/* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info ++ for details about the frame layout. */ ++ ++static HOST_WIDE_INT ++loongarch_starting_frame_offset (void) ++{ ++ if (FRAME_GROWS_DOWNWARD) ++ return 0; ++ return crtl->outgoing_args_size; ++} ++ ++/* Loongson EXT test for LA464: return true if a 128-bit store pair's ++ memory offsets are legal. */ ++ ++bool ++loongarch_la464_128_store_p (rtx operands[]) ++{ ++ int offset0; ++ int offset1; ++ rtx dst0 = operands[0]; ++ rtx dst1 = operands[2]; ++ rtx src0 = operands[1]; ++ rtx src1 = operands[3]; ++ int base_reg0; ++ int base_reg1; ++ ++ if (GET_CODE (XEXP (dst0, 0)) == PLUS) ++ { ++ offset0 = XINT (XEXP (XEXP (dst0, 0), 1), 0); ++ base_reg0 = REGNO (XEXP (XEXP (dst0, 0), 0)); ++ } ++ else if (GET_CODE (XEXP (dst0, 0)) == MINUS) ++ { ++ offset0 = XINT (XEXP (XEXP (dst0, 0), 1), 0); ++ base_reg0 = REGNO (XEXP (XEXP (dst0, 0), 0)); ++ } ++ else ++ { ++ offset0 = 0; ++ base_reg0 = REGNO (XEXP (dst0, 0)); ++ } ++ ++ if (GET_CODE (XEXP (dst1, 0)) == PLUS) ++ { ++ offset1 = XINT (XEXP (XEXP (dst1, 0), 1), 0); ++ base_reg1 = REGNO (XEXP (XEXP (dst1, 0), 0)); ++ } ++ else if (GET_CODE (XEXP (dst1, 0)) == MINUS) ++ { ++ offset1 = XINT (XEXP (XEXP (dst1, 0), 1), 0); ++ base_reg1 = REGNO (XEXP (XEXP (dst1, 0), 0)); ++ } ++ else ++ { ++ offset1 = 0; ++ base_reg1 = REGNO (XEXP (dst1, 0)); ++ } ++ ++ if (base_reg0 != base_reg1) ++ return false; ++ ++ if (offset1 % 16 != 0) ++ { ++ /* store offset is not aligned. */ ++ return false; ++ } ++ ++ if (offset0 - offset1 != 8) ++ { ++ /* store offset difference is not 8. */ ++ return false; ++ } ++ ++ if (offset1 > 4095 || offset1 < -4096) ++ { ++ /* store offset out of range. 
*/ ++ return false; ++ } ++ ++ return true; ++} ++ ++bool ++loongarch_la464_128_load_p (rtx operands[]) ++{ ++ int offset0; ++ int offset1; ++ rtx dst0 = operands[0]; ++ rtx dst1 = operands[2]; ++ rtx src0 = operands[1]; ++ rtx src1 = operands[3]; ++ int base_reg0; ++ int base_reg1; ++ int dst_reg0; ++ ++ dst_reg0 = REGNO (dst0); ++ ++ if (GET_CODE (XEXP (src0, 0)) == PLUS) ++ { ++ offset0 = XINT (XEXP (XEXP (src0, 0), 1), 0); ++ base_reg0 = REGNO (XEXP (XEXP (src0, 0), 0)); ++ } ++ else if (GET_CODE (XEXP (src0, 0)) == MINUS) ++ { ++ offset0 = XINT (XEXP (XEXP (src0, 0), 1), 0); ++ base_reg0 = REGNO (XEXP (XEXP (src0, 0), 0)); ++ } ++ else ++ { ++ offset0 = 0; ++ base_reg0 = REGNO (XEXP (src0, 0)); ++ } ++ ++ if (GET_CODE (XEXP (src1, 0)) == PLUS) ++ { ++ offset1 = XINT (XEXP (XEXP (src1, 0), 1), 0); ++ base_reg1 = REGNO (XEXP (XEXP (src1, 0), 0)); ++ } ++ else if (GET_CODE (XEXP (src1, 0)) == MINUS) ++ { ++ offset1 = XINT (XEXP (XEXP (src1, 0), 1), 0); ++ base_reg1 = REGNO (XEXP (XEXP (src1, 0), 0)); ++ } ++ else ++ { ++ offset1 = 0; ++ base_reg1 = REGNO (XEXP (src1, 0)); ++ } ++ ++ if (base_reg0 != base_reg1) ++ return false; ++ ++ /* Do not pair if the first load clobbers the shared base register. */ ++ if (base_reg0 == dst_reg0) ++ return false; ++ ++ if (offset1 % 16 != 0) ++ { ++ /* load offset is not aligned. */ ++ return false; ++ } ++ ++ if (offset0 - offset1 != 8) ++ { ++ /* load offset difference is not 8. */ ++ return false; ++ } ++ ++ if (offset1 > 4095 || offset1 < -4096) ++ { ++ /* load offset out of range. */ ++ return false; ++ } ++ ++ return true; ++} ++ ++/* A subroutine of loongarch_build_signbit_mask. If VECT is true, ++ then replicate the value for all elements of the vector ++ register. */ ++ ++rtx ++loongarch_build_const_vector (machine_mode mode, bool vect, rtx value) ++{ ++ int i, n_elt; ++ rtvec v; ++ machine_mode scalar_mode; ++ ++ switch (mode) ++ { ++ case E_V64QImode: ++ case E_V32QImode: ++ case E_V16QImode: ++ case E_V32HImode: ++ case E_V16HImode: ++ case E_V8HImode: ++ case E_V16SImode: ++ case E_V8SImode: ++ case E_V4SImode: ++ case E_V8DImode: ++ case E_V4DImode: ++ case E_V2DImode: ++ gcc_assert (vect); ++ /* FALLTHRU */ ++ case E_V16SFmode: ++ case E_V8SFmode: ++ case E_V4SFmode: ++ case E_V8DFmode: ++ case E_V4DFmode: ++ case E_V2DFmode: ++ n_elt = GET_MODE_NUNITS (mode); ++ v = rtvec_alloc (n_elt); ++ scalar_mode = GET_MODE_INNER (mode); ++ ++ RTVEC_ELT (v, 0) = value; ++ ++ for (i = 1; i < n_elt; ++i) ++ RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode); ++ ++ return gen_rtx_CONST_VECTOR (mode, v); ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* Create a mask for the sign bit in MODE ++ for a vector register. If VECT is true, then replicate the mask for ++ all elements of the vector register. If INVERT is true, then create ++ a mask excluding the sign bit. 
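++ */
++
++/* [Editorial sketch -- illustrative only, not part of the original patch.
++   For 32-bit elements the two masks built below are 0x80000000 (the
++   sign bit only) and its complement 0x7fffffff; demo_signbit_mask32 is
++   an invented name.  */
++#if 0
++#include <stdint.h>
++
++static uint32_t
++demo_signbit_mask32 (int invert)
++{
++  uint32_t m = (uint32_t) 1 << 31;   /* wi::set_bit_in_zero (31, 32) */
++  return invert ? ~m : m;            /* wi::bit_not when INVERT */
++}
++#endif
++
++/* Create a mask for the sign bit in MODE for a vector register.  If VECT
++   is true, then replicate the mask for all elements of the vector
++   register.  If INVERT is true, then create a mask excluding the sign
++   bit.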
*/ ++ ++rtx ++loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) ++{ ++ machine_mode vec_mode, imode; ++ wide_int w; ++ rtx mask, v; ++ ++ switch (mode) ++ { ++ case E_V16SImode: ++ case E_V16SFmode: ++ case E_V8SImode: ++ case E_V4SImode: ++ case E_V8SFmode: ++ case E_V4SFmode: ++ vec_mode = mode; ++ imode = SImode; ++ break; ++ ++ case E_V8DImode: ++ case E_V4DImode: ++ case E_V2DImode: ++ case E_V8DFmode: ++ case E_V4DFmode: ++ case E_V2DFmode: ++ vec_mode = mode; ++ imode = DImode; ++ break; ++ ++ case E_TImode: ++ case E_TFmode: ++ vec_mode = VOIDmode; ++ imode = TImode; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ machine_mode inner_mode = GET_MODE_INNER (mode); ++ w = wi::set_bit_in_zero (GET_MODE_BITSIZE (inner_mode) - 1, ++ GET_MODE_BITSIZE (inner_mode)); ++ if (invert) ++ w = wi::bit_not (w); ++ ++ /* Force this value into the low part of a fp vector constant. */ ++ mask = immed_wide_int_const (w, imode); ++ mask = gen_lowpart (inner_mode, mask); ++ ++ if (vec_mode == VOIDmode) ++ return force_reg (inner_mode, mask); ++ ++ v = loongarch_build_const_vector (vec_mode, vect, mask); ++ return force_reg (vec_mode, v); ++} ++ ++ ++ ++/* Initialize the GCC target structure. */ ++#undef TARGET_ASM_ALIGNED_HI_OP ++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" ++#undef TARGET_ASM_ALIGNED_SI_OP ++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" ++#undef TARGET_ASM_ALIGNED_DI_OP ++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t" ++ ++#undef TARGET_OPTION_OVERRIDE ++#define TARGET_OPTION_OVERRIDE loongarch_option_override ++ ++#undef TARGET_LEGITIMIZE_ADDRESS ++#define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address ++ ++#undef TARGET_ASM_FUNCTION_PROLOGUE ++#define TARGET_ASM_FUNCTION_PROLOGUE loongarch_output_function_prologue ++#undef TARGET_ASM_FUNCTION_EPILOGUE ++#define TARGET_ASM_FUNCTION_EPILOGUE loongarch_output_function_epilogue ++#undef TARGET_ASM_SELECT_RTX_SECTION ++#define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section ++#undef TARGET_ASM_FUNCTION_RODATA_SECTION ++#define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section ++ ++#undef TARGET_SCHED_INIT ++#define TARGET_SCHED_INIT loongarch_sched_init ++#undef TARGET_SCHED_REORDER ++#define TARGET_SCHED_REORDER loongarch_sched_reorder ++#undef TARGET_SCHED_REORDER2 ++#define TARGET_SCHED_REORDER2 loongarch_sched_reorder2 ++#undef TARGET_SCHED_VARIABLE_ISSUE ++#define TARGET_SCHED_VARIABLE_ISSUE loongarch_variable_issue ++#undef TARGET_SCHED_ADJUST_COST ++#define TARGET_SCHED_ADJUST_COST loongarch_adjust_cost ++#undef TARGET_SCHED_ISSUE_RATE ++#define TARGET_SCHED_ISSUE_RATE loongarch_issue_rate ++#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ++#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ ++ loongarch_multipass_dfa_lookahead ++#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P ++#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \ ++ loongarch_small_register_classes_for_mode_p ++ ++#undef TARGET_FUNCTION_OK_FOR_SIBCALL ++#define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall ++ ++#undef TARGET_INSERT_ATTRIBUTES ++#define TARGET_INSERT_ATTRIBUTES loongarch_insert_attributes ++#undef TARGET_MERGE_DECL_ATTRIBUTES ++#define TARGET_MERGE_DECL_ATTRIBUTES loongarch_merge_decl_attributes ++#undef TARGET_CAN_INLINE_P ++#define TARGET_CAN_INLINE_P loongarch_can_inline_p ++ ++#undef TARGET_VALID_POINTER_MODE ++#define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode ++#undef TARGET_REGISTER_MOVE_COST ++#define TARGET_REGISTER_MOVE_COST 
loongarch_register_move_cost ++#undef TARGET_MEMORY_MOVE_COST ++#define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost ++#undef TARGET_RTX_COSTS ++#define TARGET_RTX_COSTS loongarch_rtx_costs ++#undef TARGET_ADDRESS_COST ++#define TARGET_ADDRESS_COST loongarch_address_cost ++#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST ++#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ ++ loongarch_builtin_vectorization_cost ++ ++ ++#undef TARGET_IN_SMALL_DATA_P ++#define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p ++ ++#undef TARGET_MACHINE_DEPENDENT_REORG ++#define TARGET_MACHINE_DEPENDENT_REORG loongarch_reorg ++ ++#undef TARGET_PREFERRED_RELOAD_CLASS ++#define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class ++ ++#undef TARGET_EXPAND_TO_RTL_HOOK ++#define TARGET_EXPAND_TO_RTL_HOOK loongarch_expand_to_rtl_hook ++#undef TARGET_ASM_FILE_START ++#define TARGET_ASM_FILE_START loongarch_file_start ++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE ++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true ++ ++#undef TARGET_EXPAND_BUILTIN_VA_START ++#define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start ++ ++#undef TARGET_PROMOTE_FUNCTION_MODE ++#define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode ++#undef TARGET_RETURN_IN_MEMORY ++#define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory ++ ++#undef TARGET_ASM_OUTPUT_MI_THUNK ++#define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk ++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK ++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true ++ ++#undef TARGET_PRINT_OPERAND ++#define TARGET_PRINT_OPERAND loongarch_print_operand ++#undef TARGET_PRINT_OPERAND_ADDRESS ++#define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address ++#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P ++#define TARGET_PRINT_OPERAND_PUNCT_VALID_P loongarch_print_operand_punct_valid_p ++ ++#undef TARGET_SETUP_INCOMING_VARARGS ++#define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs ++#undef TARGET_STRICT_ARGUMENT_NAMING ++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true ++#undef TARGET_MUST_PASS_IN_STACK ++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size ++#undef TARGET_PASS_BY_REFERENCE ++#define TARGET_PASS_BY_REFERENCE loongarch_pass_by_reference ++#undef TARGET_ARG_PARTIAL_BYTES ++#define TARGET_ARG_PARTIAL_BYTES loongarch_arg_partial_bytes ++#undef TARGET_FUNCTION_ARG ++#define TARGET_FUNCTION_ARG loongarch_function_arg ++#undef TARGET_FUNCTION_ARG_ADVANCE ++#define TARGET_FUNCTION_ARG_ADVANCE loongarch_function_arg_advance ++#undef TARGET_FUNCTION_ARG_BOUNDARY ++#define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary ++ ++#undef TARGET_VECTOR_MODE_SUPPORTED_P ++#define TARGET_VECTOR_MODE_SUPPORTED_P loongarch_vector_mode_supported_p ++ ++#undef TARGET_SCALAR_MODE_SUPPORTED_P ++#define TARGET_SCALAR_MODE_SUPPORTED_P loongarch_scalar_mode_supported_p ++ ++#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE ++#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE loongarch_preferred_simd_mode ++ ++#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES ++#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \ ++ loongarch_autovectorize_vector_sizes ++ ++#undef TARGET_INIT_BUILTINS ++#define TARGET_INIT_BUILTINS loongarch_init_builtins ++#undef TARGET_BUILTIN_DECL ++#define TARGET_BUILTIN_DECL loongarch_builtin_decl ++#undef TARGET_EXPAND_BUILTIN ++#define TARGET_EXPAND_BUILTIN loongarch_expand_builtin ++ ++#undef TARGET_HAVE_TLS ++#define TARGET_HAVE_TLS HAVE_AS_TLS ++ ++#undef 
TARGET_CANNOT_FORCE_CONST_MEM ++#define TARGET_CANNOT_FORCE_CONST_MEM loongarch_cannot_force_const_mem ++ ++#undef TARGET_LEGITIMATE_CONSTANT_P ++#define TARGET_LEGITIMATE_CONSTANT_P loongarch_legitimate_constant_p ++ ++#undef TARGET_ENCODE_SECTION_INFO ++#define TARGET_ENCODE_SECTION_INFO loongarch_encode_section_info ++ ++#undef TARGET_ATTRIBUTE_TABLE ++#define TARGET_ATTRIBUTE_TABLE loongarch_attribute_table ++/* All our function attributes are related to how out-of-line copies should ++ be compiled or called. They don't in themselves prevent inlining. */ ++#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P ++#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true ++ ++#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P ++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P loongarch_use_blocks_for_constant_p ++#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P ++#define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p ++ ++#undef TARGET_COMP_TYPE_ATTRIBUTES ++#define TARGET_COMP_TYPE_ATTRIBUTES loongarch_comp_type_attributes ++ ++#ifdef HAVE_AS_DTPRELWORD ++#undef TARGET_ASM_OUTPUT_DWARF_DTPREL ++#define TARGET_ASM_OUTPUT_DWARF_DTPREL loongarch_output_dwarf_dtprel ++#endif ++#undef TARGET_DWARF_REGISTER_SPAN ++#define TARGET_DWARF_REGISTER_SPAN loongarch_dwarf_register_span ++#undef TARGET_DWARF_FRAME_REG_MODE ++#define TARGET_DWARF_FRAME_REG_MODE loongarch_dwarf_frame_reg_mode ++ ++#undef TARGET_LEGITIMATE_ADDRESS_P ++#define TARGET_LEGITIMATE_ADDRESS_P loongarch_legitimate_address_p ++ ++#undef TARGET_FRAME_POINTER_REQUIRED ++#define TARGET_FRAME_POINTER_REQUIRED loongarch_frame_pointer_required ++ ++#undef TARGET_CAN_ELIMINATE ++#define TARGET_CAN_ELIMINATE loongarch_can_eliminate ++ ++#undef TARGET_CONDITIONAL_REGISTER_USAGE ++#define TARGET_CONDITIONAL_REGISTER_USAGE loongarch_conditional_register_usage ++ ++#undef TARGET_TRAMPOLINE_INIT ++#define TARGET_TRAMPOLINE_INIT loongarch_trampoline_init ++ ++#undef TARGET_SHIFT_TRUNCATION_MASK ++#define TARGET_SHIFT_TRUNCATION_MASK loongarch_shift_truncation_mask ++ ++#undef TARGET_VECTORIZE_VEC_PERM_CONST ++#define TARGET_VECTORIZE_VEC_PERM_CONST loongarch_vectorize_vec_perm_const ++ ++#undef TARGET_SCHED_REASSOCIATION_WIDTH ++#define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width ++ ++#undef TARGET_CASE_VALUES_THRESHOLD ++#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold ++ ++#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV ++#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv ++ ++#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS ++#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true ++ ++#undef TARGET_SPILL_CLASS ++#define TARGET_SPILL_CLASS loongarch_spill_class ++#undef TARGET_LRA_P ++#define TARGET_LRA_P loongarch_lra_p ++#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS ++#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS loongarch_ira_change_pseudo_allocno_class ++ ++#undef TARGET_HARD_REGNO_SCRATCH_OK ++#define TARGET_HARD_REGNO_SCRATCH_OK loongarch_hard_regno_scratch_ok ++ ++#undef TARGET_HARD_REGNO_NREGS ++#define TARGET_HARD_REGNO_NREGS loongarch_hard_regno_nregs ++#undef TARGET_HARD_REGNO_MODE_OK ++#define TARGET_HARD_REGNO_MODE_OK loongarch_hard_regno_mode_ok ++ ++#undef TARGET_MODES_TIEABLE_P ++#define TARGET_MODES_TIEABLE_P loongarch_modes_tieable_p ++ ++#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED ++#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \ ++ loongarch_hard_regno_call_part_clobbered ++ ++#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS ++#define 
TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2 ++ ++#undef TARGET_SECONDARY_MEMORY_NEEDED ++#define TARGET_SECONDARY_MEMORY_NEEDED loongarch_secondary_memory_needed ++ ++#undef TARGET_CAN_CHANGE_MODE_CLASS ++#define TARGET_CAN_CHANGE_MODE_CLASS loongarch_can_change_mode_class ++ ++#undef TARGET_TRULY_NOOP_TRUNCATION ++#define TARGET_TRULY_NOOP_TRUNCATION loongarch_truly_noop_truncation ++ ++#undef TARGET_CONSTANT_ALIGNMENT ++#define TARGET_CONSTANT_ALIGNMENT loongarch_constant_alignment ++ ++#undef TARGET_STARTING_FRAME_OFFSET ++#define TARGET_STARTING_FRAME_OFFSET loongarch_starting_frame_offset ++ ++struct gcc_target targetm = TARGET_INITIALIZER; ++ ++#include "gt-loongarch.h" +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +new file mode 100644 +index 000000000..18d17afb8 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch.h +@@ -0,0 +1,2145 @@ ++/* Definitions of target machine for GNU compiler. LARCH version. ++ Copyright (C) 1989-2018 Free Software Foundation, Inc. ++ Contributed by A. Lichnewsky (lich@inria.inria.fr). ++ Changed by Michael Meissner (meissner@osf.org). ++ 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and ++ Brendan Eich (brendan@microunity.com). ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++<http://www.gnu.org/licenses/>. */ ++ ++ ++#include "config/vxworks-dummy.h" ++ ++#ifdef GENERATOR_FILE ++/* This is used in some insn conditions, so needs to be declared, but ++ does not need to be defined. */ ++extern int target_flags_explicit; ++#endif ++ ++/* LARCH external variables defined in loongarch.c. */ ++ ++/* Which ABI to use. ABILP32 (original 32, or o32), ABILPX32 (n32), ++ ABILP64 (n64) are all defined by SGI. */ ++ ++#define ABILP32 0 ++#define ABILPX32 1 ++#define ABILP64 2 ++ ++/* Information about one recognized processor. Defined here for the ++ benefit of TARGET_CPU_CPP_BUILTINS. */ ++struct loongarch_cpu_info { ++ /* The 'canonical' name of the processor as far as GCC is concerned. ++ It's typically a manufacturer's prefix followed by a numerical ++ designation. It should be lowercase. */ ++ const char *name; ++ ++ /* The internal processor number that most closely matches this ++ entry. Several processors can have the same value, if there's no ++ difference between them from GCC's point of view. */ ++ enum processor cpu; ++ ++ /* The ISA level that the processor implements. */ ++ int isa; ++ ++ /* A mask of PTF_* values. */ ++ unsigned int tune_flags; ++}; ++ ++#include "config/loongarch/loongarch-opts.h" ++ ++/* Macros to silence warnings about numbers being signed in traditional ++ C and unsigned in ISO C when compiled on 32-bit hosts. */ ++ ++#define BITMASK_HIGH (((unsigned long)1) << 31) /* 0x80000000 */ ++#define BITMASK_UPPER16 ((unsigned long)0xffff << 16) /* 0xffff0000 */ ++#define BITMASK_LOWER16 ((unsigned long)0xffff) /* 0x0000ffff */ ++ ++ ++/* Run-time compilation parameters selecting different hardware subsets. 
*/ ++ ++/* True if we are generating position-independent VxWorks RTP code. */ ++#define TARGET_RTP_PIC (TARGET_VXWORKS_RTP && flag_pic) ++ ++/* True if we can optimize sibling calls. For simplicity, we only ++ handle cases in which call_insn_operand will reject invalid ++ sibcall addresses. There is one case in which this isn't true: ++ ++ - TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS. call_insn_operand ++ accepts global constants, but all sibcalls must be indirect. */ ++#define TARGET_SIBCALLS (1) ++ ++/* True if we can use the J and JAL instructions. */ ++#define TARGET_ABSOLUTE_JUMPS (!flag_pic) ++ ++/* True if the output must have a writable .eh_frame. ++ See ASM_PREFERRED_EH_DATA_FORMAT for details. */ ++#ifdef HAVE_LD_PERSONALITY_RELAXATION ++#define TARGET_WRITABLE_EH_FRAME 0 ++#else ++#define TARGET_WRITABLE_EH_FRAME (flag_pic && TARGET_SHARED) ++#endif ++ ++ ++/* ISA has LSA available. */ ++#define ISA_HAS_LSA (1) ++ ++/* ISA has DLSA available. */ ++#define ISA_HAS_DLSA (TARGET_64BIT) ++ ++/* Architecture target defines. */ ++#define TARGET_LOONGARCH64 (loongarch_arch == PROCESSOR_LOONGARCH64) ++#define TUNE_LOONGARCH64 (loongarch_tune == PROCESSOR_LOONGARCH64) ++#define TARGET_LA464 (loongarch_arch == PROCESSOR_LA464) ++#define TUNE_LA464 (loongarch_tune == PROCESSOR_LA464) ++/* True if the pre-reload scheduler should try to create chains of ++ multiply-add or multiply-subtract instructions. For example, ++ suppose we have: ++ ++ t1 = a * b ++ t2 = t1 + c * d ++ t3 = e * f ++ t4 = t3 - g * h ++ ++ t1 will have a higher priority than t2 and t3 will have a higher ++ priority than t4. However, before reload, there is no dependence ++ between t1 and t3, and they can often have similar priorities. ++ The scheduler will then tend to prefer: ++ ++ t1 = a * b ++ t3 = e * f ++ t2 = t1 + c * d ++ t4 = t3 - g * h ++ ++ which stops us from making full use of macc/madd-style instructions. ++ This sort of situation occurs frequently in Fourier transforms and ++ in unrolled loops. ++ ++ To counter this, the TUNE_MACC_CHAINS code will reorder the ready ++ queue so that chained multiply-add and multiply-subtract instructions ++ appear ahead of any other instruction that is likely to clobber lo. ++ In the example above, if t2 and t3 become ready at the same time, ++ the code ensures that t2 is scheduled first. ++ ++ Multiply-accumulate instructions are a bigger win for some targets ++ than others, so this macro is defined on an opt-in basis. */ ++#define TUNE_MACC_CHAINS 0 ++ ++#define TARGET_OLDABI (loongarch_abi == ABILP32) ++#define TARGET_NEWABI (loongarch_abi == ABILPX32 || loongarch_abi == ABILP64) ++ ++/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is ++ directly accessible, while the command-line options select ++ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI ++ in use. */ ++#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_ABI) ++#define TARGET_SOFT_FLOAT (TARGET_SOFT_FLOAT_ABI) ++ ++/* False if SC acts as a memory barrier with respect to itself, ++ otherwise a SYNC will be emitted after SC for atomic operations ++ that require ordering between the SC and following loads and ++ stores. It does not say anything about the ordering of loads and ++ stores prior to and following the SC, only about the SC itself and ++ those loads and stores that follow it. */ ++#define TARGET_SYNC_AFTER_SC (1) ++ ++/* Define preprocessor macros for the -march and -mtune options. ++ PREFIX is either _LARCH_ARCH or _LARCH_TUNE, INFO is the selected ++ processor. 
If INFO's canonical name is "foo", define PREFIX to ++ be "foo", and define an additional macro PREFIX_FOO. */ ++#define LARCH_CPP_SET_PROCESSOR(PREFIX, INFO) \ ++ do \ ++ { \ ++ char *macro, *p; \ ++ \ ++ macro = concat ((PREFIX), "_", (INFO)->name, NULL); \ ++ for (p = macro; *p != 0; p++) \ ++ if (*p == '+') \ ++ *p = 'P'; \ ++ else \ ++ *p = TOUPPER (*p); \ ++ \ ++ builtin_define (macro); \ ++ builtin_define_with_value ((PREFIX), (INFO)->name, 1); \ ++ free (macro); \ ++ } \ ++ while (0) ++ ++/* Target CPU builtins. */ ++#define TARGET_CPU_CPP_BUILTINS() loongarch_cpu_cpp_builtins (pfile) ++ ++/* Target CPU versions for D. */ ++#define TARGET_D_CPU_VERSIONS loongarch_d_target_versions ++ ++/* Default target_flags if no switches are specified */ ++ ++#ifndef TARGET_DEFAULT ++#define TARGET_DEFAULT 0 ++#endif ++ ++#ifndef TARGET_CPU_DEFAULT ++#define TARGET_CPU_DEFAULT 0 ++#endif ++ ++#ifdef IN_LIBGCC2 ++#undef TARGET_64BIT ++/* Make this compile time constant for libgcc2 */ ++#ifdef __loongarch64 ++#define TARGET_64BIT 1 ++#else ++#define TARGET_64BIT 0 ++#endif ++#endif /* IN_LIBGCC2 */ ++ ++#define TARGET_LIBGCC_SDATA_SECTION ".sdata" ++ ++#ifndef MULTILIB_ISA_DEFAULT ++#if LARCH_ISA_DEFAULT == 0 ++#define MULTILIB_ISA_DEFAULT "loongarch64" ++#endif ++#endif ++ ++#ifndef LARCH_ABI_DEFAULT ++#define LARCH_ABI_DEFAULT ABILP32 ++#endif ++ ++/* Use the most portable ABI flag for the ASM specs. */ ++ ++#if LARCH_ABI_DEFAULT == ABILP32 ++#define MULTILIB_ABI_DEFAULT "mabi=lp32" ++#elif LARCH_ABI_DEFAULT == ABILP64 ++#define MULTILIB_ABI_DEFAULT "mabi=lp64" ++#endif ++ ++#ifndef MULTILIB_DEFAULTS ++#define MULTILIB_DEFAULTS \ ++ {MULTILIB_ISA_DEFAULT, MULTILIB_ABI_DEFAULT } ++#endif ++ ++/* A spec condition that matches all -loongarch arguments. */ ++ ++#define LARCH_ISA_LEVEL_OPTION_SPEC \ ++ "loongarch" ++ ++/* A spec condition that matches all architecture arguments. */ ++ ++#define LARCH_ARCH_OPTION_SPEC \ ++ LARCH_ISA_LEVEL_OPTION_SPEC "|march=*" ++ ++/* A spec that infers a -loongarch argument from an -march argument. */ ++ ++#define LARCH_ISA_LEVEL_SPEC \ ++ "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;:}" ++ ++/* A spec that injects the default multilib ISA if no architecture is ++ specified. */ ++ ++#define LARCH_DEFAULT_ISA_LEVEL_SPEC \ ++ "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;: \ ++ %{!march=*: -" MULTILIB_ISA_DEFAULT "}}" ++ ++/* A spec that infers a -mhard-float or -msoft-float setting from an ++ -march argument. Note that soft-float and hard-float code are not ++ link-compatible. */ ++ ++#define LARCH_ARCH_FLOAT_SPEC \ ++ "%{mhard-float|msoft-float|mno-float|march=loongarch*:; \ ++ march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \ ++ |march=34kc|march=34kn|march=74kc|march=1004kc|march=5kc \ ++ |march=m14k*|march=m5101|march=octeon|march=xlr: -msoft-float; \ ++ march=*: -mhard-float}" ++ ++/* A spec condition that matches 32-bit options. It only works if ++ LARCH_ISA_LEVEL_SPEC has been applied. */ ++ ++#define LARCH_32BIT_OPTION_SPEC \ ++ "loongarch1|loongarch2|loongarch32*|mgp32" ++ ++#if (LARCH_ABI_DEFAULT == ABILPX32 \ ++ || LARCH_ABI_DEFAULT == ABILP64) ++#define OPT_ARCH64 "mabi=32|mgp32:;" ++#define OPT_ARCH32 "mabi=32|mgp32" ++#else ++#define OPT_ARCH64 "mabi=o64|mabi=n32|mabi=64|mgp64" ++#define OPT_ARCH32 "mabi=o64|mabi=n32|mabi=64|mgp64:;" ++#endif ++ ++/* Support for a compile-time default CPU, et cetera. The rules are: ++ --with-arch is ignored if -march is specified or a -loongarch is specified ++ ; likewise --with-arch-32 and --with-arch-64. 
++ --with-tune is ignored if -mtune is specified; likewise ++ --with-tune-32 and --with-tune-64. ++ --with-abi is ignored if -mabi is specified. ++ --with-float is ignored if -mhard-float or -msoft-float are ++ specified. ++ --with-fpu is ignored if -msoft-float, -msingle-float or -mdouble-float are ++ specified. ++ --with-fp-32 is ignored if -msoft-float, -msingle-float, -mlsx or -mfp are ++ specified. ++ --with-divide is ignored if -mdivide-traps or -mdivide-breaks are ++ specified. */ ++#define OPTION_DEFAULT_SPECS \ ++ {"arch", "%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \ ++ {"arch_32", "%{" OPT_ARCH32 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ ++ {"arch_64", "%{" OPT_ARCH64 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ ++ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ ++ {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ ++ {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ ++ {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \ ++ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \ ++ {"fpu", "%{!msoft-float:%{!msingle-float:%{!mdouble-float:-m%(VALUE)-float}}}" }, \ ++ {"fp_32", "%{" OPT_ARCH32 \ ++ ":%{!msoft-float:%{!msingle-float:%{!mfp*:%{!mlsx:%{!mloongson-asx:-mfp%(VALUE)}}}}}" }, \ ++ {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" } ++ ++/* A spec that infers the: ++ -mlsx setting from a -march=la464 argument. ++ -mlasx setting from a -march=la464 argument. */ ++#define BASE_DRIVER_SELF_SPECS \ ++ LARCH_ASE_LSX_SPEC \ ++ LARCH_ASE_LASX_SPEC ++ ++#define LARCH_ASE_LSX_SPEC \ ++ "%{!mno-lsx: \ ++ %{march=la464: -mlsx}}" ++ ++#define LARCH_ASE_LASX_SPEC \ ++ "%{!mno-lasx: \ ++ %{march=la464: -mlasx}}" ++ ++#define DRIVER_SELF_SPECS \ ++ BASE_DRIVER_SELF_SPECS ++ ++/* from N_LARCH */ ++#define ABI_SPEC \ ++ "%{mabi=lp32:32}" \ ++ "%{mabi=lp64:64}" \ ++ ++#define STARTFILE_PREFIX_SPEC \ ++ "/lib" ABI_SPEC "/ " \ ++ "/usr/lib" ABI_SPEC "/ " \ ++ "/lib/ " \ ++ "/usr/lib/ " ++ ++/* This definition replaces the formerly used 'm' constraint with a ++ different constraint letter in order to avoid changing semantics of ++ the 'm' constraint when accepting new address formats in ++ TARGET_LEGITIMATE_ADDRESS_P. The constraint letter defined here ++ must not be used in insn definitions or inline assemblies. */ ++#define TARGET_MEM_CONSTRAINT 'w' ++ ++/* True if the file format uses 64-bit symbols. At present, this is ++ only true for n64, which uses 64-bit ELF. */ ++#define FILE_HAS_64BIT_SYMBOLS (loongarch_abi == ABILP64) ++ ++/* True if symbols are 64 bits wide. This is usually determined by ++ the ABI's file format, but it can be overridden by -msym32. Note that ++ overriding the size with -msym32 changes the ABI of relocatable objects, ++ although it doesn't change the ABI of a fully-linked object. */ ++#define ABI_HAS_64BIT_SYMBOLS (FILE_HAS_64BIT_SYMBOLS \ ++ && Pmode == DImode) ++ ++/* ISA supports instructions DMUL, DMULU, DMUH, DMUHU. */ ++#define ISA_HAS_DMUL (TARGET_64BIT) ++ ++/* ISA has floating-point RECIP.fmt and RSQRT.fmt instructions. The ++ LARCH64 rev. 1 ISA says that RECIP.D and RSQRT.D are unpredictable when ++ doubles are stored in pairs of FPRs, so for safety's sake, we apply ++ this restriction to the LARCH IV ISA too. */ ++#define ISA_HAS_FP_RECIP_RSQRT(MODE) \ ++ ((MODE) == SFmode \ ++ || (TARGET_FLOAT64 \ ++ && (MODE) == DFmode)) ++ ++/* The LSX ASE is available. */ ++#define ISA_HAS_LSX (TARGET_LSX) ++ ++/* The LASX ASE is available. 
*/ ++#define ISA_HAS_LASX (TARGET_LASX) ++ ++/* Tell collect what flags to pass to nm. */ ++#ifndef NM_FLAGS ++#define NM_FLAGS "-Bn" ++#endif ++ ++ ++/* SUBTARGET_ASM_DEBUGGING_SPEC handles passing debugging options to ++ the assembler. It may be overridden by subtargets. ++ ++ Beginning with gas 2.13, -mdebug must be passed to correctly handle ++ COFF debugging info. */ ++ ++#ifndef SUBTARGET_ASM_DEBUGGING_SPEC ++#define SUBTARGET_ASM_DEBUGGING_SPEC "\ ++%{g} %{g0} %{g1} %{g2} %{g3} \ ++%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \ ++%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \ ++%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3}" ++#endif ++ ++/* FP_ASM_SPEC represents the floating-point options that must be passed ++ to the assembler when FPXX support exists. Prior to that point the ++ assembler could accept the options, but they were not required for ++ correctness. We only add the options when absolutely necessary ++ because passing -msoft-float to the assembler will cause it to reject ++ all hard-float instructions, which may require some user code to be ++ updated. */ ++ ++#ifdef HAVE_AS_DOT_MODULE ++#define FP_ASM_SPEC "\ ++%{mhard-float} %{msoft-float} \ ++%{msingle-float} %{mdouble-float}" ++#else ++#define FP_ASM_SPEC ++#endif ++ ++/* SUBTARGET_ASM_SPEC is always passed to the assembler. It may be ++ overridden by subtargets. */ ++ ++#ifndef SUBTARGET_ASM_SPEC ++#define SUBTARGET_ASM_SPEC "" ++#endif ++ ++#undef ASM_SPEC ++#define ASM_SPEC "\ ++%{mabi=*} %{!mabi=*: %(asm_abi_default_spec)} \ ++" ++/* Extra switches sometimes passed to the linker. */ ++ ++#ifndef LINK_SPEC ++#define LINK_SPEC "" ++#endif /* LINK_SPEC defined */ ++ ++ ++/* Specs for the compiler proper. */ ++ ++/* SUBTARGET_CC1_SPEC is passed to the compiler proper. It may be ++ overridden by subtargets. */ ++#ifndef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC "" ++#endif ++ ++/* CC1_SPEC is the set of arguments to pass to the compiler proper. */ ++ ++#undef CC1_SPEC ++#define CC1_SPEC "\ ++%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}} \ ++%(subtarget_cc1_spec)" ++ ++/* Preprocessor specs. */ ++ ++/* SUBTARGET_CPP_SPEC is passed to the preprocessor. It may be ++ overridden by subtargets. */ ++#ifndef SUBTARGET_CPP_SPEC ++#define SUBTARGET_CPP_SPEC "" ++#endif ++ ++#define CPP_SPEC "%(subtarget_cpp_spec)" ++ ++/* This macro defines names of additional specifications to put in the specs ++ that can be used in various specifications like CC1_SPEC. Its definition ++ is an initializer with a subgrouping for each command option. ++ ++ Each subgrouping contains a string constant that defines the ++ specification name, and a string constant that is used by the GCC driver ++ program. ++ ++ Do not define this macro if it does not need to do anything. 
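++ ++ For example, the { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC } entry ++ below is what lets %(subtarget_cc1_spec) be written inside CC1_SPEC ++ above: the driver substitutes the value of the named spec at that ++ point. 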
*/ ++ ++#define EXTRA_SPECS \ ++ { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC }, \ ++ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \ ++ { "subtarget_asm_debugging_spec", SUBTARGET_ASM_DEBUGGING_SPEC }, \ ++ { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \ ++ { "asm_abi_default_spec", "-" MULTILIB_ABI_DEFAULT }, \ ++ SUBTARGET_EXTRA_SPECS ++ ++#ifndef SUBTARGET_EXTRA_SPECS ++#define SUBTARGET_EXTRA_SPECS ++#endif ++ ++#define DBX_DEBUGGING_INFO 1 /* generate stabs (OSF/rose) */ ++#define DWARF2_DEBUGGING_INFO 1 /* dwarf2 debugging info */ ++ ++#ifndef PREFERRED_DEBUGGING_TYPE ++#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG ++#endif ++ ++/* The size of DWARF addresses should be the same as the size of symbols ++ in the target file format. They shouldn't depend on things like -msym32, ++ because many DWARF consumers do not allow the mixture of address sizes ++ that one would then get from linking -msym32 code with -msym64 code. ++*/ ++#define DWARF2_ADDR_SIZE (FILE_HAS_64BIT_SYMBOLS ? 8 : 4) ++ ++/* By default, turn on GDB extensions. */ ++#define DEFAULT_GDB_EXTENSIONS 1 ++ ++/* Registers may have a prefix which can be ignored when matching ++ user asm and register definitions. */ ++#ifndef REGISTER_PREFIX ++#define REGISTER_PREFIX "$" ++#endif ++ ++/* Local compiler-generated symbols must have a prefix that the assembler ++ understands. By default, this is $, although some targets (e.g., ++ NetBSD-ELF) need to override this. */ ++ ++#ifndef LOCAL_LABEL_PREFIX ++#define LOCAL_LABEL_PREFIX "$" ++#endif ++ ++/* By default on the loongarch, external symbols do not have an underscore ++ prepended, but some targets (e.g., NetBSD) require this. */ ++ ++#ifndef USER_LABEL_PREFIX ++#define USER_LABEL_PREFIX "" ++#endif ++ ++/* On Sun 4, this limit is 2048. We use 1500 to be safe, ++ since the length can run past this up to a continuation point. */ ++#undef DBX_CONTIN_LENGTH ++#define DBX_CONTIN_LENGTH 1500 ++ ++/* How to renumber registers for dbx and gdb. */ ++#define DBX_REGISTER_NUMBER(REGNO) loongarch_dbx_regno[REGNO] ++ ++/* The mapping from gcc register number to DWARF 2 CFA column number. */ ++#define DWARF_FRAME_REGNUM(REGNO) loongarch_dwarf_regno[REGNO] ++ ++/* The DWARF 2 CFA column which tracks the return address. */ ++#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM ++ ++/* Before the prologue, RA lives in r1. */ ++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM) ++ ++/* Describe how we implement __builtin_eh_return. */ ++#define EH_RETURN_DATA_REGNO(N) \ ++ ((N) < (4) ? (N) + GP_ARG_FIRST : INVALID_REGNUM) ++ ++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) ++ ++#define EH_USES(N) loongarch_eh_uses (N) ++ ++/* Offsets recorded in opcodes are a multiple of this alignment factor. ++ The default for this in 64-bit mode is 8, which causes problems with ++ SFmode register saves. */ ++#define DWARF_CIE_DATA_ALIGNMENT -4 ++ ++/* Correct the offset of automatic variables and arguments. Note that ++ the LARCH debug format wants all automatic variables and arguments ++ to be in terms of the virtual frame pointer (stack pointer before ++ any adjustment in the function), while the LARCH 3.0 linker wants ++ the frame pointer to be the stack pointer after the initial ++ adjustment. 
*/ ++ ++#define DEBUGGER_AUTO_OFFSET(X) \ ++ loongarch_debugger_offset (X, (HOST_WIDE_INT) 0) ++#define DEBUGGER_ARG_OFFSET(OFFSET, X) \ ++ loongarch_debugger_offset (X, (HOST_WIDE_INT) OFFSET) ++ ++/* Target machine storage layout */ ++ ++#define BITS_BIG_ENDIAN 0 ++#define BYTES_BIG_ENDIAN 0 ++#define WORDS_BIG_ENDIAN 0 ++ ++#define MAX_BITS_PER_WORD 64 ++ ++/* Width of a word, in units (bytes). */ ++#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4) ++#ifndef IN_LIBGCC2 ++#define MIN_UNITS_PER_WORD 4 ++#endif ++ ++/* Width of a LSX vector register in bytes. */ ++#define UNITS_PER_LSX_REG 16 ++/* Width of a LSX vector register in bits. */ ++#define BITS_PER_LSX_REG (UNITS_PER_LSX_REG * BITS_PER_UNIT) ++ ++/* Width of a LASX vector register in bytes. */ ++#define UNITS_PER_LASX_REG 32 ++/* Width of a LASX vector register in bits. */ ++#define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT) ++ ++/* For LARCH, width of a floating point register. */ ++#define UNITS_PER_FPREG (TARGET_FLOAT64 ? 8 : 4) ++ ++/* The number of consecutive floating-point registers needed to store the ++ largest format supported by the FPU. */ ++#define MAX_FPRS_PER_FMT (TARGET_FLOAT64 || TARGET_SINGLE_FLOAT ? 1 : 2) ++ ++/* The number of consecutive floating-point registers needed to store the ++ smallest format supported by the FPU. */ ++#define MIN_FPRS_PER_FMT 1 ++ ++/* The largest size of value that can be held in floating-point ++ registers and moved with a single instruction. */ ++#define UNITS_PER_HWFPVALUE \ ++ (TARGET_SOFT_FLOAT_ABI ? 0 : MAX_FPRS_PER_FMT * UNITS_PER_FPREG) ++ ++/* The largest size of value that can be held in floating-point ++ registers. */ ++#define UNITS_PER_FPVALUE \ ++ (TARGET_SOFT_FLOAT_ABI ? 0 \ ++ : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \ ++ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) ++ ++/* The number of bytes in a double. */ ++#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT) ++ ++/* Set the sizes of the core types. */ ++#define SHORT_TYPE_SIZE 16 ++#define INT_TYPE_SIZE 32 ++#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32) ++#define LONG_LONG_TYPE_SIZE 64 ++ ++#define FLOAT_TYPE_SIZE 32 ++#define DOUBLE_TYPE_SIZE 64 ++#define LONG_DOUBLE_TYPE_SIZE (TARGET_NEWABI ? 128 : 64) ++ ++/* Define the sizes of fixed-point types. */ ++#define SHORT_FRACT_TYPE_SIZE 8 ++#define FRACT_TYPE_SIZE 16 ++#define LONG_FRACT_TYPE_SIZE 32 ++#define LONG_LONG_FRACT_TYPE_SIZE 64 ++ ++#define SHORT_ACCUM_TYPE_SIZE 16 ++#define ACCUM_TYPE_SIZE 32 ++#define LONG_ACCUM_TYPE_SIZE 64 ++/* FIXME. LONG_LONG_ACCUM_TYPE_SIZE should be 128 bits, but GCC ++ doesn't support 128-bit integers for LARCH32 currently. */ ++#define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64) ++ ++/* long double is not a fixed mode, but the idea is that, if we ++ support long double, we also want a 128-bit integer type. */ ++#define MAX_FIXED_MODE_SIZE LONG_DOUBLE_TYPE_SIZE ++ ++/* Width in bits of a pointer. */ ++#ifndef POINTER_SIZE ++#define POINTER_SIZE ((TARGET_64BIT) ? 64 : 32) ++#endif ++ ++/* Allocation boundary (in *bits*) for storing arguments in argument list. */ ++#define PARM_BOUNDARY BITS_PER_WORD ++ ++/* Allocation boundary (in *bits*) for the code of a function. */ ++#define FUNCTION_BOUNDARY 32 ++ ++/* Alignment of field after `int : 0' in a structure. */ ++#define EMPTY_FIELD_BOUNDARY 32 ++ ++/* Every structure's size must be a multiple of this. */ ++/* 8 is observed right on a DECstation and on riscos 4.02. 
*/ ++#define STRUCTURE_SIZE_BOUNDARY 8 ++ ++/* There is no point aligning anything to a rounder boundary than ++ LONG_DOUBLE_TYPE_SIZE, except that under LSX/LASX the biggest ++ alignment is BITS_PER_LSX_REG or BITS_PER_LASX_REG respectively. */ ++#define BIGGEST_ALIGNMENT \ ++ (ISA_HAS_LASX ? BITS_PER_LASX_REG : (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE)) ++ ++/* All accesses must be aligned. */ ++#define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN) ++ ++/* Define this if you wish to imitate the way many other C compilers ++ handle alignment of bitfields and the structures that contain ++ them. ++ ++ The behavior is that the type written for a bit-field (`int', ++ `short', or other integer type) imposes an alignment for the ++ entire structure, as if the structure really did contain an ++ ordinary field of that type. In addition, the bit-field is placed ++ within the structure so that it would fit within such a field, ++ not crossing a boundary for it. ++ ++ Thus, on most machines, a bit-field whose type is written as `int' ++ would not cross a four-byte boundary, and would force four-byte ++ alignment for the whole structure. (The alignment used may not ++ be four bytes; it is controlled by the other alignment ++ parameters.) ++ ++ If the macro is defined, its definition should be a C expression; ++ a nonzero value for the expression enables this behavior. */ ++ ++#define PCC_BITFIELD_TYPE_MATTERS 1 ++ ++/* If defined, a C expression to compute the alignment for a static ++ variable. TYPE is the data type, and ALIGN is the alignment that ++ the object would ordinarily have. The value of this macro is used ++ instead of that alignment to align the object. ++ ++ If this macro is not defined, then ALIGN is used. ++ ++ One use of this macro is to increase alignment of medium-size ++ data to make it all fit in fewer cache lines. Another is to ++ cause character arrays to be word-aligned so that `strcpy' calls ++ that copy constants to character arrays can be done inline. */ ++ ++#undef DATA_ALIGNMENT ++#define DATA_ALIGNMENT(TYPE, ALIGN) \ ++ ((((ALIGN) < BITS_PER_WORD) \ ++ && (TREE_CODE (TYPE) == ARRAY_TYPE \ ++ || TREE_CODE (TYPE) == UNION_TYPE \ ++ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN)) ++ ++/* We need this for the same reason as DATA_ALIGNMENT, namely to cause ++ character arrays to be word-aligned so that `strcpy' calls that copy ++ constants to character arrays can be done inline, and 'strcmp' can be ++ optimised to use word loads. */ ++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ ++ DATA_ALIGNMENT (TYPE, ALIGN) ++ ++#define PAD_VARARGS_DOWN \ ++ (targetm.calls.function_arg_padding (TYPE_MODE (type), type) == PAD_DOWNWARD) ++ ++/* Define if operations between registers always perform the operation ++ on the full register even if a narrower mode is specified. */ ++#define WORD_REGISTER_OPERATIONS 1 ++ ++/* When in 64-bit mode, move insns will sign extend SImode and CCmode ++ moves. All other references are zero extended. */ ++#define LOAD_EXTEND_OP(MODE) \ ++ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \ ++ ? SIGN_EXTEND : ZERO_EXTEND) ++ ++/* Define this macro if it is advisable to hold scalars in registers ++ in a wider mode than that declared by the program. In such cases, ++ the value is constrained to be within the bounds of the declared ++ type, but kept valid in the wider mode. The signedness of the ++ extension may differ from that of the type. 
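++ ++ For example, on a 64-bit target an SImode value is kept in a 64-bit ++ register sign-extended to Pmode (DImode), so a widening such as ++ ++ int i = ...; ++ long l = (long) i; ++ ++ can normally be performed without a separate extension instruction, ++ since the register already holds the sign-extended value. 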
*/ ++ ++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ ++ if (GET_MODE_CLASS (MODE) == MODE_INT \ ++ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ ++ { \ ++ if ((MODE) == SImode) \ ++ (UNSIGNEDP) = 0; \ ++ (MODE) = Pmode; \ ++ } ++ ++/* Pmode is always the same as ptr_mode, but not always the same as word_mode. ++ Extensions of pointers to word_mode must be signed. */ ++#define POINTERS_EXTEND_UNSIGNED false ++ ++/* Define if loading short immediate values into registers sign extends. */ ++#define SHORT_IMMEDIATES_SIGN_EXTEND 1 ++ ++/* The [d]clz instructions have the natural values at 0. */ ++ ++#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ ++ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2) ++ ++/* Standard register usage. */ ++ ++/* Number of hardware registers. We have: ++ ++ - 32 integer registers ++ - 32 floating point registers ++ - 8 condition code registers ++ - 2 fake registers: ++ - ARG_POINTER_REGNUM ++ - FRAME_POINTER_REGNUM ++*/ ++ ++#define FIRST_PSEUDO_REGISTER 74 ++ ++/* By default, fix the zero register ($r0), the thread pointer ($r2), ++ the stack pointer ($r3) and the reserved register ($r21). This can ++ change depending on the command-line options. ++ ++ Regarding coprocessor registers: without evidence to the contrary, ++ it's best to assume that each coprocessor register has a unique ++ use. This can be overridden in, e.g., loongarch_option_override or ++ TARGET_CONDITIONAL_REGISTER_USAGE should the assumption be ++ inappropriate for a particular target. */ ++ ++#define FIXED_REGISTERS \ ++{ \ ++ 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1} ++ ++ ++/* Set up this array for o32 by default. ++ ++ Note that we don't mark $31 as a call-clobbered register. The idea is ++ that it's really the call instructions themselves which clobber $31. ++ We don't care what the called function does with it afterwards. ++ ++ This approach makes it easier to implement sibcalls. Unlike normal ++ calls, sibcalls don't clobber $31, so the register reaches the ++ called function intact. EPILOGUE_USES says that $31 is useful ++ to the called function. */ ++ ++#define CALL_USED_REGISTERS \ ++{ \ ++ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ ++ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} ++ ++/* Internal macros to classify a register number as to whether it's a ++ general purpose register, a floating point register, a ++ multiply/divide register, or a status register. */ ++ ++#define GP_REG_FIRST 0 ++#define GP_REG_LAST 31 ++#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1) ++#define GP_DBX_FIRST 0 ++ ++#define FP_REG_FIRST 32 ++#define FP_REG_LAST 63 ++#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) ++#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32) ++ ++#define LSX_REG_FIRST FP_REG_FIRST ++#define LSX_REG_LAST FP_REG_LAST ++#define LSX_REG_NUM FP_REG_NUM ++ ++#define LASX_REG_FIRST FP_REG_FIRST ++#define LASX_REG_LAST FP_REG_LAST ++#define LASX_REG_NUM FP_REG_NUM ++ ++/* The DWARF 2 CFA column which tracks the return address from a ++ signal handler context. This means that to maintain backwards ++ compatibility, no hard register can be assigned this column if it ++ would need to be handled by the DWARF unwinder. 
*/ ++#define DWARF_ALT_FRAME_RETURN_COLUMN 72 ++ ++#define ST_REG_FIRST 64 ++#define ST_REG_LAST 71 ++#define ST_REG_NUM (ST_REG_LAST - ST_REG_FIRST + 1) ++ ++#define GP_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM) ++#define M16_REG_P(REGNO) \ ++ (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 16 || (REGNO) == 17) ++#define M16STORE_REG_P(REGNO) \ ++ (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 0 || (REGNO) == 17) ++#define FP_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM) ++#define ST_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - ST_REG_FIRST) < ST_REG_NUM) ++#define LSX_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM) ++#define LASX_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - LASX_REG_FIRST) < LASX_REG_NUM) ++ ++#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X))) ++#define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X))) ++#define LASX_REG_RTX_P(X) (REG_P (X) && LASX_REG_P (REGNO (X))) ++ ++ ++#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \ ++ loongarch_hard_regno_rename_ok (OLD_REG, NEW_REG) ++ ++/* Select a register mode required for caller save of hard regno REGNO. */ ++#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ ++ loongarch_hard_regno_caller_save_mode (REGNO, NREGS, MODE) ++ ++/* Register to use for pushing function arguments. */ ++#define STACK_POINTER_REGNUM (GP_REG_FIRST + 3) ++ ++/* These two registers don't really exist: they get eliminated to either ++ the stack or hard frame pointer. */ ++#define ARG_POINTER_REGNUM 72 ++#define FRAME_POINTER_REGNUM 73 ++ ++#define HARD_FRAME_POINTER_REGNUM \ ++ (GP_REG_FIRST + 22) ++ ++/* FIXME: */ ++/* #define HARD_FRAME_POINTER_IS_FRAME_POINTER (HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM) */ ++/* #define HARD_FRAME_POINTER_IS_ARG_POINTER (HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM) */ ++ ++#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0 ++#define HARD_FRAME_POINTER_IS_ARG_POINTER 0 ++ ++/* FIXME: */ ++/* Register in which static-chain is passed to a function. */ ++#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 20) /* $t8 */ ++ ++#define LARCH_PROLOGUE_TEMP_REGNUM \ ++ (GP_REG_FIRST + 13) ++#define LARCH_PROLOGUE_TEMP2_REGNUM \ ++ (GP_REG_FIRST + 12) ++#define LARCH_PROLOGUE_TEMP3_REGNUM \ ++ (GP_REG_FIRST + 14) ++#define LARCH_EPILOGUE_TEMP_REGNUM \ ++ (GP_REG_FIRST + (12)) ++ ++#define LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP_REGNUM) ++#define LARCH_PROLOGUE_TEMP2(MODE) \ ++ gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP2_REGNUM) ++#define LARCH_PROLOGUE_TEMP3(MODE) \ ++ gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP3_REGNUM) ++#define LARCH_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_EPILOGUE_TEMP_REGNUM) ++ ++/* Define this macro if it is as good or better to call a constant ++ function address than to call an address kept in a register. */ ++#define NO_FUNCTION_CSE 1 ++ ++#define THREAD_POINTER_REGNUM (GP_REG_FIRST + 2) ++ ++ ++/* Define the classes of registers for register constraints in the ++ machine description. Also define ranges of constants. ++ ++ One of the classes must always be named ALL_REGS and include all hard regs. ++ If there is more than one class, another class must be named NO_REGS ++ and contain no registers. ++ ++ The name GENERAL_REGS must be the name of a class (or an alias for ++ another name such as ALL_REGS). This is the class of registers ++ that is allowed by "g" or "r" in a register constraint. 
++ Also, registers outside this class are allocated only when ++ instructions express preferences for them. ++ ++ The classes must be numbered in nondecreasing order; that is, ++ a larger-numbered class must never be contained completely ++ in a smaller-numbered class. ++ ++ For any two classes, it is very desirable that there be another ++ class that represents their union. */ ++ ++enum reg_class ++{ ++ NO_REGS, /* no registers in set */ ++ SIBCALL_REGS, /* SIBCALL_REGS */ ++ JALR_REGS, /* JALR_REGS */ ++ GR_REGS, /* integer registers */ ++ CSR_REGS, /* integer registers except for $r0 and $r1 for csr. */ ++ FP_REGS, /* floating point registers */ ++ ST_REGS, /* status registers (fp status) */ ++ FRAME_REGS, /* arg pointer and frame pointer */ ++ ALL_REGS, /* all registers */ ++ LIM_REG_CLASSES /* max value + 1 */ ++}; ++ ++#define N_REG_CLASSES (int) LIM_REG_CLASSES ++ ++#define GENERAL_REGS GR_REGS ++ ++/* An initializer containing the names of the register classes as C ++ string constants. These names are used in writing some of the ++ debugging dumps. */ ++ ++#define REG_CLASS_NAMES \ ++{ \ ++ "NO_REGS", \ ++ "SIBCALL_REGS", \ ++ "JALR_REGS", \ ++ "GR_REGS", \ ++ "CSR_REGS", \ ++ "FP_REGS", \ ++ "ST_REGS", \ ++ "FRAME_REGS", \ ++ "ALL_REGS" \ ++} ++ ++/* An initializer containing the contents of the register classes, ++ as integers which are bit masks. The Nth integer specifies the ++ contents of class N. The way the integer MASK is interpreted is ++ that register R is in the class if `MASK & (1 << R)' is 1. ++ ++ When the machine has more than 32 registers, an integer does not ++ suffice. Then the integers are replaced by sub-initializers, ++ braced groupings containing several integers. Each ++ sub-initializer must be suitable as an initializer for the type ++ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */ ++ ++#define REG_CLASS_CONTENTS \ ++{ \ ++ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \ ++ { 0x001ff000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \ ++ { 0xff9ffff0, 0x00000000, 0x00000000 }, /* JALR_REGS */ \ ++ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \ ++ { 0xfffffffc, 0x00000000, 0x00000000 }, /* CSR_REGS */ \ ++ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \ ++ { 0x00000000, 0x00000000, 0x000000ff }, /* ST_REGS */ \ ++ { 0x00000000, 0x00000000, 0x00000300 }, /* FRAME_REGS */ \ ++ { 0xffffffff, 0xffffffff, 0x000003ff } /* ALL_REGS */ \ ++} ++ ++ ++/* A C expression whose value is a register class containing hard ++ register REGNO. In general there is more than one such class; ++ choose a class which is "minimal", meaning that no smaller class ++ also contains the register. */ ++ ++#define REGNO_REG_CLASS(REGNO) loongarch_regno_to_class[ (REGNO) ] ++ ++/* A macro whose definition is the name of the class to which a ++ valid base register must belong. A base register is one used in ++ an address which is the register value plus a displacement. */ ++ ++#define BASE_REG_CLASS (GR_REGS) ++ ++/* A macro whose definition is the name of the class to which a ++ valid index register must belong. An index register is one used ++ in an address where its value is either multiplied by a scale ++ factor or added to another register (as well as added to a ++ displacement). */ ++ ++#define INDEX_REG_CLASS NO_REGS ++ ++/* We generally want to put call-clobbered registers ahead of ++ call-saved ones. (IRA expects this.) */ ++ ++#define REG_ALLOC_ORDER \ ++{ /* Call-clobbered GPRs. 
*/ \ ++ 12, 13, 14, 15, 16, 17, 18, 19, 20, 4, 5, 6, 7, 8, 9, 10, 11, 1, \ ++ /* The global pointer. This is call-clobbered for o32 and o64 \ ++ abicalls, call-saved for n32 and n64 abicalls, and a program \ ++ invariant otherwise. Putting it between the call-clobbered \ ++ and call-saved registers should cope with all eventualities. */ \ ++ /* Call-saved GPRs. */ \ ++ 23, 24, 25, 26, 27, 28, 29, 30, 31, \ ++ /* GPRs that can never be exposed to the register allocator. */ \ ++ 0, 2, 3, 21, 22, \ ++ /* Call-clobbered FPRs. */ \ ++ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \ ++ 48, 49, 50, 51, 52, 53, 54, 55, \ ++ /* FPRs that are usually call-saved. The odd ones are actually \ ++ call-clobbered for n32, but listing them ahead of the even \ ++ registers might encourage the register allocator to fragment \ ++ the available FPR pairs. We need paired FPRs to store long \ ++ doubles, so it isn't clear that using a different order \ ++ for n32 would be a win. */ \ ++ 56, 57, 58, 59, 60, 61, 62, 63, \ ++ /* None of the remaining classes have defined call-saved \ ++ registers. */ \ ++ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73} ++ ++/* True if VALUE is an unsigned 6-bit number. */ ++ ++#define UIMM6_OPERAND(VALUE) \ ++ (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0) ++ ++/* True if VALUE is a signed 10-bit number. */ ++ ++#define IMM10_OPERAND(VALUE) \ ++ ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400) ++ ++/* True if VALUE is a signed 12-bit number. */ ++ ++#define IMM12_OPERAND(VALUE) \ ++ ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000) ++ ++/* True if VALUE is a signed 13-bit number. */ ++ ++#define IMM13_OPERAND(VALUE) \ ++ ((unsigned HOST_WIDE_INT) (VALUE) + 0x1000 < 0x2000) ++ ++/* True if VALUE is a signed 16-bit number. */ ++ ++#define IMM16_OPERAND(VALUE) \ ++ ((unsigned HOST_WIDE_INT) (VALUE) + 0x8000 < 0x10000) ++ ++ ++/* True if VALUE is a signed 12-bit number. */ ++ ++#define SMALL_OPERAND(VALUE) \ ++ ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000) ++ ++/* True if VALUE is an unsigned 12-bit number. */ ++ ++#define SMALL_OPERAND_UNSIGNED(VALUE) \ ++ (((VALUE) & ~(unsigned HOST_WIDE_INT) 0xfff) == 0) ++ ++/* True if VALUE can be loaded into a register using LUI. */ ++ ++#define LUI_OPERAND(VALUE) \ ++ (((VALUE) | 0x7ffff000) == 0x7ffff000 \ ++ || ((VALUE) | 0x7ffff000) + 0x1000 == 0) ++ ++/* True if VALUE can be loaded into a register using LU32I. */ ++ ++#define LU32I_OPERAND(VALUE) \ ++ ((((VALUE) | 0x7ffff00000000) == 0x7ffff00000000) \ ++ || ((VALUE) | 0x7ffff00000000) + 0x100000000 == 0) ++ ++/* True if VALUE can be loaded into a register using LU52I. */ ++ ++#define LU52I_OPERAND(VALUE) \ ++ ((((VALUE) | 0xfff0000000000000) == 0xfff0000000000000)) ++ ++/* Return a value X with the low 12 bits clear, and such that ++ VALUE - X is a signed 12-bit value. 
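++ ++ For example, CONST_HIGH_PART (0x1234) is 0x1000 and CONST_LOW_PART ++ (0x1234) is 0x234, while CONST_HIGH_PART (0x1fff) rounds up to 0x2000 ++ and CONST_LOW_PART (0x1fff) is -0x1, keeping the low part within the ++ signed 12-bit range. 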
*/ ++ ++#define CONST_HIGH_PART(VALUE) \ ++ (((VALUE) + 0x800) & ~(unsigned HOST_WIDE_INT) 0xfff) ++ ++#define CONST_LOW_PART(VALUE) \ ++ ((VALUE) - CONST_HIGH_PART (VALUE)) ++ ++#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X)) ++#define SMALL_INT_UNSIGNED(X) SMALL_OPERAND_UNSIGNED (INTVAL (X)) ++#define LUI_INT(X) LUI_OPERAND (INTVAL (X)) ++#define LU32I_INT(X) LU32I_OPERAND (INTVAL (X)) ++#define LU52I_INT(X) LU52I_OPERAND (INTVAL (X)) ++#define ULARCH_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) ++#define LARCH_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255)) ++#define LISA_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) ++#define LISA_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) ++ ++/* The HI and LO registers can only be reloaded via the general ++ registers. Condition code registers can only be loaded to the ++ general registers, and from the floating point registers. */ ++ ++#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \ ++ loongarch_secondary_reload_class (CLASS, MODE, X, true) ++#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \ ++ loongarch_secondary_reload_class (CLASS, MODE, X, false) ++ ++/* Return the maximum number of consecutive registers ++ needed to represent mode MODE in a register of class CLASS. */ ++ ++#define CLASS_MAX_NREGS(CLASS, MODE) loongarch_class_max_nregs (CLASS, MODE) ++ ++/* Stack layout; function entry, exit and calling. */ ++ ++#define STACK_GROWS_DOWNWARD 1 ++ ++#define FRAME_GROWS_DOWNWARD 1 ++ ++#define RETURN_ADDR_RTX loongarch_return_addr ++ ++/* Similarly, don't use the least-significant bit to tell pointers to ++ code from vtable index. */ ++ ++#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta ++ ++#define ELIMINABLE_REGS \ ++{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ ++ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ ++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ ++ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM},} ++ ++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ ++ (OFFSET) = loongarch_initial_elimination_offset ((FROM), (TO)) ++ ++/* Allocate stack space for arguments at the beginning of each function. */ ++#define ACCUMULATE_OUTGOING_ARGS 1 ++ ++/* The argument pointer always points to the first argument. */ ++#define FIRST_PARM_OFFSET(FNDECL) 0 ++ ++/* o32 and o64 reserve stack space for all argument registers. */ ++#define REG_PARM_STACK_SPACE(FNDECL) \ ++ (TARGET_OLDABI \ ++ ? (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD) \ ++ : 0) ++ ++/* Define this if it is the responsibility of the caller to ++ allocate the area reserved for arguments passed in registers. ++ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect ++ of this macro is to determine whether the space is included in ++ `crtl->outgoing_args_size'. */ ++#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1 ++ ++#define STACK_BOUNDARY (TARGET_NEWABI ? 128 : 64) ++ ++/* Symbolic macros for the registers used to return integer and floating ++ point values. */ ++ ++#define GP_RETURN (GP_REG_FIRST + 4) ++#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : (FP_REG_FIRST + 0)) ++ ++#define MAX_ARGS_IN_REGISTERS (TARGET_OLDABI ? 4 : 8) ++ ++/* Symbolic macros for the first/last argument registers. */ ++ ++#define GP_ARG_FIRST (GP_REG_FIRST + 4) ++#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) ++#define FP_ARG_FIRST (FP_REG_FIRST + 0) ++#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) ++ ++/* True if MODE is vector and supported in a LSX vector register. 
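++ For example, V4SImode and V2DFmode are both 16 bytes wide and so ++ qualify, whereas 8-byte vectors such as V2SImode do not. 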
*/ ++#define LSX_SUPPORTED_MODE_P(MODE) \ ++ (ISA_HAS_LSX \ ++ && (MODE >= 0 && MODE < NUM_MACHINE_MODES) \ ++ && GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG \ ++ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ ++ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) ++ ++#define LASX_SUPPORTED_MODE_P(MODE) \ ++ (ISA_HAS_LASX \ ++ && (MODE >= 0 && MODE < NUM_MACHINE_MODES) \ ++ && (GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG \ ++ || GET_MODE_SIZE (MODE) == UNITS_PER_LASX_REG) \ ++ && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ ++ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) ++ ++/* 1 if N is a possible register number for function argument passing. ++ We have no FP argument registers when soft-float. */ ++ ++/* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI. */ ++#define FUNCTION_ARG_REGNO_P(N) \ ++ (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \ ++ || (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST))) ++ ++ ++/* This structure has to cope with two different argument allocation ++ schemes. Most LARCH ABIs view the arguments as a structure, of which ++ the first N words go in registers and the rest go on the stack. If I ++ < N, the Ith word might go in the Ith integer argument register or in a ++ floating-point register. For these ABIs, we only need to remember ++ the offset of the current argument into the structure. ++ ++ So for the standard ABIs, the first N words are allocated to integer ++ registers, and loongarch_function_arg decides on an argument-by-argument ++ basis whether that argument should really go in an integer register, ++ or in a floating-point one. */ ++ ++typedef struct loongarch_args { ++ /* Always true for varargs functions. Otherwise true if at least ++ one argument has been passed in an integer register. */ ++ int gp_reg_found; ++ ++ /* The number of arguments seen so far. */ ++ unsigned int arg_number; ++ ++ /* The number of integer registers used so far. This is the number ++ of words that have been added to the argument structure, limited ++ to MAX_ARGS_IN_REGISTERS. */ ++ unsigned int num_gprs; ++ ++ unsigned int num_fprs; ++ ++ /* The number of words passed on the stack. */ ++ unsigned int stack_words; ++ ++ /* On the loongarch16, we need to keep track of which floating point ++ arguments were passed in general registers, but would have been ++ passed in the FP regs if this were a 32-bit function, so that we ++ can move them to the FP regs if we wind up calling a 32-bit ++ function. We record this information in fp_code, encoded in base ++ four. A digit of zero means no floating-point argument, a digit of ++ one means an SFmode argument, a digit of two means a DFmode argument, ++ and a digit of three is not used. The low order digit is the first ++ argument. Thus 6 == 1 * 4 + 2 means a DFmode argument followed by ++ an SFmode argument. ??? A more sophisticated approach will be ++ needed if LARCH_ABI != ABILP32. */ ++ int fp_code; ++ ++ /* True if the function has a prototype. */ ++ int prototype; ++} CUMULATIVE_ARGS; ++ ++/* Initialize a variable CUM of type CUMULATIVE_ARGS ++ for a call to a function whose data type is FNTYPE. ++ For a library call, FNTYPE is 0. */ ++ ++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ ++ memset (&(CUM), 0, sizeof (CUM)) ++ ++ ++#define EPILOGUE_USES(REGNO) loongarch_epilogue_uses (REGNO) ++ ++/* Treat LOC as a byte offset from the stack pointer and round it up ++ to the next fully-aligned offset. */ ++#define LARCH_STACK_ALIGN(LOC) \ ++ (TARGET_NEWABI ? 
ROUND_UP ((LOC), 16) : ROUND_UP ((LOC), 8)) ++ ++ ++/* Output assembler code to FILE to increment profiler label # LABELNO ++ for profiling a function entry. */ ++ ++#define MCOUNT_NAME "_mcount" ++ ++/* Emit rtl for profiling. Output assembler code to FILE ++ to call "_mcount" for profiling a function entry. */ ++#define PROFILE_HOOK(LABEL) \ ++ { \ ++ rtx fun, ra; \ ++ ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \ ++ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ ++ emit_library_call (fun, LCT_NORMAL, VOIDmode, ra, Pmode); \ ++ } ++ ++/* All the work done in PROFILE_HOOK, but still required. */ ++#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0) ++ ++ ++/* The profiler preserves all interesting registers, including $31. */ ++#define LARCH_SAVE_REG_FOR_PROFILING_P(REGNO) false ++ ++/* No loongarch port has ever used the profiler counter word, so don't emit it ++ or the label for it. */ ++ ++#define NO_PROFILE_COUNTERS 1 ++ ++/* Define this macro if the code for function profiling should come ++ before the function prologue. Normally, the profiling code comes ++ after. */ ++ ++/* #define PROFILE_BEFORE_PROLOGUE */ ++ ++/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, ++ the stack pointer does not matter. The value is tested only in ++ functions that have frame pointers. ++ No definition is equivalent to always zero. */ ++ ++#define EXIT_IGNORE_STACK 1 ++ ++ ++/* Trampolines are a block of code followed by two pointers. */ ++ ++#define TRAMPOLINE_SIZE \ ++ (loongarch_trampoline_code_size () + GET_MODE_SIZE (ptr_mode) * 2) ++ ++/* Forcing a 64-bit alignment for 32-bit targets allows us to load two ++ pointers from a single LUI base. */ ++ ++#define TRAMPOLINE_ALIGNMENT 64 ++ ++/* loongarch_trampoline_init calls this library function to flush ++ program and data caches. */ ++ ++#ifndef CACHE_FLUSH_FUNC ++#define CACHE_FLUSH_FUNC "_flush_cache" ++#endif ++ ++#define LARCH_ICACHE_SYNC(ADDR, SIZE) \ ++ /* Flush both caches. We need to flush the data cache in case \ ++ the system has a write-back cache. */ \ ++ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, loongarch_cache_flush_func), \ ++ LCT_NORMAL, VOIDmode, ADDR, Pmode, SIZE, Pmode, \ ++ GEN_INT (3), TYPE_MODE (integer_type_node)) ++ ++ ++/* Addressing modes, and classification of registers for them. */ ++ ++#define REGNO_OK_FOR_INDEX_P(REGNO) 0 ++#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ ++ loongarch_regno_mode_ok_for_base_p (REGNO, MODE, 1) ++ ++/* Maximum number of registers that can appear in a valid memory address. */ ++ ++#define MAX_REGS_PER_ADDRESS 1 ++ ++/* Check for constness inline but use loongarch_legitimate_address_p ++ to check whether a constant really is an address. */ ++ ++#define CONSTANT_ADDRESS_P(X) \ ++ (CONSTANT_P (X) && memory_address_p (SImode, X)) ++ ++/* This handles the magic '..CURRENT_FUNCTION' symbol, which means ++ 'the start of the function that this code is output in'. */ ++ ++#define ASM_OUTPUT_LABELREF(FILE,NAME) \ ++ do { \ ++ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \ ++ asm_fprintf ((FILE), "%U%s", \ ++ XSTR (XEXP (DECL_RTL (current_function_decl), \ ++ 0), 0)); \ ++ else \ ++ asm_fprintf ((FILE), "%U%s", (NAME)); \ ++ } while (0) ++ ++/* Flag to mark a function decl symbol that requires a long call. */ ++#define SYMBOL_FLAG_LONG_CALL (SYMBOL_FLAG_MACH_DEP << 0) ++#define SYMBOL_REF_LONG_CALL_P(X) \ ++ ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_LONG_CALL) != 0) ++ ++/* This flag marks functions that cannot be lazily bound. 
*/ ++#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1) ++#define SYMBOL_REF_BIND_NOW_P(RTX) \ ++ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0) ++ ++/* True if we're generating a form of LARCH16 code in which jump tables ++ are stored in the text section and encoded as 16-bit PC-relative ++ offsets. This is only possible when general text loads are allowed, ++ since the table access itself will be an "lh" instruction. If the ++ PC-relative offsets grow too large, 32-bit offsets are used instead. */ ++ ++ ++#define CASE_VECTOR_MODE (ptr_mode) ++ ++/* Only use short offsets if their range will not overflow. */ ++#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) \ ++ (ptr_mode ? HImode : SImode) ++ ++ ++/* Define this as 1 if `char' should by default be signed; else as 0. */ ++#ifndef DEFAULT_SIGNED_CHAR ++#define DEFAULT_SIGNED_CHAR 1 ++#endif ++ ++/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets, ++ we generally don't want to use them for copying arbitrary data. ++ A single N-word move is usually the same cost as N single-word moves. */ ++#define MOVE_MAX UNITS_PER_WORD ++/* We don't modify it for LSX as it is only used by the classic reload. */ ++#define MAX_MOVE_MAX 8 ++ ++/* Define this macro as a C expression which is nonzero if ++ accessing less than a word of memory (i.e. a `char' or a ++ `short') is no faster than accessing a word of memory, i.e., if ++ such access require more than one instruction or if there is no ++ difference in cost between byte and (aligned) word loads. ++ ++ On RISC machines, it tends to generate better code to define ++ this as 1, since it avoids making a QI or HI mode register. ++ ++*/ ++#define SLOW_BYTE_ACCESS (1) ++ ++/* Standard LARCH integer shifts truncate the shift amount to the ++ width of the shifted operand. However, Loongson MMI shifts ++ do not truncate the shift amount at all. */ ++#define SHIFT_COUNT_TRUNCATED (1) ++ ++ ++/* Specify the machine mode that pointers have. ++ After generation of rtl, the compiler makes no further distinction ++ between pointers and any other objects of this machine mode. */ ++ ++#ifndef Pmode ++#define Pmode (TARGET_64BIT ? DImode : SImode) ++#endif ++ ++/* Give call MEMs SImode since it is the "most permissive" mode ++ for both 32-bit and 64-bit targets. */ ++ ++#define FUNCTION_MODE SImode ++ ++ ++/* We allocate $fcc registers by hand and can't cope with moves of ++ CCmode registers to and from pseudos (or memory). */ ++#define AVOID_CCMODE_COPIES ++ ++/* A C expression for the cost of a branch instruction. A value of ++ 1 is the default; other values are interpreted relative to that. */ ++ ++#define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost ++#define LOGICAL_OP_NON_SHORT_CIRCUIT 0 ++ ++/* The LARCH port has several functions that return an instruction count. ++ Multiplying the count by this value gives the number of bytes that ++ the instructions occupy. */ ++#define BASE_INSN_LENGTH (4) ++ ++/* The length of a NOP in bytes. */ ++#define NOP_INSN_LENGTH (4) ++ ++/* If defined, modifies the length assigned to instruction INSN as a ++ function of the context in which it is used. LENGTH is an lvalue ++ that contains the initially computed length of the insn and should ++ be updated with the correct length of the insn. */ ++#define ADJUST_INSN_LENGTH(INSN, LENGTH) \ ++ ((LENGTH) = loongarch_adjust_insn_length ((INSN), (LENGTH))) ++ ++/* Return the asm template for a conditional branch instruction. 
++ OPCODE is the opcode's mnemonic and OPERANDS is the asm template for ++ its operands. */ ++#define LARCH_BRANCH(OPCODE, OPERANDS) \ ++ OPCODE "\t" OPERANDS ++ ++#define LARCH_BRANCH_C(OPCODE, OPERANDS) \ ++ OPCODE "%:\t" OPERANDS ++ ++/* Return an asm string that forces INSN to be treated as an absolute ++ J or JAL instruction instead of an assembler macro. */ ++#define LARCH_ABSOLUTE_JUMP(INSN) INSN ++ ++ ++/* Control the assembler format that we output. */ ++ ++/* Output to assembler file text saying following lines ++ may contain character constants, extra white space, comments, etc. */ ++ ++#ifndef ASM_APP_ON ++#define ASM_APP_ON " #APP\n" ++#endif ++ ++/* Output to assembler file text saying following lines ++ no longer contain unusual constructs. */ ++ ++#ifndef ASM_APP_OFF ++#define ASM_APP_OFF " #NO_APP\n" ++#endif ++ ++#define REGISTER_NAMES \ ++{ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", \ ++ "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", \ ++ "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", \ ++ "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", \ ++ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \ ++ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ ++ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \ ++ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \ ++ "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4","$fcc5","$fcc6","$fcc7", \ ++ "$arg", "$frame"} ++ ++/* List the "software" names for each register. Also list the numerical ++ names for $fp and $sp. */ ++ ++#define ADDITIONAL_REGISTER_NAMES \ ++{ \ ++ { "zero", 0 + GP_REG_FIRST }, \ ++ { "ra", 1 + GP_REG_FIRST }, \ ++ { "tp", 2 + GP_REG_FIRST }, \ ++ { "sp", 3 + GP_REG_FIRST }, \ ++ { "a0", 4 + GP_REG_FIRST }, \ ++ { "a1", 5 + GP_REG_FIRST }, \ ++ { "a2", 6 + GP_REG_FIRST }, \ ++ { "a3", 7 + GP_REG_FIRST }, \ ++ { "a4", 8 + GP_REG_FIRST }, \ ++ { "a5", 9 + GP_REG_FIRST }, \ ++ { "a6", 10 + GP_REG_FIRST }, \ ++ { "a7", 11 + GP_REG_FIRST }, \ ++ { "t0", 12 + GP_REG_FIRST }, \ ++ { "t1", 13 + GP_REG_FIRST }, \ ++ { "t2", 14 + GP_REG_FIRST }, \ ++ { "t3", 15 + GP_REG_FIRST }, \ ++ { "t4", 16 + GP_REG_FIRST }, \ ++ { "t5", 17 + GP_REG_FIRST }, \ ++ { "t6", 18 + GP_REG_FIRST }, \ ++ { "t7", 19 + GP_REG_FIRST }, \ ++ { "t8", 20 + GP_REG_FIRST }, \ ++ { "x", 21 + GP_REG_FIRST }, \ ++ { "fp", 22 + GP_REG_FIRST }, \ ++ { "s0", 23 + GP_REG_FIRST }, \ ++ { "s1", 24 + GP_REG_FIRST }, \ ++ { "s2", 25 + GP_REG_FIRST }, \ ++ { "s3", 26 + GP_REG_FIRST }, \ ++ { "s4", 27 + GP_REG_FIRST }, \ ++ { "s5", 28 + GP_REG_FIRST }, \ ++ { "s6", 29 + GP_REG_FIRST }, \ ++ { "s7", 30 + GP_REG_FIRST }, \ ++ { "s8", 31 + GP_REG_FIRST }, \ ++ { "v0", 4 + GP_REG_FIRST }, \ ++ { "v1", 5 + GP_REG_FIRST }, \ ++ { "vr0", 0 + FP_REG_FIRST }, \ ++ { "vr1", 1 + FP_REG_FIRST }, \ ++ { "vr2", 2 + FP_REG_FIRST }, \ ++ { "vr3", 3 + FP_REG_FIRST }, \ ++ { "vr4", 4 + FP_REG_FIRST }, \ ++ { "vr5", 5 + FP_REG_FIRST }, \ ++ { "vr6", 6 + FP_REG_FIRST }, \ ++ { "vr7", 7 + FP_REG_FIRST }, \ ++ { "vr8", 8 + FP_REG_FIRST }, \ ++ { "vr9", 9 + FP_REG_FIRST }, \ ++ { "vr10", 10 + FP_REG_FIRST }, \ ++ { "vr11", 11 + FP_REG_FIRST }, \ ++ { "vr12", 12 + FP_REG_FIRST }, \ ++ { "vr13", 13 + FP_REG_FIRST }, \ ++ { "vr14", 14 + FP_REG_FIRST }, \ ++ { "vr15", 15 + FP_REG_FIRST }, \ ++ { "vr16", 16 + FP_REG_FIRST }, \ ++ { "vr17", 17 + FP_REG_FIRST }, \ ++ { "vr18", 18 + FP_REG_FIRST }, \ ++ { "vr19", 19 + FP_REG_FIRST }, \ ++ { "vr20", 20 + FP_REG_FIRST }, \ ++ { "vr21", 21 + 
FP_REG_FIRST }, \ ++ { "vr22", 22 + FP_REG_FIRST }, \ ++ { "vr23", 23 + FP_REG_FIRST }, \ ++ { "vr24", 24 + FP_REG_FIRST }, \ ++ { "vr25", 25 + FP_REG_FIRST }, \ ++ { "vr26", 26 + FP_REG_FIRST }, \ ++ { "vr27", 27 + FP_REG_FIRST }, \ ++ { "vr28", 28 + FP_REG_FIRST }, \ ++ { "vr29", 29 + FP_REG_FIRST }, \ ++ { "vr30", 30 + FP_REG_FIRST }, \ ++ { "vr31", 31 + FP_REG_FIRST }, \ ++ { "xr0", 0 + FP_REG_FIRST }, \ ++ { "xr1", 1 + FP_REG_FIRST }, \ ++ { "xr2", 2 + FP_REG_FIRST }, \ ++ { "xr3", 3 + FP_REG_FIRST }, \ ++ { "xr4", 4 + FP_REG_FIRST }, \ ++ { "xr5", 5 + FP_REG_FIRST }, \ ++ { "xr6", 6 + FP_REG_FIRST }, \ ++ { "xr7", 7 + FP_REG_FIRST }, \ ++ { "xr8", 8 + FP_REG_FIRST }, \ ++ { "xr9", 9 + FP_REG_FIRST }, \ ++ { "xr10", 10 + FP_REG_FIRST }, \ ++ { "xr11", 11 + FP_REG_FIRST }, \ ++ { "xr12", 12 + FP_REG_FIRST }, \ ++ { "xr13", 13 + FP_REG_FIRST }, \ ++ { "xr14", 14 + FP_REG_FIRST }, \ ++ { "xr15", 15 + FP_REG_FIRST }, \ ++ { "xr16", 16 + FP_REG_FIRST }, \ ++ { "xr17", 17 + FP_REG_FIRST }, \ ++ { "xr18", 18 + FP_REG_FIRST }, \ ++ { "xr19", 19 + FP_REG_FIRST }, \ ++ { "xr20", 20 + FP_REG_FIRST }, \ ++ { "xr21", 21 + FP_REG_FIRST }, \ ++ { "xr22", 22 + FP_REG_FIRST }, \ ++ { "xr23", 23 + FP_REG_FIRST }, \ ++ { "xr24", 24 + FP_REG_FIRST }, \ ++ { "xr25", 25 + FP_REG_FIRST }, \ ++ { "xr26", 26 + FP_REG_FIRST }, \ ++ { "xr27", 27 + FP_REG_FIRST }, \ ++ { "xr28", 28 + FP_REG_FIRST }, \ ++ { "xr29", 29 + FP_REG_FIRST }, \ ++ { "xr30", 30 + FP_REG_FIRST }, \ ++ { "xr31", 31 + FP_REG_FIRST } \ ++} ++ ++#define DBR_OUTPUT_SEQEND(STREAM) \ ++do \ ++ { \ ++ /* Emit a blank line after the delay slot for emphasis. */ \ ++ fputs ("\n", STREAM); \ ++ } \ ++while (0) ++ ++/* The LARCH implementation uses some labels for its own purpose. The ++ following lists what labels are created, and are all formed by the ++ pattern $L[a-z].*. The machine independent portion of GCC creates ++ labels matching: $L[A-Z][0-9]+ and $L[0-9]+. ++ ++ LM[0-9]+ Silicon Graphics/ECOFF stabs label before each stmt. ++ $Lb[0-9]+ Begin blocks for LARCH debug support ++ $Lc[0-9]+ Label for use in s operation. ++ $Le[0-9]+ End blocks for LARCH debug support */ ++ ++#undef ASM_DECLARE_OBJECT_NAME ++#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ ++ loongarch_declare_object (STREAM, NAME, "", ":\n") ++ ++/* Globalizing directive for a label. */ ++#define GLOBAL_ASM_OP "\t.globl\t" ++ ++/* This says how to define a global common symbol. */ ++ ++#define ASM_OUTPUT_ALIGNED_DECL_COMMON loongarch_output_aligned_decl_common ++ ++/* This says how to define a local common symbol (i.e., not visible to ++ linker). */ ++ ++#ifndef ASM_OUTPUT_ALIGNED_LOCAL ++#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \ ++ loongarch_declare_common_object (STREAM, NAME, "\n\t.lcomm\t", SIZE, ALIGN, false) ++#endif ++ ++/* This says how to output an external. It would be possible not to ++ output anything and let undefined symbol become external. However ++ the assembler uses length information on externals to allocate in ++ data/sdata bss/sbss, thereby saving exec time. */ ++ ++#undef ASM_OUTPUT_EXTERNAL ++#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \ ++ loongarch_output_external(STREAM,DECL,NAME) ++ ++/* This is how to declare a function name. The actual work of ++ emitting the label is moved to function_prologue, so that we can ++ get the line number correctly emitted before the .ent directive, ++ and after any .file directives. Define as empty so that the function ++ is not declared before the .ent directive elsewhere. 
*/
++
++#undef ASM_DECLARE_FUNCTION_NAME
++#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \
++  loongarch_declare_function_name(STREAM,NAME,DECL)
++
++/* This is how to store into the string LABEL
++   the symbol_ref name of an internal numbered label where
++   PREFIX is the class of label and NUM is the number within the class.
++   This is suitable for output with `assemble_name'.  */
++
++#undef ASM_GENERATE_INTERNAL_LABEL
++#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
++  sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
++
++/* Print debug labels as "foo = ." rather than "foo:" because they should
++   represent a byte pointer rather than an ISA-encoded address.  This is
++   particularly important for code like:
++
++	$LFBxxx = .
++		.cfi_startproc
++		...
++		.section .gcc_except_table,...
++		...
++		.uleb128 foo-$LFBxxx
++
++   The .uleb128 requires $LFBxxx to match the FDE start address, which is
++   likewise a byte pointer rather than an ISA-encoded address.
++
++   At the time of writing, this hook is not used for the function end
++   label:
++
++	$LFExxx:
++		.end foo
++
++   */
++
++#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \
++  fprintf (FILE, "%s%s%d = .\n", LOCAL_LABEL_PREFIX, PREFIX, NUM)
++
++/* This is how to output an element of a case-vector that is absolute.  */
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
++  fprintf (STREAM, "\t%s\t%sL%d\n", \
++	   ptr_mode == DImode ? ".dword" : ".word", \
++	   LOCAL_LABEL_PREFIX, \
++	   VALUE)
++
++/* This is how to output an element of a case-vector.  We can make the
++   entries GP-relative when .gp(d)word is supported.  */
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
++do { \
++    if (TARGET_RTP_PIC) \
++      { \
++	/* Make the entry relative to the start of the function.  */ \
++	rtx fnsym = XEXP (DECL_RTL (current_function_decl), 0); \
++	fprintf (STREAM, "\t%s\t%sL%d-", \
++		 Pmode == DImode ? ".dword" : ".word", \
++		 LOCAL_LABEL_PREFIX, VALUE); \
++	assemble_name (STREAM, XSTR (fnsym, 0)); \
++	fprintf (STREAM, "\n"); \
++      } \
++    else \
++      fprintf (STREAM, "\t%s\t%sL%d-%sL%d\n", \
++	       ptr_mode == DImode ? ".dword" : ".word", \
++	       LOCAL_LABEL_PREFIX, VALUE, \
++	       LOCAL_LABEL_PREFIX, REL); \
++} while (0)
++
++/* Mark inline jump tables as data for the purpose of disassembly.  For
++   simplicity embed the jump table's label number in the local symbol
++   produced so that multiple jump tables within a single function end
++   up marked with unique symbols.  Retain the alignment setting from
++   `elfos.h' as we are replacing the definition from there.  */
++
++#undef ASM_OUTPUT_BEFORE_CASE_LABEL
++#define ASM_OUTPUT_BEFORE_CASE_LABEL(STREAM, PREFIX, NUM, TABLE) \
++  do \
++    { \
++      ASM_OUTPUT_ALIGN ((STREAM), 2); \
++      if (JUMP_TABLES_IN_TEXT_SECTION) \
++	loongarch_set_text_contents_type (STREAM, "__jump_", NUM, FALSE); \
++    } \
++  while (0)
++
++/* Reset text marking to code after an inline jump table.  Like with
++   the beginning of a jump table use the label number to keep symbols
++   unique.  */
++
++#define ASM_OUTPUT_CASE_END(STREAM, NUM, TABLE) \
++  do \
++    if (JUMP_TABLES_IN_TEXT_SECTION) \
++      loongarch_set_text_contents_type (STREAM, "__jend_", NUM, TRUE); \
++  while (0)
++
++/* This is how to output an assembler line
++   that says to advance the location counter
++   to a multiple of 2**LOG bytes.
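++
++   For instance (an illustrative call, not taken from this port):
++   ASM_OUTPUT_ALIGN (asm_out_file, 3) prints "\t.align\t3", asking the
++   assembler to advance to a 2**3 == 8 byte boundary.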
*/ ++ ++#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ ++ fprintf (STREAM, "\t.align\t%d\n", (LOG)) ++ ++#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM,LOG) \ ++ fprintf (STREAM, "\t.align\t%d,54525952,4\n", (LOG)) ++ ++ ++/* This is how to output an assembler line to advance the location ++ counter by SIZE bytes. */ ++ ++#undef ASM_OUTPUT_SKIP ++#define ASM_OUTPUT_SKIP(STREAM,SIZE) \ ++ fprintf (STREAM, "\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)) ++ ++/* This is how to output a string. */ ++#undef ASM_OUTPUT_ASCII ++#define ASM_OUTPUT_ASCII loongarch_output_ascii ++ ++ ++/* Default to -G 8 */ ++#ifndef LARCH_DEFAULT_GVALUE ++#define LARCH_DEFAULT_GVALUE 8 ++#endif ++ ++/* Define the strings to put out for each section in the object file. */ ++#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */ ++#define DATA_SECTION_ASM_OP "\t.data" /* large data */ ++ ++#undef READONLY_DATA_SECTION_ASM_OP ++#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* read-only data */ ++ ++#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \ ++do \ ++ { \ ++ fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \ ++ TARGET_64BIT ? "daddiu" : "addiu", \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ TARGET_64BIT ? "sd" : "sw", \ ++ reg_names[REGNO], \ ++ reg_names[STACK_POINTER_REGNUM]); \ ++ } \ ++while (0) ++ ++#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \ ++do \ ++ { \ ++ loongarch_push_asm_switch (&loongarch_noreorder); \ ++ fprintf (STREAM, "\t%s\t%s,0(%s)\n\t%s\t%s,%s,8\n", \ ++ TARGET_64BIT ? "ld" : "lw", \ ++ reg_names[REGNO], \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ TARGET_64BIT ? "daddu" : "addu", \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ reg_names[STACK_POINTER_REGNUM]); \ ++ loongarch_pop_asm_switch (&loongarch_noreorder); \ ++ } \ ++while (0) ++ ++/* How to start an assembler comment. ++ The leading space is important (the loongarch native assembler requires it). */ ++#ifndef ASM_COMMENT_START ++#define ASM_COMMENT_START " #" ++#endif ++ ++#undef SIZE_TYPE ++#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int") ++ ++#undef PTRDIFF_TYPE ++#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int") ++ ++/* The minimum alignment of any expanded block move. */ ++#define LARCH_MIN_MOVE_MEM_ALIGN 16 ++ ++/* The maximum number of bytes that can be copied by one iteration of ++ a movmemsi loop; see loongarch_block_move_loop. */ ++#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER \ ++ (UNITS_PER_WORD * 4) ++ ++/* The maximum number of bytes that can be copied by a straight-line ++ implementation of movmemsi; see loongarch_block_move_straight. We want ++ to make sure that any loop-based implementation will iterate at ++ least twice. */ ++#define LARCH_MAX_MOVE_BYTES_STRAIGHT \ ++ (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) ++ ++/* The base cost of a memcpy call, for MOVE_RATIO and friends. These ++ values were determined experimentally by benchmarking with CSiBE. ++*/ ++#define LARCH_CALL_RATIO 8 ++ ++/* Any loop-based implementation of movmemsi will have at least ++ LARCH_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory ++ moves, so allow individual copies of fewer elements. ++ ++ When movmemsi is not available, use a value approximating ++ the length of a memcpy call sequence, so that move_by_pieces ++ will generate inline code if it is shorter than a function call. ++ Since move_by_pieces_ninsns counts memory-to-memory moves, but ++ we'll have to generate a load/store pair for each, halve the ++ value of LARCH_CALL_RATIO to take that into account. 
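++
++   As a worked example (assuming a 64-bit target, so UNITS_PER_WORD
++   is 8): with movmemsi available, MOVE_RATIO is 32 / 8 == 4; without
++   it, the size-optimized value is LARCH_CALL_RATIO / 2 == 4 and the
++   speed-optimized value is 15 / 2 == 7.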
*/ ++ ++#define MOVE_RATIO(speed) \ ++ (HAVE_movmemsi \ ++ ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \ ++ : CLEAR_RATIO (speed) / 2) ++ ++/* For CLEAR_RATIO, when optimizing for size, give a better estimate ++ of the length of a memset call, but use the default otherwise. */ ++ ++#define CLEAR_RATIO(speed)\ ++ ((speed) ? 15 : LARCH_CALL_RATIO) ++ ++/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when ++ optimizing for size adjust the ratio to account for the overhead of ++ loading the constant and replicating it across the word. */ ++ ++#define SET_RATIO(speed) \ ++ ((speed) ? 15 : LARCH_CALL_RATIO - 2) ++ ++/* Since the bits of the _init and _fini function is spread across ++ many object files, each potentially with its own GP, we must assume ++ we need to load our GP. We don't preserve $gp or $ra, since each ++ init/fini chunk is supposed to initialize $gp, and crti/crtn ++ already take care of preserving $ra and, when appropriate, $gp. */ ++#if (defined _ABI64 && _LARCH_SIM == _ABI64) ++#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ ++ asm (SECTION_OP "\n\ ++ .set push\n\ ++ la $r20, " USER_LABEL_PREFIX #FUNC "\n\ ++ jirl $r1, $r20, 0\n\ ++ .set pop\n\ ++ " TEXT_SECTION_ASM_OP); ++#endif ++#ifndef HAVE_AS_TLS ++#define HAVE_AS_TLS 0 ++#endif ++ ++#ifndef HAVE_AS_NAN ++#define HAVE_AS_NAN 0 ++#endif ++ ++#ifndef USED_FOR_TARGET ++/* Information about ".set noFOO; ...; .set FOO" blocks. */ ++struct loongarch_asm_switch { ++ /* The FOO in the description above. */ ++ const char *name; ++ ++ /* The current block nesting level, or 0 if we aren't in a block. */ ++ int nesting_level; ++}; ++ ++extern const enum reg_class loongarch_regno_to_class[]; ++extern const char *current_function_file; /* filename current function is in */ ++extern int num_source_filenames; /* current .file # */ ++extern int loongarch_dbx_regno[]; ++extern int loongarch_dwarf_regno[]; ++extern bool loongarch_split_p[]; ++extern bool loongarch_use_pcrel_pool_p[]; ++extern enum processor loongarch_arch; /* which cpu to codegen for */ ++extern enum processor loongarch_tune; /* which cpu to schedule for */ ++extern int loongarch_isa; /* architectural level */ ++extern int loongarch_isa_rev; ++extern const struct loongarch_cpu_info *loongarch_arch_info; ++extern const struct loongarch_cpu_info *loongarch_tune_info; ++extern unsigned int loongarch_base_compression_flags; ++ ++/* Information about a function's frame layout. */ ++struct GTY(()) loongarch_frame_info { ++ /* The size of the frame in bytes. */ ++ HOST_WIDE_INT total_size; ++ ++ /* The number of bytes allocated to variables. */ ++ HOST_WIDE_INT var_size; ++ ++ /* The number of bytes allocated to outgoing function arguments. */ ++ HOST_WIDE_INT args_size; ++ ++ /* The number of bytes allocated to the .cprestore slot, or 0 if there ++ is no such slot. */ ++ HOST_WIDE_INT cprestore_size; ++ ++ /* Bit X is set if the function saves or restores GPR X. */ ++ unsigned int mask; ++ ++ /* Likewise FPR X. */ ++ unsigned int fmask; ++ ++ /* Likewise doubleword accumulator X ($acX). */ ++ unsigned int acc_mask; ++ ++ /* The number of GPRs, FPRs, doubleword accumulators and COP0 ++ registers saved. */ ++ unsigned int num_gp; ++ unsigned int num_fp; ++ unsigned int num_acc; ++ unsigned int num_cop0_regs; ++ ++ /* The offset of the topmost GPR, FPR, accumulator and COP0-register ++ save slots from the top of the frame, or zero if no such slots are ++ needed. 
*/ ++ HOST_WIDE_INT gp_save_offset; ++ HOST_WIDE_INT fp_save_offset; ++ HOST_WIDE_INT acc_save_offset; ++ HOST_WIDE_INT cop0_save_offset; ++ ++ /* Likewise, but giving offsets from the bottom of the frame. */ ++ HOST_WIDE_INT gp_sp_offset; ++ HOST_WIDE_INT fp_sp_offset; ++ HOST_WIDE_INT acc_sp_offset; ++ HOST_WIDE_INT cop0_sp_offset; ++ ++ /* Similar, but the value passed to _mcount. */ ++ HOST_WIDE_INT ra_fp_offset; ++ ++ /* The offset of arg_pointer_rtx from the bottom of the frame. */ ++ HOST_WIDE_INT arg_pointer_offset; ++ ++ /* The offset of hard_frame_pointer_rtx from the bottom of the frame. */ ++ HOST_WIDE_INT hard_frame_pointer_offset; ++ ++ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */ ++ unsigned save_libcall_adjustment; ++ ++ /* Offset of virtual frame pointer from stack pointer/frame bottom */ ++ HOST_WIDE_INT frame_pointer_offset; ++}; ++ ++/* Enumeration for masked vectored (VI) and non-masked (EIC) interrupts. */ ++enum loongarch_int_mask ++{ ++ INT_MASK_EIC = -1, ++ INT_MASK_SW0 = 0, ++ INT_MASK_SW1 = 1, ++ INT_MASK_HW0 = 2, ++ INT_MASK_HW1 = 3, ++ INT_MASK_HW2 = 4, ++ INT_MASK_HW3 = 5, ++ INT_MASK_HW4 = 6, ++ INT_MASK_HW5 = 7 ++}; ++ ++/* Enumeration to mark the existence of the shadow register set. ++ SHADOW_SET_INTSTACK indicates a shadow register set with a valid stack ++ pointer. */ ++enum loongarch_shadow_set ++{ ++ SHADOW_SET_NO, ++ SHADOW_SET_YES, ++ SHADOW_SET_INTSTACK ++}; ++ ++struct GTY(()) machine_function { ++ /* The next floating-point condition-code register to allocate ++ for 8CC targets, relative to ST_REG_FIRST. */ ++ unsigned int next_fcc; ++ ++ /* The number of extra stack bytes taken up by register varargs. ++ This area is allocated by the callee at the very top of the frame. */ ++ int varargs_size; ++ ++ /* The current frame information, calculated by loongarch_compute_frame_info. */ ++ struct loongarch_frame_info frame; ++ ++ /* How many instructions it takes to load a label into $AT, or 0 if ++ this property hasn't yet been calculated. */ ++ unsigned int load_label_num_insns; ++ ++ /* True if loongarch_adjust_insn_length should ignore an instruction's ++ hazard attribute. */ ++ bool ignore_hazard_length_p; ++ ++ /* True if the whole function is suitable for .set noreorder and ++ .set nomacro. */ ++ bool all_noreorder_p; ++ ++ /* True if the function has "inflexible" and "flexible" references ++ to the global pointer. See loongarch_cfun_has_inflexible_gp_ref_p ++ and loongarch_cfun_has_flexible_gp_ref_p for details. */ ++ bool has_inflexible_gp_insn_p; ++ bool has_flexible_gp_insn_p; ++ ++ /* True if the function's prologue must load the global pointer ++ value into pic_offset_table_rtx and store the same value in ++ the function's cprestore slot (if any). Even if this value ++ is currently false, we may decide to set it to true later; ++ see loongarch_must_initialize_gp_p () for details. */ ++ bool must_initialize_gp_p; ++ ++ /* True if the current function must restore $gp after any potential ++ clobber. This value is only meaningful during the first post-epilogue ++ split_insns pass; see loongarch_must_initialize_gp_p () for details. */ ++ bool must_restore_gp_when_clobbered_p; ++ ++ /* True if this is an interrupt handler. */ ++ bool interrupt_handler_p; ++ ++ /* Records the way in which interrupts should be masked. Only used if ++ interrupts are not kept masked. */ ++ enum loongarch_int_mask int_mask; ++ ++ /* Records if this is an interrupt handler that uses shadow registers. 
*/ ++ enum loongarch_shadow_set use_shadow_register_set; ++ ++ /* True if this is an interrupt handler that should keep interrupts ++ masked. */ ++ bool keep_interrupts_masked_p; ++ ++ /* True if this is an interrupt handler that should use DERET ++ instead of ERET. */ ++ bool use_debug_exception_return_p; ++ ++ /* True if at least one of the formal parameters to a function must be ++ written to the frame header (probably so its address can be taken). */ ++ bool does_not_use_frame_header; ++ ++ /* True if none of the functions that are called by this function need ++ stack space allocated for their arguments. */ ++ bool optimize_call_stack; ++ ++ /* True if one of the functions calling this function may not allocate ++ a frame header. */ ++ bool callers_may_not_allocate_frame; ++ ++ /* True if GCC stored callee saved registers in the frame header. */ ++ bool use_frame_header_for_callee_saved_regs; ++}; ++#endif ++ ++/* Enable querying of DFA units. */ ++#define CPU_UNITS_QUERY 0 ++ ++/* As on most targets, we want the .eh_frame section to be read-only where ++ possible. And as on most targets, this means two things: ++ ++ (a) Non-locally-binding pointers must have an indirect encoding, ++ so that the addresses in the .eh_frame section itself become ++ locally-binding. ++ ++ (b) A shared library's .eh_frame section must encode locally-binding ++ pointers in a relative (relocation-free) form. ++ ++ However, LARCH has traditionally not allowed directives like: ++ ++ .long x-. ++ ++ in cases where "x" is in a different section, or is not defined in the ++ same assembly file. We are therefore unable to emit the PC-relative ++ form required by (b) at assembly time. ++ ++ Fortunately, the linker is able to convert absolute addresses into ++ PC-relative addresses on our behalf. Unfortunately, only certain ++ versions of the linker know how to do this for indirect pointers, ++ and for personality data. We must fall back on using writable ++ .eh_frame sections for shared libraries if the linker does not ++ support this feature. */ ++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ ++ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr) ++ ++#define SWITCHABLE_TARGET 1 ++ ++/* Several named LARCH patterns depend on Pmode. These patterns have the ++ form _si for Pmode == SImode and _di for Pmode == DImode. ++ Add the appropriate suffix to generator function NAME and invoke it ++ with arguments ARGS. */ ++#define PMODE_INSN(NAME, ARGS) \ ++ (Pmode == SImode ? NAME ## _si ARGS : NAME ## _di ARGS) ++ ++/***********************/ ++/* N_LARCH-PORT */ ++/***********************/ ++/* The `Q' extension is not yet supported. */ ++/* TODO: according to march */ ++#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4) ++ ++/* The largest type that can be passed in floating-point registers. */ ++/* TODO: according to mabi */ ++#define UNITS_PER_FP_ARG (TARGET_HARD_FLOAT ? (TARGET_64BIT ? 8 : 4) : 0) ++ ++/* Internal macros to classify an ISA register's type. */ ++ ++#define GP_TEMP_FIRST (GP_REG_FIRST + 12) ++ ++#define CALLEE_SAVED_REG_NUMBER(REGNO) \ ++ ((REGNO) >= 22 && (REGNO) <= 31 ? 
(REGNO) - 22 : -1) ++ ++#define N_LARCH_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1) ++#define N_LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, N_LARCH_PROLOGUE_TEMP_REGNUM) ++ ++#define LIBCALL_VALUE(MODE) \ ++ loongarch_function_value (NULL_TREE, NULL_TREE, MODE) ++ ++#define FUNCTION_VALUE(VALTYPE, FUNC) \ ++ loongarch_function_value (VALTYPE, FUNC, VOIDmode) ++ ++#define FRAME_GROWS_DOWNWARD 1 ++ ++#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +new file mode 100644 +index 000000000..be950c9e4 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch.md +@@ -0,0 +1,4320 @@ ++;; Loongarch.md Machine Description for LARCH based processors ++;; Copyright (C) 1989-2018 Free Software Foundation, Inc. ++;; Contributed by A. Lichnewsky, lich@inria.inria.fr ++;; Changes by Michael Meissner, meissner@osf.org ++ ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++ ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_enum "processor" [ ++ loongarch ++ loongarch64 ++ la464 ++]) ++ ++(define_c_enum "unspec" [ ++ ;; Integer operations that are too cumbersome to describe directly. ++ UNSPEC_WSBH ++ UNSPEC_DSBH ++ UNSPEC_DSHD ++ ++ ;; Floating-point moves. ++ UNSPEC_LOAD_LOW ++ UNSPEC_LOAD_HIGH ++ UNSPEC_STORE_WORD ++ UNSPEC_MOVGR2FRH ++ UNSPEC_MOVFRH2GR ++ ++ ;; Floating-point environment. ++ UNSPEC_MOVFCSR2GR ++ UNSPEC_MOVGR2FCSR ++ ++ ;; GP manipulation. ++ UNSPEC_EH_RETURN ++ ++ ;; ++ UNSPEC_FRINT ++ UNSPEC_FCLASS ++ UNSPEC_BYTEPICK_W ++ UNSPEC_BYTEPICK_D ++ UNSPEC_BITREV_4B ++ UNSPEC_BITREV_8B ++ ++ ;; Symbolic accesses. ++ UNSPEC_LOAD_CALL ++ ++ ;; Blockage and synchronisation. ++ UNSPEC_BLOCKAGE ++ UNSPEC_DBAR ++ UNSPEC_IBAR ++ ++ ;; CPUCFG ++ UNSPEC_CPUCFG ++ UNSPEC_ASRTLE_D ++ UNSPEC_ASRTGT_D ++ ++ UNSPEC_CSRRD ++ UNSPEC_CSRWR ++ UNSPEC_CSRXCHG ++ UNSPEC_IOCSRRD ++ UNSPEC_IOCSRWR ++ ++ ;; cacop ++ UNSPEC_CACOP ++ ++ ;; pte ++ UNSPEC_LDDIR ++ UNSPEC_LDPTE ++ ++ ;; Cache manipulation. ++ UNSPEC_LARCH_CACHE ++ ++ ;; Interrupt handling. ++ UNSPEC_ERTN ++ UNSPEC_DI ++ UNSPEC_EHB ++ UNSPEC_RDPGPR ++ ++ ;; Used in a call expression in place of args_size. It's present for PIC ++ ;; indirect calls where it contains args_size and the function symbol. ++ UNSPEC_CALL_ATTR ++ ++ ++ ;; Stack checking. ++ UNSPEC_PROBE_STACK_RANGE ++ ++ ;; The `.insn' pseudo-op. ++ UNSPEC_INSN_PSEUDO ++ ++ ;; TLS ++ UNSPEC_TLS_GD ++ UNSPEC_TLS_LD ++ UNSPEC_TLS_LE ++ UNSPEC_TLS_IE ++ ++ UNSPEC_LU52I_D ++ ++ UNSPEC_TIE ++ ++ ;; CRC ++ UNSPEC_CRC ++ UNSPEC_CRCC ++ UNSPEC_ADDRESS_FIRST ++]) ++ ++(define_c_enum "unspecv" [ ++ ;; Register save and restore. ++ UNSPECV_GPR_SAVE ++ UNSPECV_GPR_RESTORE ++ ++ UNSPECV_MOVE_EXTREME ++]) ++ ++ ++(define_constants ++ [(RETURN_ADDR_REGNUM 1) ++ (T0_REGNUM 12) ++ (T1_REGNUM 13) ++ (S0_REGNUM 23) ++ (S1_REGNUM 24) ++ (S2_REGNUM 25) ++ ++ ;; PIC long branch sequences are never longer than 100 bytes. 
++ (MAX_PIC_BRANCH_LENGTH 100) ++]) ++ ++(include "predicates.md") ++(include "constraints.md") ++ ++;; .................... ++;; ++;; Attributes ++;; ++;; .................... ++ ++(define_attr "got" "unset,load" ++ (const_string "unset")) ++ ++;; For jal instructions, this attribute is DIRECT when the target address ++;; is symbolic and INDIRECT when it is a register. ++(define_attr "jal" "unset,direct,indirect" ++ (const_string "unset")) ++ ++ ++;; Classification of moves, extensions and truncations. Most values ++;; are as for "type" (see below) but there are also the following ++;; move-specific values: ++;; ++;; sll0 "sll DEST,SRC,0", which on 64-bit targets is guaranteed ++;; to produce a sign-extended DEST, even if SRC is not ++;; properly sign-extended ++;; pick_ins BSTRPICK.W, BSTRPICK.D, BSTRINS.W or BSTRINS.D instruction ++;; andi a single ANDI instruction ++;; shift_shift a shift left followed by a shift right ++;; ++;; This attribute is used to determine the instruction's length and ++;; scheduling type. For doubleword moves, the attribute always describes ++;; the split instructions; in some cases, it is more appropriate for the ++;; scheduling type to be "multi" instead. ++(define_attr "move_type" ++ "unknown,load,fpload,store,fpstore,mgtf,mftg,imul,move,fmove, ++ const,signext,pick_ins,logical,arith,sll0,andi,shift_shift" ++ (const_string "unknown")) ++ ++(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor,simd_add" ++ (const_string "unknown")) ++ ++;; Main data type used by the insn ++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,OI,SF,DF,TF,FCC, ++ V2DI,V4SI,V8HI,V16QI,V2DF,V4SF,V4DI,V8SI,V16HI,V32QI,V4DF,V8SF" ++ (const_string "unknown")) ++ ++;; True if the main data type is twice the size of a word. ++(define_attr "dword_mode" "no,yes" ++ (cond [(and (eq_attr "mode" "DI,DF") ++ (not (match_test "TARGET_64BIT"))) ++ (const_string "yes") ++ ++ (and (eq_attr "mode" "TI,TF") ++ (match_test "TARGET_64BIT")) ++ (const_string "yes")] ++ (const_string "no"))) ++ ++;; True if the main data type is four times of the size of a word. ++(define_attr "qword_mode" "no,yes" ++ (cond [(and (eq_attr "mode" "TI,TF") ++ (not (match_test "TARGET_64BIT"))) ++ (const_string "yes")] ++ (const_string "no"))) ++ ++;; True if the main data type is eight times of the size of a word. ++(define_attr "oword_mode" "no,yes" ++ (cond [(and (eq_attr "mode" "OI,V8SF,V4DF") ++ (not (match_test "TARGET_64BIT"))) ++ (const_string "yes")] ++ (const_string "no"))) ++ ++;; Attributes describing a sync loop. These loops have the form: ++;; ++;; if (RELEASE_BARRIER == YES) sync ++;; 1: OLDVAL = *MEM ++;; if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2 ++;; CMP = 0 [delay slot] ++;; $TMP1 = OLDVAL & EXCLUSIVE_MASK ++;; $TMP2 = INSN1 (OLDVAL, INSN1_OP2) ++;; $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK) ++;; $AT |= $TMP1 | $TMP3 ++;; if (!commit (*MEM = $AT)) goto 1. ++;; if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot] ++;; CMP = 1 ++;; if (ACQUIRE_BARRIER == YES) sync ++;; 2: ++;; ++;; where "$" values are temporaries and where the other values are ++;; specified by the attributes below. Values are specified as operand ++;; numbers and insns are specified as enums. If no operand number is ++;; specified, the following values are used instead: ++;; ++;; - OLDVAL: $AT ++;; - CMP: NONE ++;; - NEWVAL: $AT ++;; - INCLUSIVE_MASK: -1 ++;; - REQUIRED_OLDVAL: OLDVAL & INCLUSIVE_MASK ++;; - EXCLUSIVE_MASK: 0 ++;; ++;; MEM and INSN1_OP2 are required. 
++;; ++;; Ideally, the operand attributes would be integers, with -1 meaning "none", ++;; but the gen* programs don't yet support that. ++(define_attr "sync_mem" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_oldval" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_cmp" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_newval" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_inclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_exclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_required_oldval" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_insn1_op2" "none,0,1,2,3,4,5" (const_string "none")) ++(define_attr "sync_insn1" "move,li,addu,addiu,subu,and,andi,or,ori,xor,xori" ++ (const_string "move")) ++(define_attr "sync_insn2" "nop,and,xor,not" ++ (const_string "nop")) ++;; Memory model specifier. ++;; "0"-"9" values specify the operand that stores the memory model value. ++;; "10" specifies MEMMODEL_ACQ_REL, ++;; "11" specifies MEMMODEL_ACQUIRE. ++(define_attr "sync_memmodel" "" (const_int 10)) ++ ++;; Accumulator operand for madd patterns. ++(define_attr "accum_in" "none,0,1,2,3,4,5" (const_string "none")) ++ ++;; Classification of each insn. ++;; branch conditional branch ++;; jump unconditional jump ++;; call unconditional call ++;; load load instruction(s) ++;; fpload floating point load ++;; fpidxload floating point indexed load ++;; store store instruction(s) ++;; fpstore floating point store ++;; fpidxstore floating point indexed store ++;; prefetch memory prefetch (register + offset) ++;; prefetchx memory indexed prefetch (register + register) ++;; condmove conditional moves ++;; mgtf move generate register to float register ++;; mftg move float register to generate register ++;; const load constant ++;; arith integer arithmetic instructions ++;; logical integer logical instructions ++;; shift integer shift instructions ++;; slt set less than instructions ++;; signext sign extend instructions ++;; clz the clz and clo instructions ++;; trap trap if instructions ++;; imul integer multiply 2 operands ++;; imul3 integer multiply 3 operands ++;; idiv3 integer divide 3 operands ++;; move integer register move ({,D}ADD{,U} with rt = 0) ++;; fmove floating point register move ++;; fadd floating point add/subtract ++;; fmul floating point multiply ++;; fmadd floating point multiply-add ++;; fdiv floating point divide ++;; frdiv floating point reciprocal divide ++;; fabs floating point absolute value ++;; fneg floating point negation ++;; fcmp floating point compare ++;; fcvt floating point convert ++;; fsqrt floating point square root ++;; frsqrt floating point reciprocal square root ++;; multi multiword sequence (or user asm statements) ++;; atomic atomic memory update instruction ++;; syncloop memory atomic operation implemented as a sync loop ++;; nop no operation ++;; ghost an instruction that produces no real code ++(define_attr "type" ++ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, ++ prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, ++ shift,slt,signext,clz,trap,imul,imul3,idiv3,move, ++ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcvt,fsqrt, ++ frsqrt,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat, ++ multi,atomic,syncloop,nop,ghost, ++ simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd, ++ simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp, ++ 
simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill, ++ simd_permute,simd_shf,simd_sat,simd_pcnt,simd_copy,simd_branch,simd_clsx, ++ simd_fminmax,simd_logic,simd_move,simd_load,simd_store" ++ (cond [(eq_attr "jal" "!unset") (const_string "call") ++ (eq_attr "got" "load") (const_string "load") ++ ++ (eq_attr "alu_type" "add,sub") (const_string "arith") ++ ++ (eq_attr "alu_type" "not,nor,and,or,xor") (const_string "logical") ++ ++ ;; If a doubleword move uses these expensive instructions, ++ ;; it is usually better to schedule them in the same way ++ ;; as the singleword form, rather than as "multi". ++ (eq_attr "move_type" "load") (const_string "load") ++ (eq_attr "move_type" "fpload") (const_string "fpload") ++ (eq_attr "move_type" "store") (const_string "store") ++ (eq_attr "move_type" "fpstore") (const_string "fpstore") ++ (eq_attr "move_type" "mgtf") (const_string "mgtf") ++ (eq_attr "move_type" "mftg") (const_string "mftg") ++ ++ ;; These types of move are always single insns. ++ (eq_attr "move_type" "imul") (const_string "imul") ++ (eq_attr "move_type" "fmove") (const_string "fmove") ++ (eq_attr "move_type" "signext") (const_string "signext") ++ (eq_attr "move_type" "pick_ins") (const_string "arith") ++ (eq_attr "move_type" "arith") (const_string "arith") ++ (eq_attr "move_type" "logical") (const_string "logical") ++ (eq_attr "move_type" "sll0") (const_string "shift") ++ (eq_attr "move_type" "andi") (const_string "logical") ++ ++ ;; These types of move are always split. ++ (eq_attr "move_type" "shift_shift") ++ (const_string "multi") ++ ++ ;; These types of move are split for octaword modes only. ++ (and (eq_attr "move_type" "move,const") ++ (eq_attr "oword_mode" "yes")) ++ (const_string "multi") ++ ++ ;; These types of move are split for quadword modes only. ++ (and (eq_attr "move_type" "move,const") ++ (eq_attr "qword_mode" "yes")) ++ (const_string "multi") ++ ++ ;; These types of move are split for doubleword modes only. ++ (and (eq_attr "move_type" "move,const") ++ (eq_attr "dword_mode" "yes")) ++ (const_string "multi") ++ (eq_attr "move_type" "move") (const_string "move") ++ (eq_attr "move_type" "const") (const_string "const") ++ (eq_attr "sync_mem" "!none") (const_string "syncloop")] ++ (const_string "unknown"))) ++ ++(define_attr "compact_form" "always,maybe,never" ++ (cond [(eq_attr "jal" "direct") ++ (const_string "always") ++ (eq_attr "jal" "indirect") ++ (const_string "maybe") ++ (eq_attr "type" "jump") ++ (const_string "maybe")] ++ (const_string "never"))) ++ ++;; Mode for conversion types (fcvt) ++;; I2S integer to float single (SI/DI to SF) ++;; I2D integer to float double (SI/DI to DF) ++;; S2I float to integer (SF to SI/DI) ++;; D2I float to integer (DF to SI/DI) ++;; D2S double to float single ++;; S2D float single to double ++ ++(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" ++ (const_string "unknown")) ++ ++(define_attr "compression" "none,all" ++ (const_string "none")) ++ ++;; The number of individual instructions that a non-branch pattern generates, ++;; using units of BASE_INSN_LENGTH. ++(define_attr "insn_count" "" ++ (cond [;; "Ghost" instructions occupy no space. ++ (eq_attr "type" "ghost") ++ (const_int 0) ++ ++ ;; Check for doubleword moves that are decomposed into two ++ ;; instructions. ++ (and (eq_attr "move_type" "mgtf,mftg,move") ++ (eq_attr "dword_mode" "yes")) ++ (const_int 2) ++ ++ ;; Check for quadword moves that are decomposed into four ++ ;; instructions. 
++ (and (eq_attr "move_type" "mgtf,mftg,move") ++ (eq_attr "qword_mode" "yes")) ++ (const_int 4) ++ ++ ;; Check for Octaword moves that are decomposed into eight ++ ;; instructions. ++ (and (eq_attr "move_type" "mgtf,mftg,move") ++ (eq_attr "oword_mode" "yes")) ++ (const_int 8) ++ ++ ;; Constants, loads and stores are handled by external routines. ++ (and (eq_attr "move_type" "const") ++ (eq_attr "dword_mode" "yes")) ++ (symbol_ref "loongarch_split_const_insns (operands[1])") ++ (eq_attr "move_type" "const") ++ (symbol_ref "loongarch_const_insns (operands[1])") ++ (eq_attr "move_type" "load,fpload") ++ (symbol_ref "loongarch_load_store_insns (operands[1], insn)") ++ (eq_attr "move_type" "store,fpstore") ++ (symbol_ref "loongarch_load_store_insns (operands[0], insn)") ++ ++ (eq_attr "type" "idiv3") ++ (symbol_ref "loongarch_idiv_insns (GET_MODE (PATTERN (insn)))")] ++(const_int 1))) ++ ++;; Length of instruction in bytes. The default is derived from "insn_count", ++;; but there are special cases for branches (which must be handled here) ++;; and for compressed single instructions. ++ ++ ++ ++(define_attr "length" "" ++ (cond [ ++ ;; Branch instructions have a range of [-0x20000,0x1fffc]. ++ ;; If a branch is outside this range, we have a choice of two ++ ;; sequences. ++ ;; ++ ;; For PIC, an out-of-range branch like: ++ ;; ++ ;; bne r1,r2,target ++ ;; ++ ;; becomes the equivalent of: ++ ;; ++ ;; beq r1,r2,1f ++ ;; la rd,target ++ ;; jr rd ++ ;; 1: ++ ;; ++ ;; The non-PIC case is similar except that we use a direct ++ ;; jump instead of an la/jr pair. Since the target of this ++ ;; jump is an absolute 28-bit bit address (the other bits ++ ;; coming from the address of the delay slot) this form cannot ++ ;; cross a 256MB boundary. We could provide the option of ++ ;; using la/jr in this case too, but we do not do so at ++ ;; present. ++ ;; ++ ;; from the shorten_branches reference address. ++ (eq_attr "type" "branch") ++ (cond [;; Any variant can handle the 17-bit range. ++ (and (le (minus (match_dup 0) (pc)) (const_int 65532)) ++ (le (minus (pc) (match_dup 0)) (const_int 65534))) ++ (const_int 4) ++ ++ ;; The non-PIC case: branch, and J. ++ (match_test "TARGET_ABSOLUTE_JUMPS") ++ (const_int 8)] ++ ++ ;; Use MAX_PIC_BRANCH_LENGTH as a (gross) overestimate. ++ ;; loongarch_adjust_insn_length substitutes the correct length. ++ ;; ++ ;; Note that we can't simply use (symbol_ref ...) here ++ ;; because genattrtab needs to know the maximum length ++ ;; of an insn. ++ (const_int MAX_PIC_BRANCH_LENGTH)) ++ ] ++ (symbol_ref "get_attr_insn_count (insn) * BASE_INSN_LENGTH"))) ++ ++;; Attribute describing the processor. ++(define_enum_attr "cpu" "processor" ++ (const (symbol_ref "loongarch_tune"))) ++ ++;; The type of hardware hazard associated with this instruction. ++;; DELAY means that the next instruction cannot read the result ++;; of this one. ++(define_attr "hazard" "none,delay,forbidden_slot" ++ (const_string "none")) ++ ++;; Can the instruction be put into a delay slot? ++(define_attr "can_delay" "no,yes" ++ (if_then_else (and (eq_attr "type" "!branch,call,jump") ++ (eq_attr "hazard" "none") ++ (match_test "get_attr_insn_count (insn) == 1")) ++ (const_string "yes") ++ (const_string "no"))) ++ ++;; Describe a user's asm statement. ++(define_asm_attributes ++ [(set_attr "type" "multi") ++ (set_attr "can_delay" "no")]) ++ ++;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated ++;; from the same template. 
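++;; For example (an illustrative expansion, using the <mode> and <d>
++;; substitutions defined below): a single "add<mode>3" template written
++;; over :GPR with output "add.<d>\t%0,%1,%2" yields an "addsi3" variant
++;; emitting "add.w" and, when TARGET_64BIT, an "adddi3" variant
++;; emitting "add.d".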
++(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
++
++;; A copy of GPR that can be used when a pattern has two independent
++;; modes.
++(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
++
++;; Likewise, but for XLEN-sized quantities.
++(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")])
++
++(define_mode_iterator MOVEP1 [SI SF])
++(define_mode_iterator MOVEP2 [SI SF])
++(define_mode_iterator JOIN_MODE [HI
++				 SI
++				 (SF "TARGET_HARD_FLOAT")
++				 (DF "TARGET_HARD_FLOAT
++				      && TARGET_DOUBLE_FLOAT")])
++
++;; This mode iterator allows :P to be used for patterns that operate on
++;; pointer-sized quantities.  Exactly one of the two alternatives will match.
++(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
++
++;; 32-bit integer moves for which we provide move patterns.
++(define_mode_iterator IMOVE32
++  [SI])
++
++;; 64-bit modes for which we provide move patterns.
++(define_mode_iterator MOVE64
++  [DI DF])
++
++;; 128-bit modes for which we provide move patterns on 64-bit targets.
++(define_mode_iterator MOVE128 [TI TF])
++
++;; This mode iterator allows the QI and HI extension patterns to be
++;; defined from the same template.
++(define_mode_iterator SHORT [QI HI])
++
++;; Likewise the 64-bit truncate-and-shift patterns.
++(define_mode_iterator SUBDI [QI HI SI])
++
++;; This mode iterator allows the QI, HI, SI and DI extension patterns
++;; to be defined from the same template.
++(define_mode_iterator QHWD [QI HI SI (DI "TARGET_64BIT")])
++
++
++;; This mode iterator allows :ANYF to be used wherever a scalar or vector
++;; floating-point mode is allowed.
++(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
++			    (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")])
++
++;; Like ANYF, but only applies to scalar modes.
++(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
++			       (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")])
++
++;; A floating-point mode for which moves involving FPRs may need to be split.
++(define_mode_iterator SPLITF
++  [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
++   (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
++   (TF "TARGET_64BIT && TARGET_FLOAT64")])
++
++;; In GPR templates, a string like "mul.<d>" will expand to "mul.w" in the
++;; 32-bit version and "mul.d" in the 64-bit version.
++(define_mode_attr d [(SI "w") (DI "d")])
++
++;; Same as d but upper-case.
++(define_mode_attr D [(SI "") (DI "D")])
++
++;; This attribute gives the length suffix for a load or store instruction.
++;; The same suffixes work for zero and sign extensions.
++(define_mode_attr size [(QI "b") (HI "h") (SI "w") (DI "d")])
++(define_mode_attr SIZE [(QI "B") (HI "H") (SI "W") (DI "D")])
++
++;; This attribute gives the mode mask of a SHORT.
++(define_mode_attr mask [(QI "0x00ff") (HI "0xffff")])
++
++;; This attribute gives the number of bits in a SHORT minus one.
++(define_mode_attr qi_hi [(QI "7") (HI "15")])
++
++;; Mode attributes for GPR loads.
++(define_mode_attr load [(SI "lw") (DI "ld")])
++
++(define_mode_attr load_l [(SI "ld.w") (DI "ld.d")])
++;; Instruction names for stores.
++(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd")])
++
++;; Similarly for indexed FPR loads and stores.
++(define_mode_attr floadx [(SF "fldx.s") (DF "fldx.d") (V2SF "fldx.d")])
++(define_mode_attr fstorex [(SF "fstx.s") (DF "fstx.d") (V2SF "fstx.d")])
++
++;; Similarly for indexed GPR loads and stores.
++(define_mode_attr loadx [(QI "ldx.b") ++ (HI "ldx.h") ++ (SI "ldx.w") ++ (DI "ldx.d")]) ++(define_mode_attr storex [(QI "stx.b") ++ (HI "stx.h") ++ (SI "stx.w") ++ (DI "stx.d")]) ++ ++;; This attribute gives the best constraint to use for registers of ++;; a given mode. ++(define_mode_attr reg [(SI "d") (DI "d") (FCC "z")]) ++ ++;; This attribute gives the format suffix for floating-point operations. ++(define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")]) ++ ++;; This attribute gives the upper-case mode name for one unit of a ++;; floating-point mode or vector mode. ++(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF") ++ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") ++ (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) ++ ++;; As above, but in lower case. ++(define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf") ++ (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") ++ (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") ++ (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") ++ (V8SF "sf") (V4DF "df")]) ++ ++;; This attribute gives the integer mode that has half the size of ++;; the controlling mode. ++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI") ++ (V2SI "SI") (V4HI "SI") (V8QI "SI") ++ (TF "DI")]) ++ ++(define_mode_attr p [(SI "") (DI "d")]) ++ ++;; This attribute works around the early SB-1 rev2 core "F2" erratum: ++;; ++;; In certain cases, div.s and div.ps may have a rounding error ++;; and/or wrong inexact flag. ++;; ++;; Therefore, we only allow div.s if not working around SB-1 rev2 ++;; errata or if a slight loss of precision is OK. ++(define_mode_attr divide_condition ++ [DF (SF "flag_unsafe_math_optimizations") ++ (V2SF "TARGET_SB1 && (flag_unsafe_math_optimizations)")]) ++ ++;; This attribute gives the conditions under which SQRT.fmt instructions ++;; can be used. ++(define_mode_attr sqrt_condition ++ [SF DF (V2SF "TARGET_SB1")]) ++ ++;; This code iterator allows signed and unsigned widening multiplications ++;; to use the same template. ++(define_code_iterator any_extend [sign_extend zero_extend]) ++ ++;; This code iterator allows the two right shift instructions to be ++;; generated from the same template. ++(define_code_iterator any_shiftrt [ashiftrt lshiftrt]) ++ ++;; This code iterator allows the three shift instructions to be generated ++;; from the same template. ++(define_code_iterator any_shift [ashift ashiftrt lshiftrt]) ++ ++;; This code iterator allows unsigned and signed division to be generated ++;; from the same template. ++(define_code_iterator any_div [div udiv]) ++ ++;; This code iterator allows unsigned and signed modulus to be generated ++;; from the same template. ++(define_code_iterator any_mod [mod umod]) ++ ++;; This code iterator allows addition and subtraction to be generated ++;; from the same template. ++(define_code_iterator addsub [plus minus]) ++ ++;; This code iterator allows addition and multiplication to be generated ++;; from the same template. ++(define_code_iterator addmul [plus mult]) ++ ++;; This code iterator allows addition subtraction and multiplication to be generated ++;; from the same template ++(define_code_iterator addsubmul [plus minus mult]) ++ ++;; This code iterator allows all native floating-point comparisons to be ++;; generated from the same template. ++(define_code_iterator fcond [unordered uneq unlt unle eq lt le ordered ltgt ne]) ++ ++;; This code iterator is used for comparisons that can be implemented ++;; by swapping the operands. 
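++;; For example, "a >= b" has no direct condition of its own here, but it
++;; is equivalent to "b <= a", so the GE case emits the SLE test (see the
++;; swapped_fcond attribute below) with its two operands exchanged.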
++(define_code_iterator swapped_fcond [ge gt unge ungt])
++
++;; Equality operators.
++(define_code_iterator equality_op [eq ne])
++
++;; These code iterators allow the signed and unsigned scc operations to use
++;; the same template.
++(define_code_iterator any_gt [gt gtu])
++(define_code_iterator any_ge [ge geu])
++(define_code_iterator any_lt [lt ltu])
++(define_code_iterator any_le [le leu])
++
++(define_code_iterator any_return [return simple_return])
++
++;; <u> expands to an empty string when doing a signed operation and
++;; "u" when doing an unsigned operation.
++(define_code_attr u [(sign_extend "") (zero_extend "u")
++		     (div "") (udiv "u")
++		     (mod "") (umod "u")
++		     (gt "") (gtu "u")
++		     (ge "") (geu "u")
++		     (lt "") (ltu "u")
++		     (le "") (leu "u")])
++
++;; <U> is like <u> except uppercase.
++(define_code_attr U [(sign_extend "") (zero_extend "U")])
++
++;; <su> is like <u>, but the signed form expands to "s" rather than "".
++(define_code_attr su [(sign_extend "s") (zero_extend "u")])
++
++;; <optab> expands to the name of the optab for a particular code.
++(define_code_attr optab [(ashift "ashl")
++			 (ashiftrt "ashr")
++			 (lshiftrt "lshr")
++			 (ior "ior")
++			 (xor "xor")
++			 (and "and")
++			 (plus "add")
++			 (minus "sub")
++			 (mult "mul")
++			 (return "return")
++			 (simple_return "simple_return")])
++
++;; <insn> expands to the name of the insn that implements a particular code.
++(define_code_attr insn [(ashift "sll")
++			(ashiftrt "sra")
++			(lshiftrt "srl")
++			(ior "or")
++			(xor "xor")
++			(and "and")
++			(plus "addu")
++			(minus "subu")])
++
++;; <immediate_insn> expands to the name of the insn that implements
++;; a particular code to operate on immediate values.
++(define_code_attr immediate_insn [(ior "ori")
++				  (xor "xori")
++				  (and "andi")])
++
++;; <fcond> is the c.cond.fmt condition associated with a particular code.
++(define_code_attr fcond [(unordered "cun")
++			 (uneq "cueq")
++			 (unlt "cult")
++			 (unle "cule")
++			 (eq "ceq")
++			 (lt "slt")
++			 (le "sle")
++			 (ordered "cor")
++			 (ltgt "sne")
++			 (ne "cune")])
++
++;; Similar, but for swapped conditions.
++(define_code_attr swapped_fcond [(ge "sle")
++				 (gt "slt")
++				 (unge "cule")
++				 (ungt "cult")])
++
++;; The value of the bit when the branch is taken for branch_bit patterns.
++;; Comparison is always against zero so this depends on the operator.
++(define_code_attr bbv [(eq "0") (ne "1")])
++
++;; This is the inverse value of bbv.
++(define_code_attr bbinv [(eq "1") (ne "0")])
++
++;; The sel mnemonic to use depending on the condition test.
++(define_code_attr sel [(eq "masknez") (ne "maskeqz")])
++(define_code_attr selinv [(eq "maskeqz") (ne "masknez")])
++
++;; Pipeline descriptions.
++;;
++;; generic.md provides a fallback for processors without a specific
++;; pipeline description.  It is derived from the old define_function_unit
++;; version and uses the "alu" and "imuldiv" units declared below.
++;;
++;; Some of the processor-specific files are also derived from old
++;; define_function_unit descriptions and simply override the parts of
++;; generic.md that don't apply.  The other processor-specific files
++;; are self-contained.
++(define_automaton "alu,imuldiv")
++
++(define_cpu_unit "alu" "alu")
++(define_cpu_unit "imuldiv" "imuldiv")
++
++;; Ghost instructions produce no real code and introduce no hazards.
++;; They exist purely to express an effect on dataflow.
++(define_insn_reservation "ghost" 0
++  (eq_attr "type" "ghost")
++  "nothing")
++
++(include "generic.md")
++
++;;
++;;  ....................
++;;
++;;	CONDITIONAL TRAPS
++;;
++;;  ....................
++;;
++
++(define_insn "trap"
++  [(trap_if (const_int 1) (const_int 0))]
++  ""
++{
++  return "break\t0";
++}
++  [(set_attr "type" "trap")])
++
++
++
++;;
++;;  ....................
++;;
++;;	ADDITION
++;;
++;;  ....................
++;;
++
++(define_insn "add<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		   (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fadd.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "add<mode>3"
++  [(set (match_operand:GPR 0 "register_operand")
++	(plus:GPR (match_operand:GPR 1 "register_operand")
++		  (match_operand:GPR 2 "arith_operand")))]
++  "")
++
++(define_insn "*add<mode>3"
++  [(set (match_operand:GPR 0 "register_operand" "=r,r")
++	(plus:GPR (match_operand:GPR 1 "register_operand" "r,r")
++		  (match_operand:GPR 2 "arith_operand" "r,Q")))]
++  ""
++{
++  if (which_alternative == 0)
++    return "add.<d>\t%0,%1,%2";
++  else
++    return "addi.<d>\t%0,%1,%2";
++}
++  [(set_attr "alu_type" "add")
++   (set_attr "compression" "*,*")
++   (set_attr "mode" "<MODE>")])
++
++
++(define_insn "*addsi3_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++	(sign_extend:DI
++	  (plus:SI (match_operand:SI 1 "register_operand" "r,r")
++		   (match_operand:SI 2 "arith_operand" "r,Q"))))]
++  "TARGET_64BIT"
++  "@
++   add.w\t%0,%1,%2
++   addi.w\t%0,%1,%2"
++  [(set_attr "alu_type" "add")
++   (set_attr "mode" "SI")])
++
++
++;;
++;;  ....................
++;;
++;;	SUBTRACTION
++;;
++;;  ....................
++;;
++
++(define_insn "sub<mode>3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		    (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fsub.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fadd")
++   (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "sub<mode>3"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(minus:GPR (match_operand:GPR 1 "register_operand" "r")
++		   (match_operand:GPR 2 "register_operand" "r")))]
++  ""
++  "sub.<d>\t%0,%1,%2"
++  [(set_attr "alu_type" "sub")
++   (set_attr "compression" "*")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "*subsi3_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(sign_extend:DI
++	  (minus:SI (match_operand:SI 1 "register_operand" "r")
++		    (match_operand:SI 2 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "sub.w\t%0,%1,%2"
++  [(set_attr "alu_type" "sub")
++   (set_attr "mode" "DI")])
++
++;;
++;;  ....................
++;;
++;;	MULTIPLICATION
++;;
++;;  ....................
++;;
++
++(define_expand "mul<mode>3"
++  [(set (match_operand:SCALARF 0 "register_operand")
++	(mult:SCALARF (match_operand:SCALARF 1 "register_operand")
++		      (match_operand:SCALARF 2 "register_operand")))]
++  ""
++  "")
++
++(define_insn "*mul<mode>3"
++  [(set (match_operand:SCALARF 0 "register_operand" "=f")
++	(mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
++		      (match_operand:SCALARF 2 "register_operand" "f")))]
++  ""
++  "fmul.<fmt>\t%0,%1,%2"
++  [(set_attr "type" "fmul")
++   (set_attr "mode" "<MODE>")])
++
++(define_insn "mul<mode>3"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(mult:GPR (match_operand:GPR 1 "register_operand" "r")
++		  (match_operand:GPR 2 "register_operand" "r")))]
++  ""
++  "mul.<d>\t%0,%1,%2"
++  [(set_attr "type" "imul3")
++   (set_attr "mode" "<MODE>")])
++
++
++
++(define_insn "mulsidi3_64bit"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
++		 (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
++  ""
++  "mul.d\t%0,%1,%2"
++  [(set_attr "type" "imul3")
++   (set_attr "mode" "DI")])
++
++
++;;
++;;  ........................
++;;
++;;	MULTIPLICATION HIGH-PART
++;;
++;;  ........................
++;;
++
++
++(define_expand "<u>mulditi3"
++  [(set (match_operand:TI 0 "register_operand")
++	(mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
++		 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
++  "TARGET_64BIT"
++{
++  rtx low = gen_reg_rtx (DImode);
++  emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++  rtx high = gen_reg_rtx (DImode);
++  emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
++
++  emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++  emit_move_insn (gen_highpart (DImode, operands[0]), high);
++  DONE;
++})
++
++(define_insn "<su>muldi3_highpart"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(truncate:DI
++	  (lshiftrt:TI
++	    (mult:TI (any_extend:TI
++		       (match_operand:DI 1 "register_operand" " r"))
++		     (any_extend:TI
++		       (match_operand:DI 2 "register_operand" " r")))
++	    (const_int 64))))]
++  "TARGET_64BIT"
++  "mulh.d<u>\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "DI")])
++
++(define_expand "<u>mulsidi3"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(mult:DI (any_extend:DI
++		   (match_operand:SI 1 "register_operand" " r"))
++		 (any_extend:DI
++		   (match_operand:SI 2 "register_operand" " r"))))]
++  "!TARGET_64BIT"
++{
++  rtx temp = gen_reg_rtx (SImode);
++  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++  emit_insn (gen_<su>mulsi3_highpart (loongarch_subword (operands[0], true),
++				      operands[1], operands[2]));
++  emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp));
++  DONE;
++})
++
++(define_insn "<su>mulsi3_highpart"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(truncate:SI
++	  (lshiftrt:DI
++	    (mult:DI (any_extend:DI
++		       (match_operand:SI 1 "register_operand" " r"))
++		     (any_extend:DI
++		       (match_operand:SI 2 "register_operand" " r")))
++	    (const_int 32))))]
++  "!TARGET_64BIT"
++  "mulh.w<u>\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "SI")])
++
++;; Floating point multiply accumulate instructions.
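++;;
++;; fma (a, b, c) computes a * b + c with a single rounding step, which
++;; is what the fmadd.s/fmadd.d instructions below provide; a separate
++;; multiply followed by an add would also round the intermediate product.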
++
++(define_expand "fma4"
++  [(set (match_operand:ANYF 0 "register_operand")
++	(fma:ANYF (match_operand:ANYF 1 "register_operand")
++		  (match_operand:ANYF 2 "register_operand")
++		  (match_operand:ANYF 3 "register_operand")))]
++  "TARGET_HARD_FLOAT")
++
++(define_insn "*fma4_madd4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(fma:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		  (match_operand:ANYF 2 "register_operand" "f")
++		  (match_operand:ANYF 3 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT"
++  "fmadd.\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "")])
++
++;; The fms, fnma, and fnms instructions can be used even when HONOR_NANS
++;; is true: while IEEE 754-2008 requires the negate operation to negate
++;; the sign of a NAN and the LARCH neg instruction does not do this, the
++;; fma part of the instruction places no requirement on how the sign of
++;; a NAN is handled, so the final sign bit of the entire operation is
++;; undefined.
++
++(define_expand "fms4"
++  [(set (match_operand:ANYF 0 "register_operand")
++	(fma:ANYF (match_operand:ANYF 1 "register_operand")
++		  (match_operand:ANYF 2 "register_operand")
++		  (neg:ANYF (match_operand:ANYF 3 "register_operand"))))]
++  "TARGET_HARD_FLOAT")
++
++
++(define_insn "*fms4_msub4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(fma:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		  (match_operand:ANYF 2 "register_operand" "f")
++		  (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++  "TARGET_HARD_FLOAT"
++  "fmsub.\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "")])
++
++;; fnma is defined in GCC as (fma (neg op1) op2 op3):
++;; (-op1 * op2) + op3 ==> -(op1 * op2) + op3 ==> -((op1 * op2) - op3)
++;; The loongarch nmsub instructions implement -((op1 * op2) - op3).
++;; This transformation means we may return the wrong signed zero,
++;; so we check HONOR_SIGNED_ZEROS.
++
++(define_expand "fnma4"
++  [(set (match_operand:ANYF 0 "register_operand")
++	(fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand"))
++		  (match_operand:ANYF 2 "register_operand")
++		  (match_operand:ANYF 3 "register_operand")))]
++  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)")
++
++(define_insn "*fnma4_nmsub4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++		  (match_operand:ANYF 2 "register_operand" "f")
++		  (match_operand:ANYF 3 "register_operand" "f")))]
++  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)"
++  "fnmsub.\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "")])
++
++;; fnms is defined as: (fma (neg op1) op2 (neg op3)):
++;; ((-op1) * op2) - op3 ==> -(op1 * op2) - op3 ==> -((op1 * op2) + op3)
++;; The loongarch nmadd instructions implement -((op1 * op2) + op3).
++;; This transformation means we may return the wrong signed zero,
++;; so we check HONOR_SIGNED_ZEROS.
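++;; A concrete case of the signed-zero hazard (illustrative numbers, not
++;; from this patch): take op1 = +0.0, op2 = 1.0, op3 = -0.0 under
++;; round-to-nearest.
++;;
++;;   fnms:   ((-op1) * op2) - op3  =  -0.0 + 0.0  =  +0.0
++;;   fnmadd: -((op1 * op2) + op3)  =  -(+0.0)     =  -0.0
++;;
++;; The two forms disagree exactly in the sign of a zero result, which is
++;; why the fnma and fnms expanders test HONOR_SIGNED_ZEROS.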
++
++(define_expand "fnms4"
++  [(set (match_operand:ANYF 0 "register_operand")
++	(fma:ANYF
++	  (neg:ANYF (match_operand:ANYF 1 "register_operand"))
++	  (match_operand:ANYF 2 "register_operand")
++	  (neg:ANYF (match_operand:ANYF 3 "register_operand"))))]
++  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)")
++
++(define_insn "*fnms4_nmadd4"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(fma:ANYF
++	  (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++	  (match_operand:ANYF 2 "register_operand" "f")
++	  (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)"
++  "fnmadd.\t%0,%1,%2,%3"
++  [(set_attr "type" "fmadd")
++   (set_attr "mode" "")])
++
++;;
++;;  ....................
++;;
++;;	DIVISION and REMAINDER
++;;
++;;  ....................
++;;
++
++(define_expand "div3"
++  [(set (match_operand:ANYF 0 "register_operand")
++	(div:ANYF (match_operand:ANYF 1 "reg_or_1_operand")
++		  (match_operand:ANYF 2 "register_operand")))]
++  ""
++{
++  if (const_1_operand (operands[1], mode))
++    if (!(ISA_HAS_FP_RECIP_RSQRT (mode)
++	  && flag_unsafe_math_optimizations))
++      operands[1] = force_reg (mode, operands[1]);
++})
++
++;; These patterns work around the early SB-1 rev2 core "F1" erratum:
++;;
++;; If an mftg1 or dmftg1 happens to access the floating point register
++;; file at the same time a long latency operation (div, sqrt, recip,
++;; rsqrt) iterates an intermediate result back through the floating
++;; point register file bypass, then instead of returning the correct
++;; register value the mftg1 or dmftg1 operation returns the intermediate
++;; result of the long latency operation.
++;;
++;; The workaround is to insert an unconditional 'mov' from/to the
++;; long latency op destination register.
++
++(define_insn "*div3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(div:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		  (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++{
++  return "fdiv.\t%0,%1,%2";
++}
++  [(set_attr "type" "fdiv")
++   (set_attr "mode" "")
++   (set_attr "insn_count" "1")])
++
++(define_insn "*recip3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
++		  (match_operand:ANYF 2 "register_operand" "f")))]
++  "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations"
++{
++  return "frecip.\t%0,%2";
++}
++  [(set_attr "type" "frdiv")
++   (set_attr "mode" "")
++   (set_attr "insn_count" "1")])
++
++;; Integer division and modulus.
++
++(define_insn "div3"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++	(any_div:GPR (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:GPR 2 "register_operand" "r")))]
++  ""
++  {
++    return loongarch_output_division ("div.\t%0,%1,%2", operands);
++  }
++  [(set_attr "type" "idiv3")
++   (set_attr "mode" "")])
++
++(define_insn "mod3"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++	(any_mod:GPR (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:GPR 2 "register_operand" "r")))]
++  ""
++  {
++    return loongarch_output_division ("mod.\t%0,%1,%2", operands);
++  }
++  [(set_attr "type" "idiv3")
++   (set_attr "mode" "")])
++
++;;
++;;  ....................
++;;
++;;	SQUARE ROOT
++;;
++;;  ....................
++
++;; These patterns work around the early SB-1 rev2 core "F1" erratum (see
++;; "*div[sd]f3" comment for details).
++ ++(define_insn "sqrt2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ "" ++{ ++ return "fsqrt.\t%0,%1"; ++} ++ [(set_attr "type" "fsqrt") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ ++(define_insn "*rsqrta" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] ++ "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" ++{ ++ return "frsqrt.\t%0,%2"; ++} ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ ++(define_insn "*rsqrtb" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (match_operand:ANYF 2 "register_operand" "f"))))] ++ "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" ++{ ++ return "frsqrt.\t%0,%2"; ++} ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ ++;; ++;; .................... ++;; ++;; ABSOLUTE VALUE ++;; ++;; .................... ++ ++;; Do not use the integer abs macro instruction, since that signals an ++;; exception on -2147483648 (sigh). ++ ++;; The "legacy" (as opposed to "2008") form of ABS.fmt is an arithmetic ++;; instruction that treats all NaN inputs as invalid; it does not clear ++;; their sign bit. We therefore can't use that form if the signs of ++;; NaNs matter. ++ ++(define_insn "abs2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ "" ++ "fabs.\t%0,%1" ++ [(set_attr "type" "fabs") ++ (set_attr "mode" "")]) ++ ++;; ++;; ................... ++;; ++;; Count leading zeroes. ++;; ++;; ................... ++;; ++ ++(define_insn "clz2" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (clz:GPR (match_operand:GPR 1 "register_operand" "r")))] ++ "" ++ "clz.\t%0,%1" ++ [(set_attr "type" "clz") ++ (set_attr "mode" "")]) ++ ++;; ++;; ................... ++;; ++;; Count trailing zeroes. ++;; ++;; ................... ++;; ++ ++(define_insn "ctz2" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (ctz:GPR (match_operand:GPR 1 "register_operand" "r")))] ++ "" ++ "ctz.\t%0,%1" ++ [(set_attr "type" "clz") ++ (set_attr "mode" "")]) ++ ++ ++ ++;; ++;; .................... ++;; ++;; NEGATION and ONE'S COMPLEMENT ++;; ++;; .................... ++ ++(define_insn "negsi2" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (neg:SI (match_operand:SI 1 "register_operand" "r")))] ++ "" ++{ ++ return "sub.w\t%0,%.,%1"; ++} ++ [(set_attr "alu_type" "sub") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "negdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (neg:DI (match_operand:DI 1 "register_operand" "r")))] ++ "TARGET_64BIT" ++ "sub.d\t%0,%.,%1" ++ [(set_attr "alu_type" "sub") ++ (set_attr "mode" "DI")]) ++ ++;; The "legacy" (as opposed to "2008") form of NEG.fmt is an arithmetic ++;; instruction that treats all NaN inputs as invalid; it does not flip ++;; their sign bit. We therefore can't use that form if the signs of ++;; NaNs matter. 
++ ++(define_insn "neg2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ "" ++ "fneg.\t%0,%1" ++ [(set_attr "type" "fneg") ++ (set_attr "mode" "")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (not:GPR (match_operand:GPR 1 "register_operand" "r")))] ++ "" ++{ ++ return "nor\t%0,%.,%1"; ++} ++ [(set_attr "alu_type" "not") ++ (set_attr "compression" "*") ++ (set_attr "mode" "")]) ++ ++ ++;; ++;; .................... ++;; ++;; LOGICAL ++;; ++;; .................... ++;; ++ ++ ++(define_expand "and3" ++ [(set (match_operand:GPR 0 "register_operand") ++ (and:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "and_reg_operand")))]) ++ ++;; The middle-end is not allowed to convert ANDing with 0xffff_ffff into a ++;; zero_extendsidi2 because of TARGET_TRULY_NOOP_TRUNCATION, so handle these ++;; here. Note that this variant does not trigger for SI mode because we ++;; require a 64-bit HOST_WIDE_INT and 0xffff_ffff wouldn't be a canonical ++;; sign-extended SImode value. ++;; ++;; These are possible combinations for operand 1 and 2. ++;; (r=register, mem=memory, x=match, S=split): ++;; ++;; \ op1 r/EXT r/!EXT mem ++;; op2 ++;; ++;; andi x x ++;; 0xff x x x ++;; 0xffff x x x ++;; 0xffff_ffff x S x ++;; low-bitmask x ++;; register x x ++;; register =op1 ++ ++(define_insn "*and3" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r") ++ (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "o,o,W,r,r,r,r") ++ (match_operand:GPR 2 "and_operand" "Yb,Yh,Yw,K,Yx,Yw,r")))] ++ " and_operands_ok (mode, operands[1], operands[2])" ++{ ++ int len; ++ ++ switch (which_alternative) ++ { ++ case 0: ++ operands[1] = gen_lowpart (QImode, operands[1]); ++ return "ld.bu\t%0,%1"; ++ case 1: ++ operands[1] = gen_lowpart (HImode, operands[1]); ++ return "ld.hu\t%0,%1"; ++ case 2: ++ operands[1] = gen_lowpart (SImode, operands[1]); ++ if (loongarch_14bit_shifted_offset_address_p (XEXP (operands[1], 0), SImode)) ++ return "ldptr.w\t%0,%1\n\tbstrins.d\t%0,$r0,63,32"; ++ else if (loongarch_12bit_offset_address_p (XEXP (operands[1], 0), SImode)) ++ return "ld.wu\t%0,%1"; ++ else ++ gcc_unreachable (); ++ case 3: ++ return "andi\t%0,%1,%x2"; ++ case 4: ++ len = low_bitmask_len (mode, INTVAL (operands[2])); ++ operands[2] = GEN_INT (len-1); ++ return "bstrpick.\t%0,%1,%2,0"; ++ case 5: ++ return "#"; ++ case 6: ++ return "and\t%0,%1,%2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "move_type" "load,load,load,andi,pick_ins,shift_shift,logical") ++ (set_attr "compression" "*,*,*,*,*,*,*") ++ (set_attr "mode" "")]) ++ ++(define_expand "ior3" ++ [(set (match_operand:GPR 0 "register_operand") ++ (ior:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "uns_arith_operand")))] ++ "" ++{ ++}) ++ ++(define_insn "*ior3" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (ior:GPR (match_operand:GPR 1 "register_operand" "r,r") ++ (match_operand:GPR 2 "uns_arith_operand" "r,K")))] ++ "" ++ "@ ++ or\t%0,%1,%2 ++ ori\t%0,%1,%x2" ++ [(set_attr "alu_type" "or") ++ (set_attr "compression" "*,*") ++ (set_attr "mode" "")]) ++ ++(define_insn "*iorhi3" ++ [(set (match_operand:HI 0 "register_operand" "=r,r") ++ (ior:HI (match_operand:HI 1 "register_operand" "r,r") ++ (match_operand:HI 2 "uns_arith_operand" "K,r")))] ++ "" ++ "@ ++ ori\t%0,%1,%x2 ++ or\t%0,%1,%2" ++ [(set_attr "alu_type" "or") ++ (set_attr "mode" "HI")]) ++ ++(define_expand "xor3" ++ [(set 
(match_operand:GPR 0 "register_operand") ++ (xor:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "uns_arith_operand")))] ++ "" ++ "") ++ ++(define_insn "*xor3" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (xor:GPR (match_operand:GPR 1 "register_operand" "r,r") ++ (match_operand:GPR 2 "uns_arith_operand" "r,K")))] ++ "" ++ "@ ++ xor\t%0,%1,%2 ++ xori\t%0,%1,%x2" ++ [(set_attr "alu_type" "xor") ++ (set_attr "compression" "*,*") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "*nor3" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "r")) ++ (not:GPR (match_operand:GPR 2 "register_operand" "r"))))] ++ "" ++ "nor\t%0,%1,%2" ++ [(set_attr "alu_type" "nor") ++ (set_attr "mode" "")]) ++ ++;; ++;; .................... ++;; ++;; TRUNCATION ++;; ++;; .................... ++ ++ ++ ++(define_insn "truncdfsf2" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "fcvt.s.d\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "cnv_mode" "D2S") ++ (set_attr "mode" "SF")]) ++ ++;; Integer truncation patterns. Truncating SImode values to smaller ++;; modes is a no-op, as it is for most other GCC ports. Truncating ++;; DImode values to SImode is not a no-op for TARGET_64BIT since we ++;; need to make sure that the lower 32 bits are properly sign-extended ++;; (see TARGET_TRULY_NOOP_TRUNCATION). Truncating DImode values into modes ++;; smaller than SImode is equivalent to two separate truncations: ++;; ++;; A B ++;; DI ---> HI == DI ---> SI ---> HI ++;; DI ---> QI == DI ---> SI ---> QI ++;; ++;; Step A needs a real instruction but step B does not. ++ ++(define_insn "truncdisi2" ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,ZC,m") ++ (truncate:SI (match_operand:DI 1 "register_operand" "r,r,r")))] ++ "TARGET_64BIT" ++ "@ ++ slli.w\t%0,%1,0 ++ stptr.w\t%1,%0 ++ st.w\t%1,%0" ++ [(set_attr "move_type" "sll0,store,store") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "truncdi2" ++ [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,m") ++ (truncate:SHORT (match_operand:DI 1 "register_operand" "r,r")))] ++ "TARGET_64BIT" ++ "@ ++ slli.w\t%0,%1,0 ++ st.\t%1,%0" ++ [(set_attr "move_type" "sll0,store") ++ (set_attr "mode" "SI")]) ++ ++;; Combiner patterns to optimize shift/truncate combinations. ++ ++(define_insn "*ashr_trunc" ++ [(set (match_operand:SUBDI 0 "register_operand" "=r") ++ (truncate:SUBDI ++ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "const_arith_operand" ""))))] ++ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)" ++ "srai.d\t%0,%1,%2" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "*lshr32_trunc" ++ [(set (match_operand:SUBDI 0 "register_operand" "=r") ++ (truncate:SUBDI ++ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r") ++ (const_int 32))))] ++ "TARGET_64BIT" ++ "srai.d\t%0,%1,32" ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) ++ ++ ++ ++;; ++;; .................... ++;; ++;; ZERO EXTENSION ++;; ++;; .................... ++ ++;; Extension insns. 
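++;; For orientation (an illustrative sketch, not from this patch): the
++;; register alternative of zero_extendsidi2 below is a single bit-field
++;; pick, i.e. in C terms
++;;
++;;   unsigned long zext (int x)
++;;   {
++;;     return (unsigned int) x;   /* bstrpick.d rd,rs,31,0 */
++;;   }
++;;
++;; bits 31..0 are copied and bits 63..32 are cleared in one instruction.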
++ ++(define_expand "zero_extendsidi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] ++ "TARGET_64BIT") ++ ++(define_insn "*zero_extendsidi2_dext" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r") ++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,ZC,W")))] ++ "TARGET_64BIT" ++ "@ ++ bstrpick.d\t%0,%1,31,0 ++ ldptr.w\t%0,%1\n\tlu32i.d\t%0,0 ++ ld.wu\t%0,%1" ++ [(set_attr "move_type" "arith,load,load") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "1,2,1")]) ++ ++;; See the comment before the *and3 pattern why this is generated by ++;; combine. ++ ++(define_expand "zero_extend2" ++ [(set (match_operand:GPR 0 "register_operand") ++ (zero_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] ++ "" ++{ ++}) ++ ++(define_insn "*zero_extend2" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (zero_extend:GPR ++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] ++ "" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "bstrpick.\t%0,%1,,0"; ++ case 1: ++ return "ld.u\t%0,%1"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "move_type" "pick_ins,load") ++ (set_attr "compression" "*,*") ++ (set_attr "mode" "")]) ++ ++ ++(define_expand "zero_extendqihi2" ++ [(set (match_operand:HI 0 "register_operand") ++ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] ++ "" ++{ ++}) ++ ++(define_insn "*zero_extendqihi2" ++ [(set (match_operand:HI 0 "register_operand" "=r,r") ++ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] ++ "" ++ "@ ++ andi\t%0,%1,0x00ff ++ ld.bu\t%0,%1" ++ [(set_attr "move_type" "andi,load") ++ (set_attr "mode" "HI")]) ++ ++;; Combiner patterns to optimize truncate/zero_extend combinations. ++ ++(define_insn "*zero_extend_trunc" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (zero_extend:GPR ++ (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "bstrpick.\t%0,%1,,0" ++ [(set_attr "move_type" "pick_ins") ++ (set_attr "mode" "")]) ++ ++(define_insn "*zero_extendhi_truncqi" ++ [(set (match_operand:HI 0 "register_operand" "=r") ++ (zero_extend:HI ++ (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "andi\t%0,%1,0xff" ++ [(set_attr "alu_type" "and") ++ (set_attr "mode" "HI")]) ++ ++;; ++;; .................... ++;; ++;; SIGN EXTENSION ++;; ++;; .................... ++ ++;; Extension insns. ++;; Those for integer source operand are ordered widest source type first. ++ ++;; When TARGET_64BIT, all SImode integer and accumulator registers ++;; should already be in sign-extended form (see TARGET_TRULY_NOOP_TRUNCATION ++;; and truncdisi2). We can therefore get rid of register->register ++;; instructions if we constrain the source to be in the same register as ++;; the destination. ++;; ++;; Only the pre-reload scheduler sees the type of the register alternatives; ++;; we split them into nothing before the post-reload scheduler runs. ++;; These alternatives therefore have type "move" in order to reflect ++;; what happens if the two pre-reload operands cannot be tied, and are ++;; instead allocated two separate GPRs. We don't distinguish between ++;; the GPR and LO cases because we don't usually know during pre-reload ++;; scheduling whether an operand will be LO or not. 
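++;; A hedged C-level illustration of the paragraph above (the function
++;; name is made up):
++;;
++;;   long widen (int x)
++;;   {
++;;     return x;   /* free: the SImode value already sits sign-extended
++;;                    in its 64-bit register */
++;;   }
++;;
++;; which is why the register alternative of extendsidi2 below splits
++;; away to nothing after reload.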
++(define_insn_and_split "extendsidi2" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r") ++ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m")))] ++ "TARGET_64BIT" ++ "@ ++ # ++ ldptr.w\t%0,%1 ++ ld.w\t%0,%1" ++ "&& reload_completed && register_operand (operands[1], VOIDmode)" ++ [(const_int 0)] ++{ ++ emit_note (NOTE_INSN_DELETED); ++ DONE; ++} ++ [(set_attr "move_type" "move,load,load") ++ (set_attr "mode" "DI")]) ++ ++(define_expand "extend2" ++ [(set (match_operand:GPR 0 "register_operand") ++ (sign_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] ++ "") ++ ++ ++(define_insn "*extend2_se" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (sign_extend:GPR ++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] ++ "" ++ "@ ++ ext.w.\t%0,%1 ++ ld.\t%0,%1" ++ [(set_attr "move_type" "signext,load") ++ (set_attr "mode" "")]) ++ ++(define_expand "extendqihi2" ++ [(set (match_operand:HI 0 "register_operand") ++ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] ++ "") ++ ++(define_insn "*extendqihi2_seb" ++ [(set (match_operand:HI 0 "register_operand" "=r,r") ++ (sign_extend:HI ++ (match_operand:QI 1 "nonimmediate_operand" "r,m")))] ++ "" ++ "@ ++ ext.w.b\t%0,%1 ++ ld.b\t%0,%1" ++ [(set_attr "move_type" "signext,load") ++ (set_attr "mode" "SI")]) ++ ++;; Combiner patterns for truncate/sign_extend combinations. The SI versions ++;; use the shift/truncate patterns. ++ ++(define_insn_and_split "*extenddi_truncate" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) ++ (ashift:DI (match_dup 1) ++ (match_dup 3))) ++ (set (match_dup 0) ++ (ashiftrt:DI (match_dup 2) ++ (match_dup 3)))] ++{ ++ operands[2] = gen_lowpart (DImode, operands[0]); ++ operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); ++} ++ [(set_attr "move_type" "shift_shift") ++ (set_attr "mode" "DI")]) ++ ++(define_insn_and_split "*extendsi_truncate" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (sign_extend:SI ++ (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) ++ (ashift:DI (match_dup 1) ++ (match_dup 3))) ++ (set (match_dup 0) ++ (truncate:SI (ashiftrt:DI (match_dup 2) ++ (match_dup 3))))] ++{ ++ operands[2] = gen_lowpart (DImode, operands[0]); ++ operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); ++} ++ [(set_attr "move_type" "shift_shift") ++ (set_attr "mode" "SI")]) ++ ++(define_insn_and_split "*extendhi_truncateqi" ++ [(set (match_operand:HI 0 "register_operand" "=r") ++ (sign_extend:HI ++ (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) ++ (ashift:DI (match_dup 1) ++ (const_int 56))) ++ (set (match_dup 0) ++ (truncate:HI (ashiftrt:DI (match_dup 2) ++ (const_int 56))))] ++{ ++ operands[2] = gen_lowpart (DImode, operands[0]); ++} ++ [(set_attr "move_type" "shift_shift") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "extendsfdf2" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "fcvt.d.s\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "cnv_mode" "S2D") ++ (set_attr "mode" "DF")]) ++ ++;; ++;; .................... ++;; ++;; CONVERSIONS ++;; ++;; .................... 
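++;; The fixuns_trunc expanders below have no single instruction to map to,
++;; so they branch on the magnitude and fold the sign bit back in.  A rough
++;; C sketch of the same idea (illustrative only):
++;;
++;;   unsigned int fixuns (double x)
++;;   {
++;;     if (x < 0x1p31)                    /* fits the signed range */
++;;       return (int) x;                  /* plain ftintrz.w.d */
++;;     return (int) (x - 0x1p31) | 0x80000000u;   /* restore bit 31 */
++;;   }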
++ ++(define_expand "fix_truncdfsi2" ++ [(set (match_operand:SI 0 "register_operand") ++ (fix:SI (match_operand:DF 1 "register_operand")))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++"" ++) ++ ++(define_insn "fix_truncdfsi2_insn" ++ [(set (match_operand:SI 0 "register_operand" "=f") ++ (fix:SI (match_operand:DF 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "ftintrz.w.d %0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "DF") ++ (set_attr "cnv_mode" "D2I")]) ++ ++ ++(define_expand "fix_truncsfsi2" ++ [(set (match_operand:SI 0 "register_operand") ++ (fix:SI (match_operand:SF 1 "register_operand")))] ++ "TARGET_HARD_FLOAT" ++"" ++) ++ ++(define_insn "fix_truncsfsi2_insn" ++ [(set (match_operand:SI 0 "register_operand" "=f") ++ (fix:SI (match_operand:SF 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT" ++ "ftintrz.w.s %0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "SF") ++ (set_attr "cnv_mode" "S2I")]) ++ ++ ++(define_insn "fix_truncdfdi2" ++ [(set (match_operand:DI 0 "register_operand" "=f") ++ (fix:DI (match_operand:DF 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" ++ "ftintrz.l.d %0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "DF") ++ (set_attr "cnv_mode" "D2I")]) ++ ++ ++(define_insn "fix_truncsfdi2" ++ [(set (match_operand:DI 0 "register_operand" "=f") ++ (fix:DI (match_operand:SF 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" ++ "ftintrz.l.s %0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "SF") ++ (set_attr "cnv_mode" "S2I")]) ++ ++ ++(define_insn "floatsidf2" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (float:DF (match_operand:SI 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "ffint.d.w\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "DF") ++ (set_attr "cnv_mode" "I2D")]) ++ ++ ++(define_insn "floatdidf2" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (float:DF (match_operand:DI 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" ++ "ffint.d.l\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "DF") ++ (set_attr "cnv_mode" "I2D")]) ++ ++ ++(define_insn "floatsisf2" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (float:SF (match_operand:SI 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT" ++ "ffint.s.w\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "SF") ++ (set_attr "cnv_mode" "I2S")]) ++ ++ ++(define_insn "floatdisf2" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (float:SF (match_operand:DI 1 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" ++ "ffint.s.l\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "SF") ++ (set_attr "cnv_mode" "I2S")]) ++ ++ ++(define_expand "fixuns_truncdfsi2" ++ [(set (match_operand:SI 0 "register_operand") ++ (unsigned_fix:SI (match_operand:DF 1 "register_operand")))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++{ ++ rtx reg1 = gen_reg_rtx (DFmode); ++ rtx reg2 = gen_reg_rtx (DFmode); ++ rtx reg3 = gen_reg_rtx (SImode); ++ rtx_code_label *label1 = gen_label_rtx (); ++ rtx_code_label *label2 = gen_label_rtx (); ++ rtx test; ++ REAL_VALUE_TYPE offset; ++ ++ real_2expN (&offset, 31, DFmode); ++ ++ if (reg1) /* Turn off complaints about unreached code. 
*/ ++ { ++ loongarch_emit_move (reg1, const_double_from_real_value (offset, DFmode)); ++ do_pending_stack_adjust (); ++ ++ test = gen_rtx_GE (VOIDmode, operands[1], reg1); ++ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); ++ ++ emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1])); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, ++ gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); ++ ++ emit_label (label1); ++ loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); ++ loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode ++ (BITMASK_HIGH, SImode))); ++ ++ emit_insn (gen_fix_truncdfsi2 (operands[0], reg2)); ++ emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); ++ ++ emit_label (label2); ++ ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). */ ++ emit_use (stack_pointer_rtx); ++ DONE; ++ } ++}) ++ ++ ++(define_expand "fixuns_truncdfdi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (unsigned_fix:DI (match_operand:DF 1 "register_operand")))] ++ "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" ++{ ++ rtx reg1 = gen_reg_rtx (DFmode); ++ rtx reg2 = gen_reg_rtx (DFmode); ++ rtx reg3 = gen_reg_rtx (DImode); ++ rtx_code_label *label1 = gen_label_rtx (); ++ rtx_code_label *label2 = gen_label_rtx (); ++ rtx test; ++ REAL_VALUE_TYPE offset; ++ ++ real_2expN (&offset, 63, DFmode); ++ ++ loongarch_emit_move (reg1, const_double_from_real_value (offset, DFmode)); ++ do_pending_stack_adjust (); ++ ++ test = gen_rtx_GE (VOIDmode, operands[1], reg1); ++ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); ++ ++ emit_insn (gen_fix_truncdfdi2 (operands[0], operands[1])); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); ++ ++ emit_label (label1); ++ loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); ++ loongarch_emit_move (reg3, GEN_INT (BITMASK_HIGH)); ++ emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32))); ++ ++ emit_insn (gen_fix_truncdfdi2 (operands[0], reg2)); ++ emit_insn (gen_iordi3 (operands[0], operands[0], reg3)); ++ ++ emit_label (label2); ++ ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). 
*/ ++ emit_use (stack_pointer_rtx); ++ DONE; ++}) ++ ++ ++(define_expand "fixuns_truncsfsi2" ++ [(set (match_operand:SI 0 "register_operand") ++ (unsigned_fix:SI (match_operand:SF 1 "register_operand")))] ++ "TARGET_HARD_FLOAT" ++{ ++ rtx reg1 = gen_reg_rtx (SFmode); ++ rtx reg2 = gen_reg_rtx (SFmode); ++ rtx reg3 = gen_reg_rtx (SImode); ++ rtx_code_label *label1 = gen_label_rtx (); ++ rtx_code_label *label2 = gen_label_rtx (); ++ rtx test; ++ REAL_VALUE_TYPE offset; ++ ++ real_2expN (&offset, 31, SFmode); ++ ++ loongarch_emit_move (reg1, const_double_from_real_value (offset, SFmode)); ++ do_pending_stack_adjust (); ++ ++ test = gen_rtx_GE (VOIDmode, operands[1], reg1); ++ emit_jump_insn (gen_cbranchsf4 (test, operands[1], reg1, label1)); ++ ++ emit_insn (gen_fix_truncsfsi2 (operands[0], operands[1])); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); ++ ++ emit_label (label1); ++ loongarch_emit_move (reg2, gen_rtx_MINUS (SFmode, operands[1], reg1)); ++ loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode ++ (BITMASK_HIGH, SImode))); ++ ++ emit_insn (gen_fix_truncsfsi2 (operands[0], reg2)); ++ emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); ++ ++ emit_label (label2); ++ ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). */ ++ emit_use (stack_pointer_rtx); ++ DONE; ++}) ++ ++ ++(define_expand "fixuns_truncsfdi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (unsigned_fix:DI (match_operand:SF 1 "register_operand")))] ++ "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" ++{ ++ rtx reg1 = gen_reg_rtx (SFmode); ++ rtx reg2 = gen_reg_rtx (SFmode); ++ rtx reg3 = gen_reg_rtx (DImode); ++ rtx_code_label *label1 = gen_label_rtx (); ++ rtx_code_label *label2 = gen_label_rtx (); ++ rtx test; ++ REAL_VALUE_TYPE offset; ++ ++ real_2expN (&offset, 63, SFmode); ++ ++ loongarch_emit_move (reg1, const_double_from_real_value (offset, SFmode)); ++ do_pending_stack_adjust (); ++ ++ test = gen_rtx_GE (VOIDmode, operands[1], reg1); ++ emit_jump_insn (gen_cbranchsf4 (test, operands[1], reg1, label1)); ++ ++ emit_insn (gen_fix_truncsfdi2 (operands[0], operands[1])); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); ++ ++ emit_label (label1); ++ loongarch_emit_move (reg2, gen_rtx_MINUS (SFmode, operands[1], reg1)); ++ loongarch_emit_move (reg3, GEN_INT (BITMASK_HIGH)); ++ emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32))); ++ ++ emit_insn (gen_fix_truncsfdi2 (operands[0], reg2)); ++ emit_insn (gen_iordi3 (operands[0], operands[0], reg3)); ++ ++ emit_label (label2); ++ ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). */ ++ emit_use (stack_pointer_rtx); ++ DONE; ++}) ++ ++;; ++;; .................... ++;; ++;; DATA MOVEMENT ++;; ++;; .................... 
++ ++(define_expand "extzv" ++ [(set (match_operand:GPR 0 "register_operand") ++ (zero_extract:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand 2 "const_int_operand") ++ (match_operand 3 "const_int_operand")))] ++ "" ++{ ++ if (!loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), ++ INTVAL (operands[3]))) ++ FAIL; ++}) ++ ++(define_insn "*extzv" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand" "") ++ (match_operand 3 "const_int_operand" "")))] ++ "loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), ++ INTVAL (operands[3]))" ++{ ++ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]) -1 ); ++ return "bstrpick.\t%0,%1,%2,%3"; ++} ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "insv" ++ [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand") ++ (match_operand 1 "const_int_operand") ++ (match_operand 2 "const_int_operand")) ++ (match_operand:GPR 3 "reg_or_0_operand"))] ++ "" ++{ ++ if (!loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), ++ INTVAL (operands[2]))) ++ FAIL; ++}) ++ ++(define_insn "*insv" ++ [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand" "+r") ++ (match_operand:SI 1 "const_int_operand" "") ++ (match_operand:SI 2 "const_int_operand" "")) ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ"))] ++ "loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), ++ INTVAL (operands[2]))" ++{ ++ operands[1] = GEN_INT (INTVAL (operands[1]) + INTVAL (operands[2]) -1 ); ++ return "bstrins.\t%0,%z3,%1,%2"; ++} ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++;; Allow combine to split complex const_int load sequences, using operand 2 ++;; to store the intermediate results. See move_operand for details. ++(define_split ++ [(set (match_operand:GPR 0 "register_operand") ++ (match_operand:GPR 1 "splittable_const_int_operand")) ++ (clobber (match_operand:GPR 2 "register_operand"))] ++ "" ++ [(const_int 0)] ++{ ++ loongarch_move_integer (operands[2], operands[0], INTVAL (operands[1])); ++ DONE; ++}) ++ ++;; 64-bit integer moves ++ ++;; Unlike most other insns, the move insns can't be split with ++;; different predicates, because register spilling and other parts of ++;; the compiler, have memoized the insn number already. 
++ ++(define_expand "movdi" ++ [(set (match_operand:DI 0 "") ++ (match_operand:DI 1 ""))] ++ "" ++{ ++ if (loongarch_legitimize_move (DImode, operands[0], operands[1])) ++ DONE; ++}) ++ ++ ++(define_insn "*movdi_32bit" ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") ++ (match_operand:DI 1 "move_operand" "r,i,ZC,r,m,r,*J*r,*m,*f,*f"))] ++ "!TARGET_64BIT ++ && (register_operand (operands[0], DImode) ++ || reg_or_0_operand (operands[1], DImode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") ++ (set (attr "mode") ++ (if_then_else (eq_attr "move_type" "imul") ++ (const_string "SI") ++ (const_string "DI")))]) ++ ++ ++(define_insn "*movdi_64bit" ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") ++ (match_operand:DI 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f"))] ++ "TARGET_64BIT ++ && (register_operand (operands[0], DImode) ++ || reg_or_0_operand (operands[1], DImode)) ++ && !((GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == LABEL_REF) ++ && symbolic_operand (operands[1], VOIDmode) ++ && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "movdi_extreme" ++ [(parallel [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "symbolic_operand" "")] ++ UNSPECV_MOVE_EXTREME)) ++ (use (match_operand:DI 2 "register_operand" "=&r"))])] ++ "TARGET_64BIT && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)" ++ { ++ if (!loongarch_global_symbol_p (operands[1]) ++ || loongarch_symbol_binds_local_p (operands[1])) ++ return "la.local\t%0,%2,%1"; ++ else ++ return "la.global\t%0,%2,%1"; ++ } ++ [(set_attr "move_type" "const") ++ (set_attr "mode" "DI")]) ++;; 32-bit Integer moves ++ ++;; Unlike most other insns, the move insns can't be split with ++;; different predicates, because register spilling and other parts of ++;; the compiler, have memoized the insn number already. ++ ++(define_expand "mov" ++ [(set (match_operand:IMOVE32 0 "") ++ (match_operand:IMOVE32 1 ""))] ++ "" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++;; The difference between these two is whether or not ints are allowed ++;; in FP registers (off by default, use -mdebugh to enable). ++ ++(define_insn "*mov_internal" ++ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m,*r,*z") ++ (match_operand:IMOVE32 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f,*z,*r"))] ++ "(register_operand (operands[0], mode) ++ || reg_or_0_operand (operands[1], mode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") ++ (set_attr "compression" "all,*,*,*,*,*,*,*,*,*,*,*") ++ (set_attr "mode" "SI")]) ++ ++ ++ ++;; LARCH supports loading and storing a floating point register from ++;; the sum of two general registers. We use two versions for each of ++;; these four instructions: one where the two general registers are ++;; SImode, and one where they are DImode. This is because general ++;; registers will be in SImode when they hold 32-bit values, but, ++;; since the 32-bit values are always sign extended, the [ls][wd]xc1 ++;; instructions will still work correctly. 
++ ++;; ??? Perhaps it would be better to support these instructions by ++;; modifying TARGET_LEGITIMATE_ADDRESS_P and friends. However, since ++;; these instructions can only be used to load and store floating ++;; point registers, that would probably cause trouble in reload. ++ ++(define_insn "*_" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "register_operand" "r"))))] ++ "" ++ "\t%0,%1,%2" ++ [(set_attr "type" "fpidxload") ++ (set_attr "mode" "")]) ++ ++(define_insn "*_" ++ [(set (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "register_operand" "r"))) ++ (match_operand:ANYF 0 "register_operand" "f"))] ++ "TARGET_HARD_FLOAT" ++ "\t%0,%1,%2" ++ [(set_attr "type" "fpidxstore") ++ (set_attr "mode" "")]) ++ ++;; Loongson index address load and store. ++(define_insn "*_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (mem:GPR ++ (plus:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "register_operand" "r"))))] ++ "" ++ "\t%0,%1,%2" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "*_" ++ [(set (mem:GPR (plus:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "register_operand" "r"))) ++ (match_operand:GPR 0 "register_operand" "r"))] ++ "" ++ "\t%0,%1,%2" ++ [(set_attr "type" "store") ++ (set_attr "mode" "")]) ++ ++;; SHORT mode sign_extend. ++(define_insn "*extend__" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (sign_extend:GPR ++ (mem:SHORT ++ (plus:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "register_operand" "r")))))] ++ "" ++ "\t%0,%1,%2" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "*extend_" ++ [(set (mem:SHORT (plus:P (match_operand:P 1 "register_operand" "r") ++ (match_operand:P 2 "register_operand" "r"))) ++ (match_operand:SHORT 0 "register_operand" "r"))] ++ "" ++ "\t%0,%1,%2" ++ [(set_attr "type" "store") ++ (set_attr "mode" "SI")]) ++ ++ ++;; 16-bit Integer moves ++ ++;; Unlike most other insns, the move insns can't be split with ++;; different predicates, because register spilling and other parts of ++;; the compiler, have memoized the insn number already. ++;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND. ++ ++(define_expand "movhi" ++ [(set (match_operand:HI 0 "") ++ (match_operand:HI 1 ""))] ++ "" ++{ ++ if (loongarch_legitimize_move (HImode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_insn "*movhi_internal" ++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m") ++ (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ"))] ++ "(register_operand (operands[0], HImode) ++ || reg_or_0_operand (operands[1], HImode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,const,load,store") ++ (set_attr "compression" "all,all,*,*,*") ++ (set_attr "mode" "HI")]) ++ ++;; 8-bit Integer moves ++ ++;; Unlike most other insns, the move insns can't be split with ++;; different predicates, because register spilling and other parts of ++;; the compiler, have memoized the insn number already. ++;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND. 
++ ++(define_expand "movqi" ++ [(set (match_operand:QI 0 "") ++ (match_operand:QI 1 ""))] ++ "" ++{ ++ if (loongarch_legitimize_move (QImode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_insn "*movqi_internal" ++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m") ++ (match_operand:QI 1 "move_operand" "r,I,m,rJ"))] ++ "(register_operand (operands[0], QImode) ++ || reg_or_0_operand (operands[1], QImode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,load,store") ++ (set_attr "compression" "all,*,*,*") ++ (set_attr "mode" "QI")]) ++ ++;; 32-bit floating point moves ++ ++(define_expand "movsf" ++ [(set (match_operand:SF 0 "") ++ (match_operand:SF 1 ""))] ++ "" ++{ ++ if (loongarch_legitimize_move (SFmode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_insn "*movsf_hardfloat" ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") ++ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))] ++ "TARGET_HARD_FLOAT ++ && (register_operand (operands[0], SFmode) ++ || reg_or_0_operand (operands[1], SFmode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") ++ (set_attr "mode" "SF")]) ++ ++(define_insn "*movsf_softfloat" ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m") ++ (match_operand:SF 1 "move_operand" "Gr,m,r"))] ++ "TARGET_SOFT_FLOAT ++ && (register_operand (operands[0], SFmode) ++ || reg_or_0_operand (operands[1], SFmode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,load,store") ++ (set_attr "mode" "SF")]) ++ ++ ++;; 64-bit floating point moves ++ ++(define_expand "movdf" ++ [(set (match_operand:DF 0 "") ++ (match_operand:DF 1 ""))] ++ "" ++{ ++ if (loongarch_legitimize_move (DFmode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_insn "*movdf_hardfloat" ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") ++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT ++ && (register_operand (operands[0], DFmode) ++ || reg_or_0_operand (operands[1], DFmode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") ++ (set_attr "mode" "DF")]) ++ ++(define_insn "*movdf_softfloat" ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m") ++ (match_operand:DF 1 "move_operand" "rG,m,rG"))] ++ "(TARGET_SOFT_FLOAT || TARGET_SINGLE_FLOAT) ++ && (register_operand (operands[0], DFmode) ++ || reg_or_0_operand (operands[1], DFmode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,load,store") ++ (set_attr "mode" "DF")]) ++ ++ ++;; 128-bit integer moves ++ ++(define_expand "movti" ++ [(set (match_operand:TI 0) ++ (match_operand:TI 1))] ++ "TARGET_64BIT" ++{ ++ if (loongarch_legitimize_move (TImode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_insn "*movti" ++ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m") ++ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))] ++ "TARGET_64BIT ++ && (register_operand (operands[0], TImode) ++ || reg_or_0_operand (operands[1], TImode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,load,store") ++ (set (attr "mode") ++ (if_then_else (eq_attr "move_type" "imul") ++ (const_string "SI") ++ 
(const_string "TI")))]) ++ ++ ++;; 128-bit floating point moves ++ ++(define_expand "movtf" ++ [(set (match_operand:TF 0) ++ (match_operand:TF 1))] ++ "TARGET_64BIT" ++{ ++ if (loongarch_legitimize_move (TFmode, operands[0], operands[1])) ++ DONE; ++}) ++ ++;; This pattern handles both hard- and soft-float cases. ++(define_insn "*movtf" ++ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,r,m,f,r,f,m") ++ (match_operand:TF 1 "move_operand" "rG,m,rG,rG,f,m,f"))] ++ "TARGET_64BIT ++ && (register_operand (operands[0], TFmode) ++ || reg_or_0_operand (operands[1], TFmode))" ++ "#" ++ [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore") ++ (set_attr "mode" "TF")]) ++ ++ ++(define_split ++ [(set (match_operand:MOVE64 0 "nonimmediate_operand") ++ (match_operand:MOVE64 1 "move_operand"))] ++ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++(define_split ++ [(set (match_operand:MOVE128 0 "nonimmediate_operand") ++ (match_operand:MOVE128 1 "move_operand"))] ++ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++;; Emit a doubleword move in which exactly one of the operands is ++;; a floating-point register. We can't just emit two normal moves ++;; because of the constraints imposed by the FPU register model; ++;; see loongarch_cannot_change_mode_class for details. Instead, we keep ++;; the FPR whole and use special patterns to refer to each word of ++;; the other operand. ++ ++(define_expand "move_doubleword_fpr" ++ [(set (match_operand:SPLITF 0) ++ (match_operand:SPLITF 1))] ++ "" ++{ ++ if (FP_REG_RTX_P (operands[0])) ++ { ++ rtx low = loongarch_subword (operands[1], 0); ++ rtx high = loongarch_subword (operands[1], 1); ++ emit_insn (gen_load_low (operands[0], low)); ++ if (!TARGET_64BIT) ++ emit_insn (gen_movgr2frh (operands[0], high, operands[0])); ++ else ++ emit_insn (gen_load_high (operands[0], high, operands[0])); ++ } ++ else ++ { ++ rtx low = loongarch_subword (operands[0], 0); ++ rtx high = loongarch_subword (operands[0], 1); ++ emit_insn (gen_store_word (low, operands[1], const0_rtx)); ++ if (!TARGET_64BIT) ++ emit_insn (gen_movfrh2gr (high, operands[1])); ++ else ++ emit_insn (gen_store_word (high, operands[1], const1_rtx)); ++ } ++ DONE; ++}) ++ ++;; Load the low word of operand 0 with operand 1. ++(define_insn "load_low" ++ [(set (match_operand:SPLITF 0 "register_operand" "=f,f") ++ (unspec:SPLITF [(match_operand: 1 "general_operand" "rJ,m")] ++ UNSPEC_LOAD_LOW))] ++ "TARGET_HARD_FLOAT" ++{ ++ operands[0] = loongarch_subword (operands[0], 0); ++ return loongarch_output_move (operands[0], operands[1]); ++} ++ [(set_attr "move_type" "mgtf,fpload") ++ (set_attr "mode" "")]) ++ ++;; Load the high word of operand 0 from operand 1, preserving the value ++;; in the low word. ++(define_insn "load_high" ++ [(set (match_operand:SPLITF 0 "register_operand" "=f,f") ++ (unspec:SPLITF [(match_operand: 1 "general_operand" "rJ,m") ++ (match_operand:SPLITF 2 "register_operand" "0,0")] ++ UNSPEC_LOAD_HIGH))] ++ "TARGET_HARD_FLOAT" ++{ ++ operands[0] = loongarch_subword (operands[0], 1); ++ return loongarch_output_move (operands[0], operands[1]); ++} ++ [(set_attr "move_type" "mgtf,fpload") ++ (set_attr "mode" "")]) ++ ++;; Store one word of operand 1 in operand 0. 
Operand 2 is 1 to store the ++;; high word and 0 to store the low word. ++(define_insn "store_word" ++ [(set (match_operand: 0 "nonimmediate_operand" "=r,m") ++ (unspec: [(match_operand:SPLITF 1 "register_operand" "f,f") ++ (match_operand 2 "const_int_operand")] ++ UNSPEC_STORE_WORD))] ++ "TARGET_HARD_FLOAT" ++{ ++ operands[1] = loongarch_subword (operands[1], INTVAL (operands[2])); ++ return loongarch_output_move (operands[0], operands[1]); ++} ++ [(set_attr "move_type" "mftg,fpstore") ++ (set_attr "mode" "")]) ++ ++;; Move operand 1 to the high word of operand 0 using movgr2frh, preserving the ++;; value in the low word. ++(define_insn "movgr2frh" ++ [(set (match_operand:SPLITF 0 "register_operand" "=f") ++ (unspec:SPLITF [(match_operand: 1 "reg_or_0_operand" "rJ") ++ (match_operand:SPLITF 2 "register_operand" "0")] ++ UNSPEC_MOVGR2FRH))] ++ "TARGET_HARD_FLOAT && TARGET_FLOAT64" ++ "movgr2frh.w\t%z1,%0" ++ [(set_attr "move_type" "mgtf") ++ (set_attr "mode" "")]) ++ ++;; Move high word of operand 1 to operand 0 using movfrh2gr. ++(define_insn "movfrh2gr" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (unspec: [(match_operand:SPLITF 1 "register_operand" "f")] ++ UNSPEC_MOVFRH2GR))] ++ "TARGET_HARD_FLOAT && TARGET_FLOAT64" ++ "movfrh2gr.s\t%0,%1" ++ [(set_attr "move_type" "mftg") ++ (set_attr "mode" "")]) ++ ++;; Expand in-line code to clear the instruction cache between operand[0] and ++;; operand[1]. ++(define_expand "clear_cache" ++ [(match_operand 0 "pmode_register_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "" ++ " ++{ ++ emit_insn (gen_ibar (const0_rtx)); ++ DONE; ++}") ++ ++(define_insn "ibar" ++ [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_IBAR)] ++ "" ++ "ibar\t%0") ++ ++(define_insn "dbar" ++ [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_DBAR)] ++ "" ++ "dbar\t%0") ++ ++ ++ ++;; Privileged state instruction ++ ++(define_insn "cpucfg" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_CPUCFG))] ++ "" ++ "cpucfg\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "asrtle_d" ++ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") ++ (match_operand:DI 1 "register_operand" "r")] ++ UNSPEC_ASRTLE_D)] ++ "TARGET_64BIT" ++ "asrtle.d\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "asrtgt_d" ++ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") ++ (match_operand:DI 1 "register_operand" "r")] ++ UNSPEC_ASRTGT_D)] ++ "TARGET_64BIT" ++ "asrtgt.d\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "
csrrd" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (unspec_volatile:GPR [(match_operand 1 "const_uimm14_operand")] ++ UNSPEC_CSRRD))] ++ "" ++ "csrrd\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "
csrwr" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (unspec_volatile:GPR ++ [(match_operand:GPR 1 "register_operand" "0") ++ (match_operand 2 "const_uimm14_operand")] ++ UNSPEC_CSRWR))] ++ "" ++ "csrwr\t%0,%2" ++ [(set_attr "type" "store") ++ (set_attr "mode" "")]) ++ ++(define_insn "
csrxchg" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (unspec_volatile:GPR ++ [(match_operand:GPR 1 "register_operand" "0") ++ (match_operand:GPR 2 "register_operand" "q") ++ (match_operand 3 "const_uimm14_operand")] ++ UNSPEC_CSRXCHG))] ++ "" ++ "csrxchg\t%0,%2,%3" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "iocsrrd_" ++ [(set (match_operand:QHWD 0 "register_operand" "=r") ++ (unspec_volatile:QHWD [(match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_IOCSRRD))] ++ "" ++ "iocsrrd.\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "iocsrwr_" ++ [(unspec_volatile:QHWD [(match_operand:QHWD 0 "register_operand" "r") ++ (match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_IOCSRWR)] ++ "" ++ "iocsrwr.\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "
cacop" ++ [(unspec_volatile:X [(match_operand 0 "const_uimm5_operand") ++ (match_operand:X 1 "register_operand" "r") ++ (match_operand 2 "const_imm12_operand")] ++ UNSPEC_CACOP)] ++ "" ++ "cacop\t%0,%1,%2" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "
lddir" ++ [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") ++ (match_operand:X 1 "register_operand" "r") ++ (match_operand 2 "const_uimm5_operand")] ++ UNSPEC_LDDIR)] ++ "" ++ "lddir\t%0,%1,%2" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "
ldpte" ++ [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") ++ (match_operand 1 "const_uimm5_operand")] ++ UNSPEC_LDPTE)] ++ "" ++ "ldpte\t%0,%1" ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) ++ ++ ++;; Block moves, see loongarch.c for more details. ++;; Argument 0 is the destination ++;; Argument 1 is the source ++;; Argument 2 is the length ++;; Argument 3 is the alignment ++ ++(define_expand "movmemsi" ++ [(parallel [(set (match_operand:BLK 0 "general_operand") ++ (match_operand:BLK 1 "general_operand")) ++ (use (match_operand:SI 2 "")) ++ (use (match_operand:SI 3 "const_int_operand"))])] ++ " !TARGET_MEMCPY" ++{ ++ if (loongarch_expand_block_move (operands[0], operands[1], operands[2])) ++ DONE; ++ else ++ FAIL; ++}) ++ ++;; ++;; .................... ++;; ++;; SHIFTS ++;; ++;; .................... ++ ++(define_expand "3" ++ [(set (match_operand:GPR 0 "register_operand") ++ (any_shift:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:SI 2 "arith_operand")))] ++ "" ++{ ++}) ++ ++(define_insn "*3" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (any_shift:GPR (match_operand:GPR 1 "register_operand" "r") ++ (match_operand:SI 2 "arith_operand" "rI")))] ++ "" ++{ ++ if (CONST_INT_P (operands[2])) ++ { ++ operands[2] = GEN_INT (INTVAL (operands[2]) ++ & (GET_MODE_BITSIZE (mode) - 1)); ++ return "i.\t%0,%1,%2"; ++ } else ++ return ".\t%0,%1,%2"; ++} ++ [(set_attr "type" "shift") ++ (set_attr "compression" "none") ++ (set_attr "mode" "")]) ++ ++(define_insn "*si3_extend" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (any_shift:SI (match_operand:SI 1 "register_operand" "r") ++ (match_operand:SI 2 "arith_operand" "rI"))))] ++ "TARGET_64BIT" ++{ ++ if (CONST_INT_P (operands[2])) ++ { ++ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); ++ return "i.w\t%0,%1,%2"; ++ } else ++ return ".w\t%0,%1,%2"; ++} ++ [(set_attr "type" "shift") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "zero_extend_ashift1" ++ [ (set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) ++ (match_operand 2 "const_immlsa_operand" "")) ++ (match_operand 3 "shift_mask_operand" "")))] ++"" ++"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" ++[(set_attr "type" "arith") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "2")]) ++ ++(define_insn "zero_extend_ashift2" ++ [ (set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_immlsa_operand" "")) ++ (match_operand 3 "shift_mask_operand" "")))] ++"" ++"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" ++[(set_attr "type" "arith") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "2")]) ++ ++(define_insn "alsl_paired1" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ++ (plus:DI (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) ++ (match_operand 2 "const_immlsa_operand" "")) ++ (match_operand 3 "shift_mask_operand" "")) ++ (match_operand:DI 4 "register_operand" "r")))] ++ "" ++ "bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,%4,%2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "2")]) ++ ++(define_insn "alsl_paired2" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ++ (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (and:DI (ashift:DI (match_operand:DI 2 "register_operand" "r") ++ (match_operand 3 "const_immlsa_operand" "")) ++ (match_operand 4 "shift_mask_operand" ""))))] ++ 
"" ++ "bstrpick.d\t%0,%2,31,0\n\talsl.d\t%0,%0,%1,%3" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "2")]) ++ ++(define_insn "alsl_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (plus:GPR (ashift:GPR (match_operand:GPR 1 "register_operand" "r") ++ (match_operand 2 "const_immlsa_operand" "")) ++ (match_operand:GPR 3 "register_operand" "r")))] ++ "ISA_HAS_LSA" ++ "alsl.\t%0,%1,%3,%2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "rotr3" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (rotatert:GPR (match_operand:GPR 1 "register_operand" "r") ++ (match_operand:SI 2 "arith_operand" "rI")))] ++ "" ++{ ++ if (CONST_INT_P (operands[2])) ++ { ++ return "rotri.\t%0,%1,%2"; ++ } else ++ return "rotr.\t%0,%1,%2"; ++} ++ [(set_attr "type" "shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "bswaphi2" ++ [(set (match_operand:HI 0 "register_operand" "=r") ++ (bswap:HI (match_operand:HI 1 "register_operand" "r")))] ++ "" ++ "revb.2h\t%0,%1" ++ [(set_attr "type" "shift")]) ++ ++(define_insn_and_split "bswapsi2" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (bswap:SI (match_operand:SI 1 "register_operand" "r")))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_WSBH)) ++ (set (match_dup 0) (rotatert:SI (match_dup 0) (const_int 16)))] ++ "" ++ [(set_attr "insn_count" "2")]) ++ ++(define_insn_and_split "bswapdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (bswap:DI (match_operand:DI 1 "register_operand" "r")))] ++ "TARGET_64BIT" ++ "#" ++ "" ++ [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_DSBH)) ++ (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_DSHD))] ++ "" ++ [(set_attr "insn_count" "2")]) ++ ++(define_insn "wsbh" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_WSBH))] ++ "" ++ "revb.2h\t%0,%1" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "dsbh" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSBH))] ++ "TARGET_64BIT" ++ "revb.4h\t%0,%1" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "dshd" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSHD))] ++ "TARGET_64BIT" ++ "revh.d\t%0,%1" ++ [(set_attr "type" "shift")]) ++ ++;; ++;; .................... ++;; ++;; CONDITIONAL BRANCHES ++;; ++;; .................... ++ ++;; Conditional branches on floating-point equality tests. ++ ++(define_insn "*branch_fp_fcc" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "equality_operator" ++ [(match_operand:FCC 2 "register_operand" "z") ++ (const_int 0)]) ++ (label_ref (match_operand 0 "" "")) ++ (pc)))] ++ "TARGET_HARD_FLOAT" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ LARCH_BRANCH ("b%F1", "%Z2%0"), ++ LARCH_BRANCH ("b%W1", "%Z2%0")); ++} ++ [(set_attr "type" "branch")]) ++ ++(define_insn "*branch_fp_inverted_fcc" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "equality_operator" ++ [(match_operand:FCC 2 "register_operand" "z") ++ (const_int 0)]) ++ (pc) ++ (label_ref (match_operand 0 "" ""))))] ++ "TARGET_HARD_FLOAT" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ LARCH_BRANCH ("b%W1", "%Z2%0"), ++ LARCH_BRANCH ("b%F1", "%Z2%0")); ++} ++ [(set_attr "type" "branch")]) ++ ++;; Conditional branches on ordered comparisons with zero. 
++ ++(define_insn "*branch_order" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "order_operator" ++ [(match_operand:GPR 2 "register_operand" "r,r") ++ (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) ++ (label_ref (match_operand 0 "" "")) ++ (pc)))] ++ "" ++ { return loongarch_output_order_conditional_branch (insn, operands, false); } ++ [(set_attr "type" "branch") ++ (set_attr "compact_form" "maybe,always") ++ (set_attr "hazard" "forbidden_slot")]) ++ ++(define_insn "*branch_order_inverted" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "order_operator" ++ [(match_operand:GPR 2 "register_operand" "r,r") ++ (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) ++ (pc) ++ (label_ref (match_operand 0 "" ""))))] ++ "" ++ { return loongarch_output_order_conditional_branch (insn, operands, true); } ++ [(set_attr "type" "branch") ++ (set_attr "compact_form" "maybe,always") ++ (set_attr "hazard" "forbidden_slot")]) ++ ++;; Conditional branch on equality comparison. ++ ++(define_insn "*branch_equality" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "equality_operator" ++ [(match_operand:GPR 2 "register_operand" "r") ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) ++ (label_ref (match_operand 0 "" "")) ++ (pc)))] ++ "" ++ { return loongarch_output_equal_conditional_branch (insn, operands, false); } ++ [(set_attr "type" "branch") ++ (set_attr "compact_form" "maybe") ++ (set_attr "hazard" "forbidden_slot")]) ++ ++ ++(define_insn "*branch_equality_inverted" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "equality_operator" ++ [(match_operand:GPR 2 "register_operand" "r") ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) ++ (pc) ++ (label_ref (match_operand 0 "" ""))))] ++ "" ++ { return loongarch_output_equal_conditional_branch (insn, operands, true); } ++ [(set_attr "type" "branch") ++ (set_attr "compact_form" "maybe") ++ (set_attr "hazard" "forbidden_slot")]) ++ ++ ++(define_expand "cbranch4" ++ [(set (pc) ++ (if_then_else (match_operator 0 "comparison_operator" ++ [(match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "nonmemory_operand")]) ++ (label_ref (match_operand 3 "")) ++ (pc)))] ++ "" ++{ ++ loongarch_expand_conditional_branch (operands); ++ DONE; ++}) ++ ++(define_expand "cbranch4" ++ [(set (pc) ++ (if_then_else (match_operator 0 "comparison_operator" ++ [(match_operand:SCALARF 1 "register_operand") ++ (match_operand:SCALARF 2 "register_operand")]) ++ (label_ref (match_operand 3 "")) ++ (pc)))] ++ "" ++{ ++ loongarch_expand_conditional_branch (operands); ++ DONE; ++}) ++ ++;; Used to implement built-in functions. ++(define_expand "condjump" ++ [(set (pc) ++ (if_then_else (match_operand 0) ++ (label_ref (match_operand 1)) ++ (pc)))]) ++ ++ ++ ++;; ++;; .................... ++;; ++;; SETTING A REGISTER FROM A COMPARISON ++;; ++;; .................... ++ ++;; Destination is always set in SI mode. 
++ ++(define_expand "cstore4" ++ [(set (match_operand:SI 0 "register_operand") ++ (match_operator:SI 1 "loongarch_cstore_operator" ++ [(match_operand:GPR 2 "register_operand") ++ (match_operand:GPR 3 "nonmemory_operand")]))] ++ "" ++{ ++ loongarch_expand_scc (operands); ++ DONE; ++}) ++ ++(define_insn "*seq_zero_" ++ [(set (match_operand:GPR2 0 "register_operand" "=r") ++ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r") ++ (const_int 0)))] ++ "" ++ "sltui\t%0,%1,1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "*sne_zero_" ++ [(set (match_operand:GPR2 0 "register_operand" "=r") ++ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r") ++ (const_int 0)))] ++ "" ++ "sltu\t%0,%.,%1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "")]) ++ ++(define_insn "*sgt_" ++ [(set (match_operand:GPR2 0 "register_operand" "=r") ++ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r") ++ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))] ++ "" ++ "slt\t%0,%z2,%1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "*sge_" ++ [(set (match_operand:GPR2 0 "register_operand" "=r") ++ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r") ++ (const_int 1)))] ++ "" ++ "slti\t%0,%.,%1" ++ [(set_attr "type" "slt") ++ (set_attr "mode" "")]) ++ ++(define_insn "*slt_" ++ [(set (match_operand:GPR2 0 "register_operand" "=r") ++ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r") ++ (match_operand:GPR 2 "arith_operand" "rI")))] ++ "" ++{ ++ if (CONST_INT_P (operands[2])) ++ { ++ return "slti\t%0,%1,%2"; ++ } else ++ return "slt\t%0,%1,%2"; ++} ++ [(set_attr "type" "slt") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "*sle_" ++ [(set (match_operand:GPR2 0 "register_operand" "=r") ++ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r") ++ (match_operand:GPR 2 "sle_operand" "")))] ++ "" ++{ ++ operands[2] = GEN_INT (INTVAL (operands[2]) + 1); ++ return "slti\t%0,%1,%2"; ++} ++ [(set_attr "type" "slt") ++ (set_attr "mode" "")]) ++ ++ ++;; ++;; .................... ++;; ++;; FLOATING POINT COMPARISONS ++;; ++;; .................... ++ ++(define_insn "s__using_fcc" ++ [(set (match_operand:FCC 0 "register_operand" "=z") ++ (fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") ++ (match_operand:SCALARF 2 "register_operand" "f")))] ++ "" ++ "fcmp..\t%Z0%1,%2" ++ [(set_attr "type" "fcmp") ++ (set_attr "mode" "FCC")]) ++ ++(define_insn "s__using_fcc" ++ [(set (match_operand:FCC 0 "register_operand" "=z") ++ (swapped_fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") ++ (match_operand:SCALARF 2 "register_operand" "f")))] ++ "" ++ "fcmp..\t%Z0%2,%1" ++ [(set_attr "type" "fcmp") ++ (set_attr "mode" "FCC")]) ++ ++;; ++;; .................... ++;; ++;; UNCONDITIONAL BRANCHES ++;; ++;; .................... ++ ++;; Unconditional branches. 
++ ++(define_expand "jump" ++ [(set (pc) ++ (label_ref (match_operand 0)))]) ++ ++(define_insn "*jump_absolute" ++ [(set (pc) ++ (label_ref (match_operand 0)))] ++ "TARGET_ABSOLUTE_JUMPS" ++{ ++ return LARCH_ABSOLUTE_JUMP ("b\t%l0"); ++} ++ [(set_attr "type" "branch") ++ (set_attr "compact_form" "maybe")]) ++ ++(define_insn "*jump_pic" ++ [(set (pc) ++ (label_ref (match_operand 0)))] ++ "!TARGET_ABSOLUTE_JUMPS" ++{ ++ return "b\t%0"; ++} ++ [(set_attr "type" "branch") ++ (set_attr "compact_form" "maybe")]) ++ ++ ++ ++(define_expand "indirect_jump" ++ [(set (pc) (match_operand 0 "register_operand"))] ++ "" ++{ ++ operands[0] = force_reg (Pmode, operands[0]); ++ emit_jump_insn (PMODE_INSN (gen_indirect_jump, (operands[0]))); ++ DONE; ++}) ++ ++(define_insn "indirect_jump_" ++ [(set (pc) (match_operand:P 0 "register_operand" "r"))] ++ "" ++ { ++ return "jr\t%0"; ++ } ++ [(set_attr "type" "jump") ++ (set_attr "mode" "none")]) ++ ++(define_expand "tablejump" ++ [(set (pc) ++ (match_operand 0 "register_operand")) ++ (use (label_ref (match_operand 1 "")))] ++ "" ++{ ++ if (flag_pic) ++ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0], ++ gen_rtx_LABEL_REF (Pmode, operands[1]), ++ NULL_RTX, 0, OPTAB_DIRECT); ++ emit_jump_insn (PMODE_INSN (gen_tablejump, (operands[0], operands[1]))); ++ DONE; ++}) ++ ++(define_insn "tablejump_" ++ [(set (pc) ++ (match_operand:P 0 "register_operand" "r")) ++ (use (label_ref (match_operand 1 "" "")))] ++ "" ++ { ++ return "jr\t%0"; ++ } ++ [(set_attr "type" "jump") ++ (set_attr "mode" "none")]) ++ ++ ++;; ++;; .................... ++;; ++;; Function prologue/epilogue ++;; ++;; .................... ++;; ++ ++(define_expand "prologue" ++ [(const_int 1)] ++ "" ++{ ++ loongarch_expand_prologue (); ++ DONE; ++}) ++ ++;; Block any insns from being moved before this point, since the ++;; profiling call to mcount can use various registers that aren't ++;; saved or used to pass arguments. ++ ++(define_insn "blockage" ++ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)] ++ "" ++ "" ++ [(set_attr "type" "ghost") ++ (set_attr "mode" "none")]) ++ ++(define_insn "probe_stack_range_" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec_volatile:P [(match_operand:P 1 "register_operand" "0") ++ (match_operand:P 2 "register_operand" "r") ++ (match_operand:P 3 "register_operand" "r")] ++ UNSPEC_PROBE_STACK_RANGE))] ++ "" ++ { return loongarch_output_probe_stack_range (operands[0], operands[2], operands[3]); } ++ [(set_attr "type" "unknown") ++ (set_attr "can_delay" "no") ++ (set_attr "mode" "")]) ++ ++(define_expand "epilogue" ++ [(const_int 2)] ++ "" ++{ ++ loongarch_expand_epilogue (false); ++ DONE; ++}) ++ ++(define_expand "sibcall_epilogue" ++ [(const_int 2)] ++ "" ++{ ++ loongarch_expand_epilogue (true); ++ DONE; ++}) ++ ++;; Trivial return. Make it look like a normal return insn as that ++;; allows jump optimizations to work better. ++ ++(define_expand "return" ++ [(simple_return)] ++ "loongarch_can_use_return_insn ()" ++ { }) ++ ++(define_expand "simple_return" ++ [(simple_return)] ++ "" ++ { }) ++ ++(define_insn "*" ++ [(any_return)] ++ "" ++ { ++ operands[0] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); ++ return "jr\t%0"; ++ } ++ [(set_attr "type" "jump") ++ (set_attr "mode" "none")]) ++ ++;; Normal return. ++ ++(define_insn "_internal" ++ [(any_return) ++ (use (match_operand 0 "pmode_register_operand" ""))] ++ "" ++ { ++ return "jr\t%0"; ++ } ++ [(set_attr "type" "jump") ++ (set_attr "mode" "none")]) ++ ++;; Exception return. 
++(define_insn "loongarch_ertn" ++ [(return) ++ (unspec_volatile [(const_int 0)] UNSPEC_ERTN)] ++ "" ++ "ertn" ++ [(set_attr "type" "trap") ++ (set_attr "mode" "none")]) ++ ++;; Disable interrupts. ++(define_insn "loongarch_di" ++ [(unspec_volatile [(const_int 0)] UNSPEC_DI)] ++ "" ++ "di" ++ [(set_attr "type" "trap") ++ (set_attr "mode" "none")]) ++ ++;; Execution hazard barrier. ++(define_insn "loongarch_ehb" ++ [(unspec_volatile [(const_int 0)] UNSPEC_EHB)] ++ "" ++ "ehb" ++ [(set_attr "type" "trap") ++ (set_attr "mode" "none")]) ++ ++;; Read GPR from previous shadow register set. ++(define_insn "loongarch_rdpgpr_" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec_volatile:P [(match_operand:P 1 "register_operand" "r")] ++ UNSPEC_RDPGPR))] ++ "" ++ "rdpgpr\t%0,%1" ++ [(set_attr "type" "move") ++ (set_attr "mode" "")]) ++ ++;; This is used in compiling the unwind routines. ++(define_expand "eh_return" ++ [(use (match_operand 0 "general_operand"))] ++ "" ++{ ++ if (GET_MODE (operands[0]) != word_mode) ++ operands[0] = convert_to_mode (word_mode, operands[0], 0); ++ if (TARGET_64BIT) ++ emit_insn (gen_eh_set_lr_di (operands[0])); ++ else ++ emit_insn (gen_eh_set_lr_si (operands[0])); ++ DONE; ++}) ++ ++;; Clobber the return address on the stack. We can't expand this ++;; until we know where it will be put in the stack frame. ++ ++(define_insn "eh_set_lr_si" ++ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN) ++ (clobber (match_scratch:SI 1 "=&r"))] ++ "! TARGET_64BIT" ++ "#") ++ ++(define_insn "eh_set_lr_di" ++ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN) ++ (clobber (match_scratch:DI 1 "=&r"))] ++ "TARGET_64BIT" ++ "#") ++ ++(define_split ++ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN) ++ (clobber (match_scratch 1))] ++ "reload_completed" ++ [(const_int 0)] ++{ ++ loongarch_set_return_address (operands[0], operands[1]); ++ DONE; ++}) ++ ++ ++ ++;; ++;; .................... ++;; ++;; FUNCTION CALLS ++;; ++;; .................... ++ ++ ++;; Sibling calls. All these patterns use jump instructions. ++ ++;; If TARGET_SIBCALLS, call_insn_operand will only accept constant ++;; addresses if a direct jump is acceptable. Since the 'S' constraint ++;; is defined in terms of call_insn_operand, the same is true of the ++;; constraints. ++ ++;; When we use an indirect jump, we need a register that will be ++;; preserved by the epilogue. 
++ ++(define_expand "sibcall" ++ [(parallel [(call (match_operand 0 "") ++ (match_operand 1 "")) ++ (use (match_operand 2 "")) ;; next_arg_reg ++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx ++ "TARGET_SIBCALLS" ++{ ++ rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); ++ ++ emit_call_insn (gen_sibcall_internal (target, operands[1])); ++ DONE; ++}) ++ ++(define_insn "sibcall_internal" ++ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,a,t,h")) ++ (match_operand 1 "" ""))] ++ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "jr\t%0"; ++ case 1: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" ++ "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; ++ else ++ return "b\t%0"; ++ case 2: ++ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) ++ return "b\t%0"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; ++ else ++ return "la.global\t$r12,%0\n\tjr\t$r12"; ++ case 3: ++ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; ++ else ++ return "la.global\t$r12,%0\n\tjr\t$r12"; ++ case 4: ++ if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) ++ return "b\t%%plt(%0)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; ++ else ++ sorry ("cmodel extreme and tiny static not support plt."); ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "jal" "indirect,direct,direct,direct,direct")]) ++ ++(define_expand "sibcall_value" ++ [(parallel [(set (match_operand 0 "") ++ (call (match_operand 1 "") ++ (match_operand 2 ""))) ++ (use (match_operand 3 ""))])] ;; next_arg_reg ++ "TARGET_SIBCALLS" ++{ ++ rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); ++ ++ /* Handle return values created by loongarch_return_fpr_pair. */ ++ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) ++ { ++ emit_call_insn (gen_sibcall_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), ++ target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); ++ } ++ else ++ { ++ /* Handle return values created by loongarch_return_fpr_single. 
*/ ++ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) ++ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); ++ ++ emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2])); ++ } ++ DONE; ++}) ++ ++(define_insn "sibcall_value_internal" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) ++ (match_operand 2 "" "")))] ++ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "jr\t%1"; ++ case 1: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.local\t$r12,$r13,%1\n\t" ++ "jr\t$r12"; ++ else ++ return "b\t%1"; ++ case 2: ++ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) ++ return "b\t%1"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\t" ++ "jr\t$r12"; ++ else ++ return "la.global\t$r12,%1\n\t" ++ "jr\t$r12"; ++ case 3: ++ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\t" ++ "jr\t$r12"; ++ else ++ return "la.global\t$r12,%1\n\t" ++ "jr\t$r12"; ++ case 4: ++ if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) ++ return " b\t%%plt(%1)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ else ++ sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "jal" "indirect,direct,direct,direct,direct")]) ++ ++(define_insn "sibcall_value_multiple_internal" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) ++ (match_operand 2 "" ""))) ++ (set (match_operand 3 "register_operand" "") ++ (call (mem:SI (match_dup 1)) ++ (match_dup 2)))] ++ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "jr\t%1"; ++ case 1: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.local\t$r12,$r13,%1\n\t" ++ "jr\t$r12"; ++ else ++ return "b\t%1"; ++ case 2: ++ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) ++ return "b\t%1"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\t" ++ "jr\t$r12"; ++ else ++ return "la.global\t$r12,%1\n\t" ++ "jr\t$r12"; ++ case 3: ++ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\t" ++ "jr\t$r12"; ++ else ++ return "la.global\t$r12,%1\n\t" ++ "jr\t$r12"; ++ case 4: ++ if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) ++ return "b\t%%plt(%1)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ else ++ sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "jal" "indirect,direct,direct,direct,direct")]) ++ ++(define_expand "call" ++ [(parallel [(call (match_operand 0 "") ++ 
(match_operand 1 "")) ++ (use (match_operand 2 "")) ;; next_arg_reg ++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx ++ "" ++{ ++ rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); ++ ++ emit_call_insn (gen_call_internal (target, operands[1])); ++ DONE; ++}) ++;; In the last case, we can generate the individual instructions with ++;; a define_split. There are several things to be wary of: ++;; ++;; - We can't expose the load of $gp before reload. If we did, ++;; it might get removed as dead, but reload can introduce new ++;; uses of $gp by rematerializing constants. ++;; ++;; - We shouldn't restore $gp after calls that never return. ++;; It isn't valid to insert instructions between a noreturn ++;; call and the following barrier. ++;; ++;; - The splitter deliberately changes the liveness of $gp. The unsplit ++;; instruction preserves $gp and so have no effect on its liveness. ++;; But once we generate the separate insns, it becomes obvious that ++;; $gp is not live on entry to the call. ++;; ++ ++(define_insn "call_internal" ++ [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,a,t,h")) ++ (match_operand 1 "" "")) ++ (clobber (reg:SI RETURN_ADDR_REGNUM))] ++ "" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "jirl\t$r1,%0,0"; ++ case 1: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.local\t$r1,$r12,%0\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "bl\t%0"; ++ case 2: ++ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) ++ return "bl\t%0"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%0\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "la.global\t$r1,%0\n\t" ++ "jirl\t$r1,$r1,0"; ++ case 3: ++ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%0\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "la.global\t$r1,%0\n\t" ++ "jirl\t$r1,$r1,0"; ++ case 4: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) ++ return "bl\t%%plt(%0)"; ++ else ++ sorry ("cmodel extreme and tiny-static not support plt."); ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "jal" "indirect,direct,direct,direct,direct") ++ (set_attr "insn_count" "1,2,3,3,2")]) ++ ++ ++(define_expand "call_value" ++ [(parallel [(set (match_operand 0 "") ++ (call (match_operand 1 "") ++ (match_operand 2 ""))) ++ (use (match_operand 3 ""))])] ;; next_arg_reg ++ "" ++{ ++ rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); ++ /* Handle return values created by loongarch_return_fpr_pair. */ ++ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) ++ emit_call_insn (gen_call_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), ++ target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); ++ else ++ { ++ /* Handle return values created by loongarch_return_fpr_single. */ ++ if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) ++ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); ++ ++ emit_call_insn (gen_call_value_internal (operands[0], target, operands[2])); ++ } ++ DONE; ++}) ++ ++;; See comment for call_internal. 
++(define_insn "call_value_internal" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) ++ (match_operand 2 "" ""))) ++ (clobber (reg:SI RETURN_ADDR_REGNUM))] ++ "" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "jirl\t$r1,%1,0"; ++ case 1: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.local\t$r1,$r12,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "bl\t%1"; ++ case 2: ++ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) ++ return "bl\t%1"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "la.global\t$r1,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ case 3: ++ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "la.global\t$r1,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ case 4: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) ++ return "bl\t%%plt(%1)"; ++ else ++ sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "jal" "indirect,direct,direct,direct,direct") ++ (set_attr "insn_count" "1,2,3,3,2")]) ++ ++;; See comment for call_internal. ++(define_insn "call_value_multiple_internal" ++ [(set (match_operand 0 "register_operand" "") ++ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) ++ (match_operand 2 "" ""))) ++ (set (match_operand 3 "register_operand" "") ++ (call (mem:SI (match_dup 1)) ++ (match_dup 2))) ++ (clobber (reg:SI RETURN_ADDR_REGNUM))] ++ "" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "jirl\t$r1,%1,0"; ++ case 1: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.local\t$r1,$r12,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "bl\t%1"; ++ case 2: ++ if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) ++ return "bl\t%1"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\t" ++ "jirl\t$r1,$r1,0 "; ++ else ++ return "la.global\t$r1,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ case 3: ++ if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ else ++ return "la.global\t$r1,%1\n\t" ++ "jirl\t$r1,$r1,0"; ++ case 4: ++ if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) ++ return "bl\t%%plt(%1)"; ++ else ++ sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "jal" "indirect,direct,direct,direct,direct") ++ (set_attr "insn_count" "1,2,3,3,2")]) ++ ++ ++;; Call subroutine returning any type. 
++ ++(define_expand "untyped_call" ++ [(parallel [(call (match_operand 0 "") ++ (const_int 0)) ++ (match_operand 1 "") ++ (match_operand 2 "")])] ++ "" ++{ ++ int i; ++ ++ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx)); ++ ++ for (i = 0; i < XVECLEN (operands[2], 0); i++) ++ { ++ rtx set = XVECEXP (operands[2], 0, i); ++ loongarch_emit_move (SET_DEST (set), SET_SRC (set)); ++ } ++ ++ emit_insn (gen_blockage ()); ++ DONE; ++}) ++ ++;; ++;; .................... ++;; ++;; MISC. ++;; ++;; .................... ++;; ++ ++ ++(define_insn "*prefetch_indexed_" ++ [(prefetch (plus:P (match_operand:P 0 "register_operand" "r") ++ (match_operand:P 1 "register_operand" "r")) ++ (match_operand 2 "const_int_operand" "n") ++ (match_operand 3 "const_int_operand" "n"))] ++ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++{ ++ operands[2] = loongarch_prefetch_cookie (operands[2], operands[3]); ++ return "prefx\t%2,%1(%0)"; ++} ++ [(set_attr "type" "prefetchx")]) ++ ++(define_insn "nop" ++ [(const_int 0)] ++ "" ++ "nop" ++ [(set_attr "type" "nop") ++ (set_attr "mode" "none")]) ++ ++;; Like nop, but commented out when outside a .set noreorder block. ++(define_insn "hazard_nop" ++ [(const_int 1)] ++ "" ++ { ++ return "#nop"; ++ } ++ [(set_attr "type" "nop")]) ++ ++;; The `.insn' pseudo-op. ++(define_insn "insn_pseudo" ++ [(unspec_volatile [(const_int 0)] UNSPEC_INSN_PSEUDO)] ++ "" ++ ".insn" ++ [(set_attr "mode" "none") ++ (set_attr "insn_count" "0")]) ++ ++;; Conditional move instructions. ++ ++(define_insn "*sel_using_" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (if_then_else:GPR ++ (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "r,r") ++ (const_int 0)) ++ (match_operand:GPR 2 "reg_or_0_operand" "r,J") ++ (match_operand:GPR 3 "reg_or_0_operand" "J,r")))] ++ "register_operand (operands[2], mode) ++ != register_operand (operands[3], mode)" ++ "@ ++ \t%0,%2,%1 ++ \t%0,%3,%1" ++ [(set_attr "type" "condmove") ++ (set_attr "mode" "")]) ++ ++;; sel.fmt copies the 3rd argument when the 1st is non-zero and the 2nd ++;; argument if the 1st is zero. This means operand 2 and 3 are ++;; inverted in the instruction. ++ ++(define_insn "*sel" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (if_then_else:SCALARF ++ (ne:FCC (match_operand:FCC 1 "register_operand" "z") ++ (const_int 0)) ++ (match_operand:SCALARF 2 "reg_or_0_operand" "f") ++ (match_operand:SCALARF 3 "reg_or_0_operand" "f")))] ++ "" ++ "fsel\t%0,%3,%2,%1" ++ [(set_attr "type" "condmove") ++ (set_attr "mode" "")]) ++ ++;; These are the main define_expand's used to make conditional moves. 
++ ++(define_expand "movcc" ++ [(set (match_operand:GPR 0 "register_operand") ++ (if_then_else:GPR (match_operator 1 "comparison_operator" ++ [(match_operand:GPR 2 "reg_or_0_operand") ++ (match_operand:GPR 3 "reg_or_0_operand")])))] ++ "TARGET_COND_MOVE_INT" ++{ ++ if (!INTEGRAL_MODE_P (GET_MODE (XEXP (operands[1], 0)))) ++ FAIL; ++ ++ loongarch_expand_conditional_move (operands); ++ DONE; ++}) ++ ++(define_expand "movcc" ++ [(set (match_operand:SCALARF 0 "register_operand") ++ (if_then_else:SCALARF (match_operator 1 "comparison_operator" ++ [(match_operand:SCALARF 2 "reg_or_0_operand") ++ (match_operand:SCALARF 3 "reg_or_0_operand")])))] ++ "TARGET_COND_MOVE_FLOAT" ++{ ++ if (!FLOAT_MODE_P (GET_MODE (XEXP (operands[1], 0)))) ++ FAIL; ++ ++ loongarch_expand_conditional_move (operands); ++ DONE; ++}) ++ ++(define_split ++ [(match_operand 0 "small_data_pattern")] ++ "reload_completed" ++ [(match_dup 0)] ++ { operands[0] = loongarch_rewrite_small_data (operands[0]); }) ++ ++;; Thread-Local Storage ++ ++(define_insn "got_load_tls_gd" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_GD))] ++ "" ++ "la.tls.gd\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "got_load_tls_ld" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_LD))] ++ "" ++ "la.tls.ld\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "got_load_tls_le" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_LE))] ++ "" ++ "la.tls.le\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "got_load_tls_ie" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_IE))] ++ "" ++ "la.tls.ie\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "loongarch_movfcsr2gr" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand")] UNSPEC_MOVFCSR2GR))] ++ "TARGET_HARD_FLOAT" ++ "movfcsr2gr\t%0,$r%1") ++ ++(define_insn "loongarch_movgr2fcsr" ++ [(unspec_volatile [(match_operand 0 "const_uimm5_operand") ++ (match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_MOVGR2FCSR)] ++ "TARGET_HARD_FLOAT" ++ "movgr2fcsr\t$r%0,%1") ++ ++ ++;; Match paired HI/SI/SF/DFmode load/stores. ++(define_insn "*join2_load_store" ++ [(set (match_operand:JOIN_MODE 0 "nonimmediate_operand" "=r,f,m,m,r,ZC") ++ (match_operand:JOIN_MODE 1 "nonimmediate_operand" "m,m,r,f,ZC,r")) ++ (set (match_operand:JOIN_MODE 2 "nonimmediate_operand" "=r,f,m,m,r,ZC") ++ (match_operand:JOIN_MODE 3 "nonimmediate_operand" "m,m,r,f,ZC,r"))] ++ "reload_completed" ++ { ++ bool load_p = (which_alternative == 0 || which_alternative == 1); ++ /* Reg-renaming pass reuses base register if it is dead after bonded loads. ++ Hardware does not bond those loads, even when they are consecutive. ++ However, order of the loads need to be checked for correctness. 
*/
++    if (!load_p || !reg_overlap_mentioned_p (operands[0], operands[1]))
++      {
++	output_asm_insn (loongarch_output_move (operands[0], operands[1]),
++			 operands);
++	output_asm_insn (loongarch_output_move (operands[2], operands[3]),
++			 &operands[2]);
++      }
++    else
++      {
++	output_asm_insn (loongarch_output_move (operands[2], operands[3]),
++			 &operands[2]);
++	output_asm_insn (loongarch_output_move (operands[0], operands[1]),
++			 operands);
++      }
++    return "";
++  }
++  [(set_attr "move_type" "load,fpload,store,fpstore,load,store")
++   (set_attr "insn_count" "2,2,2,2,2,2")])
++
++;; 2 HI/SI/SF/DF loads are joined.
++;; P5600 does not support bonding of two LBs, hence QI mode is not included.
++;; The loads must be non-volatile as they might be reordered at the time of
++;; asm generation.
++(define_peephole2
++  [(set (match_operand:JOIN_MODE 0 "register_operand")
++	(match_operand:JOIN_MODE 1 "non_volatile_mem_operand"))
++   (set (match_operand:JOIN_MODE 2 "register_operand")
++	(match_operand:JOIN_MODE 3 "non_volatile_mem_operand"))]
++  "loongarch_load_store_bonding_p (operands, <MODE>mode, true)"
++  [(parallel [(set (match_dup 0)
++		   (match_dup 1))
++	      (set (match_dup 2)
++		   (match_dup 3))])]
++  "")
++
++;; 2 HI/SI/SF/DF stores are joined.
++;; P5600 does not support bonding of two SBs, hence QI mode is not included.
++(define_peephole2
++  [(set (match_operand:JOIN_MODE 0 "memory_operand")
++	(match_operand:JOIN_MODE 1 "register_operand"))
++   (set (match_operand:JOIN_MODE 2 "memory_operand")
++	(match_operand:JOIN_MODE 3 "register_operand"))]
++  "loongarch_load_store_bonding_p (operands, <MODE>mode, false)"
++  [(parallel [(set (match_dup 0)
++		   (match_dup 1))
++	      (set (match_dup 2)
++		   (match_dup 3))])]
++  "")
++
++;; Match paired HImode loads.
++(define_insn "*join2_loadhi"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand" "m")))
++   (set (match_operand:SI 2 "register_operand" "=r")
++	(any_extend:SI (match_operand:HI 3 "non_volatile_mem_operand" "m")))]
++  "reload_completed"
++  {
++    /* Reg-renaming pass reuses base register if it is dead after bonded loads.
++       Hardware does not bond those loads, even when they are consecutive.
++       However, the order of the loads needs to be checked for correctness.  */
++    if (!reg_overlap_mentioned_p (operands[0], operands[1]))
++      {
++	output_asm_insn ("ld.h\t%0,%1", operands);
++	output_asm_insn ("ld.h\t%2,%3", operands);
++      }
++    else
++      {
++	output_asm_insn ("ld.h\t%2,%3", operands);
++	output_asm_insn ("ld.h\t%0,%1", operands);
++      }
++
++    return "";
++  }
++  [(set_attr "move_type" "load")
++   (set_attr "insn_count" "2")])
++
++
++;; 2 HI loads are joined.
++(define_peephole2
++  [(set (match_operand:SI 0 "register_operand")
++	(any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand")))
++   (set (match_operand:SI 2 "register_operand")
++	(any_extend:SI (match_operand:HI 3 "non_volatile_mem_operand")))]
++  "loongarch_load_store_bonding_p (operands, HImode, true)"
++  [(parallel [(set (match_dup 0)
++		   (any_extend:SI (match_dup 1)))
++	      (set (match_dup 2)
++		   (any_extend:SI (match_dup 3)))])]
++  "")
++
++
++;; Logical AND NOT.
++(define_insn "loongson_gsandn"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(and:GPR
++	  (not:GPR (match_operand:GPR 1 "register_operand" "r"))
++	  (match_operand:GPR 2 "register_operand" "r")))]
++  ""
++  "andn\t%0,%2,%1"
++  [(set_attr "type" "logical")])
++
++;; Logical OR NOT.
++(define_insn "loongson_gsorn" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (ior:GPR ++ (not:GPR (match_operand:GPR 1 "register_operand" "r")) ++ (match_operand:GPR 2 "register_operand" "r")))] ++ "" ++ "orn\t%0,%2,%1" ++ [(set_attr "type" "logical")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (smax:SCALARF (match_operand:SCALARF 1 "register_operand" "f") ++ (match_operand:SCALARF 2 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT" ++ "fmax.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (smin:SCALARF (match_operand:SCALARF 1 "register_operand" "f") ++ (match_operand:SCALARF 2 "register_operand" "f")))] ++ "TARGET_HARD_FLOAT" ++ "fmin.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "smaxa3" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (if_then_else:SCALARF ++ (gt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) ++ (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "TARGET_HARD_FLOAT" ++ "fmaxa.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "smina3" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (if_then_else:SCALARF ++ (lt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) ++ (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "TARGET_HARD_FLOAT" ++ "fmina.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "frint_" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] ++ UNSPEC_FRINT))] ++ "" ++ "frint.\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "")]) ++ ++(define_insn "fclass_" ++ [(set (match_operand:SCALARF 0 "register_operand" "=f") ++ (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] ++ UNSPEC_FCLASS))] ++ "" ++ "fclass.\t%0,%1" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "")]) ++ ++(define_insn "bytepick_w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:SI 1 "register_operand" "r") ++ (match_operand:SI 2 "register_operand" "r") ++ (match_operand:SI 3 "const_0_to_3_operand" "n")] ++ UNSPEC_BYTEPICK_W))] ++ "" ++ "bytepick.w\t%0,%1,%2,%z3" ++ [(set_attr "type" "dspalu") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "bytepick_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "register_operand" "r") ++ (match_operand:DI 3 "const_0_to_7_operand" "n")] ++ UNSPEC_BYTEPICK_D))] ++ "" ++ "bytepick.d\t%0,%1,%2,%z3" ++ [(set_attr "type" "dspalu") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "bitrev_4b" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_BITREV_4B))] ++ "" ++ "bitrev.4b\t%0,%1" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "bitrev_8b" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] ++ UNSPEC_BITREV_8B))] ++ "" ++ "bitrev.8b\t%0,%1" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "DI")]) ++ ++ ++ ++(define_insn "lu32i_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ior:DI ++ (zero_extend:DI ++ (subreg:SI (match_operand:DI 1 
"register_operand" "0") 0)) ++ (match_operand:DI 2 "const_lu32i_operand" "u")))] ++ "TARGET_64BIT" ++ "lu32i.d\t%0,%X2>>32" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "lu52i_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ior:DI ++ (and:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "lu52i_mask_operand")) ++ (match_operand 3 "const_lu52i_operand" "v")))] ++ "TARGET_64BIT" ++ "lu52i.d\t%0,%1,%X3>>52" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI")]) ++ ++(define_mode_iterator QHSD [QI HI SI DI]) ++ ++(define_insn "crc_w__w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") ++ (match_operand:SI 2 "register_operand" "r")] ++ UNSPEC_CRC))] ++ "" ++ "crc.w..w\t%0,%1,%2" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "")]) ++ ++(define_insn "crcc_w__w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") ++ (match_operand:SI 2 "register_operand" "r")] ++ UNSPEC_CRCC))] ++ "" ++ "crcc.w..w\t%0,%1,%2" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "")]) ++ ++;; Synchronization instructions. ++ ++(include "sync.md") ++ ++; The LoongArch SX Instructions. ++(include "lsx.md") ++ ++; The MSA2.0 Instructions. ++(include "lsx2.md") ++ ++; The LoongArch ASX Instructions. ++(include "lasx.md") ++ ++;; Is copying of this instruction disallowed? ++(define_attr "cannot_copy" "no,yes" (const_string "no")) ++ ++(define_insn "stack_tie" ++ [(set (mem:BLK (scratch)) ++ (unspec:BLK [(match_operand:X 0 "register_operand" "r") ++ (match_operand:X 1 "register_operand" "r")] ++ UNSPEC_TIE))] ++ "" ++ "" ++ [(set_attr "length" "0")] ++) ++ ++(define_insn "gpr_save" ++ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_SAVE) ++ (clobber (reg:SI T0_REGNUM)) ++ (clobber (reg:SI T1_REGNUM))] ++ "" ++ { return loongarch_output_gpr_save (INTVAL (operands[0])); }) ++ ++(define_insn "gpr_restore" ++ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)] ++ "" ++ "tail\t__loongarch_restore_%0") ++ ++(define_insn "gpr_restore_return" ++ [(return) ++ (use (match_operand 0 "pmode_register_operand" "")) ++ (const_int 0)] ++ "" ++ "") ++ +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +new file mode 100644 +index 000000000..660de3674 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch.opt +@@ -0,0 +1,171 @@ ++ ++; ++; Copyright (C) 2005-2018 Free Software Foundation, Inc. ++; ++; This file is part of GCC. ++; ++; GCC is free software; you can redistribute it and/or modify it under ++; the terms of the GNU General Public License as published by the Free ++; Software Foundation; either version 3, or (at your option) any later ++; version. ++; ++; GCC is distributed in the hope that it will be useful, but WITHOUT ++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++; License for more details. ++; ++; You should have received a copy of the GNU General Public License ++; along with GCC; see the file COPYING3. If not see ++; . ++ ++HeaderInclude ++config/loongarch/loongarch-opts.h ++ ++mabi= ++Target RejectNegative Joined Enum(loongarch_abi) Var(loongarch_abi) Init(LARCH_ABI_DEFAULT) ++-mabi=ABI Generate code that conforms to the given ABI. 
++
++Enum
++Name(loongarch_abi) Type(int)
++Known LoongArch ABIs (for use with the -mabi= option):
++
++EnumValue
++Enum(loongarch_abi) String(lp32) Value(ABILP32)
++
++EnumValue
++Enum(loongarch_abi) String(lpx32) Value(ABILPX32)
++
++EnumValue
++Enum(loongarch_abi) String(lp64) Value(ABILP64)
++
++march=
++Target RejectNegative Joined Var(loongarch_arch_option) ToLower Enum(loongarch_arch_opt_value)
++-march=ISA	Generate code for the given ISA.
++
++mbranch-cost=
++Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
++-mbranch-cost=COST	Set the cost of branches to roughly COST instructions.
++
++mcheck-zero-division
++Target Report Mask(CHECK_ZERO_DIV)
++Trap on integer divide by zero.
++
++mdouble-float
++Target Report RejectNegative InverseMask(SINGLE_FLOAT, DOUBLE_FLOAT)
++Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations.
++
++mflush-func=
++Target RejectNegative Joined Var(loongarch_cache_flush_func) Init(CACHE_FLUSH_FUNC)
++-mflush-func=FUNC	Use FUNC to flush the cache before calling stack trampolines.
++
++Mask(64BIT)
++
++Mask(FLOAT64)
++
++mhard-float
++Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
++Allow the use of hardware floating-point ABI and instructions.
++
++mlong-calls
++Target Report Var(TARGET_LONG_CALLS)
++Use indirect calls.
++
++mmemcpy
++Target Report Mask(MEMCPY)
++Don't optimize block moves.
++
++mno-float
++Target Report RejectNegative Var(TARGET_NO_FLOAT) Condition(TARGET_SUPPORTS_NO_FLOAT)
++Prevent the use of all floating-point operations.
++
++mno-flush-func
++Target RejectNegative
++Do not use a cache-flushing function before calling stack trampolines.
++
++mrelax-pic-calls
++Target Report Mask(RELAX_PIC_CALLS)
++Try to allow the linker to turn PIC calls into direct calls.
++
++mshared
++Target Report Var(TARGET_SHARED) Init(1)
++When generating -mabicalls code, make the code suitable for use in shared libraries.
++
++msingle-float
++Target Report RejectNegative Mask(SINGLE_FLOAT)
++Restrict the use of hardware floating-point instructions to 32-bit operations.
++
++msoft-float
++Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
++Prevent the use of all hardware floating-point instructions.
++
++mlra
++Target Report Var(loongarch_lra_flag) Init(1) Save
++Use LRA instead of reload.
++
++mtune=
++Target RejectNegative Joined Var(loongarch_tune_option) ToLower Enum(loongarch_arch_opt_value)
++-mtune=PROCESSOR	Optimize the output for PROCESSOR.
++
++mframe-header-opt
++Target Report Var(flag_frame_header_optimization) Optimization
++Optimize frame header.
++
++noasmopt
++Driver
++
++mstrict-align
++Target Report Mask(STRICT_ALIGN) Save
++Do not generate unaligned memory accesses.
++
++mlsx
++Target Report Mask(LSX)
++Use LoongArch SX Extension instructions.
++
++mlasx
++Target Report Var(TARGET_LASX)
++Use LoongArch ASX Extension instructions.
++
++malign-llsc-target
++Target Report Var(TARGET_ALIGN_LLSC_TARGET)
++Align the branch targets of LL/SC atomic sequences.
++
++mmax-inline-memcpy-size=
++Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024)
++-mmax-inline-memcpy-size=SIZE	Set the maximum size of memcpy to inline; the default is 1024.
++
++mvecarg
++Target Report Var(TARGET_VECARG) Init(1)
++Pass vector arguments in vector registers.
++
++mcond-move-int
++Target Report Var(TARGET_COND_MOVE_INT) Init(1)
++Enable conditional moves for integers.
++
++mcond-move-float
++Target Report Var(TARGET_COND_MOVE_FLOAT) Init(1)
++Enable conditional moves for floating point.
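++; Taken together, -mcond-move-int and -mcond-move-float (both on by
++; default) let if-conversion use the branchless select/mask instructions;
++; their -mno- forms are expected to bring the branching sequences back.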
++ ++; The code model option names for -mcmodel. ++ ++Enum ++Name(cmodel) Type(enum loongarch_code_model) ++The code model option names for -mcmodel: ++ ++EnumValue ++Enum(cmodel) String(normal) Value(LARCH_CMODEL_NORMAL) ++ ++EnumValue ++Enum(cmodel) String(tiny) Value(LARCH_CMODEL_TINY) ++ ++EnumValue ++Enum(cmodel) String(tiny-static) Value(LARCH_CMODEL_TINY_STATIC) ++ ++EnumValue ++Enum(cmodel) String(large) Value(LARCH_CMODEL_LARGE) ++ ++EnumValue ++Enum(cmodel) String(extreme) Value(LARCH_CMODEL_EXTREME) ++ ++mcmodel= ++Target RejectNegative Joined Enum(cmodel) Var(loongarch_cmodel_var) Init(LARCH_CMODEL_NORMAL) Save ++Specify the code model. +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +new file mode 100644 +index 000000000..1f7034366 +--- /dev/null ++++ b/gcc/config/loongarch/lsx.md +@@ -0,0 +1,3181 @@ ++;; Machine Description for LARCH Loongson SX ASE ++;; ++;; Copyright (C) 2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++;; ++ ++(define_c_enum "unspec" [ ++ UNSPEC_LSX_ASUB_S ++ UNSPEC_LSX_VABSD_U ++ UNSPEC_LSX_VAVG_S ++ UNSPEC_LSX_VAVG_U ++ UNSPEC_LSX_VAVGR_S ++ UNSPEC_LSX_VAVGR_U ++ UNSPEC_LSX_VBITCLR ++ UNSPEC_LSX_VBITCLRI ++ UNSPEC_LSX_VBITREV ++ UNSPEC_LSX_VBITREVI ++ UNSPEC_LSX_VBITSET ++ UNSPEC_LSX_VBITSETI ++ UNSPEC_LSX_BRANCH_V ++ UNSPEC_LSX_BRANCH ++ UNSPEC_LSX_VFCMP_CAF ++ UNSPEC_LSX_VFCLASS ++ UNSPEC_LSX_VFCMP_CUNE ++ UNSPEC_LSX_VFCVT ++ UNSPEC_LSX_VFCVTH ++ UNSPEC_LSX_VFCVTL ++ UNSPEC_LSX_VFLOGB ++ UNSPEC_LSX_VFRECIP ++ UNSPEC_LSX_VFRINT ++ UNSPEC_LSX_VFRSQRT ++ UNSPEC_LSX_VFCMP_SAF ++ UNSPEC_LSX_VFCMP_SEQ ++ UNSPEC_LSX_VFCMP_SLE ++ UNSPEC_LSX_VFCMP_SLT ++ UNSPEC_LSX_VFCMP_SNE ++ UNSPEC_LSX_VFCMP_SOR ++ UNSPEC_LSX_VFCMP_SUEQ ++ UNSPEC_LSX_VFCMP_SULE ++ UNSPEC_LSX_VFCMP_SULT ++ UNSPEC_LSX_VFCMP_SUN ++ UNSPEC_LSX_VFCMP_SUNE ++ UNSPEC_LSX_VFTINT_S ++ UNSPEC_LSX_VFTINT_U ++ UNSPEC_LSX_VCLO ++ UNSPEC_LSX_VSAT_S ++ UNSPEC_LSX_VSAT_U ++ UNSPEC_LSX_VREPLVE ++ UNSPEC_LSX_VREPLVEI ++ UNSPEC_LSX_VSRAR ++ UNSPEC_LSX_VSRARI ++ UNSPEC_LSX_VSRLR ++ UNSPEC_LSX_VSRLRI ++ UNSPEC_LSX_VSSUB_S ++ UNSPEC_LSX_VSSUB_U ++ UNSPEC_LSX_VSHUF ++ UNSPEC_LSX_VABS ++ UNSPEC_LSX_VMUH_S ++ UNSPEC_LSX_VMUH_U ++ UNSPEC_LSX_VEXTW_S ++ UNSPEC_LSX_VEXTW_U ++ UNSPEC_LSX_VSLLWIL_S ++ UNSPEC_LSX_VSLLWIL_U ++ UNSPEC_LSX_VSRAN ++ UNSPEC_LSX_VSSRAN_S ++ UNSPEC_LSX_VSSRAN_U ++ UNSPEC_LSX_VSRAIN ++ UNSPEC_LSX_VSRAINS_S ++ UNSPEC_LSX_VSRAINS_U ++ UNSPEC_LSX_VSRARN ++ UNSPEC_LSX_VSRLN ++ UNSPEC_LSX_VSRLRN ++ UNSPEC_LSX_VSSRLRN_U ++ UNSPEC_LSX_VFRSTPI ++ UNSPEC_LSX_VFRSTP ++ UNSPEC_LSX_VSHUF4I ++ UNSPEC_LSX_VBSRL_V ++ UNSPEC_LSX_VBSLL_V ++ UNSPEC_LSX_VEXTRINS ++ UNSPEC_LSX_VMSKLTZ ++ UNSPEC_LSX_VSIGNCOV ++ UNSPEC_LSX_VFTINTRNE ++ UNSPEC_LSX_VFTINTRP ++ UNSPEC_LSX_VFTINTRM ++ UNSPEC_LSX_VFTINT_W_D ++ UNSPEC_LSX_VFFINT_S_L ++ UNSPEC_LSX_VFTINTRZ_W_D ++ UNSPEC_LSX_VFTINTRP_W_D ++ UNSPEC_LSX_VFTINTRM_W_D ++ UNSPEC_LSX_VFTINTRNE_W_D ++ 
UNSPEC_LSX_VFTINTL_L_S ++ UNSPEC_LSX_VFFINTH_D_W ++ UNSPEC_LSX_VFFINTL_D_W ++ UNSPEC_LSX_VFTINTRZL_L_S ++ UNSPEC_LSX_VFTINTRZH_L_S ++ UNSPEC_LSX_VFTINTRPL_L_S ++ UNSPEC_LSX_VFTINTRPH_L_S ++ UNSPEC_LSX_VFTINTRMH_L_S ++ UNSPEC_LSX_VFTINTRML_L_S ++ UNSPEC_LSX_VFTINTRNEL_L_S ++ UNSPEC_LSX_VFTINTRNEH_L_S ++ UNSPEC_LSX_VFTINTH_L_H ++ UNSPEC_LSX_VFRINTRNE_S ++ UNSPEC_LSX_VFRINTRNE_D ++ UNSPEC_LSX_VFRINTRZ_S ++ UNSPEC_LSX_VFRINTRZ_D ++ UNSPEC_LSX_VFRINTRP_S ++ UNSPEC_LSX_VFRINTRP_D ++ UNSPEC_LSX_VFRINTRM_S ++ UNSPEC_LSX_VFRINTRM_D ++ UNSPEC_LSX_VSSRARN_S ++ UNSPEC_LSX_VSSRARN_U ++ UNSPEC_LSX_VSSRLN_U ++ UNSPEC_LSX_VSSRLN ++ UNSPEC_LSX_VSSRLRN ++ UNSPEC_LSX_VLDI ++ UNSPEC_LSX_VSHUF_B ++ UNSPEC_LSX_VLDX ++ UNSPEC_LSX_VSTX ++ UNSPEC_LSX_VEXTL_QU_DU ++ UNSPEC_LSX_VSETEQZ_V ++]) ++ ++;; This attribute gives suffix for integers in VHMODE. ++(define_mode_attr dlsxfmt ++ [(V2DI "q") ++ (V4SI "d") ++ (V8HI "w") ++ (V16QI "h")]) ++ ++(define_mode_attr dlsxfmt_u ++ [(V2DI "qu") ++ (V4SI "du") ++ (V8HI "wu") ++ (V16QI "hu")]) ++ ++ ++;; All vector modes with 128 bits. ++(define_mode_iterator LSX [V2DF V4SF V2DI V4SI V8HI V16QI]) ++ ++;; Same as LSX. Used by vcond to iterate two modes. ++(define_mode_iterator LSX_2 [V2DF V4SF V2DI V4SI V8HI V16QI]) ++ ++;; Only used for splitting insert_d and copy_{u,s}.d. ++(define_mode_iterator LSX_D [V2DI V2DF]) ++ ++;; Only used for copy_{u,s}.w. ++(define_mode_iterator LSX_W [V4SI V4SF]) ++ ++;; Only integer modes. ++(define_mode_iterator ILSX [V2DI V4SI V8HI V16QI]) ++ ++;; As ILSX but excludes V16QI. ++(define_mode_iterator ILSX_DWH [V2DI V4SI V8HI]) ++ ++;; As ILSX but excludes V2DI. ++(define_mode_iterator ILSX_WHB [V4SI V8HI V16QI]) ++ ++;; Only integer modes equal or larger than a word. ++(define_mode_iterator ILSX_DW [V2DI V4SI]) ++ ++;; Only integer modes smaller than a word. ++(define_mode_iterator ILSX_HB [V8HI V16QI]) ++ ++;;;; Only integer modes for fixed-point madd_q/maddr_q. ++;;(define_mode_iterator ILSX_WH [V4SI V8HI]) ++ ++;; Only floating-point modes. ++(define_mode_iterator FLSX [V2DF V4SF]) ++ ++;; Only used for immediate set shuffle elements instruction. ++(define_mode_iterator LSX_WHB_W [V4SI V8HI V16QI V4SF]) ++ ++;; The attribute gives the integer vector mode with same size. ++(define_mode_attr VIMODE ++ [(V2DF "V2DI") ++ (V4SF "V4SI") ++ (V2DI "V2DI") ++ (V4SI "V4SI") ++ (V8HI "V8HI") ++ (V16QI "V16QI")]) ++ ++;; The attribute gives half modes for vector modes. ++(define_mode_attr VHMODE ++ [(V8HI "V16QI") ++ (V4SI "V8HI") ++ (V2DI "V4SI")]) ++ ++;; The attribute gives double modes for vector modes. ++(define_mode_attr VDMODE ++ [(V2DI "V2DI") ++ (V4SI "V2DI") ++ (V8HI "V4SI") ++ (V16QI "V8HI")]) ++ ++;; The attribute gives half modes with same number of elements for vector modes. ++(define_mode_attr VTRUNCMODE ++ [(V8HI "V8QI") ++ (V4SI "V4HI") ++ (V2DI "V2SI")]) ++ ++;; This attribute gives the mode of the result for "vpickve2gr_b, copy_u_b" etc. ++(define_mode_attr VRES ++ [(V2DF "DF") ++ (V4SF "SF") ++ (V2DI "DI") ++ (V4SI "SI") ++ (V8HI "SI") ++ (V16QI "SI")]) ++ ++;; Only used with LSX_D iterator. ++(define_mode_attr lsx_d ++ [(V2DI "reg_or_0") ++ (V2DF "register")]) ++ ++;; This attribute gives the integer vector mode with same size. ++(define_mode_attr mode_i ++ [(V2DF "v2di") ++ (V4SF "v4si") ++ (V2DI "v2di") ++ (V4SI "v4si") ++ (V8HI "v8hi") ++ (V16QI "v16qi")]) ++ ++;; This attribute gives suffix for LSX instructions. 
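++;; E.g. an SImode-element operation is printed with the "w" suffix
++;; ("vadd.w") and a DFmode-element one with "d" ("vfadd.d"); the _u and
++;; _f attributes below select the unsigned and floating-point spellings
++;; of the same suffixes.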
++(define_mode_attr lsxfmt ++ [(V2DF "d") ++ (V4SF "w") ++ (V2DI "d") ++ (V4SI "w") ++ (V8HI "h") ++ (V16QI "b")]) ++ ++;; This attribute gives suffix for LSX instructions. ++(define_mode_attr lsxfmt_u ++ [(V2DF "du") ++ (V4SF "wu") ++ (V2DI "du") ++ (V4SI "wu") ++ (V8HI "hu") ++ (V16QI "bu")]) ++ ++;; This attribute gives suffix for integers in VHMODE. ++(define_mode_attr hlsxfmt ++ [(V2DI "w") ++ (V4SI "h") ++ (V8HI "b")]) ++ ++;; This attribute gives suffix for integers in VHMODE. ++(define_mode_attr hlsxfmt_u ++ [(V2DI "wu") ++ (V4SI "hu") ++ (V8HI "bu")]) ++ ++;; This attribute gives define_insn suffix for LSX instructions that need ++;; distinction between integer and floating point. ++(define_mode_attr lsxfmt_f ++ [(V2DF "d_f") ++ (V4SF "w_f") ++ (V2DI "d") ++ (V4SI "w") ++ (V8HI "h") ++ (V16QI "b")]) ++ ++(define_mode_attr flsxfmt_f ++ [(V2DF "d_f") ++ (V4SF "s_f") ++ (V2DI "d") ++ (V4SI "w") ++ (V8HI "h") ++ (V16QI "b")]) ++ ++(define_mode_attr flsxfmt ++ [(V2DF "d") ++ (V4SF "s") ++ (V2DI "d") ++ (V4SI "s")]) ++ ++(define_mode_attr ilsxfmt ++ [(V2DF "l") ++ (V4SF "w")]) ++ ++(define_mode_attr ilsxfmt_u ++ [(V2DF "lu") ++ (V4SF "wu")]) ++ ++;; This is used to form an immediate operand constraint using ++;; "const__operand". ++(define_mode_attr indeximm ++ [(V2DF "0_or_1") ++ (V4SF "0_to_3") ++ (V2DI "0_or_1") ++ (V4SI "0_to_3") ++ (V8HI "uimm3") ++ (V16QI "uimm4")]) ++ ++;; This attribute represents bitmask needed for vec_merge using ++;; "const__operand". ++(define_mode_attr bitmask ++ [(V2DF "exp_2") ++ (V4SF "exp_4") ++ (V2DI "exp_2") ++ (V4SI "exp_4") ++ (V8HI "exp_8") ++ (V16QI "exp_16")]) ++ ++;; This attribute is used to form an immediate operand constraint using ++;; "const__operand". ++(define_mode_attr bitimm ++ [(V16QI "uimm3") ++ (V8HI "uimm4") ++ (V4SI "uimm5") ++ (V2DI "uimm6")]) ++ ++(define_expand "vec_init" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vector_init (operands[0], operands[1]); ++ DONE; ++}) ++ ++;; vpickev pattern with implicit type conversion. 
++(define_insn "vec_pack_trunc_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_concat: ++ (truncate: ++ (match_operand:ILSX_DWH 1 "register_operand" "f")) ++ (truncate: ++ (match_operand:ILSX_DWH 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vpickev.\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_unpacks_hi_v4sf" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LSX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, true/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_lo_v4sf" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_dup 2))))] ++ "ISA_HAS_LSX" ++{ ++ operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, false/*high_p*/); ++}) ++ ++(define_expand "vec_unpacks_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacks_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_hi_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_unpacku_lo_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/); ++ DONE; ++}) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ if (mode == QImode || mode == HImode) ++ { ++ rtx dest1 = gen_reg_rtx (SImode); ++ emit_insn (gen_lsx_vpickve2gr_ (dest1, operands[1], operands[2])); ++ emit_move_insn (operands[0], ++ gen_lowpart (mode, dest1)); ++ } ++ else ++ emit_insn (gen_lsx_vpickve2gr_ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "vec_extract" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLSX 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx temp; ++ HOST_WIDE_INT val = INTVAL (operands[2]); ++ ++ if (val == 0) ++ temp = operands[1]; ++ else ++ { ++ rtx n = GEN_INT (val * GET_MODE_SIZE (mode)); ++ temp = gen_reg_rtx (mode); ++ emit_insn (gen_lsx_vbsrl_ (temp, operands[1], n)); ++ } ++ emit_insn (gen_lsx_vec_extract_ (operands[0], temp)); ++ DONE; ++}) ++ ++(define_insn_and_split "lsx_vec_extract_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_select: ++ (match_operand:FLSX 1 "register_operand" "f") ++ (parallel [(const_int 0)])))] ++ "ISA_HAS_LSX" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 0) (match_dup 1))] ++{ ++ operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); ++} ++ [(set_attr "move_type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_expand "vec_set" ++ [(match_operand:ILSX 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" 
++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lsx_vinsgr2vr_ (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vec_set" ++ [(match_operand:FLSX 0 "register_operand") ++ (match_operand: 1 "register_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lsx_vextrins__scalar (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) ++ ++(define_expand "vcondu" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "reg_or_m1_operand") ++ (match_operand:LSX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:ILSX 4 "register_operand") ++ (match_operand:ILSX 5 "register_operand")])] ++ "ISA_HAS_LSX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++(define_expand "vcond" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "reg_or_m1_operand") ++ (match_operand:LSX 2 "reg_or_0_operand") ++ (match_operator 3 "" ++ [(match_operand:LSX_2 4 "register_operand") ++ (match_operand:LSX_2 5 "register_operand")])] ++ "ISA_HAS_LSX ++ && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" ++{ ++ loongarch_expand_vec_cond_expr (mode, mode, operands); ++ DONE; ++}) ++ ++(define_insn "lsx_vinsgr2vr_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_merge:LSX ++ (vec_duplicate:LSX ++ (match_operand: 1 "reg_or_0_operand" "rJ")) ++ (match_operand:LSX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LSX" ++{ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "vinsgr2vr.\t%w0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_split ++ [(set (match_operand:LSX_D 0 "register_operand") ++ (vec_merge:LSX_D ++ (vec_duplicate:LSX_D ++ (match_operand: 1 "_operand")) ++ (match_operand:LSX_D 2 "register_operand") ++ (match_operand 3 "const__operand")))] ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_insert_d (operands[0], operands[2], operands[3], operands[1]); ++ DONE; ++}) ++ ++(define_insn "lsx_vextrins__internal" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_merge:LSX ++ (vec_duplicate:LSX ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel [(const_int 0)]))) ++ (match_operand:LSX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LSX" ++ "vextrins.\t%w0,%w1,%y3<<4" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++;; Operand 3 is a scalar. 
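++;; A rough C-level view of the vec_set/vec_extract expanders above,
++;; written with the GNU C vector extension (an illustrative sketch, not
++;; part of the port; the commented mnemonics are the ones the patterns
++;; here emit, assuming the middle end lowers the subscripts through
++;; vec_extract/vec_set):
++;;
++;;   typedef int v4i32 __attribute__ ((vector_size (16)));
++;;   typedef double v2f64 __attribute__ ((vector_size (16)));
++;;
++;;   int get2 (v4i32 v) { return v[2]; }                    /* vpickve2gr.w */
++;;   v4i32 set3 (v4i32 v, int x) { v[3] = x; return v; }    /* vinsgr2vr.w  */
++;;   v2f64 set1 (v2f64 v, double x) { v[1] = x; return v; } /* vextrins.d   */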
++(define_insn "lsx_vextrins__scalar" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (vec_merge:FLSX ++ (vec_duplicate:FLSX ++ (match_operand: 1 "register_operand" "f")) ++ (match_operand:FLSX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LSX" ++ "vextrins.\t%w0,%w1,%y3<<4" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpickve2gr_" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (any_extend: ++ (vec_select: ++ (match_operand:ILSX_HB 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")]))))] ++ "ISA_HAS_LSX" ++ "vpickve2gr.\t%0,%w1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpickve2gr_" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (any_extend: ++ (vec_select: ++ (match_operand:LSX_W 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")]))))] ++ "ISA_HAS_LSX" ++ "vpickve2gr.\t%0,%w1,%2" ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "")]) ++ ++(define_insn_and_split "lsx_vpickve2gr_du" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (vec_select:DI ++ (match_operand:V2DI 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_or_1_operand" "")])))] ++ "ISA_HAS_LSX" ++{ ++ if (TARGET_64BIT) ++ return "vpickve2gr.du\t%0,%w1,%2"; ++ else ++ return "#"; ++} ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2], ++ gen_lsx_vpickve2gr_wu); ++ DONE; ++} ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn_and_split "lsx_vpickve2gr_" ++ [(set (match_operand: 0 "register_operand" "=r") ++ (vec_select: ++ (match_operand:LSX_D 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")])))] ++ "ISA_HAS_LSX" ++{ ++ if (TARGET_64BIT) ++ return "vpickve2gr.\t%0,%w1,%2"; ++ else ++ return "#"; ++} ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ [(const_int 0)] ++{ ++ loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2], ++ gen_lsx_vpickve2gr_w); ++ DONE; ++} ++ [(set_attr "type" "simd_copy") ++ (set_attr "mode" "")]) ++ ++ ++(define_expand "abs2" ++ [(match_operand:ILSX 0 "register_operand" "=f") ++ (abs:ILSX (match_operand:ILSX 1 "register_operand" "f"))] ++ "ISA_HAS_LSX" ++{ ++ if (ISA_HAS_LSX) ++ { ++ emit_insn (gen_vabs2 (operands[0], operands[1])); ++ DONE; ++ } else { ++ rtx reg = gen_reg_rtx (mode); ++ emit_move_insn (reg, CONST0_RTX (mode)); ++ emit_insn (gen_lsx_vadda_ (operands[0], operands[1], reg)); ++ DONE; ++ } ++}) ++ ++(define_expand "neg2" ++ [(set (match_operand:ILSX 0 "register_operand") ++ (neg:ILSX (match_operand:ILSX 1 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_vneg2 (operands[0], operands[1])); ++ DONE; ++}) ++ ++(define_expand "neg2" ++ [(set (match_operand:FLSX 0 "register_operand") ++ (neg:FLSX (match_operand:FLSX 1 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ rtx reg = gen_reg_rtx (mode); ++ emit_move_insn (reg, CONST0_RTX (mode)); ++ emit_insn(gen_sub3(operands[0], reg, operands[1])); ++ DONE; ++}) ++ ++(define_expand "lsx_vrepli" ++ [(match_operand:ILSX 0 "register_operand") ++ (match_operand 1 "const_imm10_operand")] ++ "ISA_HAS_LSX" ++{ ++ if (mode == V16QImode) ++ operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]), ++ mode)); ++ emit_move_insn (operands[0], ++ loongarch_gen_const_int_vector (mode, INTVAL (operands[1]))); ++ DONE; ++}) ++ ++(define_insn "lsx_vshuf_" 
++ [(set (match_operand:ILSX_DWH 0 "register_operand" "=f") ++ (unspec:ILSX_DWH [(match_operand:ILSX_DWH 1 "register_operand" "0") ++ (match_operand:ILSX_DWH 2 "register_operand" "f") ++ (match_operand:ILSX_DWH 3 "register_operand" "f")] ++ UNSPEC_LSX_VSHUF))] ++ "ISA_HAS_LSX" ++ "vshuf.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "")]) ++ ++(define_expand "mov" ++ [(set (match_operand:LSX 0) ++ (match_operand:LSX 1))] ++ "ISA_HAS_LSX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++(define_expand "movmisalign" ++ [(set (match_operand:LSX 0) ++ (match_operand:LSX 1))] ++ "ISA_HAS_LSX" ++{ ++ if (loongarch_legitimize_move (mode, operands[0], operands[1])) ++ DONE; ++}) ++ ++;; 128-bit LSX modes can only exist in LSX registers or memory. An exception ++;; is allowing LSX modes for GP registers for arguments and return values. ++(define_insn "mov_lsx" ++ [(set (match_operand:LSX 0 "nonimmediate_operand" "=f,f,R,*r,*f") ++ (match_operand:LSX 1 "move_operand" "fYGYI,R,f,*f,*r"))] ++ "ISA_HAS_LSX" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") ++ (set_attr "mode" "")]) ++ ++(define_split ++ [(set (match_operand:LSX 0 "nonimmediate_operand") ++ (match_operand:LSX 1 "move_operand"))] ++ "reload_completed && ISA_HAS_LSX ++ && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ [(const_int 0)] ++{ ++ loongarch_split_move_insn (operands[0], operands[1], curr_insn); ++ DONE; ++}) ++ ++;; Offset load ++(define_expand "lsx_ld_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr)); ++ DONE; ++}) ++ ++;; Offset store ++(define_expand "lsx_st_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq10_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]); ++ DONE; ++}) ++ ++;; Integer operations ++(define_insn "add3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f,f") ++ (plus:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))] ++ "ISA_HAS_LSX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "vadd.\t%w0,%w1,%w2"; ++ case 1: ++ { ++ HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0)); ++ ++ operands[2] = GEN_INT (-val); ++ return "vsubi.\t%w0,%w1,%d2"; ++ } ++ case 2: ++ return "vaddi.\t%w0,%w1,%E2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (minus:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsub.\t%w0,%w1,%w2 ++ vsubi.\t%w0,%w1,%E2" ++ [(set_attr "alu_type" "simd_add") ++ (set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (mult:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 
"register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vmul.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmadd_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (plus:ILSX (mult:ILSX (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand:ILSX 3 "register_operand" "f")) ++ (match_operand:ILSX 1 "register_operand" "0")))] ++ "ISA_HAS_LSX" ++ "vmadd.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmsub_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (minus:ILSX (match_operand:ILSX 1 "register_operand" "0") ++ (mult:ILSX (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand:ILSX 3 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vmsub.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_mul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (div:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ { return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "udiv3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (udiv:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ { return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "mod3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (mod:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ { return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "umod3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (umod:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ { return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); } ++ [(set_attr "type" "simd_div") ++ (set_attr "mode" "")]) ++ ++(define_insn "xor3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f,f") ++ (xor:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LSX" ++ "@ ++ vxor.v\t%w0,%w1,%w2 ++ vbitrevi.%v0\t%w0,%w1,%V2 ++ vxori.b\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "ior3" ++ [(set (match_operand:LSX 0 "register_operand" "=f,f,f") ++ (ior:LSX ++ (match_operand:LSX 1 "register_operand" "f,f,f") ++ (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))] ++ "ISA_HAS_LSX" ++ "@ ++ vor.v\t%w0,%w1,%w2 ++ vbitseti.%v0\t%w0,%w1,%V2 ++ vori.b\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "and3" ++ [(set (match_operand:LSX 0 "register_operand" "=f,f,f") ++ (and:LSX ++ (match_operand:LSX 1 "register_operand" "f,f,f") ++ (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))] ++ "ISA_HAS_LSX" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "vand.v\t%w0,%w1,%w2"; ++ case 1: ++ { ++ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0); ++ unsigned HOST_WIDE_INT val = ~UINTVAL (elt0); ++ operands[2] = loongarch_gen_const_int_vector (mode, val & (-val)); ++ 
return "vbitclri.%v0\t%w0,%w1,%V2"; ++ } ++ case 2: ++ return "vandi.b\t%w0,%w1,%B2"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "simd_logic,simd_bit,simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (not:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vnor.v\t%w0,%w1,%w1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "TI")]) ++ ++(define_insn "vlshr3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (lshiftrt:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsrl.\t%w0,%w1,%w2 ++ vsrli.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "vashr3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (ashiftrt:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsra.\t%w0,%w1,%w2 ++ vsrai.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "vashl3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (ashift:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))] ++ "ISA_HAS_LSX" ++ "@ ++ vsll.\t%w0,%w1,%w2 ++ vslli.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; Floating-point operations ++(define_insn "add3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (plus:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfadd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sub3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (minus:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfsub.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "mul3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (mult:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmul.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fmul") ++ (set_attr "mode" "")]) ++ ++(define_insn "div3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (div:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfdiv.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "fma4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "0")))] ++ "ISA_HAS_LSX" ++ "vfmadd.\t%w0,%w1,%w2,%w0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "fnma4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (neg:FLSX (match_operand:FLSX 1 "register_operand" "f")) ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "0")))] ++ "ISA_HAS_LSX" ++ "vfnmsub.\t%w0,%w1,%w2,%w0" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "sqrt2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (sqrt:FLSX 
(match_operand:FLSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfsqrt.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++;; Built-in functions ++(define_insn "lsx_vadda_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (plus:ILSX (abs:ILSX (match_operand:ILSX 1 "register_operand" "f")) ++ (abs:ILSX (match_operand:ILSX 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vadda.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "ssadd3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (ss_plus:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vsadd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "usadd3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (us_plus:ILSX (match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vsadd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vabsd_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_ASUB_S))] ++ "ISA_HAS_LSX" ++ "vabsd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vabsd_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VABSD_U))] ++ "ISA_HAS_LSX" ++ "vabsd.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavg_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVG_S))] ++ "ISA_HAS_LSX" ++ "vavg.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavg_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVG_U))] ++ "ISA_HAS_LSX" ++ "vavg.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavgr_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVGR_S))] ++ "ISA_HAS_LSX" ++ "vavgr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vavgr_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VAVGR_U))] ++ "ISA_HAS_LSX" ++ "vavgr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitclr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VBITCLR))] ++ "ISA_HAS_LSX" ++ "vbitclr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitclri_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX 
[(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VBITCLRI))] ++ "ISA_HAS_LSX" ++ "vbitclri.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitrev_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VBITREV))] ++ "ISA_HAS_LSX" ++ "vbitrev.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitrevi_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const_lsx_branch_operand" "")] ++ UNSPEC_LSX_VBITREVI))] ++ "ISA_HAS_LSX" ++ "vbitrevi.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitsel_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (ior:ILSX (and:ILSX (not:ILSX ++ (match_operand:ILSX 3 "register_operand" "f")) ++ (match_operand:ILSX 1 "register_operand" "f")) ++ (and:ILSX (match_dup 3) ++ (match_operand:ILSX 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vbitsel.v\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitseli_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (ior:V16QI (and:V16QI (not:V16QI ++ (match_operand:V16QI 1 "register_operand" "0")) ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (and:V16QI (match_dup 1) ++ (match_operand:V16QI 3 "const_vector_same_val_operand" "Urv8"))))] ++ "ISA_HAS_LSX" ++ "vbitseli.b\t%w0,%w2,%B3" ++ [(set_attr "type" "simd_bitmov") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vbitset_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VBITSET))] ++ "ISA_HAS_LSX" ++ "vbitset.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbitseti_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VBITSETI))] ++ "ISA_HAS_LSX" ++ "vbitseti.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_code_iterator ICC [eq le leu lt ltu]) ++ ++(define_code_attr icc ++ [(eq "eq") ++ (le "le") ++ (leu "le") ++ (lt "lt") ++ (ltu "lt")]) ++ ++(define_code_attr icci ++ [(eq "eqi") ++ (le "lei") ++ (leu "lei") ++ (lt "lti") ++ (ltu "lti")]) ++ ++(define_code_attr cmpi ++ [(eq "s") ++ (le "s") ++ (leu "u") ++ (lt "s") ++ (ltu "u")]) ++ ++(define_code_attr cmpi_1 ++ [(eq "") ++ (le "") ++ (leu "u") ++ (lt "") ++ (ltu "u")]) ++ ++(define_insn "lsx_vs_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (ICC:ILSX ++ (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vs.\t%w0,%w1,%w2 ++ vs.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfclass_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFCLASS))] ++ "ISA_HAS_LSX" ++ "vfclass.\t%w0,%w1" ++ [(set_attr "type" "simd_fclass") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcmp_caf_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: 
[(match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCMP_CAF))] ++ "ISA_HAS_LSX" ++ "vfcmp.caf.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcmp_cune_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCMP_CUNE))] ++ "ISA_HAS_LSX" ++ "vfcmp.cune.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_code_iterator vfcond [unordered ordered eq ne le lt uneq unle unlt]) ++ ++(define_code_attr fcc ++ [(unordered "cun") ++ (ordered "cor") ++ (eq "ceq") ++ (ne "cne") ++ (uneq "cueq") ++ (unle "cule") ++ (unlt "cult") ++ (le "cle") ++ (lt "clt")]) ++ ++(define_int_iterator FSC_UNS [UNSPEC_LSX_VFCMP_SAF UNSPEC_LSX_VFCMP_SUN UNSPEC_LSX_VFCMP_SOR ++ UNSPEC_LSX_VFCMP_SEQ UNSPEC_LSX_VFCMP_SNE UNSPEC_LSX_VFCMP_SUEQ ++ UNSPEC_LSX_VFCMP_SUNE UNSPEC_LSX_VFCMP_SULE UNSPEC_LSX_VFCMP_SULT ++ UNSPEC_LSX_VFCMP_SLE UNSPEC_LSX_VFCMP_SLT]) ++ ++(define_int_attr fsc ++ [(UNSPEC_LSX_VFCMP_SAF "saf") ++ (UNSPEC_LSX_VFCMP_SUN "sun") ++ (UNSPEC_LSX_VFCMP_SOR "sor") ++ (UNSPEC_LSX_VFCMP_SEQ "seq") ++ (UNSPEC_LSX_VFCMP_SNE "sne") ++ (UNSPEC_LSX_VFCMP_SUEQ "sueq") ++ (UNSPEC_LSX_VFCMP_SUNE "sune") ++ (UNSPEC_LSX_VFCMP_SULE "sule") ++ (UNSPEC_LSX_VFCMP_SULT "sult") ++ (UNSPEC_LSX_VFCMP_SLE "sle") ++ (UNSPEC_LSX_VFCMP_SLT "slt")]) ++ ++(define_insn "lsx_vfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vfcond: (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfcmp..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcmp__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")] ++ FSC_UNS))] ++ "ISA_HAS_LSX" ++ "vfcmp..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr fint ++ [(V4SF "v4si") ++ (V2DF "v2di")]) ++ ++(define_mode_attr FINTCNV ++ [(V4SF "I2S") ++ (V2DF "I2D")]) ++ ++(define_mode_attr FINTCNV_2 ++ [(V4SF "S2I") ++ (V2DF "D2I")]) ++ ++(define_insn "float2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (float:FLSX (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vffint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "floatuns2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unsigned_float:FLSX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vffint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_mode_attr FFQ ++ [(V4SF "V8HI") ++ (V2DF "V4SI")]) ++ ++(define_insn "lsx_vreplgr2vr_" ++ [(set (match_operand:LSX 0 "register_operand" "=f,f") ++ (vec_duplicate:LSX ++ (match_operand: 1 "reg_or_0_operand" "r,J")))] ++ "ISA_HAS_LSX" ++{ ++ if (which_alternative == 1) ++ return "ldi.\t%w0,0"; ++ ++ if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) ++ return "#"; ++ else ++ return "vreplgr2vr.\t%w0,%z1"; ++} ++ [(set_attr "type" "simd_fill") ++ (set_attr "mode" "")]) ++ ++(define_split ++ [(set (match_operand:LSX_D 0 "register_operand") ++ (vec_duplicate:LSX_D ++ (match_operand: 1 "register_operand")))] ++ "reload_completed && ISA_HAS_LSX && !TARGET_64BIT" ++ 
[(const_int 0)] ++{ ++ loongarch_split_lsx_fill_d (operands[0], operands[1]); ++ DONE; ++}) ++ ++(define_insn "lsx_vflogb_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFLOGB))] ++ "ISA_HAS_LSX" ++ "vflogb.\t%w0,%w1" ++ [(set_attr "type" "simd_flog2") ++ (set_attr "mode" "")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (smax:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmax.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfmaxa_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (if_then_else:FLSX ++ (gt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f")) ++ (abs:FLSX (match_operand:FLSX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LSX" ++ "vfmaxa.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (smin:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmin.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfmina_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (if_then_else:FLSX ++ (lt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f")) ++ (abs:FLSX (match_operand:FLSX 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "ISA_HAS_LSX" ++ "vfmina.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fminmax") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrecip_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRECIP))] ++ "ISA_HAS_LSX" ++ "vfrecip.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrint_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINT))] ++ "ISA_HAS_LSX" ++ "vfrint.\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrsqrt_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRSQRT))] ++ "ISA_HAS_LSX" ++ "vfrsqrt.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vftint_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINT_S))] ++ "ISA_HAS_LSX" ++ "vftint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vftint_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINT_U))] ++ "ISA_HAS_LSX" ++ "vftint..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "fix_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (fix: (match_operand:FLSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vftintrz..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "fixuns_trunc2" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unsigned_fix: (match_operand:FLSX 1 
"register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vftintrz..\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "cnv_mode" "") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vhw_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addsub:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LSX" ++ "vhw.h.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vhw_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addsub:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LSX" ++ "vhw.w.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vhw_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addsub:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)])))))] ++ "ISA_HAS_LSX" ++ "vhw.d.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vpackev_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 2) (const_int 18) ++ (const_int 4) (const_int 20) ++ (const_int 6) (const_int 22) ++ (const_int 8) (const_int 24) ++ (const_int 10) (const_int 26) ++ (const_int 12) (const_int 28) ++ (const_int 14) (const_int 30)])))] ++ "ISA_HAS_LSX" ++ "vpackev.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpackev_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 2) (const_int 10) ++ (const_int 4) (const_int 12) ++ (const_int 6) (const_int 14)])))] ++ "ISA_HAS_LSX" ++ "vpackev.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpackev_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpackev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpackev_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ 
(match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 2) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpackev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvh_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 8) (const_int 24) ++ (const_int 9) (const_int 25) ++ (const_int 10) (const_int 26) ++ (const_int 11) (const_int 27) ++ (const_int 12) (const_int 28) ++ (const_int 13) (const_int 29) ++ (const_int 14) (const_int 30) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LSX" ++ "vilvh.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vilvh_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 4) (const_int 12) ++ (const_int 5) (const_int 13) ++ (const_int 6) (const_int 14) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LSX" ++ "vilvh.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vilvh_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 6) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vilvh.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vilvh_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 2) (const_int 6) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vilvh.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvh_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (vec_select:V2DI ++ (vec_concat:V4DI ++ (match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3)])))] ++ "ISA_HAS_LSX" ++ "vilvh.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vilvh_d_f" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (vec_select:V2DF ++ (vec_concat:V4DF ++ (match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3)])))] ++ "ISA_HAS_LSX" ++ "vilvh.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vpackod_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 17) ++ (const_int 3) (const_int 19) ++ (const_int 5) (const_int 21) ++ (const_int 7) (const_int 23) ++ (const_int 9) (const_int 25) ++ (const_int 11) (const_int 27) ++ (const_int 13) (const_int 29) ++ (const_int 15) (const_int 31)])))] ++ "ISA_HAS_LSX" ++ "vpackod.b\t%w0,%w2,%w1" ++ 
[(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpackod_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 9) ++ (const_int 3) (const_int 11) ++ (const_int 5) (const_int 13) ++ (const_int 7) (const_int 15)])))] ++ "ISA_HAS_LSX" ++ "vpackod.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpackod_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpackod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpackod_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 5) ++ (const_int 3) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpackod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvl_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 16) ++ (const_int 1) (const_int 17) ++ (const_int 2) (const_int 18) ++ (const_int 3) (const_int 19) ++ (const_int 4) (const_int 20) ++ (const_int 5) (const_int 21) ++ (const_int 6) (const_int 22) ++ (const_int 7) (const_int 23)])))] ++ "ISA_HAS_LSX" ++ "vilvl.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vilvl_h" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 8) ++ (const_int 1) (const_int 9) ++ (const_int 2) (const_int 10) ++ (const_int 3) (const_int 11)])))] ++ "ISA_HAS_LSX" ++ "vilvl.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vilvl_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 1) (const_int 5)])))] ++ "ISA_HAS_LSX" ++ "vilvl.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vilvl_w_f" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 4) ++ (const_int 1) (const_int 5)])))] ++ "ISA_HAS_LSX" ++ "vilvl.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vilvl_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (vec_select:V2DI ++ (vec_concat:V4DI ++ (match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")) ++ 
(parallel [(const_int 0) (const_int 2)])))] ++ "ISA_HAS_LSX" ++ "vilvl.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vilvl_d_f" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (vec_select:V2DF ++ (vec_concat:V4DF ++ (match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2)])))] ++ "ISA_HAS_LSX" ++ "vilvl.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "smax3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (smax:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmax.\t%w0,%w1,%w2 ++ vmaxi.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umax3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (umax:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmax.\t%w0,%w1,%w2 ++ vmaxi.\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (smin:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmin.\t%w0,%w1,%w2 ++ vmini.\t%w0,%w1,%E2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "umin3" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (umin:ILSX (match_operand:ILSX 1 "register_operand" "f,f") ++ (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))] ++ "ISA_HAS_LSX" ++ "@ ++ vmin.\t%w0,%w1,%w2 ++ vmini.\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vclo_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VCLO))] ++ "ISA_HAS_LSX" ++ "vclo.\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "clz2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (clz:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vclz.\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_nor_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (and:ILSX (not:ILSX (match_operand:ILSX 1 "register_operand" "f,f")) ++ (not:ILSX (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))] ++ "ISA_HAS_LSX" ++ "@ ++ vnor.v\t%w0,%w1,%w2 ++ vnori.b\t%w0,%w1,%B2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpickev_b" ++[(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14) ++ (const_int 16) (const_int 18) ++ (const_int 20) (const_int 22) ++ (const_int 24) (const_int 26) ++ (const_int 28) (const_int 30)])))] ++ "ISA_HAS_LSX" ++ "vpickev.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpickev_h" ++[(set (match_operand:V8HI 0 "register_operand" "=f") ++ 
(vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))] ++ "ISA_HAS_LSX" ++ "vpickev.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpickev_w" ++[(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpickev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpickev_w_f" ++[(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))] ++ "ISA_HAS_LSX" ++ "vpickev.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vpickod_b" ++[(set (match_operand:V16QI 0 "register_operand" "=f") ++ (vec_select:V16QI ++ (vec_concat:V32QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (match_operand:V16QI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15) ++ (const_int 17) (const_int 19) ++ (const_int 21) (const_int 23) ++ (const_int 25) (const_int 27) ++ (const_int 29) (const_int 31)])))] ++ "ISA_HAS_LSX" ++ "vpickod.b\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vpickod_h" ++[(set (match_operand:V8HI 0 "register_operand" "=f") ++ (vec_select:V8HI ++ (vec_concat:V16HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (match_operand:V8HI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))] ++ "ISA_HAS_LSX" ++ "vpickod.h\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vpickod_w" ++[(set (match_operand:V4SI 0 "register_operand" "=f") ++ (vec_select:V4SI ++ (vec_concat:V8SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (match_operand:V4SI 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpickod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vpickod_w_f" ++[(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_select:V4SF ++ (vec_concat:V8SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")) ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))] ++ "ISA_HAS_LSX" ++ "vpickod.w\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "popcount2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (popcount:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vpcnt.\t%w0,%w1" ++ [(set_attr "type" "simd_pcnt") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsat_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 
"register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSAT_S))] ++ "ISA_HAS_LSX" ++ "vsat.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsat_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSAT_U))] ++ "ISA_HAS_LSX" ++ "vsat.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_sat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vshuf4i_" ++ [(set (match_operand:LSX_WHB_W 0 "register_operand" "=f") ++ (vec_select:LSX_WHB_W ++ (match_operand:LSX_WHB_W 1 "register_operand" "f") ++ (match_operand 2 "par_const_vector_shf_set_operand" "")))] ++ "ISA_HAS_LSX" ++{ ++ HOST_WIDE_INT val = 0; ++ unsigned int i; ++ ++ /* We convert the selection to an immediate. */ ++ for (i = 0; i < 4; i++) ++ val |= INTVAL (XVECEXP (operands[2], 0, i)) << (2 * i); ++ ++ operands[2] = GEN_INT (val); ++ return "vshuf4i.\t%w0,%w1,%X2"; ++} ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrar_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRAR))] ++ "ISA_HAS_LSX" ++ "vsrar.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrari_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRARI))] ++ "ISA_HAS_LSX" ++ "vsrari.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRLR))] ++ "ISA_HAS_LSX" ++ "vsrlr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlri_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRLRI))] ++ "ISA_HAS_LSX" ++ "vsrlri.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssub_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSUB_S))] ++ "ISA_HAS_LSX" ++ "vssub.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssub_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSUB_U))] ++ "ISA_HAS_LSX" ++ "vssub.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vreplve_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (unspec:LSX [(match_operand:LSX 1 "register_operand" "f") ++ (match_operand:SI 2 "register_operand" "r")] ++ UNSPEC_LSX_VREPLVE))] ++ "ISA_HAS_LSX" ++ "vreplve.\t%w0,%w1,%z2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vreplvei_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel 
[(match_operand 2 "const__operand" "")]))))] ++ "ISA_HAS_LSX" ++ "vreplvei.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vreplvei__scalar" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand: 1 "register_operand" "f")] ++ UNSPEC_LSX_VREPLVEI))] ++ "ISA_HAS_LSX" ++ "vreplvei.\t%w0,%w1,0" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfcvt_h_s" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "f") ++ (match_operand:V4SF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCVT))] ++ "ISA_HAS_LSX" ++ "vfcvt.h.s\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vfcvt_s_d" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFCVT))] ++ "ISA_HAS_LSX" ++ "vfcvt.s.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "vec_pack_trunc_v2df" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_concat:V4SF ++ (float_truncate:V2SF (match_operand:V2DF 1 "register_operand" "f")) ++ (float_truncate:V2SF (match_operand:V2DF 2 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vfcvt.s.d\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfcvth_s_h" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFCVTH))] ++ "ISA_HAS_LSX" ++ "vfcvth.s.h\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfcvth_d_s" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LSX" ++ "vfcvth.d.s\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfcvtl_s_h" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFCVTL))] ++ "ISA_HAS_LSX" ++ "vfcvtl.s.h\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfcvtl_d_s" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1)]))))] ++ "ISA_HAS_LSX" ++ "vfcvtl.d.s\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DF")]) ++ ++(define_code_attr lsxbr ++ [(eq "bz") ++ (ne "bnz")]) ++ ++(define_code_attr lsxeq_v ++ [(eq "eqz") ++ (ne "nez")]) ++ ++(define_code_attr lsxne_v ++ [(eq "nez") ++ (ne "eqz")]) ++ ++(define_code_attr lsxeq ++ [(eq "anyeqz") ++ (ne "allnez")]) ++ ++(define_code_attr lsxne ++ [(eq "allnez") ++ (ne "anyeqz")]) ++ ++(define_insn "lsx__" ++ [(set (pc) (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LSX 1 "register_operand" "f")] ++ UNSPEC_LSX_BRANCH) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LSX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "vset.\t%Z3%w1\n\tbcnez\t%Z3%0", ++ "vset.\t%Z3%w1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "") ++ (set_attr 
"compact_form" "never")]) ++ ++(define_insn "lsx__v_" ++ [(set (pc) (if_then_else ++ (equality_op ++ (unspec:SI [(match_operand:LSX 1 "register_operand" "f")] ++ UNSPEC_LSX_BRANCH_V) ++ (match_operand:SI 2 "const_0_operand")) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_scratch:FCC 3 "=z"))] ++ "ISA_HAS_LSX" ++{ ++ return loongarch_output_conditional_branch (insn, operands, ++ "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0", ++ "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0"); ++} ++ [(set_attr "type" "simd_branch") ++ (set_attr "mode" "TI") ++ (set_attr "compact_form" "never")]) ++ ++;; vec_concate ++(define_expand "vec_concatv2di" ++ [(set (match_operand:V2DI 0 "register_operand") ++ (vec_concat:V2DI ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[1], ++ operands[0], GEN_INT(0))); ++ emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[2], ++ operands[0], GEN_INT(1))); ++ DONE; ++}) ++ ++ ++(define_insn "vandn3" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (and:LSX (not:LSX (match_operand:LSX 1 "register_operand" "f")) ++ (match_operand:LSX 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vandn.v\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "vabs2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (abs:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vsigncov.\t%w0,%w1,%w1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "vneg2" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (neg:ILSX (match_operand:ILSX 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vneg.\t%w0,%w1" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmuh_s_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VMUH_S))] ++ "ISA_HAS_LSX" ++ "vmuh.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmuh_u_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VMUH_U))] ++ "ISA_HAS_LSX" ++ "vmuh.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vextw_s_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTW_S))] ++ "ISA_HAS_LSX" ++ "vextw_s.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vextw_u_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTW_U))] ++ "ISA_HAS_LSX" ++ "vextw_u.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vsllwil_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSLLWIL_S))] ++ "ISA_HAS_LSX" ++ "vsllwil..\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsllwil_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ 
UNSPEC_LSX_VSLLWIL_U))] ++ "ISA_HAS_LSX" ++ "vsllwil..\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsran__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRAN))] ++ "ISA_HAS_LSX" ++ "vsran..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssran_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRAN_S))] ++ "ISA_HAS_LSX" ++ "vssran..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssran_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRAN_U))] ++ "ISA_HAS_LSX" ++ "vssran..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrain_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRAIN))] ++ "ISA_HAS_LSX" ++ "vsrain.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; FIXME: bitimm ++(define_insn "lsx_vsrains_s_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRAINS_S))] ++ "ISA_HAS_LSX" ++ "vsrains_s.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++;; FIXME: bitimm ++(define_insn "lsx_vsrains_u_" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VSRAINS_U))] ++ "ISA_HAS_LSX" ++ "vsrains_u.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrarn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRARN))] ++ "ISA_HAS_LSX" ++ "vsrarn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarn_s__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRARN_S))] ++ "ISA_HAS_LSX" ++ "vssrarn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRARN_U))] ++ "ISA_HAS_LSX" ++ "vssrarn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrln__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRLN))] ++ "ISA_HAS_LSX" ++ "vsrln..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrln_u__" ++ [(set (match_operand: 
0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRLN_U))] ++ "ISA_HAS_LSX" ++ "vssrln..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlrn__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSRLRN))] ++ "ISA_HAS_LSX" ++ "vsrlrn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlrn_u__" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f") ++ (match_operand:ILSX_DWH 2 "register_operand" "f")] ++ UNSPEC_LSX_VSSRLRN_U))] ++ "ISA_HAS_LSX" ++ "vssrlrn..\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrstpi_" ++ [(set (match_operand:ILSX_HB 0 "register_operand" "=f") ++ (unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0") ++ (match_operand:ILSX_HB 2 "register_operand" "f") ++ (match_operand 3 "const_uimm5_operand" "")] ++ UNSPEC_LSX_VFRSTPI))] ++ "ISA_HAS_LSX" ++ "vfrstpi.\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vfrstp_" ++ [(set (match_operand:ILSX_HB 0 "register_operand" "=f") ++ (unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0") ++ (match_operand:ILSX_HB 2 "register_operand" "f") ++ (match_operand:ILSX_HB 3 "register_operand" "f")] ++ UNSPEC_LSX_VFRSTP))] ++ "ISA_HAS_LSX" ++ "vfrstp.\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vshuf4i_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LSX_VSHUF4I))] ++ "ISA_HAS_LSX" ++ "vshuf4i.d\t%w0,%w2,%3" ++ [(set_attr "type" "simd_sld") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vbsrl_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (unspec:LSX [(match_operand:LSX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LSX_VBSRL_V))] ++ "ISA_HAS_LSX" ++ "vbsrl.v\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vbsll_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const_uimm5_operand" "")] ++ UNSPEC_LSX_VBSLL_V))] ++ "ISA_HAS_LSX" ++ "vbsll.v\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vextrins_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VEXTRINS))] ++ "ISA_HAS_LSX" ++ "vextrins.\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vmskltz_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VMSKLTZ))] ++ "ISA_HAS_LSX" ++ "vmskltz.\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsigncov_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 
"register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VSIGNCOV))] ++ "ISA_HAS_LSX" ++ "vsigncov.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_expand "copysign3" ++ [(set (match_dup 4) ++ (and:FLSX ++ (not:FLSX (match_dup 3)) ++ (match_operand:FLSX 1 "register_operand"))) ++ (set (match_dup 5) ++ (and:FLSX (match_dup 3) ++ (match_operand:FLSX 2 "register_operand"))) ++ (set (match_operand:FLSX 0 "register_operand") ++ (ior:FLSX (match_dup 4) (match_dup 5)))] ++ "ISA_HAS_LSX" ++{ ++ operands[3] = loongarch_build_signbit_mask (mode, 1, 0); ++ ++ operands[4] = gen_reg_rtx (mode); ++ operands[5] = gen_reg_rtx (mode); ++}) ++ ++(define_insn "absv2df2" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (abs:V2DF (match_operand:V2DF 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vbitclri.d\t%w0,%w1,63" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "absv4sf2" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (abs:V4SF (match_operand:V4SF 1 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vbitclri.w\t%w0,%w1,31" ++ [(set_attr "type" "simd_logic") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "vfmadd4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "vfmadd.\t%w0,%w1,$w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "vfmsub4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (fma:FLSX (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (neg:FLSX (match_operand:FLSX 3 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vfmsub.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "vfnmsub4_nmsub4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (neg:FLSX ++ (fma:FLSX ++ (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (neg:FLSX (match_operand:FLSX 3 "register_operand" "f")))))] ++ "ISA_HAS_LSX" ++ "vfnmsub.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++ ++(define_insn "vfnmadd4_nmadd4" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (neg:FLSX ++ (fma:FLSX ++ (match_operand:FLSX 1 "register_operand" "f") ++ (match_operand:FLSX 2 "register_operand" "f") ++ (match_operand:FLSX 3 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "vfnmadd.\t%w0,%w1,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vftintrne_w_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNE))] ++ "ISA_HAS_LSX" ++ "vftintrne.w.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrne_l_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNE))] ++ "ISA_HAS_LSX" ++ "vftintrne.l.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrp_w_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRP))] ++ "ISA_HAS_LSX" ++ "vftintrp.w.s\t%w0,%w1" ++ [(set_attr "type" 
"simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrp_l_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRP))] ++ "ISA_HAS_LSX" ++ "vftintrp.l.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrm_w_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRM))] ++ "ISA_HAS_LSX" ++ "vftintrm.w.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrm_l_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRM))] ++ "ISA_HAS_LSX" ++ "vftintrm.l.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftint_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINT_W_D))] ++ "ISA_HAS_LSX" ++ "vftint.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vffint_s_l" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VFFINT_S_L))] ++ "ISA_HAS_LSX" ++ "vffint.s.l\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vftintrz_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRZ_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrz.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrp_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRP_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrp.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrm_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRM_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrm.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftintrne_w_d" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f") ++ (match_operand:V2DF 2 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNE_W_D))] ++ "ISA_HAS_LSX" ++ "vftintrne.w.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vftinth_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTH_L_H))] ++ "ISA_HAS_LSX" ++ "vftinth.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintl_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintl.l.s\t%w0,%w1" ++ 
[(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vffinth_d_w" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFFINTH_D_W))] ++ "ISA_HAS_LSX" ++ "vffinth.d.w\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vffintl_d_w" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")] ++ UNSPEC_LSX_VFFINTL_D_W))] ++ "ISA_HAS_LSX" ++ "vffintl.d.w\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vftintrzh_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRZH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrzh.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrzl_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRZL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrzl.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrph_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRPH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrph.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrpl_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRPL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrpl.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrmh_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRMH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrmh.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrml_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRML_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrml.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrneh_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNEH_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrneh.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vftintrnel_l_s" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFTINTRNEL_L_S))] ++ "ISA_HAS_LSX" ++ "vftintrnel.l.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrne_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRNE_S))] ++ "ISA_HAS_LSX" ++ "vfrintrne.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrne_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRNE_D))] ++ "ISA_HAS_LSX" ++ 
"vfrintrne.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfrintrz_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRZ_S))] ++ "ISA_HAS_LSX" ++ "vfrintrz.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrz_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRZ_D))] ++ "ISA_HAS_LSX" ++ "vfrintrz.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfrintrp_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRP_S))] ++ "ISA_HAS_LSX" ++ "vfrintrp.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrp_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRP_D))] ++ "ISA_HAS_LSX" ++ "vfrintrp.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++(define_insn "lsx_vfrintrm_s" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRM_S))] ++ "ISA_HAS_LSX" ++ "vfrintrm.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "lsx_vfrintrm_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINTRM_D))] ++ "ISA_HAS_LSX" ++ "vfrintrm.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++;; Offset load and broadcast ++(define_expand "lsx_vldrepl_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vldrepl__insn ++ (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_insn "lsx_vldrepl__insn" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (mem: (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "aq12_operand" )))))] ++ "ISA_HAS_LSX" ++{ ++ return "vldrepl.\t%w0,%1,%2"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++;; Offset store by sel ++(define_expand "lsx_vstelm_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand 3 "const__operand") ++ (match_operand 2 "aq8_operand") ++ (match_operand 1 "pmode_register_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vstelm__insn ++ (operands[1], operands[2], operands[0], operands[3])); ++ DONE; ++}) ++ ++(define_insn "lsx_vstelm__insn" ++ [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r") ++ (match_operand 1 "aq8_operand" ))) ++ (vec_select: ++ (match_operand:LSX 2 "register_operand" "f") ++ (parallel [(match_operand 3 "const__operand" "")])))] ++ ++ "ISA_HAS_LSX" ++{ ++ return "vstelm.\t%w2,%0,%1,%3"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++(define_expand "lsx_vld" ++ [(match_operand:V16QI 0 "register_operand") ++ (match_operand 1 "pmode_register_operand") ++ (match_operand 2 "aq12b_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx addr = plus_constant (GET_MODE (operands[1]), operands[1], ++ INTVAL (operands[2])); ++ 
loongarch_emit_move (operands[0], gen_rtx_MEM (V16QImode, addr));
++  DONE;
++})
++
++(define_expand "lsx_vst"
++  [(match_operand:V16QI 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq12b_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (gen_rtx_MEM (V16QImode, addr), operands[0]);
++  DONE;
++})
++
++(define_insn "lsx_vssrln__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++		  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++		 UNSPEC_LSX_VSSRLN))]
++  "ISA_HAS_LSX"
++  "vssrln..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++
++(define_insn "lsx_vssrlrn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++		  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++		 UNSPEC_LSX_VSSRLRN))]
++  "ISA_HAS_LSX"
++  "vssrlrn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "vorn3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(ior:ILSX (not:ILSX (match_operand:ILSX 2 "register_operand" "f"))
++		  (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vorn.v\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vldi"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand 1 "const_imm13_operand")]
++		     UNSPEC_LSX_VLDI))]
++  "ISA_HAS_LSX"
++{
++  HOST_WIDE_INT val = INTVAL (operands[1]);
++  if (val < 0)
++    {
++      HOST_WIDE_INT mode_val = (val & 0xf00) >> 8;
++      if (mode_val >= 13)
++	sorry ("for const_imm13_operand, only 0000 ~ 1100 in bits 12...9 "
++	       "are supported when bit 13 is 1");
++    }
++  return "vldi\t%w0,%1";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vshuf_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")
++		       (match_operand:V16QI 2 "register_operand" "f")
++		       (match_operand:V16QI 3 "register_operand" "f")]
++		      UNSPEC_LSX_VSHUF_B))]
++  "ISA_HAS_LSX"
++  "vshuf.b\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vldx"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(unspec:V16QI [(match_operand:DI 1 "register_operand" "r")
++		       (match_operand:DI 2 "reg_or_0_operand" "rJ")]
++		      UNSPEC_LSX_VLDX))]
++  "ISA_HAS_LSX"
++{
++  return "vldx\t%w0,%1,%z2";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vstx"
++  [(set (mem:V16QI (plus:DI (match_operand:DI 1 "register_operand" "r")
++			    (match_operand:DI 2 "reg_or_0_operand" "rJ")))
++	(unspec:V16QI [(match_operand:V16QI 0 "register_operand" "f")]
++		      UNSPEC_LSX_VSTX))]
++
++  "ISA_HAS_LSX"
++{
++  return "vstx\t%w0,%1,%z2";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "DI")])
++
++(define_insn "lsx_vextl_qu_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTL_QU_DU))]
++  "ISA_HAS_LSX"
++  "vextl.qu.du\t%w0,%w1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vseteqz_v"
++  [(set (match_operand:FCC 0 "register_operand" "=z")
++	(eq:FCC
++	  (unspec:SI [(match_operand:V16QI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VSETEQZ_V)
++
(match_operand:SI 2 "const_0_operand")))] ++ "ISA_HAS_LSX" ++{ ++ return "vseteqz.v\t%0,%1"; ++} ++ [(set_attr "type" "simd_fcmp") ++ (set_attr "mode" "FCC")]) +diff --git a/gcc/config/loongarch/lsx2.md b/gcc/config/loongarch/lsx2.md +new file mode 100644 +index 000000000..2f56acfc4 +--- /dev/null ++++ b/gcc/config/loongarch/lsx2.md +@@ -0,0 +1,1091 @@ ++;; Machine Description for LARCH Loongson SX ASE ++;; ++;; Copyright (C) 2018 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++;; ++ ++(define_mode_attr d2lsxfmt ++ [(V4SI "q") ++ (V8HI "d") ++ (V16QI "w")]) ++ ++(define_mode_attr d2lsxfmt_u ++ [(V4SI "qu") ++ (V8HI "du") ++ (V16QI "wu")]) ++ ++;; The attribute gives two double modes for vector modes. ++(define_mode_attr VD2MODE ++ [(V4SI "V2DI") ++ (V8HI "V2DI") ++ (V16QI "V4SI")]) ++ ++(define_c_enum "unspec" [ ++ UNSPEC_LSX_VADDWEV ++ UNSPEC_LSX_VADDWEV2 ++ UNSPEC_LSX_VADDWEV3 ++ UNSPEC_LSX_VADDWOD ++ UNSPEC_LSX_VADDWOD2 ++ UNSPEC_LSX_VADDWOD3 ++ UNSPEC_LSX_VSUBWEV ++ UNSPEC_LSX_VSUBWEV2 ++ UNSPEC_LSX_VSUBWOD ++ UNSPEC_LSX_VSUBWOD2 ++ UNSPEC_LSX_VMULWEV ++ UNSPEC_LSX_VMULWEV2 ++ UNSPEC_LSX_VMULWEV3 ++ UNSPEC_LSX_VMULWOD ++ UNSPEC_LSX_VMULWOD2 ++ UNSPEC_LSX_VMULWOD3 ++ UNSPEC_LSX_VHADDW_Q_D ++ UNSPEC_LSX_VHADDW_QU_DU ++ UNSPEC_LSX_VHSUBW_Q_D ++ UNSPEC_LSX_VHSUBW_QU_DU ++ UNSPEC_LSX_VMADDWEV ++ UNSPEC_LSX_VMADDWEV2 ++ UNSPEC_LSX_VMADDWEV3 ++ UNSPEC_LSX_VMADDWOD ++ UNSPEC_LSX_VMADDWOD2 ++ UNSPEC_LSX_VMADDWOD3 ++ UNSPEC_LSX_VROTR ++ UNSPEC_LSX_VADD_Q ++ UNSPEC_LSX_VSUB_Q ++ UNSPEC_LSX_VEXTH_Q_D ++ UNSPEC_LSX_VEXTH_QU_DU ++ UNSPEC_LSX_VMSKGEZ ++ UNSPEC_LSX_VMSKNZ ++ UNSPEC_LSX_VROTRI ++ UNSPEC_LSX_VEXTL_Q_D ++ UNSPEC_LSX_VSRLNI ++ UNSPEC_LSX_VSRLRNI ++ UNSPEC_LSX_VSSRLNI ++ UNSPEC_LSX_VSSRLNI2 ++ UNSPEC_LSX_VSSRLRNI ++ UNSPEC_LSX_VSSRLRNI2 ++ UNSPEC_LSX_VSRANI ++ UNSPEC_LSX_VSRARNI ++ UNSPEC_LSX_VSSRANI ++ UNSPEC_LSX_VSSRANI2 ++ UNSPEC_LSX_VSSRARNI ++ UNSPEC_LSX_VSSRARNI2 ++ UNSPEC_LSX_VPERMI ++]) ++ ++(define_insn "lsx_vwev_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addsubmul:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)])))))] ++ "ISA_HAS_LSX" ++ "vwev.d.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwev_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addsubmul:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LSX" ++ 
"vwev.w.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwev_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addsubmul:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LSX" ++ "vwev.h.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vwod_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addsubmul:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)])))))] ++ "ISA_HAS_LSX" ++ "vwod.d.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwod_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addsubmul:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LSX" ++ "vwod.w.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwod_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addsubmul:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LSX" ++ "vwod.h.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vwev_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addmul:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)])))))] ++ "ISA_HAS_LSX" ++ "vwev.d.wu.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwev_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addmul:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LSX" ++ "vwev.w.hu.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") 
++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwev_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addmul:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)])))))] ++ "ISA_HAS_LSX" ++ "vwev.h.bu.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vwod_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (addmul:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)])))))] ++ "ISA_HAS_LSX" ++ "vwod.d.wu.w\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vwod_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (addmul:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)])))))] ++ "ISA_HAS_LSX" ++ "vwod.w.hu.h\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vwod_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (addmul:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)])))))] ++ "ISA_HAS_LSX" ++ "vwod.h.bu.b\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vaddwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWEV))] ++ "ISA_HAS_LSX" ++ "vaddwev.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWEV2))] ++ "ISA_HAS_LSX" ++ "vaddwev.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWOD))] ++ "ISA_HAS_LSX" ++ "vaddwod.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn 
"lsx_vaddwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWOD2))] ++ "ISA_HAS_LSX" ++ "vaddwod.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWEV))] ++ "ISA_HAS_LSX" ++ "vsubwev.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWEV2))] ++ "ISA_HAS_LSX" ++ "vsubwev.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWOD))] ++ "ISA_HAS_LSX" ++ "vsubwod.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsubwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUBWOD2))] ++ "ISA_HAS_LSX" ++ "vsubwod.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwev_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWEV3))] ++ "ISA_HAS_LSX" ++ "vaddwev.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vaddwod_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADDWOD3))] ++ "ISA_HAS_LSX" ++ "vaddwod.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwev_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWEV3))] ++ "ISA_HAS_LSX" ++ "vmulwev.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwod_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWOD3))] ++ "ISA_HAS_LSX" ++ "vmulwod.q.du.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWEV))] ++ "ISA_HAS_LSX" ++ "vmulwev.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwev_q_du" ++ [(set (match_operand:V2DI 0 
"register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWEV2))] ++ "ISA_HAS_LSX" ++ "vmulwev.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWOD))] ++ "ISA_HAS_LSX" ++ "vmulwod.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmulwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VMULWOD2))] ++ "ISA_HAS_LSX" ++ "vmulwod.q.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhaddw_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHADDW_Q_D))] ++ "ISA_HAS_LSX" ++ "vhaddw.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhaddw_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHADDW_QU_DU))] ++ "ISA_HAS_LSX" ++ "vhaddw.qu.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhsubw_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHSUBW_Q_D))] ++ "ISA_HAS_LSX" ++ "vhsubw.q.d\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vhsubw_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VHSUBW_QU_DU))] ++ "ISA_HAS_LSX" ++ "vhsubw.qu.du\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.d.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.w.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) 
++ ++(define_insn "lsx_vmaddwev_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.h.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwod_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.d.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.w.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwod_h_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.h.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwev_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.d.wu.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) 
++ (const_int 4) (const_int 6)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.w.hu.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwev_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6) ++ (const_int 8) (const_int 10) ++ (const_int 12) (const_int 14)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwev.h.bu.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwod_d_wu_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (plus:V2DI ++ (match_operand:V2DI 1 "register_operand" "0") ++ (mult:V2DI ++ (zero_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3)]))) ++ (sign_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.d.wu.w\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_w_hu_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (plus:V4SI ++ (match_operand:V4SI 1 "register_operand" "0") ++ (mult:V4SI ++ (zero_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))) ++ (sign_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.w.hu.h\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vmaddwod_h_bu_b" ++ [(set (match_operand:V8HI 0 "register_operand" "=f") ++ (plus:V8HI ++ (match_operand:V8HI 1 "register_operand" "0") ++ (mult:V8HI ++ (zero_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 2 "register_operand" "%f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))) ++ (sign_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 3 "register_operand" "f") ++ (parallel [(const_int 1) (const_int 3) ++ (const_int 5) (const_int 7) ++ (const_int 9) (const_int 11) ++ (const_int 13) (const_int 15)]))))))] ++ "ISA_HAS_LSX" ++ "vmaddwod.h.bu.b\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_fmadd") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vmaddwev_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWEV))] ++ "ISA_HAS_LSX" ++ "vmaddwev.q.d\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI 
[(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWOD))] ++ "ISA_HAS_LSX" ++ "vmaddwod.q.d\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWEV2))] ++ "ISA_HAS_LSX" ++ "vmaddwev.q.du\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_q_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWOD2))] ++ "ISA_HAS_LSX" ++ "vmaddwod.q.du\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwev_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWEV3))] ++ "ISA_HAS_LSX" ++ "vmaddwev.q.du.d\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmaddwod_q_du_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") ++ (match_operand:V2DI 2 "register_operand" "f") ++ (match_operand:V2DI 3 "register_operand" "f")] ++ UNSPEC_LSX_VMADDWOD3))] ++ "ISA_HAS_LSX" ++ "vmaddwod.q.du.d\t%w0,%w2,%w3" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vrotr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand:ILSX 2 "register_operand" "f")] ++ UNSPEC_LSX_VROTR))] ++ "ISA_HAS_LSX" ++ "vrotr.\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vadd_q" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VADD_Q))] ++ "ISA_HAS_LSX" ++ "vadd.q\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsub_q" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f") ++ (match_operand:V2DI 2 "register_operand" "f")] ++ UNSPEC_LSX_VSUB_Q))] ++ "ISA_HAS_LSX" ++ "vsub.q\t%w0,%w1,%w2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vmskgez_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")] ++ UNSPEC_LSX_VMSKGEZ))] ++ "ISA_HAS_LSX" ++ "vmskgez.b\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vmsknz_b" ++ [(set (match_operand:V16QI 0 "register_operand" "=f") ++ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")] ++ UNSPEC_LSX_VMSKNZ))] ++ "ISA_HAS_LSX" ++ "vmsknz.b\t%w0,%w1" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V16QI")]) ++ ++(define_insn "lsx_vexth_h_b" ++ [(set (match_operand:V8HI 0 
"register_operand" "=f") ++ (any_extend:V8HI ++ (vec_select:V8QI ++ (match_operand:V16QI 1 "register_operand" "f") ++ (parallel [(const_int 8) (const_int 9) ++ (const_int 10) (const_int 11) ++ (const_int 12) (const_int 13) ++ (const_int 14) (const_int 15)]))))] ++ "ISA_HAS_LSX" ++ "vexth.h.b\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V8HI")]) ++ ++(define_insn "lsx_vexth_w_h" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (any_extend:V4SI ++ (vec_select:V4HI ++ (match_operand:V8HI 1 "register_operand" "f") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] ++ "ISA_HAS_LSX" ++ "vexth.w.h\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V4SI")]) ++ ++(define_insn "lsx_vexth_d_w" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (any_extend:V2DI ++ (vec_select:V2SI ++ (match_operand:V4SI 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LSX" ++ "vexth.d.w\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vexth_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTH_Q_D))] ++ "ISA_HAS_LSX" ++ "vexth.q.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vexth_qu_du" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTH_QU_DU))] ++ "ISA_HAS_LSX" ++ "vexth.qu.du\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vrotri_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VROTRI))] ++ "ISA_HAS_LSX" ++ "vrotri.\t%w0,%w1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vextl_q_d" ++ [(set (match_operand:V2DI 0 "register_operand" "=f") ++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")] ++ UNSPEC_LSX_VEXTL_Q_D))] ++ "ISA_HAS_LSX" ++ "vextl.q.d\t%w0,%w1" ++ [(set_attr "type" "simd_fcvt") ++ (set_attr "mode" "V2DI")]) ++ ++(define_insn "lsx_vsrlni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRLNI))] ++ "ISA_HAS_LSX" ++ "vsrlni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrlrni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRLRNI))] ++ "ISA_HAS_LSX" ++ "vsrlrni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLNI))] ++ "ISA_HAS_LSX" ++ "vssrlni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 
2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLNI2))] ++ "ISA_HAS_LSX" ++ "vssrlni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlrni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLRNI))] ++ "ISA_HAS_LSX" ++ "vssrlrni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrlrni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRLRNI2))] ++ "ISA_HAS_LSX" ++ "vssrlrni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrani__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRANI))] ++ "ISA_HAS_LSX" ++ "vsrani..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vsrarni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSRARNI))] ++ "ISA_HAS_LSX" ++ "vsrarni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrani__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRANI))] ++ "ISA_HAS_LSX" ++ "vssrani..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrani__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRANI2))] ++ "ISA_HAS_LSX" ++ "vssrani..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRARNI))] ++ "ISA_HAS_LSX" ++ "vssrarni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vssrarni__" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VSSRARNI2))] ++ "ISA_HAS_LSX" ++ "vssrarni..\t%w0,%w2,%3" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ ++(define_insn "lsx_vpermi_w" ++ [(set (match_operand:V4SI 0 "register_operand" "=f") ++ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") ++ (match_operand:V4SI 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LSX_VPERMI))] ++ "ISA_HAS_LSX" ++ 
"vpermi.w\t%w0,%w2,%3" ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "V4SI")]) ++ +diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h +new file mode 100644 +index 000000000..fe3043e3d +--- /dev/null ++++ b/gcc/config/loongarch/lsxintrin.h +@@ -0,0 +1,4980 @@ ++/* LARCH Loongson SX intrinsics include file. ++ ++ Copyright (C) 2018 Free Software Foundation, Inc. ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#ifndef _GCC_LOONGSON_SXINTRIN_H ++#define _GCC_LOONGSON_SXINTRIN_H 1 ++ ++#if defined(__loongarch_sx) ++typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); ++typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); ++typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); ++typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); ++typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); ++typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); ++typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); ++typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); ++typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); ++typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); ++typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); ++ ++typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsll_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsll_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vslli_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vslli_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vslli_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vslli_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vslli_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslli_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vslli_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vslli_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsra_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsra_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrai_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrai_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrai_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrai_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. 
*/ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrai_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrai_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsrai_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrai_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrar_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrar_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrari_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrari_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrari_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrari_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrari_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrari_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsrari_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrari_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrl_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrl_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrli_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrli_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrli_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrli_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrli_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrli_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsrli_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrli_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlr_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlr_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsrlri_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsrlri_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsrlri_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsrlri_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsrlri_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsrlri_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. 
*/ ++#define __lsx_vsrlri_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsrlri_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_b((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitclr_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitclr_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vbitclri_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vbitclri_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vbitclri_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vbitclri_h((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vbitclri_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbitclri_w((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vbitclri_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vbitclri_d((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_b((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitset_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitset_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vbitseti_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vbitseti_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vbitseti_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vbitseti_h((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vbitseti_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbitseti_w((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vbitseti_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vbitseti_d((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_b((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitrev_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vbitrev_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vbitrevi_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vbitrevi_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vbitrevi_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vbitrevi_h((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vbitrevi_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbitrevi_w((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vbitrevi_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vbitrevi_d((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vaddi_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_bu((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vaddi_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_hu((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vaddi_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_wu((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vaddi_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vaddi_du((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. 
*/ ++#define __lsx_vsubi_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_bu((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsubi_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_hu((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsubi_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_wu((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsubi_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsubi_du((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vmaxi_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vmaxi_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vmaxi_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vmaxi_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmaxi_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmax_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmax_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vmaxi_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_bu((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vmaxi_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_hu((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vmaxi_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_wu((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vmaxi_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmaxi_du((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vmini_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vmini_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. 
*/ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vmini_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vmini_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vmini_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmin_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmin_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vmini_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_bu((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vmini_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_hu((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vmini_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_wu((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vmini_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vmini_du((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vseq_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vseq_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vseqi_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vseqi_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vseqi_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vseqi_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vseqi_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vslti_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vslti_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vslti_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. 
*/ ++#define __lsx_vslti_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslti_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vslt_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vslt_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, UV16QI, UQI. */ ++#define __lsx_vslti_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_bu((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, UV8HI, UQI. */ ++#define __lsx_vslti_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_hu((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, UV4SI, UQI. */ ++#define __lsx_vslti_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_wu((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, UV2DI, UQI. */ ++#define __lsx_vslti_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslti_du((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V16QI, V16QI, QI. */ ++#define __lsx_vslei_b(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V8HI, V8HI, QI. */ ++#define __lsx_vslei_h(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V4SI, V4SI, QI. */ ++#define __lsx_vslei_w(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, si5. */ ++/* Data types in instruction templates: V2DI, V2DI, QI. */ ++#define __lsx_vslei_d(/*__m128i*/ _1, /*si5*/ _2) ((__m128i)__builtin_lsx_vslei_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsle_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsle_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, UV16QI, UQI. */ ++#define __lsx_vslei_bu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_bu((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, UV8HI, UQI. */ ++#define __lsx_vslei_hu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_hu((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, UV4SI, UQI. */ ++#define __lsx_vslei_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_wu((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, UV2DI, UQI. */ ++#define __lsx_vslei_du(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vslei_du((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vsat_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsat_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. 
*/ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vsat_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsat_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vsat_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsat_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vsat_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsat_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vsat_bu(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsat_bu((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UQI. */ ++#define __lsx_vsat_hu(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsat_hu((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UQI. */ ++#define __lsx_vsat_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsat_wu((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UQI. */ ++#define __lsx_vsat_du(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vsat_du((v2u64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadda_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadda_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsadd_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsadd_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavg_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavg_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vavgr_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vavgr_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssub_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssub_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vabsd_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vabsd_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmul_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmul_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_b((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_h((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_w((v4i32)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmadd_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmadd_d((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_b((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_h((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_w((v4i32)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsub_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmsub_d((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
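*/
++
++/* Usage sketch: the three-operand __lsx_vmadd_* / __lsx_vmsub_* forms take
++   the accumulator as the first argument and return acc +/- x * y, keeping
++   the low bits of each lane product.  An illustrative dot-product step
++   (names are hypothetical): */
++static __m128i dot_step_i32(__m128i acc, __m128i x, __m128i y)
++{
++  /* acc[i] += x[i] * y[i] across four 32-bit lanes. */
++  return __lsx_vmadd_w(acc, x, y);
++}
++
++/* Unsigned division follows. 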
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vdiv_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vdiv_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_hu_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_hu_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_wu_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_wu_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_du_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_du_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
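*/
++
++/* Usage sketch: __lsx_vhaddw_h_b adds the odd-indexed byte lanes of its
++   first operand to the even-indexed byte lanes of its second, widening to
++   16 bits.  Passing one vector twice therefore sums adjacent byte pairs,
++   a common first step of a horizontal reduction: */
++static __m128i pairwise_sum_i8(__m128i v)
++{
++  /* Eight 16-bit lanes, each v[2i + 1] + v[2i]. */
++  return __lsx_vhaddw_h_b(v, v);
++}
++
++/* The widening horizontal subtractions continue below. 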
*/ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_hu_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_hu_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_wu_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_wu_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_du_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_du_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmod_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmod_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V16QI, V16QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_b(__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_b((v16i8)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V8HI, V8HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_h(__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_h((v8i16)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V4SI, V4SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_w(__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_w((v4i32)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, rk. */ ++/* Data types in instruction templates: V2DI, V2DI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplve_d(__m128i _1, int _2) ++{ ++ return (__m128i)__builtin_lsx_vreplve_d((v2i64)_1, (int)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vreplvei_b(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vreplvei_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vreplvei_h(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vreplvei_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui2. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vreplvei_w(/*__m128i*/ _1, /*ui2*/ _2) ((__m128i)__builtin_lsx_vreplvei_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui1. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vreplvei_d(/*__m128i*/ _1, /*ui1*/ _2) ((__m128i)__builtin_lsx_vreplvei_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. 
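*/
++
++/* Usage sketch: __lsx_vreplve_* broadcasts the lane picked by a runtime
++   register index, while the __lsx_vreplvei_* macros take the lane number
++   as an immediate, which is why they are macros rather than functions. */
++static __m128i broadcast_lane0_i16(__m128i v)
++{
++  /* Copy 16-bit lane 0 of v into all eight lanes. */
++  return __lsx_vreplvei_h(v, 0);
++}
++
++/* The even-element picks continue below. 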
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickev_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickev_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpickod_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpickod_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvh_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvh_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
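*/
++
++/* Usage sketch: __lsx_vpickev_* gathers the even-indexed lanes of its two
++   operands (second operand filling the low half of the result) and
++   __lsx_vpickod_* the odd-indexed lanes, so the pair de-interleaves mixed
++   streams such as packed real/imaginary data (hypothetical helper): */
++static void split_even_odd_i16(__m128i a, __m128i b,
++                               __m128i *even, __m128i *odd)
++{
++  *even = __lsx_vpickev_h(b, a); /* even lanes: a in low, b in high */
++  *odd = __lsx_vpickod_h(b, a);  /* odd lanes: a in low, b in high */
++}
++
++/* Low-half interleaves follow. 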
*/ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vilvl_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vilvl_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackev_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackev_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
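*/
++
++/* Usage sketch: __lsx_vilvl_* interleaves the low halves of two vectors,
++   placing the second operand's lanes at even positions.  Interleaving
++   with an all-zero vector (supplied by the caller here) is the usual way
++   to zero-extend the low eight bytes to halfwords: */
++static __m128i zero_extend_low_u8(__m128i zero, __m128i v)
++{
++  /* Result byte 2i = v[i], byte 2i + 1 = 0, for i = 0..7. */
++  return __lsx_vilvl_b(zero, v);
++}
++
++/* The odd-element packs resume here. 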
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpackod_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vpackod_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_h((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_w((v4i32)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_d((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vand_v(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vand_v((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vandi_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vandi_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vor_v(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vor_v((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vori_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vori_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vnor_v(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vnor_v((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vnori_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vnori_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vxor_v(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vxor_v((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UQI. */ ++#define __lsx_vxori_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vxori_b((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vbitsel_v(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vbitsel_v((v16u8)_1, (v16u8)_2, (v16u8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI, USI. */ ++#define __lsx_vbitseli_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vbitseli_b((v16u8)(_1), (v16u8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V16QI, V16QI, USI. */ ++#define __lsx_vshuf4i_b(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vshuf4i_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V8HI, V8HI, USI. */ ++#define __lsx_vshuf4i_h(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vshuf4i_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V4SI, V4SI, USI. */ ++#define __lsx_vshuf4i_w(/*__m128i*/ _1, /*ui8*/ _2) ((__m128i)__builtin_lsx_vshuf4i_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V16QI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_b(int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_b((int)_1); ++} ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V8HI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_h(int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_h((int)_1); ++} ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V4SI, SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_w(int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_w((int)_1); ++} ++ ++/* Assembly instruction format: vd, rj. */ ++/* Data types in instruction templates: V2DI, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vreplgr2vr_d(long int _1) ++{ ++ return (__m128i)__builtin_lsx_vreplgr2vr_d((long int)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_h(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. 
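*/
++
++/* Usage sketch: __lsx_vbitsel_v is a bitwise select, (_1 & ~_3) | (_2 & _3):
++   mask bits set to 1 take the corresponding bit of the second operand,
++   clear bits keep the first.  With a lane-wide mask, e.g. produced by a
++   compare, it acts as a per-lane blend: */
++static __m128i blend_by_mask(__m128i if_clear, __m128i if_set, __m128i mask)
++{
++  return __lsx_vbitsel_v(if_clear, if_set, mask);
++}
++
++/* The population-count family continues below. 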
*/ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_w(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vpcnt_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vpcnt_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_h(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_w(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclo_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclo_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_h(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_w(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vclz_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vclz_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: rd, vj, ui4. */ ++/* Data types in instruction templates: SI, V16QI, UQI. */ ++#define __lsx_vpickve2gr_b(/*__m128i*/ _1, /*ui4*/ _2) ((int)__builtin_lsx_vpickve2gr_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui3. */ ++/* Data types in instruction templates: SI, V8HI, UQI. */ ++#define __lsx_vpickve2gr_h(/*__m128i*/ _1, /*ui3*/ _2) ((int)__builtin_lsx_vpickve2gr_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui2. */ ++/* Data types in instruction templates: SI, V4SI, UQI. */ ++#define __lsx_vpickve2gr_w(/*__m128i*/ _1, /*ui2*/ _2) ((int)__builtin_lsx_vpickve2gr_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui1. 
*/ ++/* Data types in instruction templates: DI, V2DI, UQI. */ ++#define __lsx_vpickve2gr_d(/*__m128i*/ _1, /*ui1*/ _2) ((long int)__builtin_lsx_vpickve2gr_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui4. */ ++/* Data types in instruction templates: USI, V16QI, UQI. */ ++#define __lsx_vpickve2gr_bu(/*__m128i*/ _1, /*ui4*/ _2) ((unsigned int)__builtin_lsx_vpickve2gr_bu((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui3. */ ++/* Data types in instruction templates: USI, V8HI, UQI. */ ++#define __lsx_vpickve2gr_hu(/*__m128i*/ _1, /*ui3*/ _2) ((unsigned int)__builtin_lsx_vpickve2gr_hu((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui2. */ ++/* Data types in instruction templates: USI, V4SI, UQI. */ ++#define __lsx_vpickve2gr_wu(/*__m128i*/ _1, /*ui2*/ _2) ((unsigned int)__builtin_lsx_vpickve2gr_wu((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: rd, vj, ui1. */ ++/* Data types in instruction templates: UDI, V2DI, UQI. */ ++#define __lsx_vpickve2gr_du(/*__m128i*/ _1, /*ui1*/ _2) ((unsigned long int)__builtin_lsx_vpickve2gr_du((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, SI, UQI. */ ++#define __lsx_vinsgr2vr_b(/*__m128i*/ _1, /*int*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_b((v16i8)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, ui3. */ ++/* Data types in instruction templates: V8HI, V8HI, SI, UQI. */ ++#define __lsx_vinsgr2vr_h(/*__m128i*/ _1, /*int*/ _2, /*ui3*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_h((v8i16)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, ui2. */ ++/* Data types in instruction templates: V4SI, V4SI, SI, UQI. */ ++#define __lsx_vinsgr2vr_w(/*__m128i*/ _1, /*int*/ _2, /*ui2*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_w((v4i32)(_1), (int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, ui1. */ ++/* Data types in instruction templates: V2DI, V2DI, DI, UQI. */ ++#define __lsx_vinsgr2vr_d(/*__m128i*/ _1, /*long int*/ _2, /*ui1*/ _3) ((__m128i)__builtin_lsx_vinsgr2vr_d((v2i64)(_1), (long int)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfadd_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfadd_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfadd_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfadd_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfsub_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfsub_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfsub_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfsub_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. 
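*/
++
++/* Usage sketch: the __lsx_vpickve2gr_* / __lsx_vinsgr2vr_* macros move one
++   lane between a vector and a general-purpose register; the lane index
++   must be a compile-time constant.  A hypothetical example: */
++static __m128i replace_lane0_i32(__m128i v, int s)
++{
++  int old = __lsx_vpickve2gr_w(v, 0); /* read 32-bit lane 0 */
++  (void)old;                          /* e.g. inspect it before replacing */
++  return __lsx_vinsgr2vr_w(v, s, 0);  /* write s into lane 0 */
++}
++
++/* Single-precision multiplication follows. 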
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmul_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmul_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmul_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmul_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfdiv_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfdiv_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfdiv_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfdiv_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcvt_h_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcvt_h_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfcvt_s_d(__m128d _1, __m128d _2) ++{ ++ return (__m128)__builtin_lsx_vfcvt_s_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmin_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmin_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmin_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmin_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmina_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmina_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmina_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmina_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmax_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmax_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. 
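*/
++
++/* Usage sketch: the vf* intrinsics mirror scalar IEEE arithmetic per lane;
++   a multiply-add built from the two definitions above (the product and the
++   sum are rounded separately, unlike a fused operation): */
++static __m128 mul_add_f32(__m128 a, __m128 b, __m128 c)
++{
++  return __lsx_vfadd_s(__lsx_vfmul_s(a, b), c);
++}
++
++/* The double-precision maximum follows. 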
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmax_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmax_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmaxa_s(__m128 _1, __m128 _2) ++{ ++ return (__m128)__builtin_lsx_vfmaxa_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmaxa_d(__m128d _1, __m128d _2) ++{ ++ return (__m128d)__builtin_lsx_vfmaxa_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfclass_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vfclass_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfclass_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vfclass_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfsqrt_s(__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfsqrt_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfsqrt_d(__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfsqrt_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrecip_s(__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrecip_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrecip_d(__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrecip_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrint_s(__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrint_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrint_d(__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrint_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfrsqrt_s(__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vfrsqrt_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfrsqrt_d(__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vfrsqrt_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vflogb_s(__m128 _1) ++{ ++ return (__m128)__builtin_lsx_vflogb_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vflogb_d(__m128d _1) ++{ ++ return (__m128d)__builtin_lsx_vflogb_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfcvth_s_h(__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vfcvth_s_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfcvth_d_s(__m128 _1) ++{ ++ return (__m128d)__builtin_lsx_vfcvth_d_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfcvtl_s_h(__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vfcvtl_s_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfcvtl_d_s(__m128 _1) ++{ ++ return (__m128d)__builtin_lsx_vfcvtl_d_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_w_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_w_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_l_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_l_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_wu_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_wu_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_lu_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftint_lu_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_w_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_w_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. 
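*/
++
++/* Usage sketch: __lsx_vftint_w_s converts four floats to 32-bit integers
++   using the current floating-point rounding mode (round-to-nearest-even by
++   default), while the __lsx_vftintrz_* forms truncate toward zero, which
++   matches a C cast: */
++static __m128i float_to_int_trunc(__m128 v)
++{
++  /* (int)v[i] for each of the four lanes. */
++  return __lsx_vftintrz_w_s(v);
++}
++
++/* The truncating conversions continue below. 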
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_l_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_l_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_wu_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_wu_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_lu_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_lu_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vffint_s_w(__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vffint_s_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffint_d_l(__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffint_d_l((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SF, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vffint_s_wu(__m128i _1) ++{ ++ return (__m128)__builtin_lsx_vffint_s_wu((v4u32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffint_d_lu(__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffint_d_lu((v2u64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vandn_v(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vandn_v((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_h(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_w(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vneg_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vneg_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmuh_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmuh_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V8HI, V16QI, UQI. */ ++#define __lsx_vsllwil_h_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsllwil_h_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V4SI, V8HI, UQI. */ ++#define __lsx_vsllwil_w_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsllwil_w_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V2DI, V4SI, UQI. */ ++#define __lsx_vsllwil_d_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsllwil_d_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: UV8HI, UV16QI, UQI. */ ++#define __lsx_vsllwil_hu_bu(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vsllwil_hu_bu((v16u8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV4SI, UV8HI, UQI. 
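*/
++
++/* Usage sketch: __lsx_vmuh_* keeps the high half of each widened lane
++   product, i.e. (x * y) >> 32 for the _w/_wu forms, the core of
++   fixed-point scaling without an explicit widening step: */
++static __m128i scale_q32_u32(__m128i x, __m128i scale)
++{
++  /* x[i] * scale[i] / 2^32 per lane, scale being a 0.32 fraction. */
++  return __lsx_vmuh_wu(x, scale);
++}
++
++/* The remaining widening-shift macros follow. 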
*/ ++#define __lsx_vsllwil_wu_hu(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vsllwil_wu_hu((v8u16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV2DI, UV4SI, UQI. */ ++#define __lsx_vsllwil_du_wu(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vsllwil_du_wu((v4u32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsran_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsran_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsran_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsran_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsran_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsran_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_bu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_bu_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_hu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_hu_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssran_wu_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssran_wu_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. 
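*/
++
++/* Usage sketch: the narrowing shifts take per-lane shift counts from their
++   second operand, and the __lsx_vssran_* forms also saturate to the
++   narrower type, a typical last step of fixed-point arithmetic (the caller
++   is assumed to have splatted the shift count into `counts`): */
++static __m128i narrow_sat_i16_to_i8(__m128i acc, __m128i counts)
++{
++  /* Low 8 bytes = saturate(acc[i] >> counts[i]); the high half is zeroed. */
++  return __lsx_vssran_b_h(acc, counts);
++}
++
++/* The rounding variants follow. 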
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrarn_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrarn_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrarn_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrarn_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrarn_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrarn_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_bu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_bu_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_hu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_hu_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrarn_wu_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrarn_wu_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrln_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrln_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrln_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrln_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
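++
++   The vsrarn/vssrarn forms above are the rounding counterparts of
++   vsran/vssran: a rounding increment is applied before narrowing.
++   Sketch, with `a` and `b` as assumed __m128i inputs:
++
++     __m128i rounded = __lsx_vsrarn_b_h(a, b);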
*/ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrln_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrln_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_bu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_bu_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_hu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_hu_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_wu_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_wu_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlrn_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlrn_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlrn_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlrn_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsrlrn_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsrlrn_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV16QI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_bu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_bu_h((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_hu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_hu_w((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_wu_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_wu_d((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, UQI. 
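++
++   The vsrln/vsrlrn families mirror the arithmetic narrows but shift in
++   zeros, and their saturating forms yield unsigned results, hence the
++   _bu/_hu/_wu suffixes.  Sketch, with `a` and `b` assumed to hold
++   unsigned halfwords:
++
++     __m128i narrowed = __lsx_vssrln_bu_h(a, b);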
*/ ++#define __lsx_vfrstpi_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vfrstpi_b((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, UQI. */ ++#define __lsx_vfrstpi_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vfrstpi_h((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrstp_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vfrstp_b((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrstp_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vfrstp_h((v8i16)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vshuf4i_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vshuf4i_d((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vbsrl_v(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbsrl_v((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vbsll_v(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vbsll_v((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vextrins_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_b((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vextrins_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_h((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vextrins_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_w((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vextrins_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vextrins_d((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_h(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SI. 
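++
++   Sketch for the mask intrinsics; `v` is an assumed __m128i of signed
++   words:
++
++     __m128i m = __lsx_vmskltz_w(v);
++
++   The per-element sign bits appear packed into the low bits of the
++   result, giving a compact less-than-zero mask.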
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_w(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskltz_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskltz_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsigncov_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsigncov_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmadd_s(__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfmadd_s((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmadd_d(__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfmadd_d((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfmsub_s(__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfmsub_s((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfmsub_d(__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfmsub_d((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. 
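++
++   Sketch of the fused multiply-add family; `a`, `b` and `c` are assumed
++   __m128 single-precision vectors:
++
++     __m128 r = __lsx_vfmadd_s(a, b, c);
++
++   Each lane computes a * b + c with a single rounding, in the usual
++   fused sense; the vfmsub/vfnmadd/vfnmsub variants flip the signs of
++   the product and/or the addend.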
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfnmadd_s(__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfnmadd_s((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfnmadd_d(__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfnmadd_d((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V4SF, V4SF, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vfnmsub_s(__m128 _1, __m128 _2, __m128 _3) ++{ ++ return (__m128)__builtin_lsx_vfnmsub_s((v4f32)_1, (v4f32)_2, (v4f32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V2DF, V2DF, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vfnmsub_d(__m128d _1, __m128d _2, __m128d _3) ++{ ++ return (__m128d)__builtin_lsx_vfnmsub_d((v2f64)_1, (v2f64)_2, (v2f64)_3); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrne_w_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrne_w_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrne_l_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrne_l_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrp_w_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrp_w_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrp_l_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrp_l_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrm_w_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrm_w_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrm_l_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrm_l_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftint_w_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftint_w_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SF, V2DI, V2DI. 
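++
++   vffint converts in the opposite direction, integer to floating
++   point.  Sketch, with `a` and `b` assumed __m128i doubleword vectors:
++
++     __m128 f = __lsx_vffint_s_l(a, b);
++
++   Two doubleword sources narrow into a single vector of four floats.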
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128 __lsx_vffint_s_l(__m128i _1, __m128i _2) ++{ ++ return (__m128)__builtin_lsx_vffint_s_l((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrz_w_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrz_w_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrp_w_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrp_w_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrm_w_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrm_w_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrne_w_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vftintrne_w_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintl_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintl_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftinth_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftinth_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffinth_d_w(__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffinth_d_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DF, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128d __lsx_vffintl_d_w(__m128i _1) ++{ ++ return (__m128d)__builtin_lsx_vffintl_d_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrzl_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrzl_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrzh_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrzh_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. 
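++
++   The vftint* names encode the rounding mode: rne rounds to nearest
++   even, rp toward +inf, rm toward -inf and rz toward zero, while a
++   trailing l or h converts only the low or high half of the source.
++   Sketch, with `f` an assumed __m128:
++
++     __m128i q = __lsx_vftintrzl_l_s(f);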
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrpl_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrpl_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrph_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrph_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrml_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrml_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrmh_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrmh_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrnel_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrnel_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vftintrneh_l_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vftintrneh_l_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrne_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrne_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrne_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrne_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrz_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrz_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrz_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrz_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrp_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrp_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrp_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrp_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V4SF. 
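++
++   The vfrint* intrinsics apply the same rounding-mode suffixes to a
++   round-to-integral operation.  As declared in this header they return
++   __m128i, so callers may need a cast.  Sketch, with `f` an assumed
++   __m128:
++
++     __m128i r = __lsx_vfrintrne_s(f);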
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrm_s(__m128 _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrm_s((v4f32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfrintrm_d(__m128d _1) ++{ ++ return (__m128i)__builtin_lsx_vfrintrm_d((v2f64)_1); ++} ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_b(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_b((v16i8)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V8HI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_h(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_h((v8i16)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V4SI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_w(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_w((v4i32)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, rj, si8, idx. */ ++/* Data types in instruction templates: VOID, V2DI, CVPOINTER, SI, UQI. */ ++#define __lsx_vstelm_d(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) ((void)__builtin_lsx_vstelm_d((v2i64)(_1), (void *)(_2), (_3), (_4))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. 
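++
++   The addw/subw ev and od families widen and combine only the
++   even-indexed or odd-indexed source elements.  Sketch, with `a` and
++   `b` assumed to hold signed bytes:
++
++     __m128i even_sums = __lsx_vaddwev_h_b(a, b);
++     __m128i odd_sums  = __lsx_vaddwod_h_b(a, b);
++
++   Together the two calls cover all sixteen widened sums.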
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_d_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_d_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_w_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_w_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_h_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_h_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_d_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_d_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_w_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_w_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_h_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_h_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_d_wu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_d_wu_w((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_w_hu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_w_hu_h((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_h_bu_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_h_bu_b((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_d_wu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_d_wu_w((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_w_hu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_w_hu_h((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_h_bu_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_h_bu_b((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_d_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_d_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_w_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_w_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_h_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_h_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_d_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_d_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_w_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_w_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_h_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_h_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_q_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_q_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_q_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_q_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. 
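++
++   The q_d forms extend the same even/odd scheme to 128-bit arithmetic:
++   one pair of doublewords widens into a single quadword result.
++   Sketch, with `a` and `b` assumed __m128i doubleword vectors:
++
++     __m128i q = __lsx_vaddwev_q_d(a, b);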
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwev_q_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwev_q_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsubwod_q_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsubwod_q_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwev_q_du_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwev_q_du_d((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vaddwod_q_du_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vaddwod_q_du_d((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_d_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_d_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_w_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_w_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V16QI, V16QI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_h_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_h_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_d_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_d_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_w_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_w_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_h_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_h_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_d_wu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_d_wu((v4u32)_1, (v4u32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_w_hu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_w_hu((v8u16)_1, (v8u16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_h_bu(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_h_bu((v16u8)_1, (v16u8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_d_wu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_d_wu_w((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_w_hu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_w_hu_h((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_h_bu_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_h_bu_b((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV4SI, V4SI. 
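++
++   The _wu_w, _hu_h and _bu_b suffixes mark the mixed-signedness forms:
++   the first operand is treated as unsigned and the second as signed,
++   exactly as the UV/V template pairs indicate.  Sketch, with `u` and
++   `s` assumed __m128i vectors of unsigned and signed words:
++
++     __m128i p = __lsx_vmulwev_d_wu_w(u, s);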
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_d_wu_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_d_wu_w((v4u32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_w_hu_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_w_hu_h((v8u16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_h_bu_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_h_bu_b((v16u8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_q_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_q_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_q_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_q_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwev_q_du_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwev_q_du_d((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmulwod_q_du_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vmulwod_q_du_d((v2u64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. 
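++
++   vhaddw/vhsubw are the horizontal forms: each widened result appears
++   to combine an odd-indexed element of the first operand with the
++   even-indexed element of the second.  Sketch, with `a` and `b`
++   assumed __m128i doubleword vectors:
++
++     __m128i h = __lsx_vhaddw_q_d(a, b);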
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhaddw_qu_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhaddw_qu_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_q_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_q_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vhsubw_qu_du(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vhsubw_qu_du((v2u64)_1, (v2u64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_d_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_d_w((v2i64)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_w_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_w_h((v4i32)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_h_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_h_b((v8i16)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_d_wu(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_d_wu((v2u64)_1, (v4u32)_2, (v4u32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_w_hu(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_w_hu((v4u32)_1, (v8u16)_2, (v8u16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_h_bu(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_h_bu((v8u16)_1, (v16u8)_2, (v16u8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_d_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_d_w((v2i64)_1, (v4i32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V8HI, V8HI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_w_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_w_h((v4i32)_1, (v8i16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_h_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_h_b((v8i16)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV4SI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_d_wu(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_d_wu((v2u64)_1, (v4u32)_2, (v4u32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV4SI, UV4SI, UV8HI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_w_hu(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_w_hu((v4u32)_1, (v8u16)_2, (v8u16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV8HI, UV8HI, UV16QI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_h_bu(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_h_bu((v8u16)_1, (v16u8)_2, (v16u8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_d_wu_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_d_wu_w((v2i64)_1, (v4u32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_w_hu_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_w_hu_h((v4i32)_1, (v8u16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_h_bu_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_h_bu_b((v8i16)_1, (v16u8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_d_wu_w(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_d_wu_w((v2i64)_1, (v4u32)_2, (v4i32)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, UV8HI, V8HI. 
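++
++   The maddw family fuses the widening multiplies above with an
++   accumulate into the first operand, which is why these templates list
++   four types.  Sketch, with `acc` an assumed word accumulator and `a`,
++   `b` assumed signed-halfword vectors:
++
++     acc = __lsx_vmaddwev_w_h(acc, a, b);
++     acc = __lsx_vmaddwod_w_h(acc, a, b);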
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_w_hu_h(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_w_hu_h((v4i32)_1, (v8u16)_2, (v8i16)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, UV16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_h_bu_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_h_bu_b((v8i16)_1, (v16u8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_q_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_q_d((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_q_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_q_d((v2i64)_1, (v2i64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_q_du(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_q_du((v2u64)_1, (v2u64)_2, (v2u64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: UV2DI, UV2DI, UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_q_du(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_q_du((v2u64)_1, (v2u64)_2, (v2u64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwev_q_du_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwev_q_du_d((v2i64)_1, (v2u64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, UV2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmaddwod_q_du_d(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vmaddwod_q_du_d((v2i64)_1, (v2u64)_2, (v2i64)_3); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_b(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_b((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI. 
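++
++   Sketch of the rotates; `v` (words) and `n` (per-element rotate
++   counts) are assumed __m128i inputs:
++
++     __m128i r = __lsx_vrotr_w(v, n);
++
++   The vrotri forms further below take the count as an immediate
++   instead of a vector.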
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vrotr_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vrotr_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vadd_q(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vadd_q((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vsub_q(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vsub_q((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, rj, si12. */ ++/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vldrepl_b((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si11. */ ++/* Data types in instruction templates: V8HI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m128i)__builtin_lsx_vldrepl_h((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si10. */ ++/* Data types in instruction templates: V4SI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m128i)__builtin_lsx_vldrepl_w((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si9. */ ++/* Data types in instruction templates: V2DI, CVPOINTER, SI. */ ++#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m128i)__builtin_lsx_vldrepl_d((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmskgez_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmskgez_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vmsknz_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vmsknz_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V8HI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_h_b(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_h_b((v16i8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V4SI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_w_h(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_w_h((v8i16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V4SI. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_d_w(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_d_w((v4i32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_q_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_q_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV8HI, UV16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_hu_bu(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_hu_bu((v16u8)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV4SI, UV8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_wu_hu(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_wu_hu((v8u16)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, UV4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_du_wu(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_du_wu((v4u32)_1); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vexth_qu_du(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vexth_qu_du((v2u64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, ui3. */ ++/* Data types in instruction templates: V16QI, V16QI, UQI. */ ++#define __lsx_vrotri_b(/*__m128i*/ _1, /*ui3*/ _2) ((__m128i)__builtin_lsx_vrotri_b((v16i8)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V8HI, V8HI, UQI. */ ++#define __lsx_vrotri_h(/*__m128i*/ _1, /*ui4*/ _2) ((__m128i)__builtin_lsx_vrotri_h((v8i16)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V4SI, V4SI, UQI. */ ++#define __lsx_vrotri_w(/*__m128i*/ _1, /*ui5*/ _2) ((__m128i)__builtin_lsx_vrotri_w((v4i32)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V2DI, V2DI, UQI. */ ++#define __lsx_vrotri_d(/*__m128i*/ _1, /*ui6*/ _2) ((__m128i)__builtin_lsx_vrotri_d((v2i64)(_1), (_2))) ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vextl_q_d(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vextl_q_d((v2i64)_1); ++} ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrlni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrlni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. 
*/ ++#define __lsx_vsrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrlni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrlni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrlrni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrlrni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vsrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrlrni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrlrni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vssrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrlni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlni_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrlni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlni_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrlni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlni_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. 
*/ ++#define __lsx_vssrlni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlni_du_q((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlrni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vssrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlrni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlrni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlrni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrlrni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrlrni_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrlrni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrlrni_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrlrni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrlrni_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ ++#define __lsx_vssrlrni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrlrni_du_q((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vsrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrani_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrani_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vsrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrani_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrani_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. 
*/ ++#define __lsx_vsrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vsrarni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vsrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vsrarni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vsrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vsrarni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vsrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vsrarni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrani_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. */ ++#define __lsx_vssrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrani_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrani_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrani_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrani_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrani_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrani_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrani_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrani_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrani_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ ++#define __lsx_vssrani_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrani_du_q((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, USI. */ ++#define __lsx_vssrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrarni_b_h((v16i8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: V8HI, V8HI, V8HI, USI. 
*/ ++#define __lsx_vssrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrarni_h_w((v8i16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vssrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrarni_w_d((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: V2DI, V2DI, V2DI, USI. */ ++#define __lsx_vssrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrarni_d_q((v2i64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui4. */ ++/* Data types in instruction templates: UV16QI, UV16QI, V16QI, USI. */ ++#define __lsx_vssrarni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) ((__m128i)__builtin_lsx_vssrarni_bu_h((v16u8)(_1), (v16i8)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui5. */ ++/* Data types in instruction templates: UV8HI, UV8HI, V8HI, USI. */ ++#define __lsx_vssrarni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) ((__m128i)__builtin_lsx_vssrarni_hu_w((v8u16)(_1), (v8i16)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui6. */ ++/* Data types in instruction templates: UV4SI, UV4SI, V4SI, USI. */ ++#define __lsx_vssrarni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) ((__m128i)__builtin_lsx_vssrarni_wu_d((v4u32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui7. */ ++/* Data types in instruction templates: UV2DI, UV2DI, V2DI, USI. */ ++#define __lsx_vssrarni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) ((__m128i)__builtin_lsx_vssrarni_du_q((v2u64)(_1), (v2i64)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, ui8. */ ++/* Data types in instruction templates: V4SI, V4SI, V4SI, USI. */ ++#define __lsx_vpermi_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) ((__m128i)__builtin_lsx_vpermi_w((v4i32)(_1), (v4i32)(_2), (_3))) ++ ++/* Assembly instruction format: vd, rj, si12. */ ++/* Data types in instruction templates: V16QI, CVPOINTER, SI. */ ++#define __lsx_vld(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vld((void *)(_1), (_2))) ++ ++/* Assembly instruction format: vd, rj, si12. */ ++/* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI. */ ++#define __lsx_vst(/*__m128i*/ _1, /*void **/ _2, /*si12*/ _3) ((void)__builtin_lsx_vst((v16i8)(_1), (void *)(_2), (_3))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrlrn_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrlrn_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V16QI, V8HI, V8HI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_b_h(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_b_h((v8i16)_1, (v8i16)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V8HI, V4SI, V4SI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_h_w(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_h_w((v4i32)_1, (v4i32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V2DI, V2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vssrln_w_d(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vssrln_w_d((v2i64)_1, (v2i64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vorn_v(__m128i _1, __m128i _2) ++{ ++ return (__m128i)__builtin_lsx_vorn_v((v16i8)_1, (v16i8)_2); ++} ++ ++/* Assembly instruction format: vd, i13. */ ++/* Data types in instruction templates: V2DI, HI. */ ++#define __lsx_vldi(/*i13*/ _1) ((__m128i)__builtin_lsx_vldi((_1))) ++ ++/* Assembly instruction format: vd, vj, vk, va. */ ++/* Data types in instruction templates: V16QI, V16QI, V16QI, V16QI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vshuf_b(__m128i _1, __m128i _2, __m128i _3) ++{ ++ return (__m128i)__builtin_lsx_vshuf_b((v16i8)_1, (v16i8)_2, (v16i8)_3); ++} ++ ++/* Assembly instruction format: vd, rj, rk. */ ++/* Data types in instruction templates: V16QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vldx(void * _1, long int _2) ++{ ++ return (__m128i)__builtin_lsx_vldx((void *)_1, (long int)_2); ++} ++ ++/* Assembly instruction format: vd, rj, rk. */ ++/* Data types in instruction templates: VOID, V16QI, CVPOINTER, DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++void __lsx_vstx(__m128i _1, void * _2, long int _3) ++{ ++ return (void)__builtin_lsx_vstx((v16i8)_1, (void *)_2, (long int)_3); ++} ++ ++/* Assembly instruction format: vd, vj. */ ++/* Data types in instruction templates: UV2DI, UV2DI. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vextl_qu_du(__m128i _1) ++{ ++ return (__m128i)__builtin_lsx_vextl_qu_du((v2u64)_1); ++} ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bnz_b(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_b((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV2DI. */ ++#define __lsx_bnz_d(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_d((v2u64)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV8HI. */ ++#define __lsx_bnz_h(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_h((v8u16)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bnz_v(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_v((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. 
*/ ++/* Data types in instruction templates: SI, UV4SI. */ ++#define __lsx_bnz_w(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_w((v4u32)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bz_b(/*__m128i*/ _1) ((int)__builtin_lsx_bz_b((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV2DI. */ ++#define __lsx_bz_d(/*__m128i*/ _1) ((int)__builtin_lsx_bz_d((v2u64)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV8HI. */ ++#define __lsx_bz_h(/*__m128i*/ _1) ((int)__builtin_lsx_bz_h((v8u16)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV16QI. */ ++#define __lsx_bz_v(/*__m128i*/ _1) ((int)__builtin_lsx_bz_v((v16u8)(_1))) ++ ++/* Assembly instruction format: cd, vj. */ ++/* Data types in instruction templates: SI, UV4SI. */ ++#define __lsx_bz_w(/*__m128i*/ _1) ((int)__builtin_lsx_bz_w((v4u32)(_1))) ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_caf_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_caf_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_caf_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_caf_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_ceq_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_ceq_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_ceq_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_ceq_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cle_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cle_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cle_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cle_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_clt_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_clt_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_clt_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_clt_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cne_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cne_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cne_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cne_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cor_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cor_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cor_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cor_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cueq_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cueq_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cueq_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cueq_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cule_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cule_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cule_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cule_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cult_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cult_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cult_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cult_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cun_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cun_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cune_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cune_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cune_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cune_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_cun_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_cun_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_saf_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_saf_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_saf_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_saf_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_seq_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_seq_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_seq_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_seq_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sle_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sle_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sle_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sle_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. 
*/ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_slt_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_slt_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_slt_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_slt_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sne_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sne_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sne_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sne_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sor_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sor_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sor_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sor_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sueq_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sueq_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sueq_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sueq_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sule_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sule_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V4SI, V4SF, V4SF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sule_s(__m128 _1, __m128 _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sule_s((v4f32)_1, (v4f32)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. */ ++/* Data types in instruction templates: V2DI, V2DF, V2DF. */ ++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) ++__m128i __lsx_vfcmp_sult_d(__m128d _1, __m128d _2) ++{ ++ return (__m128i)__builtin_lsx_vfcmp_sult_d((v2f64)_1, (v2f64)_2); ++} ++ ++/* Assembly instruction format: vd, vj, vk. 
*/
++/* Data types in instruction templates: V4SI, V4SF, V4SF. */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sult_s(__m128 _1, __m128 _2)
++{
++ return (__m128i)__builtin_lsx_vfcmp_sult_s((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format: vd, vj, vk. */
++/* Data types in instruction templates: V2DI, V2DF, V2DF. */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sun_d(__m128d _1, __m128d _2)
++{
++ return (__m128i)__builtin_lsx_vfcmp_sun_d((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format: vd, vj, vk. */
++/* Data types in instruction templates: V2DI, V2DF, V2DF. */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sune_d(__m128d _1, __m128d _2)
++{
++ return (__m128i)__builtin_lsx_vfcmp_sune_d((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format: vd, vj, vk. */
++/* Data types in instruction templates: V4SI, V4SF, V4SF. */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sune_s(__m128 _1, __m128 _2)
++{
++ return (__m128i)__builtin_lsx_vfcmp_sune_s((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format: vd, vj, vk. */
++/* Data types in instruction templates: V4SI, V4SF, V4SF. */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sun_s(__m128 _1, __m128 _2)
++{
++ return (__m128i)__builtin_lsx_vfcmp_sun_s((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format: vd, si10. */
++/* Data types in instruction templates: V16QI, HI. */
++#define __lsx_vrepli_b(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_b((_1)))
++
++/* Assembly instruction format: vd, si10. */
++/* Data types in instruction templates: V2DI, HI. */
++#define __lsx_vrepli_d(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_d((_1)))
++
++/* Assembly instruction format: vd, si10. */
++/* Data types in instruction templates: V8HI, HI. */
++#define __lsx_vrepli_h(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_h((_1)))
++
++/* Assembly instruction format: vd, si10. */
++/* Data types in instruction templates: V4SI, HI. */
++#define __lsx_vrepli_w(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_w((_1)))
++
++#endif /* defined(__loongarch_sx) */
++#endif /* _GCC_LOONGSON_SXINTRIN_H */
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+new file mode 100644
+index 000000000..20638559d
+--- /dev/null
++++ b/gcc/config/loongarch/predicates.md
+@@ -0,0 +1,639 @@
++;; Predicate definitions for LARCH.
++;; Copyright (C) 2004-2018 Free Software Foundation, Inc.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
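Before the predicates.md body, a note on the lsxintrin.h hunk that ends just above: every wrapper there follows one shape -- cast the generic __m128i/__m128/__m128d arguments to the lane-typed vectors named in the "Data types in instruction templates" comment, then forward to the matching __builtin_lsx_* builtin. Operations whose assembly form takes an immediate field (si9..si12, ui3..ui7, i13) are wrapped as macros rather than inline functions, because GCC requires the immediate argument of such builtins to be an integer constant expression at the point of expansion. The __lsx_vfcmp_c*_* entries are the quiet comparisons and the __lsx_vfcmp_s*_* entries the signaling ones, mirroring the FCMP condition mnemonics. A minimal usage sketch -- illustrative only, not part of the patch, assuming a compiler built from this series targeting loongarch64 with LSX enabled (-mlsx); the function name and array contract are invented for the example:

#include <lsxintrin.h>

/* Rotate each 32-bit lane of a[0..3] right by one bit, store the
   result back, and report whether any result bit is set.  */
int
rotate_lanes (int *a)
{
  __m128i v = __lsx_vld (a, 0);        /* vld: 128-bit load, si12 offset 0 */
  __m128i one = __lsx_vrepli_w (1);    /* vrepli.w: splat immediate 1 */
  __m128i r = __lsx_vrotr_w (v, one);  /* vrotr.w: per-lane rotate right */
  __lsx_vst (r, a, 0);                 /* vst: 128-bit store */
  return __lsx_bnz_v (r);              /* bnz.v: 1 if any bit of r is set */
}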
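One further reading aid for the predicate definitions that follow: the aq8b..aq12d, sb*/ub*/uh4/uw*, and db*/ib3 predicates below all delegate to the port's loongarch_signed_immediate_p / loongarch_unsigned_immediate_p helpers (defined elsewhere in this series), which accept a value only if it is a multiple of 2^shift whose scaled-down quotient fits in the given number of bits. A C sketch of the signed case, written out here purely as an illustration of the inferred semantics (the names are invented, not the patch's code):

#include <stdbool.h>
#include <stdint.h>

/* True iff X is a multiple of 2**SHIFT and X >> SHIFT fits in a
   BITS-bit signed field.  E.g. bits = 10, shift = 2 (as in
   aq10w_operand) accepts word-aligned offsets in [-2048, 2044].  */
static bool
signed_immediate_p (int64_t x, int bits, int shift)
{
  if (x & (((int64_t) 1 << shift) - 1))  /* low SHIFT bits must be clear */
    return false;
  x >>= shift;                           /* scale down */
  return x >= -((int64_t) 1 << (bits - 1))
         && x < ((int64_t) 1 << (bits - 1));
}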
++ ++(define_predicate "const_uns_arith_operand" ++ (and (match_code "const_int") ++ (match_test "SMALL_OPERAND_UNSIGNED (INTVAL (op))"))) ++ ++(define_predicate "uns_arith_operand" ++ (ior (match_operand 0 "const_uns_arith_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "const_lu32i_operand" ++ (and (match_code "const_int") ++ (match_test "LU32I_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_lu52i_operand" ++ (and (match_code "const_int") ++ (match_test "LU52I_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_arith_operand" ++ (and (match_code "const_int") ++ (match_test "IMM12_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_imm16_operand" ++ (and (match_code "const_int") ++ (match_test "IMM16_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "arith_operand" ++ (ior (match_operand 0 "const_arith_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "const_immlsa_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 1, 4)"))) ++ ++(define_predicate "const_lsx_branch_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), -1024, 1023)"))) ++ ++(define_predicate "const_uimm3_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "const_uimm4_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 15)"))) ++ ++(define_predicate "const_uimm5_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 31)"))) ++ ++(define_predicate "const_uimm6_operand" ++ (and (match_code "const_int") ++ (match_test "UIMM6_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_uimm7_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 127)"))) ++ ++(define_predicate "const_uimm8_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 255)"))) ++ ++(define_predicate "const_uimm14_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 16383)"))) ++ ++(define_predicate "const_uimm15_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 32767)"))) ++ ++(define_predicate "const_imm5_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), -16, 15)"))) ++ ++(define_predicate "const_imm10_operand" ++ (and (match_code "const_int") ++ (match_test "IMM10_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_imm12_operand" ++ (and (match_code "const_int") ++ (match_test "IMM12_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "const_imm13_operand" ++ (and (match_code "const_int") ++ (match_test "IMM13_OPERAND (INTVAL (op))"))) ++ ++(define_predicate "reg_imm10_operand" ++ (ior (match_operand 0 "const_imm10_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "aq8b_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) ++ ++(define_predicate "aq8h_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 1)"))) ++ ++(define_predicate "aq8w_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 2)"))) ++ ++(define_predicate "aq8d_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) ++ ++(define_predicate "aq10b_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 0)"))) ++ 
++(define_predicate "aq10h_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 1)"))) ++ ++(define_predicate "aq10w_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)"))) ++ ++(define_predicate "aq10d_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)"))) ++ ++(define_predicate "aq12b_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)"))) ++ ++(define_predicate "aq12h_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 11, 1)"))) ++ ++(define_predicate "aq12w_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)"))) ++ ++(define_predicate "aq12d_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 9, 3)"))) ++ ++(define_predicate "sle_operand" ++ (and (match_code "const_int") ++ (match_test "SMALL_OPERAND (INTVAL (op) + 1)"))) ++ ++(define_predicate "sleu_operand" ++ (and (match_operand 0 "sle_operand") ++ (match_test "INTVAL (op) + 1 != 0"))) ++ ++(define_predicate "const_0_operand" ++ (and (match_code "const_int,const_double,const_vector") ++ (match_test "op == CONST0_RTX (GET_MODE (op))"))) ++ ++(define_predicate "const_m1_operand" ++ (and (match_code "const_int,const_double,const_vector") ++ (match_test "op == CONSTM1_RTX (GET_MODE (op))"))) ++ ++(define_predicate "reg_or_m1_operand" ++ (ior (match_operand 0 "const_m1_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "reg_or_0_operand" ++ (ior (match_operand 0 "const_0_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "const_1_operand" ++ (and (match_code "const_int,const_double,const_vector") ++ (match_test "op == CONST1_RTX (GET_MODE (op))"))) ++ ++(define_predicate "reg_or_1_operand" ++ (ior (match_operand 0 "const_1_operand") ++ (match_operand 0 "register_operand"))) ++ ++;; These are used in vec_merge, hence accept bitmask as const_int. ++(define_predicate "const_exp_2_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 1)"))) ++ ++(define_predicate "const_exp_4_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 3)"))) ++ ++(define_predicate "const_exp_8_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 7)"))) ++ ++(define_predicate "const_exp_16_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 15)"))) ++ ++(define_predicate "const_exp_32_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 31)"))) ++ ++;; This is used for indexing into vectors, and hence only accepts const_int. 
++(define_predicate "const_0_or_1_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 1)"))) ++ ++(define_predicate "const_2_or_3_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 2, 3)"))) ++ ++(define_predicate "const_0_to_3_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 3)"))) ++ ++(define_predicate "const_0_to_7_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "const_4_to_7_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 4, 7)"))) ++ ++(define_predicate "const_8_to_15_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "const_16_to_31_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) ++ ++(define_predicate "qi_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xff"))) ++ ++(define_predicate "hi_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xffff"))) ++ ++(define_predicate "lu52i_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xfffffffffffff"))) ++ ++(define_predicate "shift_mask_operand" ++ (and (match_code "const_int") ++ (ior (match_test "UINTVAL (op) == 0x3fffffffc") ++ (match_test "UINTVAL (op) == 0x1fffffffe") ++ (match_test "UINTVAL (op) == 0x7fffffff8") ++ (match_test "UINTVAL (op) == 0xffffffff0")))) ++ ++ ++ ++(define_predicate "si_mask_operand" ++ (and (match_code "const_int") ++ (match_test "UINTVAL (op) == 0xffffffff"))) ++ ++(define_predicate "and_load_operand" ++ (ior (match_operand 0 "qi_mask_operand") ++ (match_operand 0 "hi_mask_operand") ++ (match_operand 0 "si_mask_operand"))) ++ ++(define_predicate "low_bitmask_operand" ++ (and (match_code "const_int") ++ (match_test "low_bitmask_len (mode, INTVAL (op)) > 12"))) ++ ++(define_predicate "and_reg_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_uns_arith_operand") ++ (match_operand 0 "low_bitmask_operand") ++ (match_operand 0 "si_mask_operand"))) ++ ++(define_predicate "and_operand" ++ (ior (match_operand 0 "and_load_operand") ++ (match_operand 0 "and_reg_operand"))) ++ ++(define_predicate "d_operand" ++ (and (match_code "reg") ++ (match_test "GP_REG_P (REGNO (op))"))) ++ ++(define_predicate "lwsp_swsp_operand" ++ (and (match_code "mem") ++ (match_test "lwsp_swsp_address_p (XEXP (op, 0), mode)"))) ++ ++(define_predicate "db4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)"))) ++ ++(define_predicate "db7_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)"))) ++ ++(define_predicate "db8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)"))) ++ ++(define_predicate "ib3_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)"))) ++ ++(define_predicate "sb4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 4, 0)"))) ++ ++(define_predicate "sb5_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 5, 0)"))) ++ ++(define_predicate "sb8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) ++ ++(define_predicate 
"sd8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) ++ ++(define_predicate "ub4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 0)"))) ++ ++(define_predicate "ub8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 0)"))) ++ ++(define_predicate "uh4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 1)"))) ++ ++(define_predicate "uw4_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 2)"))) ++ ++(define_predicate "uw5_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 5, 2)"))) ++ ++(define_predicate "uw6_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 6, 2)"))) ++ ++(define_predicate "uw8_operand" ++ (and (match_code "const_int") ++ (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 2)"))) ++ ++(define_predicate "addiur2_operand" ++ (and (match_code "const_int") ++ (ior (match_test "INTVAL (op) == -1") ++ (match_test "INTVAL (op) == 1") ++ (match_test "INTVAL (op) == 4") ++ (match_test "INTVAL (op) == 8") ++ (match_test "INTVAL (op) == 12") ++ (match_test "INTVAL (op) == 16") ++ (match_test "INTVAL (op) == 20") ++ (match_test "INTVAL (op) == 24")))) ++ ++(define_predicate "addiusp_operand" ++ (and (match_code "const_int") ++ (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))") ++ (match_test "(IN_RANGE (INTVAL (op), -258, -3))")))) ++ ++(define_predicate "andi16_operand" ++ (and (match_code "const_int") ++ (ior (match_test "IN_RANGE (INTVAL (op), 1, 4)") ++ (match_test "IN_RANGE (INTVAL (op), 7, 8)") ++ (match_test "IN_RANGE (INTVAL (op), 15, 16)") ++ (match_test "IN_RANGE (INTVAL (op), 31, 32)") ++ (match_test "IN_RANGE (INTVAL (op), 63, 64)") ++ (match_test "INTVAL (op) == 255") ++ (match_test "INTVAL (op) == 32768") ++ (match_test "INTVAL (op) == 65535")))) ++ ++(define_predicate "movep_src_register" ++ (and (match_code "reg") ++ (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)")) ++ (match_test ("IN_RANGE (REGNO (op), 16, 20)"))))) ++ ++(define_predicate "movep_src_operand" ++ (ior (match_operand 0 "const_0_operand") ++ (match_operand 0 "movep_src_register"))) ++ ++(define_predicate "fcc_reload_operand" ++ (and (match_code "reg,subreg") ++ (match_test "ST_REG_P (true_regnum (op))"))) ++ ++(define_predicate "muldiv_target_operand" ++ (match_operand 0 "register_operand")) ++ ++(define_predicate "const_call_insn_operand" ++ (match_code "const,symbol_ref,label_ref") ++{ ++ enum loongarch_symbol_type symbol_type; ++ ++ if (!loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_CALL, &symbol_type)) ++ return false; ++ ++ switch (symbol_type) ++ { ++ case SYMBOL_GOT_DISP: ++ /* Without explicit relocs, there is no special syntax for ++ loading the address of a call destination into a register. ++ Using "la $25,foo; jal $25" would prevent the lazy binding ++ of "foo", so keep the address of global symbols with the ++ jal macro. 
*/ ++ return 1; ++ ++ default: ++ return false; ++ } ++}) ++ ++(define_predicate "call_insn_operand" ++ (ior (match_operand 0 "const_call_insn_operand") ++ (match_operand 0 "register_operand"))) ++ ++(define_predicate "is_const_call_local_symbol" ++ (and (match_operand 0 "const_call_insn_operand") ++ (ior (match_test "loongarch_global_symbol_p (op) == 0") ++ (match_test "loongarch_symbol_binds_local_p (op) != 0")) ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_predicate "is_const_call_weak_symbol" ++ (and (match_operand 0 "const_call_insn_operand") ++ (not (match_operand 0 "is_const_call_local_symbol")) ++ (match_test "loongarch_weak_symbol_p (op) != 0") ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_predicate "is_const_call_plt_symbol" ++ (and (match_operand 0 "const_call_insn_operand") ++ (match_test "flag_plt != 0") ++ (match_test "loongarch_global_symbol_noweak_p (op) != 0") ++ (match_test "CONSTANT_P (op)"))) ++ ++(define_predicate "is_const_call_global_noplt_symbol" ++ (and (match_operand 0 "const_call_insn_operand") ++ (match_test "flag_plt == 0") ++ (match_test "loongarch_global_symbol_noweak_p (op) != 0") ++ (match_test "CONSTANT_P (op)"))) ++ ++;; A legitimate CONST_INT operand that takes more than one instruction ++;; to load. ++(define_predicate "splittable_const_int_operand" ++ (match_code "const_int") ++{ ++ ++ /* Don't handle multi-word moves this way; we don't want to introduce ++ the individual word-mode moves until after reload. */ ++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) ++ return false; ++ ++ /* Otherwise check whether the constant can be loaded in a single ++ instruction. */ ++// return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op); ++ return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op) ++ && !LU52I_INT (op); ++}) ++ ++(define_predicate "move_operand" ++ (match_operand 0 "general_operand") ++{ ++ enum loongarch_symbol_type symbol_type; ++ ++ /* The thinking here is as follows: ++ ++ (1) The move expanders should split complex load sequences into ++ individual instructions. Those individual instructions can ++ then be optimized by all rtl passes. ++ ++ (2) The target of pre-reload load sequences should not be used ++ to store temporary results. If the target register is only ++ assigned one value, reload can rematerialize that value ++ on demand, rather than spill it to the stack. ++ ++ (3) If we allowed pre-reload passes like combine and cse to recreate ++ complex load sequences, we would want to be able to split the ++ sequences before reload as well, so that the pre-reload scheduler ++ can see the individual instructions. This falls foul of (2); ++ the splitter would be forced to reuse the target register for ++ intermediate results. ++ ++ (4) We want to define complex load splitters for combine. These ++ splitters can request a temporary scratch register, which avoids ++ the problem in (2). They allow things like: ++ ++ (set (reg T1) (high SYM)) ++ (set (reg T2) (low (reg T1) SYM)) ++ (set (reg X) (plus (reg T2) (const_int OFFSET))) ++ ++ to be combined into: ++ ++ (set (reg T3) (high SYM+OFFSET)) ++ (set (reg X) (lo_sum (reg T3) SYM+OFFSET)) ++ ++ if T2 is only used this once. 
*/ ++ switch (GET_CODE (op)) ++ { ++ case CONST_INT: ++ return !splittable_const_int_operand (op, mode); ++ ++ case CONST: ++ case SYMBOL_REF: ++ case LABEL_REF: ++ return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)); ++ default: ++ return true; ++ } ++}) ++ ++(define_predicate "consttable_operand" ++ (match_test "CONSTANT_P (op)")) ++ ++(define_predicate "symbolic_operand" ++ (match_code "const,symbol_ref,label_ref") ++{ ++ enum loongarch_symbol_type type; ++ return loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type); ++}) ++ ++(define_predicate "force_to_mem_operand" ++ (match_code "const,symbol_ref,label_ref") ++{ ++ enum loongarch_symbol_type symbol_type; ++ return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type) ++ && loongarch_use_pcrel_pool_p[(int) symbol_type]); ++}) ++ ++(define_predicate "got_disp_operand" ++ (match_code "const,symbol_ref,label_ref") ++{ ++ enum loongarch_symbol_type type; ++ return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type) ++ && type == SYMBOL_GOT_DISP); ++}) ++ ++(define_predicate "symbol_ref_operand" ++ (match_code "symbol_ref")) ++ ++(define_predicate "stack_operand" ++ (and (match_code "mem") ++ (match_test "loongarch_stack_address_p (XEXP (op, 0), GET_MODE (op))"))) ++ ++ ++ ++(define_predicate "equality_operator" ++ (match_code "eq,ne")) ++ ++(define_predicate "extend_operator" ++ (match_code "zero_extend,sign_extend")) ++ ++(define_predicate "trap_comparison_operator" ++ (match_code "eq,ne,lt,ltu,ge,geu")) ++ ++(define_predicate "order_operator" ++ (match_code "lt,ltu,le,leu,ge,geu,gt,gtu")) ++ ++;; For NE, cstore uses sltu instructions in which the first operand is $0. ++ ++(define_predicate "loongarch_cstore_operator" ++ (ior (match_code "eq,gt,gtu,ge,geu,lt,ltu,le,leu") ++ (match_code "ne"))) ++ ++(define_predicate "small_data_pattern" ++ (and (match_code "set,parallel,unspec,unspec_volatile,prefetch") ++ (match_test "loongarch_small_data_pattern_p (op)"))) ++ ++(define_predicate "mem_noofs_operand" ++ (and (match_code "mem") ++ (match_code "reg" "0"))) ++ ++;; Return 1 if the operand is in non-volatile memory. 
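++
++;; For reference: move_operand above rejects exactly the CONST_INTs that
++;; splittable_const_int_operand accepts, i.e. the constants no single
++;; LoongArch instruction can materialize.  A minimal C sketch of that
++;; single-instruction test follows; the LUI_INT/SMALL_INT/
++;; SMALL_INT_UNSIGNED/LU52I_INT macros live in loongarch.h rather than
++;; in this hunk, so the exact ranges below are assumptions based on the
++;; si12 (addi), ui12 (ori), si20<<12 (lu12i.w) and si12<<52 (lu52i.d)
++;; immediate formats:
++;;
++;;   #include <stdbool.h>
++;;   #include <stdint.h>
++;;
++;;   static bool si12_p (int64_t v)  { return v >= -2048 && v < 2048; }
++;;   static bool ui12_p (int64_t v)  { return v >= 0 && v < 4096; }
++;;   /* Low 12 bits clear and the value fits in a sign-extended word.  */
++;;   static bool lu12i_p (int64_t v)
++;;   { return (v & 0xfff) == 0 && v >= -(1ll << 31) && v < (1ll << 31); }
++;;   /* Only the top 12 bits may be nonzero.  */
++;;   static bool lu52i_p (int64_t v)
++;;   { return (v & ((1ll << 52) - 1)) == 0; }
++;;
++;;   /* Needs a multi-insn sequence iff no single form matches.  */
++;;   static bool splittable_p (int64_t v)
++;;   { return !si12_p (v) && !ui12_p (v) && !lu12i_p (v) && !lu52i_p (v); }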
++(define_predicate "non_volatile_mem_operand" ++ (and (match_operand 0 "memory_operand") ++ (not (match_test "MEM_VOLATILE_P (op)")))) ++ ++(define_predicate "const_vector_same_val_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_val_p (op, mode); ++}) ++ ++(define_predicate "const_vector_same_simm5_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, -16, 15); ++}) ++ ++(define_predicate "const_vector_same_uimm5_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, 0, 31); ++}) ++ ++(define_predicate "const_vector_same_ximm5_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, -31, 31); ++}) ++ ++(define_predicate "const_vector_same_uimm6_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, 0, 63); ++}) ++ ++(define_predicate "const_vector_same_uimm8_operand" ++ (match_code "const_vector") ++{ ++ return loongarch_const_vector_same_int_p (op, mode, 0, 255); ++}) ++ ++(define_predicate "par_const_vector_shf_set_operand" ++ (match_code "parallel") ++{ ++ return loongarch_const_vector_shuffle_set_p (op, mode); ++}) ++ ++(define_predicate "reg_or_vector_same_val_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_val_operand"))) ++ ++(define_predicate "reg_or_vector_same_simm5_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_simm5_operand"))) ++ ++(define_predicate "reg_or_vector_same_uimm5_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_uimm5_operand"))) ++ ++(define_predicate "reg_or_vector_same_ximm5_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_ximm5_operand"))) ++ ++(define_predicate "reg_or_vector_same_uimm6_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_vector_same_uimm6_operand"))) +diff --git a/gcc/config/loongarch/rtems.h b/gcc/config/loongarch/rtems.h +new file mode 100644 +index 000000000..bbb70b040 +--- /dev/null ++++ b/gcc/config/loongarch/rtems.h +@@ -0,0 +1,39 @@ ++/* Definitions for rtems targeting a LARCH using ELF. ++ Copyright (C) 1996-2018 Free Software Foundation, Inc. ++ Contributed by Joel Sherrill (joel@OARcorp.com). ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published ++ by the Free Software Foundation; either version 3, or (at your ++ option) any later version. ++ ++ GCC is distributed in the hope that it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++/* Specify predefined symbols in preprocessor. 
*/ ++ ++#define TARGET_OS_CPP_BUILTINS() \ ++do { \ ++ builtin_define ("__rtems__"); \ ++ builtin_define ("__USE_INIT_FINI__"); \ ++ builtin_assert ("system=rtems"); \ ++} while (0) ++ ++/* No sdata. ++ * The RTEMS BSPs expect -G0 ++ */ ++#undef LARCH_DEFAULT_GVALUE ++#define LARCH_DEFAULT_GVALUE 0 +diff --git a/gcc/config/loongarch/sde.opt b/gcc/config/loongarch/sde.opt +new file mode 100644 +index 000000000..321217d51 +--- /dev/null ++++ b/gcc/config/loongarch/sde.opt +@@ -0,0 +1,28 @@ ++; LARCH SDE options. ++; ++; Copyright (C) 2010-2018 Free Software Foundation, Inc. ++; ++; This file is part of GCC. ++; ++; GCC is free software; you can redistribute it and/or modify it under ++; the terms of the GNU General Public License as published by the Free ++; Software Foundation; either version 3, or (at your option) any later ++; version. ++; ++; GCC is distributed in the hope that it will be useful, but WITHOUT ++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++; License for more details. ++; ++; You should have received a copy of the GNU General Public License ++; along with GCC; see the file COPYING3. If not see ++; . ++ ++; -mcode-xonly is a traditional alias for -mcode-readable=pcrel and ++; -mno-data-in-code is a traditional alias for -mcode-readable=no. ++ ++mno-data-in-code ++Target RejectNegative Alias(mcode-readable=, no) ++ ++mcode-xonly ++Target RejectNegative Alias(mcode-readable=, pcrel) +diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md +new file mode 100644 +index 000000000..e3eb43e16 +--- /dev/null ++++ b/gcc/config/loongarch/sync.md +@@ -0,0 +1,616 @@ ++;; Machine description for LARCH atomic operations. ++;; Copyright (C) 2011-2018 Free Software Foundation, Inc. ++;; Contributed by Andrew Waterman (andrew@sifive.com). ++;; Based on LARCH target for GNU compiler. ++ ++;; This file is part of GCC. ++ ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++ ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++ ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++(define_c_enum "unspec" [ ++ UNSPEC_COMPARE_AND_SWAP ++ UNSPEC_COMPARE_AND_SWAP_ADD ++ UNSPEC_COMPARE_AND_SWAP_SUB ++ UNSPEC_COMPARE_AND_SWAP_AND ++ UNSPEC_COMPARE_AND_SWAP_XOR ++ UNSPEC_COMPARE_AND_SWAP_OR ++ UNSPEC_COMPARE_AND_SWAP_NAND ++ UNSPEC_SYNC_OLD_OP ++ UNSPEC_SYNC_EXCHANGE ++ UNSPEC_ATOMIC_STORE ++ UNSPEC_MEMORY_BARRIER ++]) ++ ++(define_code_iterator any_atomic [plus ior xor and]) ++(define_code_attr atomic_optab ++ [(plus "add") (ior "or") (xor "xor") (and "and")]) ++ ++;; This attribute gives the format suffix for atomic memory operations. ++(define_mode_attr amo [(SI "w") (DI "d")]) ++ ++;; expands to the name of the atomic operand that implements a particular code. ++(define_code_attr amop [(ior "or") ++ (xor "xor") ++ (and "and") ++ (plus "add")]) ++;; Memory barriers. 
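++
++;; The expanders that follow map the C++ memory model conservatively:
++;; every non-relaxed fence becomes a full "dbar 0", and, since there are
++;; no QImode/HImode amo instructions, subword operations are built from
++;; aligned SImode accesses.  As a rough C model of the atomic_test_and_set
++;; expansion further down (the helper name is illustrative, not part of
++;; this patch; LoongArch is little-endian, so the address LSBs select the
++;; byte lane):
++;;
++;;   #include <stdint.h>
++;;
++;;   static unsigned char
++;;   byte_test_and_set (unsigned char *p)
++;;   {
++;;     uintptr_t a = (uintptr_t) p;
++;;     uint32_t *aligned = (uint32_t *) (a & ~(uintptr_t) 3); /* addr & -4 */
++;;     unsigned shmt = (a & 3) * 8;      /* bit offset of the target byte */
++;;     uint32_t word = (uint32_t) 1 << shmt;
++;;     uint32_t old = __atomic_fetch_or (aligned, word, __ATOMIC_SEQ_CST);
++;;     return old >> shmt;               /* old value of the byte */
++;;   }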
++
++(define_expand "mem_thread_fence"
++  [(match_operand:SI 0 "const_int_operand" "")] ;; model
++  ""
++{
++  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
++    {
++      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
++      MEM_VOLATILE_P (mem) = 1;
++      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
++    }
++  DONE;
++})
++
++;; Until the LARCH memory model (hence its mapping from C++) is finalized,
++;; conservatively emit a full FENCE.
++(define_insn "mem_thread_fence_1"
++  [(set (match_operand:BLK 0 "" "")
++        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
++   (match_operand:SI 1 "const_int_operand" "")] ;; model
++  ""
++  "dbar\t0")
++
++;; Atomic memory operations.
++
++;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
++(define_insn "atomic_store<mode>"
++  [(set (match_operand:GPR 0 "memory_operand" "+ZB")
++        (unspec_volatile:GPR
++          [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
++           (match_operand:SI 2 "const_int_operand")] ;; model
++          UNSPEC_ATOMIC_STORE))]
++  ""
++  "amswap%A2.<amo>\t$zero,%z1,%0"
++  [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_<atomic_optab><mode>"
++  [(set (match_operand:GPR 0 "memory_operand" "+ZB")
++        (unspec_volatile:GPR
++          [(any_atomic:GPR (match_dup 0)
++                           (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
++           (match_operand:SI 2 "const_int_operand")] ;; model
++          UNSPEC_SYNC_OLD_OP))]
++  ""
++  "am<amop>%A2.<amo>\t$zero,%z1,%0"
++  [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_fetch_<atomic_optab><mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++        (match_operand:GPR 1 "memory_operand" "+ZB"))
++   (set (match_dup 1)
++        (unspec_volatile:GPR
++          [(any_atomic:GPR (match_dup 1)
++                           (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
++           (match_operand:SI 3 "const_int_operand")] ;; model
++          UNSPEC_SYNC_OLD_OP))]
++  ""
++  "am<amop>%A3.<amo>\t%0,%z2,%1"
++  [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_exchange<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++        (unspec_volatile:GPR
++          [(match_operand:GPR 1 "memory_operand" "+ZB")
++           (match_operand:SI 3 "const_int_operand")] ;; model
++          UNSPEC_SYNC_EXCHANGE))
++   (set (match_dup 1)
++        (match_operand:GPR 2 "register_operand" "r"))]
++  ""
++  "amswap%A3.<amo>\t%0,%z2,%1"
++  [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_cas_value_strong<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++        (match_operand:GPR 1 "memory_operand" "+ZC"))
++   (set (match_dup 1)
++        (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
++                              (match_operand:GPR 3 "reg_or_0_operand" "rJ")
++                              (match_operand:SI 4 "const_int_operand")  ;; mod_s
++                              (match_operand:SI 5 "const_int_operand")] ;; mod_f
++          UNSPEC_COMPARE_AND_SWAP))
++   (clobber (match_scratch:GPR 6 "=&r"))]
++  ""
++{
++  return "%G5\n\t"
++         "1:\n\t"
++         "ll.<amo>\t%0,%1\n\t"
++         "bne\t%0,%z2,2f\n\t"
++         "or%i3\t%6,$zero,%3\n\t"
++         "sc.<amo>\t%6,%1\n\t"
++         "beq\t$zero,%6,1b\n\t"
++         "b\t3f\n\t"
++         "2:\n\t"
++         "dbar\t0x700\n\t"
++         "3:\n\t";
++}
++  [(set (attr "length") (const_int 32))])
++
++(define_expand "atomic_compare_and_swap<mode>"
++  [(match_operand:SI 0 "register_operand" "")   ;; bool output
++   (match_operand:GPR 1 "register_operand" "")  ;; val output
++   (match_operand:GPR 2 "memory_operand" "")    ;; memory
++   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
++   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
++   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
++   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
++   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
++  ""
++{
++  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
++                                                operands[3], operands[4],
++                                                operands[6], operands[7]));
++
++  rtx compare = operands[1];
++  if (operands[3] != const0_rtx)
++    {
++      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
++      compare = gen_reg_rtx (<MODE>mode);
++      emit_insn (gen_rtx_SET (compare, difference));
++    }
++
++  if (word_mode != <MODE>mode)
++    {
++      rtx reg = gen_reg_rtx (word_mode);
++      emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
++      compare = reg;
++    }
++
++  emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx)));
++  DONE;
++})
++
++(define_expand "atomic_test_and_set"
++  [(match_operand:QI 0 "register_operand" "")   ;; bool output
++   (match_operand:QI 1 "memory_operand" "+ZB")  ;; memory
++   (match_operand:SI 2 "const_int_operand" "")] ;; model
++  ""
++{
++  /* We have no QImode atomics, so use the address LSBs to form a mask,
++     then use an aligned SImode atomic.  */
++  rtx result = operands[0];
++  rtx mem = operands[1];
++  rtx model = operands[2];
++  rtx addr = force_reg (Pmode, XEXP (mem, 0));
++  rtx tmp_reg = gen_reg_rtx (Pmode);
++  rtx zero_reg = gen_rtx_REG (Pmode, 0);
++
++  rtx aligned_addr = gen_reg_rtx (Pmode);
++  emit_move_insn (tmp_reg, gen_rtx_PLUS (Pmode, zero_reg, GEN_INT (-4)));
++  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, tmp_reg));
++
++  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
++  set_mem_alias_set (aligned_mem, 0);
++
++  rtx offset = gen_reg_rtx (SImode);
++  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
++                                       GEN_INT (3)));
++
++  rtx tmp = gen_reg_rtx (SImode);
++  emit_move_insn (tmp, GEN_INT (1));
++
++  rtx shmt = gen_reg_rtx (SImode);
++  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
++
++  rtx word = gen_reg_rtx (SImode);
++  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
++
++  tmp = gen_reg_rtx (SImode);
++  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
++
++  emit_move_insn (gen_lowpart (SImode, result),
++                  gen_rtx_LSHIFTRT (SImode, tmp, shmt));
++  DONE;
++})
++
++
++
++(define_insn "atomic_cas_value_cmp_and_7_<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&r")
++        (match_operand:GPR 1 "memory_operand" "+ZC"))
++   (set (match_dup 1)
++        (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
++                              (match_operand:GPR 3 "reg_or_0_operand" "rJ")
++                              (match_operand:GPR 4 "reg_or_0_operand" "rJ")
++                              (match_operand:GPR 5 "reg_or_0_operand" "rJ")
++                              (match_operand:SI 6 "const_int_operand")] ;; model
++          UNSPEC_COMPARE_AND_SWAP))
++   (clobber (match_scratch:GPR 7 "=&r"))]
++  ""
++{
++  return "%G6\n\t"
++         "1:\n\t"
++         "ll.<amo>\t%0,%1\n\t"
++         "and\t%7,%0,%2\n\t"
++         "bne\t%7,%z4,2f\n\t"
++         "and\t%7,%0,%z3\n\t"
++         "or%i5\t%7,%7,%5\n\t"
++         "sc.<amo>\t%7,%1\n\t"
++         "beq\t$zero,%7,1b\n\t"
++         "b\t3f\n\t"
++         "2:\n\t"
++         "dbar\t0x700\n\t"
++         "3:\n\t";
++}
++  [(set (attr "length") (const_int 40))])
++
++
++(define_expand "atomic_compare_and_swap<mode>"
++  [(match_operand:SI 0 "register_operand" "")     ;; bool output
++   (match_operand:SHORT 1 "register_operand" "")  ;; val output
++   (match_operand:SHORT 2 "memory_operand" "")    ;; memory
++   (match_operand:SHORT 3 "reg_or_0_operand" "")  ;; expected value
++   (match_operand:SHORT 4 "reg_or_0_operand" "")  ;; desired value
++   (match_operand:SI 5 "const_int_operand" "")    ;; is_weak
++   (match_operand:SI 6 "const_int_operand" "")    ;; mod_s
++   (match_operand:SI 7 "const_int_operand" "")]   ;; mod_f
++  ""
++{
++  union loongarch_gen_fn_ptrs generator;
++  generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si;
++
loongarch_expand_atomic_qihi (generator, ++ operands[1], ++ operands[2], ++ operands[3], ++ operands[4], ++ operands[7]); ++ ++ rtx compare = operands[1]; ++ if (operands[3] != const0_rtx) ++ { ++ machine_mode mode = GET_MODE (operands[3]); ++ rtx op1 = convert_modes (SImode, mode, operands[1], true); ++ rtx op3 = convert_modes (SImode, mode, operands[3], true); ++ rtx difference = gen_rtx_MINUS (SImode, op1, op3); ++ compare = gen_reg_rtx (SImode); ++ emit_insn (gen_rtx_SET (compare, difference)); ++ } ++ ++ if (word_mode != mode) ++ { ++ rtx reg = gen_reg_rtx (word_mode); ++ emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); ++ compare = reg; ++ } ++ ++ emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); ++ DONE; ++}) ++ ++ ++ ++ ++(define_insn "atomic_cas_value_add_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_COMPARE_AND_SWAP_ADD)) ++ (clobber (match_scratch:GPR 7 "=&r")) ++ (clobber (match_scratch:GPR 8 "=&r"))] ++ "" ++{ ++ return "%G6\n\t" ++ "1:\n\t" ++ "ll.\t%0,%1\n\t" ++ "and\t%7,%0,%3\n\t" ++ "add.w\t%8,%0,%z5\n\t" ++ "and\t%8,%8,%z2\n\t" ++ "or%i8\t%7,%7,%8\n\t" ++ "sc.\t%7,%1\n\t" ++ "beq\t$zero,%7,1b"; ++} ++ ++ [(set (attr "length") (const_int 32))]) ++ ++ ++ ++(define_insn "atomic_cas_value_sub_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_COMPARE_AND_SWAP_SUB)) ++ (clobber (match_scratch:GPR 7 "=&r")) ++ (clobber (match_scratch:GPR 8 "=&r"))] ++ "" ++{ ++ return "%G6\n\t" ++ "1:\n\t" ++ "ll.\t%0,%1\n\t" ++ "and\t%7,%0,%3\n\t" ++ "sub.w\t%8,%0,%z5\n\t" ++ "and\t%8,%8,%z2\n\t" ++ "or%i8\t%7,%7,%8\n\t" ++ "sc.\t%7,%1\n\t" ++ "beq\t$zero,%7,1b"; ++} ++ [(set (attr "length") (const_int 32))]) ++ ++ ++ ++(define_insn "atomic_cas_value_and_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_COMPARE_AND_SWAP_AND)) ++ (clobber (match_scratch:GPR 7 "=&r")) ++ (clobber (match_scratch:GPR 8 "=&r"))] ++ "" ++{ ++ return "%G6\n\t" ++ "1:\n\t" ++ "ll.\t%0,%1\n\t" ++ "and\t%7,%0,%3\n\t" ++ "and\t%8,%0,%z5\n\t" ++ "and\t%8,%8,%z2\n\t" ++ "or%i8\t%7,%7,%8\n\t" ++ "sc.\t%7,%1\n\t" ++ "beq\t$zero,%7,1b"; ++} ++ [(set (attr "length") (const_int 32))]) ++ ++(define_insn "atomic_cas_value_xor_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ 
(set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_COMPARE_AND_SWAP_XOR)) ++ (clobber (match_scratch:GPR 7 "=&r")) ++ (clobber (match_scratch:GPR 8 "=&r"))] ++ "" ++{ ++ return "%G6\n\t" ++ "1:\n\t" ++ "ll.\t%0,%1\n\t" ++ "and\t%7,%0,%3\n\t" ++ "xor\t%8,%0,%z5\n\t" ++ "and\t%8,%8,%z2\n\t" ++ "or%i8\t%7,%7,%8\n\t" ++ "sc.\t%7,%1\n\t" ++ "beq\t$zero,%7,1b"; ++} ++ ++ [(set (attr "length") (const_int 32))]) ++ ++(define_insn "atomic_cas_value_or_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_COMPARE_AND_SWAP_OR)) ++ (clobber (match_scratch:GPR 7 "=&r")) ++ (clobber (match_scratch:GPR 8 "=&r"))] ++ "" ++{ ++ return "%G6\n\t" ++ "1:\n\t" ++ "ll.\t%0,%1\n\t" ++ "and\t%7,%0,%3\n\t" ++ "or\t%8,%0,%z5\n\t" ++ "and\t%8,%8,%z2\n\t" ++ "or%i8\t%7,%7,%8\n\t" ++ "sc.\t%7,%1\n\t" ++ "beq\t$zero,%7,1b"; ++} ++ ++ [(set (attr "length") (const_int 32))]) ++ ++(define_insn "atomic_cas_value_nand_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ;; old val ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ;; new val ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_COMPARE_AND_SWAP_NAND)) ++ (clobber (match_scratch:GPR 7 "=&r")) ++ (clobber (match_scratch:GPR 8 "=&r"))] ++ "" ++{ ++ return "%G6\n\t" ++ "1:\n\t" ++ "ll.\t%0,%1\n\t" ++ "and\t%7,%0,%3\n\t" ++ "and\t%8,%0,%z5\n\t" ++ "xor\t%8,%8,%z2\n\t" ++ "or%i8\t%7,%7,%8\n\t" ++ "sc.\t%7,%1\n\t" ++ "beq\t$zero,%7,1b"; ++} ++ [(set (attr "length") (const_int 32))]) ++ ++(define_expand "atomic_exchange" ++ [(set (match_operand:SHORT 0 "register_operand") ++ (unspec_volatile:SHORT ++ [(match_operand:SHORT 1 "memory_operand") ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_EXCHANGE)) ++ (set (match_dup 1) ++ (match_operand:SHORT 2 "register_operand"))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ operands[2], ++ operands[3]); ++ DONE; ++}) ++ ++ ++(define_expand "atomic_fetch_add" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(plus:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_add_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ 
operands[2], ++ operands[3]); ++ DONE; ++}) ++ ++(define_expand "atomic_fetch_sub" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(minus:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_sub_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ operands[2], ++ operands[3]); ++ DONE; ++}) ++ ++(define_expand "atomic_fetch_and" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(and:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_and_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ operands[2], ++ operands[3]); ++ DONE; ++}) ++ ++(define_expand "atomic_fetch_xor" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(xor:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_xor_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ operands[2], ++ operands[3]); ++ DONE; ++}) ++ ++(define_expand "atomic_fetch_or" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(ior:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_or_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ operands[2], ++ operands[3]); ++ DONE; ++}) ++ ++(define_expand "atomic_fetch_nand" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(not:SHORT (and:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++{ ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_nand_7_si; ++ loongarch_expand_atomic_qihi (generator, ++ operands[0], ++ operands[1], ++ operands[1], ++ operands[2], ++ operands[3]); ++ DONE; ++}) +diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux +new file mode 100644 +index 000000000..479f4293e +--- /dev/null ++++ b/gcc/config/loongarch/t-linux +@@ -0,0 +1,23 @@ ++# Copyright (C) 2003-2018 Free Software Foundation, Inc. ++# ++# This file is part of GCC. 
++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++MULTILIB_OSDIRNAMES := ../lib$(call if_multiarch,:loongarch64-linux-gnu) ++MULTIARCH_DIRNAME := $(call if_multiarch,loongarch64-linux-gnu) ++ ++# haven't supported lp32 yet ++MULTILIB_EXCEPTIONS = mabi=lp32 +diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch +new file mode 100644 +index 000000000..5689da44a +--- /dev/null ++++ b/gcc/config/loongarch/t-loongarch +@@ -0,0 +1,45 @@ ++# Copyright (C) 2002-2018 Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++$(srcdir)/config/loongarch/loongarch-tables.opt: $(srcdir)/config/loongarch/genopt.sh \ ++ $(srcdir)/config/loongarch/loongarch-cpus.def ++ $(SHELL) $(srcdir)/config/loongarch/genopt.sh $(srcdir)/config/loongarch > \ ++ $(srcdir)/config/loongarch/loongarch-tables.opt ++ ++frame-header-opt.o: $(srcdir)/config/loongarch/frame-header-opt.c ++ $(COMPILE) $< ++ $(POSTCOMPILE) ++ ++loongarch-c.o: $(srcdir)/config/loongarch/loongarch-c.c $(CONFIG_H) $(SYSTEM_H) \ ++ coretypes.h $(TM_H) $(TREE_H) output.h $(C_COMMON_H) $(TARGET_H) ++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ ++ $(srcdir)/config/loongarch/loongarch-c.c ++ ++loongarch-builtins.o: $(srcdir)/config/loongarch/loongarch-builtins.c $(CONFIG_H) \ ++ $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \ ++ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/loongarch/loongarch-ftypes.def \ ++ $(srcdir)/config/loongarch/loongarch-modes.def ++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ ++ $(srcdir)/config/loongarch/loongarch-builtins.c ++loongarch-d.o: $(srcdir)/config/loongarch/loongarch-d.c ++ $(COMPILE) $< ++ $(POSTCOMPILE) ++ ++comma=, ++MULTILIB_OPTIONS = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG)))) ++MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG)) +diff --git a/gcc/config/loongarch/x-native b/gcc/config/loongarch/x-native +new file mode 100644 +index 000000000..827d21f1a +--- /dev/null ++++ b/gcc/config/loongarch/x-native +@@ -0,0 +1,3 @@ ++driver-native.o : $(srcdir)/config/loongarch/driver-native.c \ ++ $(CONFIG_H) $(SYSTEM_H) ++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< +diff --git a/gcc/configure.ac b/gcc/configure.ac +index a6eb3526b..d9677f0c5 100644 +--- 
a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -845,6 +845,9 @@ AC_ARG_ENABLE(fixed-point, + mips*-*-*) + enable_fixed_point=yes + ;; ++ loongarch*-*-*) ++ enable_fixed_point=yes ++ ;; + *) + AC_MSG_WARN([fixed-point is not supported for this target, ignored]) + enable_fixed_point=no +@@ -3323,6 +3326,17 @@ x: + tls_first_minor=16 + tls_as_opt='-32 --fatal-warnings' + ;; ++ loongarch*-*-*) ++ conftest_s=' ++ .section .tdata,"awT",@progbits ++x: .word 2 ++ .text ++ la.tls.gd $a0,x ++ bl __tls_get_addr' ++ tls_first_major=0 ++ tls_first_minor=0 ++ tls_as_opt='--fatal-warnings' ++ ;; + m68k-*-*) + conftest_s=' + .section .tdata,"awT",@progbits +@@ -4859,6 +4873,17 @@ pointers into PC-relative form.]) + [Requesting --with-nan= requires assembler support for -mnan=]) + fi + ;; ++ loongarch*-*-*) ++ gcc_GAS_CHECK_FEATURE([.dtprelword support], ++ gcc_cv_as_loongarch_dtprelword, [2,18,0],, ++ [.section .tdata,"awT",@progbits ++x: ++ .word 2 ++ .text ++ .dtprelword x+0x8000],, ++ [AC_DEFINE(HAVE_AS_DTPRELWORD, 1, ++ [Define if your assembler supports .dtprelword.])]) ++ ;; + s390*-*-*) + gcc_GAS_CHECK_FEATURE([.gnu_attribute support], + gcc_cv_as_s390_gnu_attribute, [2,18,0],, +@@ -4892,11 +4917,11 @@ pointers into PC-relative form.]) + ;; + esac + +-# Mips and HP-UX need the GNU assembler. ++# Mips, LoongArch and HP-UX need the GNU assembler. + # Linux on IA64 might be able to use the Intel assembler. + + case "$target" in +- mips*-*-* | *-*-hpux* ) ++ mips*-*-* | loongarch*-*-* | *-*-hpux* ) + if test x$gas_flag = xyes \ + || test x"$host" != x"$build" \ + || test ! -x "$gcc_cv_as" \ +@@ -4916,9 +4941,9 @@ esac + # ??? Once 2.11 is released, probably need to add first known working + # version to the per-target configury. + case "$cpu_type" in +- aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | m32c | m68k \ +- | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu \ +- | tilegx | tilepro | visium | xstormy16 | xtensa) ++ aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | loongarch | m32c \ ++ | m68k | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc \ ++ | spu | tilegx | tilepro | visium | xstormy16 | xtensa) + insn="nop" + ;; + ia64 | s390) +diff --git a/gcc/targhooks.c b/gcc/targhooks.c +index fafcc6c51..9a6baaf4b 100644 +--- a/gcc/targhooks.c ++++ b/gcc/targhooks.c +@@ -1806,7 +1806,7 @@ default_print_patchable_function_entry (FILE *file, + + unsigned i; + for (i = 0; i < patch_area_size; ++i) +- fprintf (file, "\t%s\n", nop_templ); ++ output_asm_insn (nop_templ, NULL); + } + + bool +diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C +index 2e0ef685f..424979a60 100644 +--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C ++++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C +@@ -1,6 +1,6 @@ + // PR c++/49673: check that test_data goes into .rodata + // { dg-do compile { target c++11 } } +-// { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } } ++// { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* loongarch*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } } + // { dg-final { scan-assembler "\\.rdata" { target mips*-*-* } } } + // { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! 
{ mips*-*-* riscv*-*-* } } } } } } + +diff --git a/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C b/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C +index 341735879..141182b0d 100644 +--- a/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C ++++ b/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C +@@ -7,7 +7,7 @@ + function. However, some platforms use all bits to encode a + function pointer. Such platforms use the lowest bit of the delta, + that is shifted left by one bit. */ +-#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__ ++#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__ || defined __loongarch__ + #define ADJUST_PTRFN(func, virt) ((void (*)())(func)) + #define ADJUST_DELTA(delta, virt) (((delta) << 1) + !!(virt)) + #else +diff --git a/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C b/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C +index 9f4bbe43f..8f8f7017a 100644 +--- a/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C ++++ b/gcc/testsuite/g++.old-deja/g++.pt/ptrmem6.C +@@ -25,7 +25,7 @@ int main() { + h<&B::j>(); // { dg-error "" } + g<(void (A::*)()) &A::f>(); // { dg-error "" "" { xfail c++11 } } + h<(int A::*) &A::i>(); // { dg-error "" "" { xfail c++11 } } +- g<(void (A::*)()) &B::f>(); // { dg-error "" "" { xfail { c++11 && { aarch64*-*-* arm*-*-* mips*-*-* } } } } ++ g<(void (A::*)()) &B::f>(); // { dg-error "" "" { xfail { c++11 && { aarch64*-*-* arm*-*-* mips*-*-* loongarch*-*-* } } } } + h<(int A::*) &B::j>(); // { dg-error "" } + g<(void (A::*)()) 0>(); // { dg-error "" "" { target { ! c++11 } } } + h<(int A::*) 0>(); // { dg-error "" "" { target { ! c++11 } } } +diff --git a/gcc/testsuite/gcc.dg/20020312-2.c b/gcc/testsuite/gcc.dg/20020312-2.c +index f5929e0b0..9bbbdf617 100644 +--- a/gcc/testsuite/gcc.dg/20020312-2.c ++++ b/gcc/testsuite/gcc.dg/20020312-2.c +@@ -35,6 +35,8 @@ extern void abort (void); + /* PIC register is r1, but is used even without -fpic. */ + #elif defined(__lm32__) + /* No pic register. */ ++#elif defined(__loongarch__) ++/* No pic register. */ + #elif defined(__M32R__) + /* No pic register. */ + #elif defined(__m68k__) +diff --git a/gcc/testsuite/gcc.dg/loop-8.c b/gcc/testsuite/gcc.dg/loop-8.c +index 842c0e773..95ec8d8d8 100644 +--- a/gcc/testsuite/gcc.dg/loop-8.c ++++ b/gcc/testsuite/gcc.dg/loop-8.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O1 -fdump-rtl-loop2_invariant" } */ +-/* { dg-skip-if "unexpected IV" { "hppa*-*-* mips*-*-* visium-*-* powerpc*-*-* riscv*-*-*" } } */ ++/* { dg-skip-if "unexpected IV" { "hppa*-*-* mips*-*-* loongarch*-*-* visium-*-* powerpc*-*-* riscv*-*-*" } } */ + + void + f (int *a, int *b) +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c +index eda711822..00f8fcb4f 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-3.c +@@ -5,7 +5,7 @@ + + When the condition is true, we distribute "(int) (a + b)" as + "(int) a + (int) b", otherwise we keep the original. */ +-/* { dg-do compile { target { { ! mips64 } && { ! spu-*-* } } } } */ ++/* { dg-do compile { target { { ! mips64 } && { ! spu-*-* } && { ! loongarch-*-* } } } } */ + /* { dg-options "-O -fno-tree-forwprop -fno-tree-ccp -fwrapv -fdump-tree-fre1-details" } */ + + /* From PR14844. 
*/ +diff --git a/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c b/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c +new file mode 100644 +index 000000000..fa24ed4dd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c +@@ -0,0 +1,159432 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -mlasx -w" } */ ++/* { dg-timeout 500 } */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define ASSERTEQ_64(line, ref, res) \ ++do{ \ ++ int fail = 0; \ ++ for(size_t i = 0; i < sizeof(res)/sizeof(res[0]); ++i){ \ ++ long *temp_ref = &ref[i], *temp_res = &res[i]; \ ++ if(abs(*temp_ref - *temp_res) > 0){ \ ++ printf(" error: %s at line %ld , expected "#ref"[%ld]:0x%lx, got: 0x%lx\n", \ ++ __FILE__, line, i, *temp_ref, *temp_res); \ ++ fail = 1; \ ++ } \ ++ } \ ++ if(fail == 1) abort(); \ ++}while(0) ++ ++#define ASSERTEQ_32(line, ref, res) \ ++do{ \ ++ int fail = 0; \ ++ for(size_t i = 0; i < sizeof(res)/sizeof(res[0]); ++i){ \ ++ int *temp_ref = &ref[i], *temp_res = &res[i]; \ ++ if(abs(*temp_ref - *temp_res) > 0){ \ ++ printf(" error: %s at line %ld , expected "#ref"[%ld]:0x%x, got: 0x%x\n", \ ++ __FILE__, line, i, *temp_ref, *temp_res); \ ++ fail = 1; \ ++ } \ ++ } \ ++ if(fail == 1) abort(); \ ++}while(0) ++ ++#define ASSERTEQ_int(line, ref, res) \ ++do{ \ ++ if (ref != res){ \ ++ printf(" error: %s at line %ld , expected %d, got %d\n", \ ++ __FILE__, line, ref, res); \ ++ } \ ++}while(0) ++ ++int main() { ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int*)& __m128_op0[3]) = 0x0000c77c; ++ *((int*)& __m128_op0[2]) = 0x000047cd; ++ *((int*)& __m128_op0[1]) = 0x0000c0f1; ++ *((int*)& __m128_op0[0]) = 0x00006549; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_op1[3]) = 0x34ec5670cd4b5ec0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4f111e4b8e0d7291; ++ *((unsigned long*)& __m256i_op1[1]) = 0xeaa81f47dc3bdd09; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0e0d5fde5df99830; ++ *((unsigned long*)& __m256i_op2[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long*)& __m256i_op2[2]) = 0x84bd087966d4ace0; ++ *((unsigned long*)& __m256i_op2[1]) = 0x26aa68b274dc1322; ++ *((unsigned long*)& __m256i_op2[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long*)& __m256i_result[3]) = 0x044819410d87e69a; ++ *((unsigned long*)& __m256i_result[2]) = 0x21d3905ae3e93be0; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x5125883a30da0f20; ++ *((unsigned long*)& __m256i_result[0]) = 0x6d7b2d3ac2777aeb; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0xb9884ab93b0b80a0; ++ *((unsigned long*)& __m128i_result[0]) = 0xf11e970c68000000; ++ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1b71a083b3dec3cd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x373a13323b4cdbc1; ++ *((unsigned long*)& __m128i_result[1]) = 0x0802010808400820; ++ *((unsigned long*)& __m128i_result[0]) = 0x8004080408100802; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000c77c000047cd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000c0f100006549; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xa486083e6536d81d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x58bc43853ea123ed; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000a486083e; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000058bc4385; ++ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d(__m256i_op0,12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x34ec5670cd4b5ec0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4f111e4b8e0d7291; ++ *((unsigned long*)& __m256i_op0[1]) = 0xeaa81f47dc3bdd09; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0e0d5fde5df99830; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x67390c19e4b17547; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbacda0f96d2cec01; ++ *((unsigned long*)& __m256i_op1[1]) = 0xee20ad1adae2cc16; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5a2003c6a406fe53; ++ *((unsigned long*)& __m256i_op2[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long*)& __m256i_op2[2]) = 0x84bd087966d4ace0; ++ *((unsigned long*)& __m256i_op2[1]) = 0x26aa68b274dc1322; ++ *((unsigned long*)& __m256i_op2[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long*)& __m256i_result[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_result[2]) = 0x5464fbfc416b9f71; ++ *((unsigned long*)& __m256i_result[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_result[0]) = 0x0d8264202b8ea3f0; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xa486c90f6537b8d7; ++ *((unsigned long*)& __m128i_op0[0]) = 0x58bcc2013ea1cc1e; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffa486c90f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000058bcc201; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xf3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5464fbfc416b9f71; ++ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0d8264202b8ea3f0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x84bd087966d4ace0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x26aa68b274dc1322; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffcd42ffffecc0; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000475ffff4c51; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000740dffffad17; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003f4bffff7130; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffa486c90f; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000058bcc201; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffa486c90f; ++ *((unsigned long*)& __m128d_result[0]) = 0x1f52d710bf295626; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x81f7f2599f0509c2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x51136d3c78388916; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffc0fcffffcf83; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000288a00003c1c; ++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256d_op0[3]) = 0x053531f7c6334908; ++ *((unsigned long*)& __m256d_op0[2]) = 0x8e41dcbff87e7900; ++ *((unsigned long*)& __m256d_op0[1]) = 0x12eb8332e3e15093; ++ *((unsigned long*)& __m256d_op0[0]) = 0x9a7491f9e016ccd4; ++ *((unsigned long*)& __m256d_op1[3]) = 0x345947dcd192b5c4; ++ *((unsigned long*)& __m256d_op1[2]) = 0x182100c72280e687; ++ *((unsigned long*)& __m256d_op1[1]) = 0x4a1c80bb8e892e00; ++ *((unsigned long*)& __m256d_op1[0]) = 0x063ecfbd58abc4b7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x34598d0fd19314cb; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1820939b2280fa86; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4a1c269b8e892a3a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x063f2bb758abc664; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffc0fcffffcf83; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000288a00003c1c; ++ *((unsigned long*)& __m256i_result[3]) = 0x3459730f2f6d1435; ++ *((unsigned long*)& __m256i_result[2]) = 0x19212d61237f2b03; ++ *((unsigned long*)& __m256i_result[1]) = 0x4a1c266572772a3a; ++ *((unsigned long*)& __m256i_result[0]) = 0x063f032d58557648; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3133c6409eecf8b0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xddf50db3c617a115; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa432ea5a0913dc8e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x29d403af367b4545; ++ *((unsigned long*)& __m256i_op1[3]) = 0x38a966b31be83ee9; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5f6108dc25b8e028; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long*)& __m256i_op1[0]) = 0x683b8b67e20c8ee5; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x81f7f2599f0509c2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x51136d3c78388916; ++ *((unsigned long*)& __m256i_op1[3]) = 0x044819410d87e69a; ++ *((unsigned long*)& __m256i_op1[2]) = 0x21d3905ae3e93be0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x5125883a30da0f20; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6d7b2d3ac2777aeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x000019410000e69a; ++ *((unsigned long*)& __m256i_result[2]) = 0xf259905a09c23be0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000883a00000f20; ++ *((unsigned long*)& __m256i_result[0]) = 0x6d3c2d3a89167aeb; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xa486c90f; ++ *((int*)& __m128_op0[2]) = 0x157ca12e; ++ *((int*)& 
__m128_op0[1]) = 0x58bcc201; ++ *((int*)& __m128_op0[0]) = 0x2e635d65; ++ *((int*)& __m128_op1[3]) = 0x6d564875; ++ *((int*)& __m128_op1[2]) = 0xf8760005; ++ *((int*)& __m128_op1[1]) = 0x8dc5a4d1; ++ *((int*)& __m128_op1[0]) = 0x79ffa22f; ++ *((int*)& __m128_op2[3]) = 0xffffffff; ++ *((int*)& __m128_op2[2]) = 0xd2436487; ++ *((int*)& __m128_op2[1]) = 0x0fa96b88; ++ *((int*)& __m128_op2[0]) = 0x5f94ab13; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xd24271c4; ++ *((int*)& __m128_result[1]) = 0x2711bad1; ++ *((int*)& __m128_result[0]) = 0xe8e309ed; ++ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf259905a09c23be0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3a89167aeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000090100008492; ++ *((unsigned long*)& __m256i_result[2]) = 0xf000104808420300; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000e20; ++ *((unsigned long*)& __m256i_result[0]) = 0x04082d108006284b; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x04481940fbb7e6bf; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf2781966e6991966; ++ *((unsigned long*)& __m256i_op0[1]) = 0x51258839aeda77c6; ++ *((unsigned long*)& __m256i_op0[0]) = 0xcf25f0e00f1ff0e0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0501030100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001030100000301; ++ *((unsigned long*)& __m256i_result[1]) = 0x0102000200000100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0002000004030000; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffd24271c4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2711bad1e8e309ed; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0020002000200020; ++ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x38a966b31be83ee9; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5f6108dc25b8e028; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long*)& __m256i_op0[0]) = 0x683b8b67e20c8ee5; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffcd42ffffecc0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000475ffff4c51; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000740dffffad17; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f4bffff7130; ++ *((unsigned long*)& __m256i_result[3]) = 0x38a966b31be83ee9; ++ *((unsigned long*)& __m256i_result[2]) = 0x5f6108dc25b80001; ++ *((unsigned long*)& __m256i_result[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long*)& __m256i_result[0]) = 0x683b8b67e20c0001; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000001b3c4c0a5c; ++ *((unsigned long*)& __m256i_result[3]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long*)& __m256i_result[2]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long*)& __m256i_result[1]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long*)& __m256i_result[0]) = 0x3c4c0a5c3c4c0a5c; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffa486c90f; ++ *((unsigned long*)& __m128i_op2[0]) = 0x1f52d710bf295626; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0501030102141923; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffd5020738b43ddb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x010200023b8e4174; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff4ff4e11410b40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01fa022a01a401e5; ++ *((unsigned long*)& __m256i_op1[2]) = 0x030d03aa0079029b; ++ *((unsigned long*)& __m256i_op1[1]) = 0x024c01f901950261; ++ *((unsigned long*)& __m256i_op1[0]) = 0x008102c2008a029f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101070102041903; ++ *((unsigned long*)& __m256i_result[2]) = 0xdfd506073ab435db; ++ *((unsigned long*)& __m256i_result[1]) = 0x110202023bae4176; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff6ff4a15418b40; ++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0501030102141923; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffd5020738b43ddb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x010200023b8e4174; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff4ff4e11410b40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long*)& __m256i_op1[2]) = 
0xf259905a09c23be0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3a89167aeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000501e99b; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000109973de7; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001020f22; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000001890b7a39; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000501e99b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000109973de7; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001020f22; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000001890b7a39; ++ *((unsigned long*)& __m256i_result[3]) = 0x1b974ebaf6d64d4e; ++ *((unsigned long*)& __m256i_result[2]) = 0x62e0429c1b48fed1; ++ *((unsigned long*)& __m256i_result[1]) = 0x18b985adf63f548c; ++ *((unsigned long*)& __m256i_result[0]) = 0x032c796ecbdecc3b; ++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x8a228acac14e440a; ++ *((unsigned long*)& __m128d_op1[0]) = 0xc77c47cdc0f16549; ++ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffd24271c4; ++ *((unsigned long*)& __m128d_op2[0]) = 0x2711bad1e8e309ed; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffd24271c4; ++ *((unsigned long*)& __m128d_result[0]) = 0x2711bad1e8e309ed; ++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x38a966b31be83ee9; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5f6108dc25b80001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long*)& __m256i_op0[0]) = 0x683b8b67e20c0001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000501e99b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000109973de7; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001020f22; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000001890b7a39; ++ *((unsigned long*)& __m256i_result[3]) = 0x38a966b301f41ffd; ++ *((unsigned long*)& __m256i_result[2]) = 0x5f6108ee13ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf41a56e8d10201f6; ++ *((unsigned long*)& __m256i_result[0]) = 0x683b8b34f1020001; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x38a966b301f41ffd; ++ *((unsigned long*)& __m256d_op0[2]) = 0x5f6108ee13ff0000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xf41a56e8d10201f6; ++ *((unsigned long*)& __m256d_op0[0]) = 0x683b8b34f1020001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x35); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf259905a0c126604; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000f647000007d6; ++ *((unsigned long*)& __m256i_result[2]) = 0x031b358c021ee663; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000faaf0000f9f8; ++ *((unsigned long*)& __m256i_result[0]) = 0x02b4fdadfa9704df; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long*)& __m256d_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long*)& __m256d_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long*)& __m256d_op1[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256d_op1[2]) = 0xc5c085372cfabfba; ++ *((unsigned long*)& __m256d_op1[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000019410000e69a; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf259905a0c126604; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000883a00000f20; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long*)& __m256i_result[3]) = 0x000019410000e6aa; ++ *((unsigned long*)& __m256i_result[2]) = 0xf259905a0c126614; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000883a00000f30; ++ *((unsigned long*)& __m256i_result[0]) = 0x6d3c2d3aa1c82957; ++ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x0001ffaa; ++ *((int*)& __m256_op1[6]) = 0x0000040e; ++ *((int*)& __m256_op1[5]) = 0x00007168; ++ *((int*)& __m256_op1[4]) = 0x00007bb6; ++ *((int*)& __m256_op1[3]) = 0x0001ffe8; ++ *((int*)& __m256_op1[2]) = 0x0001fe9c; ++ *((int*)& __m256_op1[1]) = 0x00002282; ++ *((int*)& __m256_op1[0]) = 0x00001680; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff60090958; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0fa96b88d9944d42; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001802041b0013; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0d41c9a7bdd239a7; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0b025d0ef8fdf987; ++ *((unsigned long*)& __m256i_op1[1]) = 0x002944f92da5a708; ++ *((unsigned long*)& __m256i_op1[0]) = 0x038cf4ea999922ef; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0xff000000ffffff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffff00ff; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long*)& __m256i_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000033ff01020e23; 
++ *((unsigned long*)& __m256i_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff000000ffffff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffaff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffd7200fffff74f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000702f; ++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xc0c00000c0c00000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xc0c00c01c2cd0009; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0003ff540000081c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0003ffd00003fd38; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001ffaa0000040e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000716800007bb6; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001ffe80001fe9c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000228200001680; ++ *((unsigned long*)& __m256i_op2[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_op2[2]) = 0xc5c085372cfabfba; ++ *((unsigned long*)& __m256i_op2[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long*)& __m256i_result[3]) = 0x002e4db200000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000315ac0000d658; ++ *((unsigned long*)& __m256i_result[1]) = 0x00735278007cf94c; ++ *((unsigned long*)& __m256i_result[0]) = 0x0003ed8800031b38; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0001ffff0001ffff; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vfclass_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long*)& __m256i_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long*)& __m256i_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001ffaa0000040e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000716800007bb6; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001ffe80001fe9c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000228200001680; ++ *((unsigned long*)& __m256i_result[3]) = 0x000100ab000500a0; ++ *((unsigned long*)& __m256i_result[2]) = 0x000200b800080124; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001011b000200aa; ++ *((unsigned long*)& __m256i_result[0]) = 0x00150118008f0091; ++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000019410000e69a; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf259905a0c126604; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000883a00000f20; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long*)& __m256i_op1[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc5c085372cfabfba; ++ *((unsigned long*)& __m256i_op1[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000b6b60001979a; ++ *((unsigned long*)& __m256i_result[2]) = 0x00011591000125be; ++ *((unsigned long*)& __m256i_result[1]) = 0x000093950000a915; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001201600004783; ++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((int*)& __m256_result[7]) = 0x4e5cba76; ++ *((int*)& __m256_result[6]) = 0xcdbaaa78; ++ *((int*)& __m256_result[5]) = 0xce68fdeb; ++ *((int*)& __m256_result[4]) = 0x4e33eaff; ++ *((int*)& __m256_result[3]) = 0x4e45cc2d; ++ *((int*)& __m256_result[2]) = 0xcda41b30; ++ *((int*)& __m256_result[1]) = 0x4ccb1e5c; ++ *((int*)& __m256_result[0]) = 0x4d6b21e4; ++ __m256_out = __lasx_xvffint_s_w(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00001802041b0013; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc0c00000c0c00000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0c00c01c2cd0009; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmuh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0e2d5626ff75cdbc; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5db4b156e2002a78; ++ *((unsigned long*)& __m256i_op0[1]) = 0xeeffbeb03ba3e6b0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0c16e25eb28d27ea; ++ *((unsigned long*)& __m256d_result[3]) = 0x41ac5aac4c000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xc161464880000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xc1b1004150000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x41cdd1f358000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001802041b0013; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000007f7f02; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long*)& __m256d_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long*)& __m256d_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long*)& __m256d_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4e5cba76cdbaaa78; ++ *((unsigned long*)& __m256i_op0[2]) = 0xce68fdeb4e33eaff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4e45cc2dcda41b30; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4ccb1e5c4d6b21e4; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long*)& __m256i_result[0]) = 0xca355ba46a95e31c; ++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x002e4db200000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000315ac0000d658; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00735278007cf94c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0003ed8800031b38; ++ *((unsigned long*)& __m256i_result[3]) = 0xffd1b24e00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long*)& __m256i_result[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffc1278fffce4c8; ++ __m256i_out = __lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffd1b24e00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0e2d5626ff75cdbc; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5db4b156e2002a78; ++ *((unsigned long*)& __m256i_op1[1]) = 0xeeffbeb03ba3e6b0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0c16e25eb28d27ea; ++ *((unsigned long*)& __m256i_result[3]) = 0xf96d674800000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long*)& __m256i_result[1]) = 0x14187a7822b653c0; ++ *((unsigned long*)& __m256i_result[0]) = 0xfbe0b866962b96d0; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242071db; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa578; ++ *((unsigned long*)& __m128i_result[1]) = 0xe0dee7779210b8ed; ++ *((unsigned long*)& __m128i_result[0]) = 0xf463dbabebb5d2bc; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xca355ba46a95e31c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000100ab000500a0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000200b800080124; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001011b000200aa; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00150118008f0091; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f057f0b7f5b007f; ++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff000000ffffff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffffff00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01fa022a01a401e5; ++ *((unsigned long*)& __m256i_op1[2]) = 0x030d03aa0079029b; ++ *((unsigned long*)& __m256i_op1[1]) = 0x024c01f901950261; ++ *((unsigned long*)& __m256i_op1[0]) = 0x008102c2008a029f; ++ *((unsigned long*)& __m256i_op2[3]) = 
0x002e4db200000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x000315ac0000d658; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00735278007cf94c; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0003ed8800031b38; ++ *((unsigned long*)& __m256i_result[3]) = 0x01a72334ffff00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0xff4f6838ff937648; ++ *((unsigned long*)& __m256i_result[1]) = 0x00a2afb7fff00ecb; ++ *((unsigned long*)& __m256i_result[0]) = 0xffce110f004658c7; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001802041b0014; ++ __m128i_out = __lsx_vsub_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffd1b24e00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0802010000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0806030008080001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0801010108010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0806000008060302; ++ __m256i_out = __lasx_xvclo_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xca355ba46a95e31c; ++ *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long*)& __m256i_result[1]) = 0x61d849f0c0794ced; ++ *((unsigned long*)& __m256i_result[0]) = 0xe75278c187b20039; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf96d674800000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x14187a7822b653c0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffd1b24e00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long*)& __m256i_result[3]) = 0xebfd15f000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x01700498ff8f1600; ++ *((unsigned long*)& __m256i_result[1]) = 0xf520c7c024221300; ++ *((unsigned long*)& __m256i_result[0]) = 0x00802fd0ff540a80; ++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xebfd15f000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01700498ff8f1600; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf520c7c024221300; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00802fd0ff540a80; ++ *((unsigned long*)& __m256i_op1[3]) = 0xebfd15f000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01700498ff8f1600; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0xf520c7c024221300; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00802fd0ff540a80; ++ *((unsigned long*)& __m256i_op2[3]) = 0xf96d674800000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long*)& __m256i_op2[1]) = 0x14187a7822b653c0; ++ *((unsigned long*)& __m256i_op2[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long*)& __m256i_result[3]) = 0xebfd15f000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x015c6a7facc39600; ++ *((unsigned long*)& __m256i_result[1]) = 0xfa070a51cbd95300; ++ *((unsigned long*)& __m256i_result[0]) = 0x00c7463075439280; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x1b976395; ++ *((int*)& __m256_op0[6]) = 0x2fc4c101; ++ *((int*)& __m256_op0[5]) = 0xe37affb4; ++ *((int*)& __m256_op0[4]) = 0x2fc05f69; ++ *((int*)& __m256_op0[3]) = 0x18b988e6; ++ *((int*)& __m256_op0[2]) = 0x4facb558; ++ *((int*)& __m256_op0[1]) = 0xe5fb66c8; ++ *((int*)& __m256_op0[0]) = 0x1da8e5bb; ++ *((int*)& __m256_op1[7]) = 0x01a72334; ++ *((int*)& __m256_op1[6]) = 0xffff00ff; ++ *((int*)& __m256_op1[5]) = 0xff4f6838; ++ *((int*)& __m256_op1[4]) = 0xff937648; ++ *((int*)& __m256_op1[3]) = 0x00a2afb7; ++ *((int*)& __m256_op1[2]) = 0xfff00ecb; ++ *((int*)& __m256_op1[1]) = 0xffce110f; ++ *((int*)& __m256_op1[0]) = 0x004658c7; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f057f0b7f5b007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7f00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7fff7fff7fff00; ++ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf96d674800000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x14187a7822b653c0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long*)& __m256i_result[3]) = 0xf90c0c0c00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long*)& __m256i_result[0]) = 0xfbe0b80c960c96d0; ++ __m256i_out = __lasx_xvmini_b(__m256i_op0,12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xf90c0c0c00000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfbe0b80c960c96d0; ++ *((unsigned long*)& __m256d_op1[3]) = 
0x1b9763952fc4c101; ++ *((unsigned long*)& __m256d_op1[2]) = 0xe37affb42fc05f69; ++ *((unsigned long*)& __m256d_op1[1]) = 0x18b988e64facb558; ++ *((unsigned long*)& __m256d_op1[0]) = 0xe5fb66c81da8e5bb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f057f0b7f5b007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000007f007f5; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1b9763952fc4c101; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe37affb42fc05f69; ++ *((unsigned long*)& __m256i_op1[1]) = 0x18b988e64facb558; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe5fb66c81da8e5bb; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xe37affb42fc05f69; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x65fb66c81da8e5ba; ++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x61d849f0c0794ced; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe75278c187b20039; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf90c0c0c00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfbe0b80c960c96d0; ++ *((unsigned long*)& __m256i_result[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_result[2]) = 0x146014141414146e; ++ *((unsigned long*)& __m256i_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_result[0]) = 0xf19998668e5f4b84; ++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000007f007f5; ++ *((unsigned long*)& __m256i_op1[3]) = 0x002e4db200000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000315ac0000d658; ++ 
*((unsigned long*)& __m256i_op1[1]) = 0x00735278007cf94c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0003ed8800031b38; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x146014141414146e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf19998668e5f4b84; ++ long_op1 = 0x0000007942652524; ++ *((unsigned long*)& __m256i_result[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000007942652524; ++ *((unsigned long*)& __m256i_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_result[0]) = 0xf19998668e5f4b84; ++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001802041b0014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003004; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256i_op1[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe3aebaf4df958004; ++ *((unsigned long*)& __m256i_result[3]) = 0xc58a0a0a07070706; ++ *((unsigned long*)& __m256i_result[2]) = 0x006b60e4180b0023; ++ *((unsigned long*)& __m256i_result[1]) = 0x1b39153f334b966a; ++ *((unsigned long*)& __m256i_result[0]) = 0xf1d75d79efcac002; ++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f02; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00003f803f800100; ++ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xe37affb42fc05f69; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x65fb66c81da8e5ba; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256d_op2[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256d_op2[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256d_op2[0]) = 0xe3aebaf4df958004; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256d_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256d_result[0]) = 0xe3aebaf4df958004; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0014; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000c01020d8009; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x006be0e4180b0024; ++ *((unsigned long*)& __m256i_result[1]) = 0x1b39153f334b166b; ++ *((unsigned long*)& __m256i_result[0]) = 0xf1d7dd7aefcac002; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f800000976801fe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x837c1ae57f8012ed; ++ *((unsigned long*)& __m128i_result[1]) = 0x976801fd6897fe02; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8012ec807fed13; ++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f801fa06451ef11; ++ *((unsigned long*)& __m128i_op0[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffb64c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000003900; ++ *((unsigned long*)& __m128i_result[0]) = 0x68bcf93435ed25ed; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003900; ++ *((unsigned long*)& __m128i_op0[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00003f803f800100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000c01020d8009; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000003004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000c01020d5005; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f801fa06451ef11; ++ *((unsigned long*)& __m128i_op1[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x01fa022a01a401e5; ++ *((unsigned long*)& __m256i_op0[2]) = 0x030d03aa0079029b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x024c01f901950261; ++ *((unsigned long*)& __m256i_op0[0]) = 0x008102c2008a029f; ++ *((unsigned long*)& __m256i_result[3]) = 0x54000000ca000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x5400000036000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf2000000c2000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x840000003e000000; ++ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000400000004000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000400000007004; ++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xc58a0a0a; ++ *((int*)& __m256_op0[6]) = 0x07070706; ++ *((int*)& __m256_op0[5]) = 0x006b60e4; ++ *((int*)& __m256_op0[4]) = 0x180b0023; ++ *((int*)& __m256_op0[3]) = 0x1b39153f; ++ *((int*)& __m256_op0[2]) = 0x334b966a; ++ *((int*)& __m256_op0[1]) = 0xf1d75d79; ++ *((int*)& __m256_op0[0]) = 0xefcac002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00003004; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0xff800000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xc3080000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ 
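[Editorial note: every generated case in this hunk follows one pattern — the operand vectors are initialized lane by lane through pointer casts, a single LSX/LASX builtin is invoked, and the result is compared against a precomputed image with ASSERTEQ_64 (64-bit lanes) or ASSERTEQ_32 (32-bit lanes). Below is a minimal standalone sketch of that pattern, modeled on the __lsx_vsub_d case in this hunk. It assumes a loongarch64 toolchain with LSX enabled (-mlsx) and <lsxintrin.h> available; the ASSERTEQ_64 definition here is a plausible reconstruction for illustration, not the actual macro from insn_correctness_check.c, which is not shown in this hunk.]

    /* Sketch of the generated-test pattern; assumes loongarch64 + -mlsx.
       ASSERTEQ_64 below is a hypothetical reconstruction of the harness macro. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <lsxintrin.h>

    /* Compare a vector against its expected image, one 64-bit lane at a time. */
    #define ASSERTEQ_64(line, ref, out)                                       \
      do {                                                                    \
        for (int i_ = 0; i_ < 2; i_++) {                                      \
          long r_ = ((long *)&(ref))[i_];                                     \
          long o_ = ((long *)&(out))[i_];                                     \
          if (r_ != o_) {                                                     \
            printf ("line %d, lane %d: got 0x%016lx, want 0x%016lx\n",        \
                    (int)(line), i_, o_, r_);                                 \
            abort ();                                                         \
          }                                                                   \
        }                                                                     \
      } while (0)

    int
    main (void)
    {
      __m128i op0, op1, res, out;

      /* Operands are written through pointer casts, exactly as in the
         generated cases: one 64-bit store per lane, high lane first.  */
      ((unsigned long *)&op0)[1] = 0x0000000000000000;
      ((unsigned long *)&op0)[0] = 0x00001802041b0013;
      ((unsigned long *)&op1)[1] = 0xffffffffffffffff;
      ((unsigned long *)&op1)[0] = 0xffffffffffffffff;

      /* Expected image: per-lane subtraction, and 0xffff...ffff is -1,
         so each lane gains exactly 1.  */
      ((unsigned long *)&res)[1] = 0x0000000000000001;
      ((unsigned long *)&res)[0] = 0x00001802041b0014;

      out = __lsx_vsub_d (op0, op1);   /* 64-bit lane-wise subtract */
      ASSERTEQ_64 (__LINE__, res, out);
      return 0;
    }

[The test file apparently repeats this block for each intrinsic with randomized operand constants, which is why only the lane stores, the builtin name, and the expected image vary from case to case.]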
*((unsigned long*)& __m256i_op0[3]) = 0xc58a0a0a07070706; ++ *((unsigned long*)& __m256i_op0[2]) = 0x006b60e4180b0023; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1b39153f334b966a; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf1d75d79efcac002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000400000004000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000400000007004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x00a300a300a300a3; ++ *((unsigned long*)& __m128i_result[0]) = 0x00a300a300a300a3; ++ __m128i_out = __lsx_vldi(1187); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100020001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fffffffffffe; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x80000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00a300a3; ++ *((int*)& __m128_op1[2]) = 0x00a300a3; ++ *((int*)& __m128_op1[1]) = 0x00a300a3; ++ *((int*)& __m128_op1[0]) = 0x00a300a3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100020001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256i_op1[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe3aebaf4df958004; ++ *((unsigned long*)& __m256i_result[3]) = 0xc5890a0a07070707; ++ *((unsigned long*)& __m256i_result[2]) = 0x006be0e4180b8024; ++ *((unsigned long*)& __m256i_result[1]) = 0x1b399540334c966c; ++ *((unsigned long*)& __m256i_result[0]) = 0x71d7dd7aefcac001; ++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xc5890a0a07070707; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x006be0e4180b8024; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1b399540334c966c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x71d7dd7aefcac001; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x36722a7e66972cd6; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff00000000; ++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x2f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00001802; ++ *((int*)& __m128_op0[0]) = 0x041b0013; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00001802; ++ *((int*)& __m128_op0[0]) = 0x041b0013; ++ *((int*)& __m128_op1[3]) = 0xff800000; ++ *((int*)& __m128_op1[2]) = 0xff800000; ++ *((int*)& __m128_op1[1]) = 0xff800000; ++ *((int*)& __m128_op1[0]) = 0xc3080000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x8b1414140e0e0e0e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x36722a7e66972cd6; ++ *((unsigned long*)& __m256i_op1[3]) = 0xc58a0a0a07070706; ++ *((unsigned long*)& __m256i_op1[2]) = 0x006b60e4180b0023; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1b39153f334b966a; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf1d75d79efcac002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x006b60e40e0e0e0e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x36722a7e66972cd6; ++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0edf8d7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8bc70f; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe0edf8d7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8bc70f; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe06df8d7; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffbe8b470f; ++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x8b141414; ++ *((int*)& __m256_op0[4]) = 0x0e0e0e0e; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x36722a7e; ++ *((int*)& __m256_op0[0]) = 0x66972cd6; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff800000c3080000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff81ffffc3080000; ++ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0xbf800000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0xcf000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000045eef14fe8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffc00; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000020000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000020000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x23); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000020000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000003ffffffff; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xbf80000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x1040400000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0961000100000001; ++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x10404000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x09610001; ++ *((int*)& __m128_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff0001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003a099512; ++ *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313763f5; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe032c738adcc6bbf; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0001000100020001; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000fffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0001; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003a099512; ++ *((unsigned long*)& __m256i_result[1]) = 0x280ac9da313763f5; ++ *((unsigned long*)& __m256i_result[0]) = 0xe032c738adcc6bbf; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100020001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000095120000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xc9da000063f50000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0xffffffffffc81aca; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003a0a9512; ++ *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313863f4; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe032c739adcc6bbd; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100020001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffdffffffc81aca; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff3a0b9512; ++ *((unsigned long*)& __m256i_result[1]) = 0x280bc9db313a63f5; ++ *((unsigned long*)& __m256i_result[0]) = 0xe032c738adcb6bbb; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc81aca; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003a0a9512; ++ *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313863f4; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe032c739adcc6bbd; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x006b58e20e1e0e0f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3672227c66a72cd7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000003594; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000082fb80e; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000c7e8; ++ *((unsigned long*)& __m256i_result[0]) = 0x1ad6119c12def7bb; ++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x074132a240000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000095120000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xc9da000063f50000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xcf00000000000000; ++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffff000000010000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000095120000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xc9da000063f50000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x4001000100020000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000010000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000095120000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc9da000063f50000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffdffffffc81aca; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff3a0b9512; ++ *((unsigned long*)& __m256i_op1[1]) = 0x280bc9db313a63f5; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe032c738adcb6bbb; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800001010400; ++ *((unsigned long*)& __m256i_result[2]) = 0x000180009d120004; ++ *((unsigned long*)& __m256i_result[1]) = 0xc9da080067f50020; ++ *((unsigned long*)& __m256i_result[0]) = 0xc73c7fff6bbfffff; ++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df8d7; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8b470f; ++ *((unsigned long*)& __m256i_result[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe06df0d7; ++ *((unsigned long*)& __m256i_result[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffbe8b470f; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x003f00000000003f; ++ *((unsigned long*)& __m128i_result[0]) = 0x003f000000000000; ++ __m128i_out = __lsx_vsat_hu(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff800000c3080002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x639c3fffb5dffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xb8c7800094400001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0063009c003f00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00b500df00ff00fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x00b800c700800000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0094004000000001; ++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000001000000010; ++ __m256i_out = __lasx_xvldi(-4080); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x639c3fffb5dffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xb8c7800094400001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0008000e000c000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0009000100040001; ++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long*)& __m256d_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256d_result[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffbe8b470f; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000800080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc9d8080067f50020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc70000020000c000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf000f00000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000f000f0000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf0f008000ff5000f; ++ *((unsigned long*)& __m256i_result[0]) = 0xf00000020000f000; ++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xc090c40000000000; ++ __m128d_out = __lsx_vflogb_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x92); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000800080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc9d8080067f50020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc70000020000c000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007ffffffff7ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x49d8080067f4f81f; ++ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8001000080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000800080000728; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8001800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x800080008000b8f1; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000ffff8000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff80008000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x800080008000b8f1; ++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00007ffffffff7ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x49d8080067f4f81f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007f00fffff7ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xd8490849f467f867; ++ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xb7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003f00000000003f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00007ffffffff7ff; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x49d8080067f4f81f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7ffff7ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x080008000800f81f; ++ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xa8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff80008000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x074132a240000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000ffff8000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x06f880008000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x800080008000b8f1; ++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xbf800000; ++ *((int*)& __m128_op0[2]) = 0x0000ffff; ++ *((int*)& __m128_op0[1]) = 0xcf000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x003f0000; ++ *((int*)& __m128_op1[2]) = 0x0000003f; ++ *((int*)& __m128_op1[1]) = 0x003f0000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df8d7; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8b470f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffe7fffeffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffd84900000849; ++ *((unsigned long*)& __m256i_op0[0]) = 0x07fffc670800f086; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x311d9b643ec1fe01; ++ *((unsigned long*)& __m256i_op1[0]) = 0x344ade20fe00fd01; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007f00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x311d73ad3ec2064a; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007f00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x311d73ad3ec2064a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000001fc000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000c475ceb40000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fb0819280000; ++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df0d7; ++ *((unsigned long*)& __m256i_op0[1]) = 0x988eb37e000fb33d; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffed95be394b1e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000ffff8000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x06f880008000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x800080008000b8f1; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x06f880008000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000010180000101; ++ *((unsigned long*)& __m256i_result[2]) = 0xfa08800080000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x800080008000480f; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fc000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000c475ceb40000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fb0819280000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x074132a240000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000003a0200; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000c9; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x0000ffff; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 
0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x0000ffff; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00ff00ff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0040000000ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0040000000000000; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffe7fffeffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffd84900000849; ++ *((unsigned long*)& __m256i_op0[0]) = 0x07fffc670800f086; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3922d40000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000c85221c0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf7ebfab800000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f20; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000009f0; ++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x8000ffff8000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x06f880008000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000010180000101; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfa08800080000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x800080008000480f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001010000010100; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101000000010100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000000010100; ++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f20; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000009f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000800080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xc9d8080067f50020; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc70000020000c000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000001000100; ++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000800000000000; ++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0040000000ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0040000000000000; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0040000000ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0040000000000000; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvflogb_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0040000000ff00ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0040000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0020000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0020c00000000000; ++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffe651bfff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000001000100; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f20; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000009f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00010101; ++ *((int*)& __m256_op1[6]) = 0x01010101; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00010100; ++ *((int*)& __m256_op1[1]) = 0x00010000; ++ *((int*)& __m256_op1[0]) = 0x01000100; ++ *((int*)& __m256_result[7]) = 0x00010101; ++ *((int*)& __m256_result[6]) = 0x01010101; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00010100; ++ *((int*)& __m256_result[1]) = 0x00010000; ++ *((int*)& __m256_result[0]) = 0x01000100; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00010101; ++ *((int*)& __m256_op0[6]) = 0x01010101; ++ *((int*)& __m256_op0[5]) = 
[... several hundred generated test lines snipped from this hunk. Every block
follows the same shape: the 128-bit (__m128/__m128i/__m128d) or 256-bit
(__m256/__m256i/__m256d) operand and expected-result images are written
word-by-word as hex constants through pointer punning, a single LSX (__lsx_*)
or LASX (__lasx_*) builtin is invoked (among those exercised here:
__lasx_xvfcmp_ceq_s, __lsx_vfmina_s, __lasx_xvfnmadd_d, __lasx_xvabsd_w,
__lasx_xvpickve2gr_w, __lasx_xvmaddwev_q_du_d, __lasx_xvssrlni_h_w,
__lsx_vfcmp_cune_d), and the output is compared bit-for-bit via
ASSERTEQ_32/ASSERTEQ_64(__LINE__, expected, out); scalar extract/insert
variants use int_op1/int_out in the same way. ...]
__m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((int*)& __m128_result[3]) = 0x4b7f00ff; ++ *((int*)& __m128_result[2]) = 0x4b7f00ff; ++ *((int*)& __m128_result[1]) = 0x4b7f00ff; ++ *((int*)& __m128_result[0]) = 0x4b7f00ff; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x40d74f979f99419f; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffffffff; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x38); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xc9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = 
__lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0000ff80; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0000ffff; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x60b53246; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x60b5054d; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffff0002; ++ __m128i_out = __lsx_vadd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x72); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffff0002; ++ *((unsigned long*)& __m128i_op2[1]) = 
0x54beed87bc3f2be1; ++ *((unsigned long*)& __m128i_op2[0]) = 0x8024d8f6a494afcb; ++ *((unsigned long*)& __m128i_result[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long*)& __m128i_result[0]) = 0x0024d8f6a494006a; ++ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x5641127843c0d41e; ++ *((unsigned long*)& __m128i_result[0]) = 0xfedb27095b6bff95; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5641127843c0d41e; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfedb27095b6bff95; ++ *((unsigned long*)& __m128i_op1[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0024d8f6a494006a; ++ *((unsigned long*)& __m128i_result[1]) = 0xff7fffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xff7fffffffffffff; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff7fffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff7fffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffff7ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x64); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00001f41ffffbf00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001f41ffffbf00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00001f41ffffbf00; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000000; ++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x2b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long*)& __m128i_result[1]) = 0xa8beed87bc3f2bd3; ++ *((unsigned long*)& __m128i_result[0]) = 0x0024d8f6a494005c; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001a8beed86; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000010024d8f5; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x54beed87bc3f2be1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8024d8f6a494afcb; ++ *((unsigned long*)& __m128i_result[1]) = 0x54feed87bc3f2be1; ++ *((unsigned long*)& __m128i_result[0]) = 0x8064d8f6a494afcb; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x54feed87; ++ *((int*)& __m128_op0[2]) = 0xbc3f2be1; ++ *((int*)& __m128_op0[1]) = 0x8064d8f6; ++ *((int*)& __m128_op0[0]) = 0xa494afcb; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0xff800000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xff800000; ++ __m128_out = 
__lsx_vfdiv_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x54feed87bc3f2be1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8064d8f6a494afcb; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00001f41ffffbf00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfedb27095b6bff95; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x56411278; ++ *((int*)& __m128_op0[2]) = 0x43c0d41e; ++ *((int*)& __m128_op0[1]) = 0x0124d8f6; ++ *((int*)& __m128_op0[0]) = 0xa494006b; ++ *((int*)& __m128_op1[3]) = 0x7f800000; ++ *((int*)& __m128_op1[2]) = 0xff800000; ++ *((int*)& __m128_op1[1]) = 0xff800000; ++ *((int*)& __m128_op1[0]) = 0xff800000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc2409edab019323f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x460f3b393ef4be3a; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100000100010001; ++ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x56411278; ++ *((int*)& __m128_op0[2]) = 0x43c0d41e; ++ *((int*)& __m128_op0[1]) = 0x0124d8f6; ++ *((int*)& __m128_op0[0]) = 0xa494006b; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& 
__m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xc2409eda; ++ *((int*)& __m128_op1[2]) = 0xb019323f; ++ *((int*)& __m128_op1[1]) = 0x460f3b39; ++ *((int*)& __m128_op1[0]) = 0x3ef4be3a; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x460f3b39; ++ *((int*)& __m128_result[0]) = 0x3ef4be3a; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xc2409edab019323f; ++ *((unsigned long*)& __m128d_op0[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0100000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00001f41ffffbf00; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x010180068080fff9; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000300; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000303; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x7a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000040000fff8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[1]) = 0x010180068080fff9;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x7fff7fff;
++ *((int*)& __m256_op0[4]) = 0x7fff7fff;
++ *((int*)& __m256_op0[3]) = 0x7fff01fd;
++ *((int*)& __m256_op0[2]) = 0x7fff7fff;
++ *((int*)& __m256_op0[1]) = 0x00007fff;
++ *((int*)& __m256_op0[0]) = 0x7fff7fff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfc2f3183ef7ffff7;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_w(__m256i_op0,0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[0]) = 0xc5c5c5c5c5c5c5c5;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_result[2]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_result[1]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_result[0]) = 0x1515151515151515;
++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1515151515151515;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_b(__m256i_op0,7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00007ffe81fdfe03;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ffe800000000000;
++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff3cff3cff3cff3c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00007ffe81fdfe03;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x80007ffe81fdfe03;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fff;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_h(__m256i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x7fff7fff;
++ *((int*)& __m256_op0[4]) = 0x7fff7fff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x7fff7fff;
++ *((int*)& __m256_op0[0]) = 0x7fff7fff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000001b3c4c0a5c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x2a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_b(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x1000000000000000;
++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x4f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000011;
++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010000000000000;
++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x33);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0feff00000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0feff00000000000;
++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000001;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000001;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000001;
++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000001;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000001;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x36);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0feff00000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0feff00000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff;
++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfff1001100100000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfff1001100100000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfcc4004400400000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0040400000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfcc4004400400000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0040400000000000;
++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000001;
++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x36);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslei_h(__m128i_op0,3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vfclass_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff;
++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000010000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000010000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000010000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000010000000;
++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)&
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000a0000000a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000a0000000a; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffeb; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffeb; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 
0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff00ff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff00ff00; ++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xbff0000000000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xdff8000000000000; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256d_op1[3]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xdff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff00ff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff00ff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x01010101fe01fe01; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x01010101fe01fe01; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000040100000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040100000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000040100000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040100000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0080200000802000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0080200000802000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = 
__lsx_vslti_wu(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xbff0000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0080200000802000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0080200000802000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0080200000802000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0080200000802000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x20); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x5d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1e18000000000000; ++ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b(__m128i_op0,13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x1e180000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x1e180000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x1e180000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x1e180000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00802000; ++ *((int*)& __m256_op1[6]) = 0x00802000; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0x00802000; ++ *((int*)& __m256_op1[2]) = 0x00802000; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1e18000000000000; ++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xfe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xbff0800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xbff0800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x2f03988e2052463e; ++ *((unsigned long*)& __m256d_result[2]) = 0x2f03988e1409212e; ++ *((unsigned long*)& __m256d_result[1]) = 0x2f03988e2052463e; ++ *((unsigned long*)& __m256d_result[0]) = 0x2f03988e1409212e; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0200020002000200; ++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000397541c58; ++ *((unsigned long*)& __m256i_result[3]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_result[2]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_result[1]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_result[0]) = 0x97541c5897541c58; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0080200000802000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0080200000802000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00200020ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x1e0000001e000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00200020ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x1e0000001e000000; ++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long*)& __m256i_result[2]) = 0xffe0ffe0ffe0ffe0; 
++ *((unsigned long*)& __m256i_result[1]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long*)& __m256i_result[0]) = 0xffe0ffe0ffe0ffe0; ++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0080200000802000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0080200000802000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00800080ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00800080ffffffff; ++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h(__m128i_op0,6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1e1800001e180000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1e18000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001e18; ++ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x70); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vslti_du(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00800080ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00800080ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff001f; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007fe268; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff001f; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007fe268; ++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1e17ffffd0fc6772; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1e17ffffebf6ded2; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1e17ffffd0fc6772; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1e17ffffebf6ded2; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xe1e800002f03988d; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xe1e800002f03988d; ++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xffff001f; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x007fe268; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xffff001f; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x007fe268; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0xffff001f; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x007fe268; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0xffff001f; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x007fe268; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0xffff001f; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0xffff001f; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7a7cad6eca32ccc1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7a7cad6efe69abd1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7a7cad6eca32ccc1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7a7cad6efe69abd1; ++ *((unsigned long*)& __m256i_result[3]) = 0xff86005300360034; ++ *((unsigned long*)& __m256i_result[2]) = 0xff86005300020055; ++ *((unsigned long*)& __m256i_result[1]) = 0xff86005300360034; ++ *((unsigned long*)& __m256i_result[0]) = 0xff86005300020055; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffc00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffc00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffc00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffc00000000; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x22); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xffffffff; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000010000ffe1; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000101001e18; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000010000ffe1; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000101001e18; ++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff001f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff001f; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000000000ffe0; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000001e18; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff1f; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff1f; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffeff; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffdfe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffdfe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe1e800002f03988d; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe1e800002f03988d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff0f400001781cc4; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff0f400001781cc4; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op1[3]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op1[2]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op1[1]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256d_op1[0]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffffff88; ++ *((unsigned long*)& __m256i_op0[2]) = 0x61e0000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffffff88; ++ *((unsigned long*)& __m256i_op0[0]) = 0x61e0000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01fe02; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01fe02; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfrecip_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0x7ff80000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7fffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff8000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffff7fffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffff8000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000003fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7ff8010000000001; ++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0005252800052528; ++ *((unsigned long*)& __m128i_result[0]) = 0x0005252800052528; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d(__m128i_op0,7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_result[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_result[0]) = 0x52527d7d52527d7d; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010101; ++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x3f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000ffe1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000101001e18; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000ffe1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000101001e18; ++ *((unsigned long*)& __m256i_op1[3]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[2]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[1]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[0]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000010000ffe1; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000101001e18; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000010000ffe1; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000101001e18; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000101001e18; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000101001e18; ++ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op0[2]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op0[1]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op0[0]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffeffffff88; ++ *((unsigned long*)& __m256i_op1[2]) = 0x61e0000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffeffffff88; ++ *((unsigned long*)& __m256i_op1[0]) = 0x61e0000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0010ffc80010ff52; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0010ffc80010ff52; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff1ffca0011ffcb; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffefb; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffefb; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x67eee33567eee435; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x67eee33567eee435; ++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00e0000000e00000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0010ffc80010ff52; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0010ffc80010ff52; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff1ffca0011ffcb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0010bfc80010bf52; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff1bfca0011bfcb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0010bfc80010bf52; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff1bfca0011bfcb; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffff7fffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffff8000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000808081; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000808081; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000808081; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000808081; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op0[2]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op0[1]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op0[0]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[3]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[2]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[1]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_op1[0]) = 0x98111cca98111cca; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000399400003994; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00e0000000e00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000e0000000e0; ++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00e0000000e00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000e0000000e0; 
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x01010101; ++ *((int*)& __m256_op0[5]) = 0x55555501; ++ *((int*)& __m256_op0[4]) = 0xfefefeab; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x01010101; ++ *((int*)& __m256_op0[1]) = 0x55555501; ++ *((int*)& __m256_op0[0]) = 0xfefefeab; ++ *((int*)& __m256_op1[7]) = 0x00000105; ++ *((int*)& __m256_op1[6]) = 0xfffffefb; ++ *((int*)& __m256_op1[5]) = 0xffffff02; ++ *((int*)& __m256_op1[4]) = 0x000000fe; ++ *((int*)& __m256_op1[3]) = 0x00000105; ++ *((int*)& __m256_op1[2]) = 0xfffffefb; ++ *((int*)& __m256_op1[1]) = 0xffffff02; ++ *((int*)& __m256_op1[0]) = 0x000000fe; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fc00; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fc00; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x01010101; ++ *((int*)& __m256_op0[5]) = 0x55555501; ++ *((int*)& __m256_op0[4]) = 0xfefefeab; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x01010101; ++ *((int*)& __m256_op0[1]) = 0x55555501; ++ *((int*)& __m256_op0[0]) = 0xfefefeab; ++ *((int*)& __m256_op1[7]) = 0x0010bfc8; ++ *((int*)& __m256_op1[6]) = 0x0010bf52; ++ *((int*)& __m256_op1[5]) = 0xfff1bfca; ++ *((int*)& __m256_op1[4]) = 0x0011bfcb; ++ *((int*)& __m256_op1[3]) = 0x0010bfc8; ++ *((int*)& __m256_op1[2]) = 0x0010bf52; ++ *((int*)& __m256_op1[1]) = 0xfff1bfca; ++ *((int*)& __m256_op1[0]) = 0x0011bfcb; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x1414141414141415; ++ *((unsigned long*)& __m128i_result[0]) = 0x1414141414141415; ++ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff1f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff1f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0010ffc80010ff52; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0010ffc80010ff52; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff1ffca0011ffcb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff1ffca0011feca; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff1ffca0011feca; ++ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0010bfc80010bf52; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff1bfca0011bfcb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0010bfc80010bf52; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff1bfca0011bfcb; ++ *((unsigned long*)& __m256i_result[3]) = 0xf5f5bfc8f5f5bff5; ++ *((unsigned long*)& __m256i_result[2]) = 0xf5f1bfcaf5f5bfcb; ++ *((unsigned long*)& __m256i_result[1]) = 0xf5f5bfc8f5f5bff5; ++ *((unsigned long*)& __m256i_result[0]) = 0xf5f1bfcaf5f5bfcb; ++ __m256i_out = __lasx_xvmini_b(__m256i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefb; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefb; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; ++ int_op1 = 0x0000000059815d00; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000399400003994; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000fff00000fff; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128d_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000052527d7d; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000052527d7d; ++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000fc00; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000fc00; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff5f5c; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x6c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_op2[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff5f5c; ++ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff5f5c; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000105fffffefb; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffff02000000fe; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000105fffffefb; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffff02000000fe; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff1f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff1f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000105fffffefb; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff02000000fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000105fffffefb; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff02000000fe; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long*)& __m256i_result[2]) = 0xbffffffffffffeff; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long*)& __m256i_result[0]) = 0xbffffffffffffeff; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbffffffffffffeff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbffffffffffffeff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff6fffefffe005b; ++ *((unsigned long*)& __m256i_result[2]) = 0xffbefffefffe005a; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff6fffefffe005b; ++ *((unsigned long*)& __m256i_result[0]) = 0xffbefffefffe005a; ++ __m256i_out = 
__lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000e0000000e0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000c400; ++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00217f19ffde80e6; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00037f94fffc806b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00217f19ffde80e6; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00037f94fffc806b; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000fff00000fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00ff0fff005f0f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00ff0fff005f0f; ++ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbffffffffffffeff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xbffffffffffffeff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x7); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101000000000000; ++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffff5f5c; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffff5f5c; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffff5f5c; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffff5f5c; ++ *((int*)& __m256_op2[7]) = 0x0000000f; ++ *((int*)& __m256_op2[6]) = 0x0000000f; ++ *((int*)& __m256_op2[5]) = 0xff00ff0f; ++ *((int*)& __m256_op2[4]) = 0xff005f0f; ++ *((int*)& 
__m256_op2[3]) = 0x0000000f; ++ *((int*)& __m256_op2[2]) = 0x0000000f; ++ *((int*)& __m256_op2[1]) = 0xff00ff0f; ++ *((int*)& __m256_op2[0]) = 0xff005f0f; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffff5f5c; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffff5f5c; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffff5f5c; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffff5f5c; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000c400; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x001000100010c410; ++ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001000100010c410; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007fff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fffffff; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xfe); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffebeb8; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffebeb8; ++ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffff5f5c; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffff605a; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffff5f5c; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffff605a; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffff5f5c; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffff605a; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffff5f5c; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffff605a; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256d_op0[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffebeb8; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffebeb8; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001000100010c410; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00007fff7fffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x37); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff605a; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff605a; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x2d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0x0060005a; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0x0060005a; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0x5f13ccf5; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0x5f13ccf5; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; ++ unsigned_long_int_result 
= 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); ++ *((int*)& __m256_op0[7]) = 0xfffffff8; ++ *((int*)& __m256_op0[6]) = 0xffffff08; ++ *((int*)& __m256_op0[5]) = 0x00ff00f8; ++ *((int*)& __m256_op0[4]) = 0x00ffcff8; ++ *((int*)& __m256_op0[3]) = 0xfffffff8; ++ *((int*)& __m256_op0[2]) = 0xffffff08; ++ *((int*)& __m256_op0[1]) = 0x00ff00f8; ++ *((int*)& __m256_op0[0]) = 0x00ffcff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvfclass_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffff605a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffff605a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffff605a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffff605a; ++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000000000; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101008000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101008000000080; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w(__m128i_op0,-9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x3a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_op2[0]) = 0x001000100010c410; ++ *((unsigned 
long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x64); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xc5c5c5c4; ++ *((int*)& __m256_op0[6]) = 0xc5c5c5c4; ++ *((int*)& __m256_op0[5]) = 0x45c5c5c5; ++ *((int*)& __m256_op0[4]) = 0x45c5c5c5; ++ *((int*)& __m256_op0[3]) = 0xc5c5c5c4; ++ *((int*)& __m256_op0[2]) = 0xc5c5c5c4; ++ *((int*)& __m256_op0[1]) = 0x45c5c5c5; ++ *((int*)& __m256_op0[0]) = 0x45c5c5c5; ++ *((int*)& __m256_result[7]) = 0xc5c5c800; ++ *((int*)& __m256_result[6]) = 0xc5c5c800; ++ *((int*)& __m256_result[5]) = 0x45c5c800; ++ *((int*)& __m256_result[4]) = 0x45c5c800; ++ *((int*)& __m256_result[3]) = 0xc5c5c800; ++ *((int*)& __m256_result[2]) = 0xc5c5c800; ++ *((int*)& __m256_result[1]) = 0x45c5c800; ++ *((int*)& __m256_result[0]) = 0x45c5c800; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x44); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x4370100000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x4370100000000000; ++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000008000000080; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000008000000080; ++ *((unsigned long*)& __m256d_op1[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256d_op1[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256d_op1[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256d_op1[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256d_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256d_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_result[3]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long*)& __m256i_result[2]) = 0xc5c545c545c545c5; ++ *((unsigned long*)& __m256i_result[1]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long*)& __m256i_result[0]) = 0xc5c545c545c545c5; ++ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x3d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffff01ffffff08; ++ *((unsigned long*)& __m256i_op1[2]) = 0x43700f0100003008; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffff01ffffff08; ++ *((unsigned long*)& __m256i_op1[0]) = 0x43700f0100003008; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000f8; ++ *((unsigned long*)& __m256i_result[2]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000f8; ++ *((unsigned long*)& __m256i_result[0]) = 0xbc8ff0ffffffcff8; ++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc5c545c545c545c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc5c545c545c545c5; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000f8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000f8; ++ *((unsigned long*)& __m256i_op1[0]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long*)& __m256i_result[3]) = 0xfcfcfcfcfc040404; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fbfffffc; ++ *((unsigned long*)& __m256i_result[1]) = 0xfcfcfcfcfc040404; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fbfffffc; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000059815d00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vneg_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000007942652524; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4265252400000000; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x000000ff; ++ *((int*)& __m256_op0[6]) = 0x000000f8; ++ *((int*)& __m256_op0[5]) = 0xbc8ff0ff; ++ *((int*)& __m256_op0[4]) = 0xffffcff8; ++ *((int*)& __m256_op0[3]) = 0x000000ff; ++ *((int*)& __m256_op0[2]) = 0x000000f8; ++ *((int*)& __m256_op0[1]) = 0xbc8ff0ff; ++ *((int*)& __m256_op0[0]) = 0xffffcff8; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff8ffffff08; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00f800ffcff8; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff8ffffff08; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00f800ffcff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256i_result[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256i_result[0]) = 0x0045b8ae81bce1d8; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x21); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& 
__m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4ee85545068f3133; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x870968c1f56bb3cd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x004e005500060031; ++ *((unsigned long*)& __m128i_result[0]) = 0xff870068fff5ffb3; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x42652524; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000003900000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x004e005500060031; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff870068fff5ffb3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c645c5c5c6; ++ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c645c5c5c6; ++ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000003868686a20; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001a00000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000900000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001a00000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000900000000; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0027002a00030018; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f4300177f7a7f59; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0027002a00080018; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f4300177f7a7f59; ++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xb0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x47); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000003868686a20; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00386a20b8aee1d8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00386a20b8aee1d8; ++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x004e005500060031; 
++ *((unsigned long*)& __m128i_op0[0]) = 0xff870068fff5ffb3; ++ *((unsigned long*)& __m128i_op1[1]) = 0x004e005500060031; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff870068fff5ffb3; ++ *((unsigned long*)& __m128i_result[1]) = 0x04e00060ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x04e00060ffffffff; ++ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x454c2996; ++ *((int*)& __m128_op0[2]) = 0x0ffe354e; ++ *((int*)& __m128_op0[1]) = 0x9e063f80; ++ *((int*)& __m128_op0[0]) = 0x2742ba3e; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x42652524; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x04e00060ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x04e00060ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x04e00060ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x04e00060ffffffff; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x4ee85545ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x870968c1f56bb3cd;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x870968c1f56bb3cd;
++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001a00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001a00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x870968c1f56bb3cd;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001a00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001a00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001a;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001a;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009;
++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000007fff800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d;
++ *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x10);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000007fff800000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2e2b34ca59fa4c88;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2e2b34ca59fa4c88;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966;
++ *((unsigned long*)& __m128i_result[1]) = 0x3e2b34ca59fa4c88;
++ *((unsigned long*)& __m128i_result[0]) = 0x3b2c8aefd44be966;
++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001a00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000900000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000001a00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000900000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000;
++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x04e00060ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x04e00060ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x007fffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x007fffffffffffff;
++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfe01fe01fe01fe01;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe01fe01;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_d(__m256i_op0,11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3e2b34ca59fa4c88;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966;
++ *((unsigned long*)& __m128i_result[1]) = 0x0007658000115de0;
++ *((unsigned long*)& __m128i_result[0]) = 0x001a8960001d2cc0;
++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ int_result = 0x0000000000000000;
++ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x4);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[2]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[1]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[0]) = 0xe9e9e9e9e9e9e9e9;
++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8080808000008080;
++ *((unsigned long*)& __m128i_result[0]) = 0x8080000080800000;
++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808000008080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8080000080800000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfe01fe01fe01fe01;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fe01fe01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_op0[1]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[3]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[2]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[1]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[0]) = 0xe9e9e9e9e9e9e9e9;
++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xf7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808000008080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8080000080800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001010100010100;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x2f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0007658000115de0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x001a8960001d2cc0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x4000400040004000;
++ *((unsigned long*)& __m128i_result[0]) = 0x4000400040004000;
++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00007fffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00007fffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8001;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff8001;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001;
++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0xe9e9e9e9e9e9e9e9;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff8001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff0ffff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff0ffff0000;
++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0007658000115de0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x001a8960001d2cc0;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff;
++ __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xfe07e5fe;
++ *((int*)& __m128_op0[2]) = 0xfefdddfe;
++ *((int*)& __m128_op0[1]) = 0x00020100;
++ *((int*)& __m128_op0[0]) = 0xfedd0c00;
++ *((int*)& __m128_result[3]) = 0x7fc00000;
++ *((int*)& __m128_result[2]) = 0x7fc00000;
++ *((int*)& __m128_result[1]) = 0x1e801ffc;
++ *((int*)& __m128_result[0]) = 0x7fc00000;
++ __m128_out = __lsx_vfsqrt_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xef0179a47c793879;
++ *((unsigned long*)& __m128d_op0[0]) = 0x9f9e7e3e9ea3ff41;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x1e801ffc7fc00000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffc000007fc00000;
++ *((unsigned long*)& __m128d_result[0]) = 0x9e801ffc7fc00000;
++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff80fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xd52aaaaa555555ab;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff80fe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xd52aaaaa555555ab;
++ *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab;
++ __m256i_out = __lasx_xvreplve0_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfe07e5fefefdddfe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00020100fedd0c00;
++ *((unsigned long*)& __m128i_result[1]) = 0xff02ff1bff02ff23;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffff02fff4;
++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffc0008001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffffc0008001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffc0008001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffc0008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffc0007fe9;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffc0007fe9;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffc0007fe9;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffc0007fe9;
++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00003fe00ffe3fe0;
++ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[0]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab;
++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0007658000115de0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x001a8960001d2cc0;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffc000007fc00000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x9e801ffc7fc00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ffff0000ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff00ff0000ff;
++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff0ffff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff0ffff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000017ffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000;
++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xff02ff1bff02ff23;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffffff02fff4;
++ *((unsigned long*)& __m128i_result[1]) = 0xff02ff1bff02ff23;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00003fe00ffe3fe0;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff02ff1bff02ff23;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffff02fff4;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004;
++ __m256i_out = __lasx_xvmini_w(__m256i_op0,4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010000;
++ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
++ int_op1 = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab;
++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[3]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_result[2]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_result[1]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_result[0]) = 0x005500550055ffab;
++ __m256i_out = __lasx_xvexth_h_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op1[0]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000080008000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000080008000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002;
++ __m128i_out = __lsx_vmaxi_d(__m128i_op0,2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f0000007f000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f0000007f000000;
++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffff0ffff0000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffff0ffff0000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff02ff1bff02ff23;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffff02fff4;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff02ff1bff02ff23;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffff02fff4;
++ *((unsigned long*)& __m128i_op2[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x1e801ffc7fc00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7e44bde9b842ff23;
++ *((unsigned long*)& __m128i_result[0]) = 0x00011e80007edff8;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab;
++ *((unsigned long*)& __m256i_result[3]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_result[2]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_result[1]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_result[0]) = 0x1555156a1555156a;
++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_h(__m128i_op0,6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff020000fff4;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00001ee100000000;
++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f0000007f000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7f0000007f000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_op1[0]) = 0x1555156a1555156a;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7e44bde9b842ff23;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00011e80007edff8;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001ffffff;
++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1e801ffc7fc00000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00ed0008005e00a2;
++ *((unsigned long*)& __m128i_op1[0]) = 0x007a007600150077;
++ *((unsigned long*)& __m128i_result[1]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0007007f03fe0000;
++ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff020000fff4;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ed0008005e00a2;
++ *((unsigned long*)& __m128i_op0[0]) = 0x007a007600150077;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00ed0008005e00a2;
++ *((unsigned long*)& __m128i_op1[0]) = 0x007a007600150077;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fc0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1e801ffc00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff020000fff4;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fc0000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x1e801ffc00000000;
++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002;
++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x0000ffff;
++ *((int*)& __m256_op0[6]) = 0xc0008001;
++ *((int*)& __m256_op0[5]) = 0x0000ffff;
++ *((int*)& __m256_op0[4]) = 0xc0008001;
++ *((int*)& __m256_op0[3]) = 0x0000ffff;
++ *((int*)& __m256_op0[2]) = 0xc0008001;
++ *((int*)& __m256_op0[1]) = 0x0000ffff;
++ *((int*)& __m256_op0[0]) = 0xc0008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe;
++ __m256i_out = __lasx_xvftint_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ int_op1 = 0x0000007942652524;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff2524ffffffff;
++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_op0[2]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_op0[1]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_op0[0]) = 0x005500550055ffab;
++ *((unsigned long*)& __m256i_result[3]) = 0x0004000400040805;
++ *((unsigned long*)& __m256i_result[2]) = 0x0004000400040805;
++ *((unsigned long*)& __m256i_result[1]) = 0x0004000400040805;
++ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040805;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0ff8010000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0ff8010000000000;
++ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff020000fff4;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fc0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000080007f80800;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0004007c00fc0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x3ff1808001020101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x3ff1808001020101;
++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0004007c00fc0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x047c0404fc00fcfc;
++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x8a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xe17fe003;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0004007c00fc0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fc0000;
++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfe07e5fefefdddfe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00020100fedd0c00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b;
++ *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b;
++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fc0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b;
++ *((unsigned long*)& __m128i_result[0]) = 0x0002010000fc000b;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b;
++ *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b;
++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000080007f80800;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x000000ff;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x000000ff;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x0000ffff;
++ *((int*)& __m256_op1[6]) = 0xc0008001;
++ *((int*)& __m256_op1[5]) = 0x0000ffff;
++ *((int*)& __m256_op1[4]) = 0xc0008001;
++ *((int*)& __m256_op1[3]) = 0x0000ffff;
++ *((int*)& __m256_op1[2]) = 0xc0008001;
++ *((int*)& __m256_op1[1]) = 0x0000ffff;
++ *((int*)& __m256_op1[0]) = 0xc0008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000080007f80800;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00047fff00007fff;
++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out =
__lasx_xvclo_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0005000501800005; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001fc0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3ff1808001020101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3ff1808001020101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000ff7f1080ef8; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0100000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000ff7f1080ef8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0100000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x003ff18080010201; ++ *((unsigned long*)& __m256i_result[2]) = 0x0100000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x003ff18080010201; ++ *((unsigned long*)& __m256i_result[0]) = 0x0100000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op0[2]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op0[1]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op0[0]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op1[3]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op1[2]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op1[1]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_op1[0]) = 0x005500550055ffab; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x003ff18080010201; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x003ff18080010201; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000f18080010000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000f18080010000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000b0000000b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000201000000000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fffe00010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fffe00010001; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0555550000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0555550000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_result[3]) = 0x0555550000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0555550000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[2]) = 0x3ff1808001020101; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[0]) = 0x3ff1808001020101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00550000ffab0001; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00550000ffab0001; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000f18080010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000f18080010000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000078c0c0008000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000078c0c0008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8493941335f5cc0c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x625a7312befcb21e; ++ *((unsigned long*)& __m128d_result[1]) = 0x43e092728266beba; ++ *((unsigned long*)& __m128d_result[0]) = 0x43d8969cc4afbf2d; ++ __m128d_out = __lsx_vffint_d_lu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00fc0000; ++ *((int*)& __m128_op1[3]) = 0xfe07e5fe; ++ *((int*)& __m128_op1[2]) = 0xfefdddfe; ++ *((int*)& __m128_op1[1]) = 0x00020100; ++ *((int*)& __m128_op1[0]) = 0xfedd0c00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fc0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; 
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00020100fedd0c00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0005000501800005; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00020100fedd0008; ++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8493941335f5cc0c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x625a7312befcb21e; ++ *((unsigned long*)& __m128i_result[1]) = 0x8493941300000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000002befcb21e; ++ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[2]) = 0x3ff1808001020101; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[0]) = 0x3ff1808001020101; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0ff80100ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0ff80100ffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000201000000000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000020100; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000fc0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000007fff8000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0001008100000005; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x84939413; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000002; ++ *((int*)& __m128_op0[0]) = 0xbefcb21e; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000017000000080; ++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000007fff8000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001008100000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0800080077ff8800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0801088108000805; ++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x7fff8000; ++ *((int*)& __m128_op0[1]) = 0x00010081; ++ *((int*)& __m128_op0[0]) = 0x00000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vfclass_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x01000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x01000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000f18080010000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000f18080010000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x43e092728266beba; ++ *((unsigned long*)& __m128i_op1[0]) = 0x43d8969cc4afbf2d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001e; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001e; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000017000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001700080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001700080; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000020000000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000100000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000080000; ++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001e1f; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vextrins_h(__m128i_op0,__m128i_op1,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000100000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0x0000ffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0ff80100ffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0ff80100ffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000017000000080; ++ *((unsigned long*)& __m256d_op1[2]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000017000000080; ++ *((unsigned long*)& __m256d_op1[0]) = 0xc06500550055ffab; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000170; ++ *((int*)& __m256_op0[6]) = 0x00000080; ++ *((int*)& __m256_op0[5]) = 0xc0650055; ++ *((int*)& __m256_op0[4]) = 0x0055ffab; ++ *((int*)& __m256_op0[3]) = 0x00000170; ++ *((int*)& __m256_op0[2]) = 0x00000080; ++ *((int*)& __m256_op0[1]) = 0xc0650055; ++ *((int*)& __m256_op0[0]) = 0x0055ffab; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001700080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001700080; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x4177000800000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x4177000800000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000210011084; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001700080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001700080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001700080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001700080; ++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vfclass_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe90ffffff80;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe90ffffff80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffff90ffffff80;
++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ long_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000;
++ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffff90ffffff80;
++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000;
++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010100000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000;
++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084;
++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff;
++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff70ff01ff80;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff70ff01ff80;
++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000006f0000007f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000006f0000007f;
++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001001;
++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000feff01;
++ *((unsigned long*)& __m128i_result[0]) = 0x00feff0100000000;
++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xffffff90;
++ *((int*)& __m256_op0[4]) = 0xffffff80;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffffff90;
++ *((int*)& __m256_op0[0]) = 0xffffff80;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ unsigned_int_result = 0x0000000000000000;
++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_result[1]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff6fff6fff6fff6;
++ __m256i_out = __lasx_xvmini_h(__m256i_op0,-10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfrint_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0001ffff00000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0001ffff0001ffff;
++ *((unsigned long*)& __m128d_result[1]) = 0x5ff6a0a40ea8f47c;
++ *((unsigned long*)& __m128d_result[0]) = 0x5ff6a0a40e9da42a;
++ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5ff6a0a40ea8f47c;
++ *((unsigned long*)& __m128i_op1[0]) = 0x5ff6a0a40e9da42a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0001ffff00000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0001ffff0001ffff;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000;
++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrml_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xdb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000006f0000007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000006f0000007f;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff90ff81;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff90ff81;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff90ff81;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff90ff81;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff000000ff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000ff000000ff00;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0x01fc020000fe0100;
++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec;
++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op2[1]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op2[0]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec;
++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5ff6a0a40ea8f47c;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5ff6a0a40e9da42a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_w(__m128i_op0,-11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001ffff0001ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000ff0000;
++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrml_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001ffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff0001ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
++ __m128d_out = __lsx_vflogb_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100;
++ unsigned_int_result = 0x0000000000000000;
++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x7);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff90ff81;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff90ff81;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
++ int_result = 0x000000000000007f;
++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x4);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0001000100000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fff000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x0000ffff;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x0000ffff;
++ *((int*)& __m128_op1[0]) = 0x0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x0000ffff;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x0000ffff;
++ *((int*)& __m128_op0[0]) = 0x0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
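++ /* Editorial note (not part of the original generated file): every
++    test case in this file follows the same shape -- operand vector
++    lanes are stored through 64-bit (or, for the float vectors,
++    32-bit) pointer casts, the LSX/LASX builtin under test is
++    invoked, and ASSERTEQ_64 checks the produced vector against the
++    precomputed expected lanes.  */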
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffff6ff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffff6ff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000f6ff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000f6ff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x01fc020000fe0100;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffff6ff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffff6ff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000900ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000900ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_d(__m256i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000003fc0003;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x56);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000017fda829;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffff6ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffff6ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x28);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f4012ceb;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000f4012ceb;
++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000017fda829;
++ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000017f0a82;
++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000fb8000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000fb8000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000017f0a82;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000003f;
++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000017f0a82;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x27);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000017fda829;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000f6ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000f6ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x007fffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007fffff00000000; ++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long*)& __m128i_result[1]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0040004017fda869; ++ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x17fda829; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x14131211100f0e0d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0c0b0a0908070605; ++ *((unsigned long*)& __m256i_op0[1]) = 0x14131211100f0e0d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0c0b0a0908070605; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0a09080706050403; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0a09080706050403; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000017fda829; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000f6ff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000f6ff00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000001e5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x5000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000001; ++ *((int*)& __m128_op0[2]) = 0xfffffffe; ++ *((int*)& __m128_op0[1]) = 0x00000001; ++ *((int*)& __m128_op0[0]) = 0xfffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_b(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0a09080706050403; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0a09080706050403; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0504840303028201; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0504840303028201; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0001fffe; ++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000003ffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000003ffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000003ffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000003ffffffffff; ++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x29); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffe6ffffffe6; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffe6ffffffe6; ++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0a09080706050403; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0a09080706050403; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0003000200000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0003000200000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x5c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000055555501; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000005555555554; ++ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff00ff7f00000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x32); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001000f000e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fff1000ffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000f000e; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000ffffe; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001000f000e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fff1000ffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002a55005501; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000002a55000001; ++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000002a55005501; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002a55000001; 
++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x36280000; ++ *((int*)& __m128_result[1]) = 0x42a00000; ++ *((int*)& __m128_result[0]) = 0x42a02000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m128_op0[3]) = 0xff00ff7f; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000f000e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000ffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x003fffff00070007; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000007ffff; ++ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000036280000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x42a0000042a02000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x9f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = 
__lsx_vshuf4i_h(__m128i_op0,0x2c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff7fffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0040000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80808080; ++ *((int*)& __m256_op0[6]) = 0x80808080; ++ *((int*)& __m256_op0[5]) = 0x80808080; ++ *((int*)& __m256_op0[4]) = 0x80808080; ++ *((int*)& __m256_op0[3]) = 0x80808080; ++ *((int*)& __m256_op0[2]) = 0x80808080; ++ *((int*)& __m256_op0[1]) = 0x80808080; ++ *((int*)& __m256_op0[0]) = 0x80808080; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x80000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xe2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsigncov_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000005555555554; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000036280001; ++ *((unsigned long*)& __m128i_result[0]) = 0x42a0000042a02001; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02001; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000036280001; ++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xe0000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xe0000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xe0000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xe0000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x80000000; ++ *((int*)& __m256_op1[4]) = 0x80000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x80000000; ++ *((int*)& __m256_op1[0]) = 0x80000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0x80000000; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x004200a000200001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x004200a000200001;
++ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000;
++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000000;
++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001c;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001c;
++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0000000;
++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001c;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001c;
++ *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x004200a000200000;
++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0000000;
++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x004200a000200000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200000;
++ *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x004200a000200000;
++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff7fff;
++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x004200a000200001;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7fff00007fff7fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x004200a000200000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffffff;
++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001;
++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x004200a0;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x004200a0;
++ *((int*)& __m128_op0[0]) = 0x00200001;
++ *((int*)& __m128_op1[3]) = 0x004200a0;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x004200a0;
++ *((int*)& __m128_op1[0]) = 0x00200000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff000000;
++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffe003c1f0077;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff0074230438;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000000438;
++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_w(__m128i_op0,7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000;
++ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000efffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffeffffffff;
++ __m128i_out = __lsx_vneg_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[3]) = 0x1fffffff1fffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0383634303836343;
++ *((unsigned long*)& __m256i_result[1]) = 0x1fffffff1fffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0383634303836343;
++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x23);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[0]) = 0x1c1b1a191c1b1a19;
++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffeffffffff;
++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffeffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffeffffffff;
++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001000000;
++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x28);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xefffffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000efffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xfffffffe;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000efffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000002;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000002;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x51);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1fffffff1fffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0383634303836343;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1fffffff1fffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0383634303836343;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001000000;
++ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffeff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffeff00000000;
++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000401000000;
++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[3]) = 0xffe4ffe6ffe5ffe6;
++ *((unsigned long*)& __m256i_result[2]) = 0xffe4ffe6ffe5ffe6;
++ *((unsigned long*)& __m256i_result[1]) = 0xffe4ffe6ffe5ffe6;
++ *((unsigned long*)& __m256i_result[0]) = 0xffe4ffe6ffe5ffe6;
++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x68);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1fffffff1fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0383634303836343;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1fffffff1fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0383634303836343;
++ *((unsigned long*)& __m256i_result[3]) = 0x0002ffff0002ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0002000200020002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0002ffff0002ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002;
++ __m256i_out = __lasx_xvmini_h(__m256i_op0,2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000000;
++ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ unsigned_long_int_result = 0x0000000000000000;
++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x1);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff1fffffff1;
++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_du(__m128i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x6c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffe4ffe6;
++ *((int*)& __m256_op0[6]) = 0xffe5ffe6;
++ *((int*)& __m256_op0[5]) = 0xffe4ffe6;
++ *((int*)& __m256_op0[4]) = 0xffe5ffe6;
++ *((int*)& __m256_op0[3]) = 0xffe4ffe6;
++ *((int*)& __m256_op0[2]) = 0xffe5ffe6;
++ *((int*)& __m256_op0[1]) = 0xffe4ffe6;
++ *((int*)& __m256_op0[0]) = 0xffe5ffe6;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000402000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000402000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000402000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000402000000;
++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[3]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_result[2]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_result[1]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_result[0]) = 0x0036003200360032;
++ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xc4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0036003200360032;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x28);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0xffffffff;
++ *((int*)& __m256_result[6]) = 0xffffffff;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0xffffffff;
++ *((int*)& __m256_result[2]) = 0xffffffff;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((int*)& __m128_result[3]) = 0xffffe000; ++ *((int*)& __m128_result[2]) = 0xffffe000; ++ *((int*)& __m128_result[1]) = 0xffffe000; ++ *((int*)& __m128_result[0]) = 0xffffe000; ++ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0002fffeffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0002fffeffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; ++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x99); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00007fff; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffff0002fffeffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffff0002fffeffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_result[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_result[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_result[0]) = 0x1c1b1a191c1b1a19; ++ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xd2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0xffffffffffffffff; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffffe000; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffe001ffffe001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe001ffffe001; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1d1a1b181d1a1b18; ++ *((unsigned long*)& __m256i_result[2]) = 0x9c9b9a999c9b9a99; ++ *((unsigned long*)& __m256i_result[1]) = 0x1d1a1b181d1a1b18; ++ *((unsigned long*)& __m256i_result[0]) = 0x9c9b9a999c9b9a99; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 
0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x438ff81ff81ff820; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x438ff81ff81ff820; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000043; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x78); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m128i_result[0]) = 0x0202020202020202; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe001ffffe001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe001ffffe001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_result[2]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_result[1]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_result[0]) = 0x2000200020002000; ++ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0200020002000200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0200020002000200; ++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x73); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0200020002000200; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0200020002000200; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff02000200; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff02000200; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe00001ffe200; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff02000200; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffdfff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffdfff; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffdfff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffdfff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe00001ffe200; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffdfff; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffe001; ++ *((int*)& __m128_op0[2]) = 0xffffe001; ++ *((int*)& __m128_op0[1]) = 0xffffe001; ++ *((int*)& __m128_op0[0]) = 0xffffe001; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffe000; ++ *((int*)& __m128_op1[0]) = 0x01ffe200; ++ *((int*)& __m128_op2[3]) = 0x04040383; ++ *((int*)& __m128_op2[2]) = 0x83838404; ++ *((int*)& __m128_op2[1]) = 0x04040383; ++ *((int*)& __m128_op2[0]) = 0x83838404; ++ *((int*)& __m128_result[3]) = 0xffffe001; ++ *((int*)& __m128_result[2]) = 0xffffe001; ++ *((int*)& __m128_result[1]) = 0xffffe001; ++ *((int*)& __m128_result[0]) = 0xffffe001; ++ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x30); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffff00000000; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op2[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000007f000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff0000; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000003fb000003fb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000003fb000003fb; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff4fffffff4; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff4fffffff4; ++ 
__m128i_out = __lsx_vmini_w(__m128i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007f000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffdfff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffdfff; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffe000; ++ *((int*)& __m128_op1[0]) = 0x01ffe200; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff80007fff; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d(__m256i_op0,5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000cb4a; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000cb4a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f909; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op1[1]) = 0xf000e001bf84df83; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8e001ff84e703; ++ *((unsigned long*)& __m128i_result[1]) = 0x14042382c3ffa481; ++ *((unsigned long*)& __m128i_result[0]) = 0x040c238283ff9d01; ++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, 
__m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf000e001bf84df83; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8e001ff84e703; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ca354688; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff35cab978; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff35cab978; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff35cab978; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010035; ++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ca354688; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_result[1]) = 0x00040003ff83ff84; ++ *((unsigned long*)& __m128i_result[0]) = 0x00040003ff4dffca; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f909; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long*)& __m128i_result[1]) = 0x0007005200440062; ++ *((unsigned long*)& __m128i_result[0]) = 0x0080005e007f00d8; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010100000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff8383ffff7d0d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe000ffff1fff; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be5579ebe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f909; ++ *((unsigned long*)& __m128i_result[1]) = 0x0c03e17edd781b11; ++ *((unsigned long*)& __m128i_result[0]) = 0x342caf9be55700b5; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ca354688; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00040003ff83ff84; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00040003ff4dffca; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000383ffff1fff; ++ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000383ffff1fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ca354688; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000038335ca2777; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000404; ++ *((int*)& __m128_op1[2]) = 0x00000383; ++ *((int*)& __m128_op1[1]) = 0xffffe000; ++ *((int*)& __m128_op1[0]) = 0xffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x7f800000; ++ *((int*)& __m256_op1[6]) = 0x7f800000; ++ *((int*)& __m256_op1[5]) = 0x7f800000; ++ *((int*)& __m256_op1[4]) = 0x7f800000; ++ *((int*)& __m256_op1[3]) = 0x7f800000; ++ *((int*)& __m256_op1[2]) = 0x7f800000; ++ *((int*)& __m256_op1[1]) = 0x7f800000; ++ *((int*)& __m256_op1[0]) = 0x7f800000; ++ *((int*)& __m256_result[7]) = 0xff800000; ++ *((int*)& __m256_result[6]) = 0xff800000; ++ *((int*)& __m256_result[5]) = 0xff800000; ++ *((int*)& __m256_result[4]) = 0xff800000; ++ *((int*)& __m256_result[3]) = 0xff800000; ++ *((int*)& __m256_result[2]) = 0xff800000; ++ *((int*)& __m256_result[1]) = 0xff800000; ++ *((int*)& __m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000038335ca2777; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000800800000; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b(__m128i_op0); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000463fd2902d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5ccd54bbfcac806c; ++ unsigned_int_result = 0x00000000000000ac; ++ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x2); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000800800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000004000000000; ++ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007ff000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007ff000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007ff000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be55700b5; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00040003ff83ff84; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00040003ff4dffca; ++ *((unsigned long*)& __m128i_result[1]) = 0x0c07e181ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x3430af9effffffff; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000004000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000007; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffc0ffff003f; ++ __m128i_out = __lsx_vsrai_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c07e181ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x3430af9effffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000040000000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000040000000400; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be55700b5; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0c03e17edd781b11; ++ *((unsigned long*)& __m128i_result[0]) = 0x342caf9bffff1fff; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xcc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000001; ++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long*)& __m128i_op0[0]) = 0x342caf9bffff1fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000040000000400; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0c037fff342c7fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000004000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffc000000000; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x34); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffe000ffff1fff; ++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0028e0a1; ++ *((int*)& __m128_op0[2]) = 0xa000a041; ++ *((int*)& __m128_op0[1]) = 0x01000041; ++ *((int*)& __m128_op0[0]) = 0x00010001; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x01000001; ++ *((int*)& __m128_op1[1]) = 0x00010001; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x01000001; ++ *((int*)& __m128_op2[1]) = 0xffffe000; ++ *((int*)& __m128_op2[0]) = 0xffff1fff; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x01000001; ++ *((int*)& __m128_result[1]) = 0xffffe000; ++ *((int*)& __m128_result[0]) = 0xffff1fff; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000004; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffc000000000; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; ++ *((int*)& __m128_result[3]) = 0x40800000; ++ *((int*)& __m128_result[2]) = 0x4b800000; ++ *((int*)& __m128_result[1]) = 0x47800080; ++ *((int*)& __m128_result[0]) = 0x40800000; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000383; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000383; ++ *((unsigned long*)& __m128i_result[0]) = 0xe400000003ffc001; ++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000001; ++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000383; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe400000003ffc001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe000ffff2382; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[0]) = 0x0005000500050005; ++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000090100000a; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe009ffff2008; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040000000400; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000040100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff2382; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ 
*((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[0]) = 0x0005000500050005; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00040100; ++ *((int*)& __m128_op0[1]) = 0x00010001; ++ *((int*)& __m128_op0[0]) = 0x00010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000040100; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0001000100010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000384; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe3f0200004003ffd; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff00ff00ff00; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000007f00000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00000000007f0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m128i_result[0]) = 0x0404040404000404; ++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000007f00000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000401000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000110000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000004; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000007f0000; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000007f0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; 
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvneg_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000501000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000008; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040100; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010400100203; ++ *((unsigned long*)& __m128i_result[0]) = 0x0103010301020109; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007f00; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000050005;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000007f00;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007f00;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000;
++ __m128i_out = __lsx_vmaxi_d(__m128i_op0,-4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_b(__m256i_op0,2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00007f00;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x01000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffffffc;
++ __m128i_out = __lsx_vneg_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x3a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000050005;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010400100203;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0103010301020109;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000110000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000007f00000004;
++ *((unsigned long*)& __m128i_result[1]) = 0x0202000402020202;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000200000010000;
++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0001000100000004;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000501000002;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000008;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_result[7]) = 0xffffffff;
++ *((int*)& __m256_result[6]) = 0xffffffff;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0xffffffff;
++ *((int*)& __m256_result[2]) = 0xffffffff;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfrint_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000050005;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000505;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000505;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0xffffffff;
++ *((int*)& __m256_result[6]) = 0xffffffff;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0xffffffff;
++ *((int*)& __m256_result[2]) = 0xffffffff;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000505;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x02020004;
++ *((int*)& __m128_op0[2]) = 0x02020202;
++ *((int*)& __m128_op0[1]) = 0x00002000;
++ *((int*)& __m128_op0[0]) = 0x00010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000505;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc;
++ *((unsigned long*)& __m128i_op1[1]) = 0x2001240128032403;
++ *((unsigned long*)& __m128i_op1[0]) = 0x288b248c00010401;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffdfffefffff7ffe;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0008;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0008;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0800000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xfffefffe; ++ *((int*)& __m128_op0[0]) = 0xfffffffc; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xfffefffe; ++ *((int*)& __m128_op1[0]) = 0xfffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000505; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000505; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffef000004ea; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffefffffffef; ++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffef000004ea; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffef000004ea; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffefffffffef; 
++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffef000004ea; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000002020202; ++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xef); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffeffff; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010102; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000001; ++ *((int*)& __m256_op0[4]) = 0x00010102; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000101; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0018796d; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& 
__m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffffffc; ++ __m128i_out = __lsx_vmini_b(__m128i_op0,4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001010300010102; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000410041; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002020202; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x5b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000081; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffefffffffeff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffcff; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000102; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010103; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffcff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfffffeff; ++ *((int*)& __m128_op0[2]) = 0xfffffeff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xfffffcff; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00fffefe; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128d_result[1]) = 0x800000ff000000ff; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x800000ff000000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x800000ff080000ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000fffffffefe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fffefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000fffefe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000808080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfe02fe02; ++ *((int*)& __m128_op0[2]) = 0xfe02fe02; ++ *((int*)& __m128_op0[1]) = 0xfe02fe02; ++ *((int*)& __m128_op0[0]) = 0xfe02fe02; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vfclass_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000000020000; ++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff8000000000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000800000000ffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long*)& __m128i_op2[0]) = 0xd705c77a7025c899; ++ *((unsigned long*)& __m128i_result[1]) = 0xffcb410000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffeb827ffffffff; ++ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000808080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000808080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fffefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000fffefe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd705c77a7025c899; ++ unsigned_int_result = 0x000000000000edfa; ++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000102; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffefd; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2700000000002727; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000002727; ++ *((unsigned long*)& __m128i_op1[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long*)& __m128i_op1[0]) = 0xd705c77a7025c899; ++ *((unsigned long*)& __m128i_result[1]) = 0xc9c00000000009c9; ++ *((unsigned long*)& __m128i_result[0]) = 0x0013938000000000; ++ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffcb410000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffeb827ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd705c77a7025c899; ++ *((unsigned long*)& __m128i_result[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128i_result[0]) = 0xedfaedfaedfaedfa; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff7fffdefd; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x800000ff000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ffffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 
0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000009; ++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000009; ++ *((unsigned long*)& __m128i_op1[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long*)& __m128i_op1[0]) = 0xd705c77a7025c899; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x03fdfffcfefe03fe; ++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010000800100008; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 
0xfffffffefffffefd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffbf4; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffc; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff01; ++ __m128i_out = __lsx_vneg_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_result[2]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_result[1]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_result[0]) = 0x6c6c6c6c6c6c6c6c; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x6c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffbf4; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000308; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x0); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffbf4; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800c000; ++ *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800a000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800e000; ++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff0002fffefffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0002ff7e8286; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff0002fffefffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0002ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0202000002020202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0202000002010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0202000002020202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0202000002020000; ++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000fff08; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000fff09; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff80ff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff80000000ffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff80ff0010ff06; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007f01000eff0a; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff80000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); 
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000808; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128i_op0[0]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff80ff0010ff06; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007f01000eff0a; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff80ff0010ff06; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000804000004141; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00017fff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf800f800f800c000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf800f800f800a000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = 
__lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0010ff06; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xf6fd377cf705f680; ++ *((unsigned long*)& __m128i_result[0]) = 0xc0000000bfff8000; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff80000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000001fffe; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffff80ff0010ff06; ++ *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128d_op1[0]) = 0xedfaedfaedfaedfa; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000300000003; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0202000002020202; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0202000002010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0202000002020202; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0202000002020000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fe000000ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fe000001fe0000; ++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x01fe000000ff00ff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x01fe000001fe0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xcccccccc0000cccc; ++ *((unsigned long*)& __m128i_result[0]) = 0xcccccccc0000cccc; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x33); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000040000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x4000000010000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000040000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000040000010; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long*)& __m128i_result[1]) = 0x7e7e7e7eff0f7f04; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f0000fd7f01fffb; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000808; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& 
__m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffe0000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000fefc0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffe0000; ++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000fffffefc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000fffffffe0; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000fffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000fffffffff; ++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x7b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff8080000004000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff8080000000000; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000022666621; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffdd9999da; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000066621; ++ *((unsigned long*)& __m128i_result[0]) = 0x01ff00085e9900ab; ++ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0001000100010000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x020afefb08140000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf800f7fff8ffc0ff; ++ *((unsigned long*)& __m256i_result[2]) = 0xf8fff7fff7ffa000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800e000; ++ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800e000; ++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001f; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000300000003; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000002; ++ *((int*)& __m128_op0[2]) = 0x00000002; ++ *((int*)& __m128_op0[1]) = 0x00000003; ++ *((int*)& __m128_op0[0]) = 0x00000003; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3fc000003fc00000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fc000003fc00000; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fc000003fc00000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fc000003fc00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w(__m128i_op0,1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x5eff0000; ++ *((int*)& __m128_result[2]) = 0x5eff0000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fc000003fc00000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fc000003fc00000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3fc000003fc00000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fc000003fc00000; ++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d(__m256i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffff00ffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7f800000; ++ *((int*)& __m128_op0[2]) = 0x7f800000; ++ *((int*)& __m128_op0[1]) = 0x7f800000; ++ *((int*)& __m128_op0[0]) = 0x7f800000; ++ *((int*)& __m128_op1[3]) = 0x00000002; ++ *((int*)& __m128_op1[2]) = 0x00000002; ++ *((int*)& __m128_op1[1]) = 0x00000003; ++ *((int*)& __m128_op1[0]) = 0x00000003; ++ *((int*)& __m128_op2[3]) = 0x3fc00000; ++ *((int*)& __m128_op2[2]) = 0x3fc00000; ++ *((int*)& __m128_op2[1]) = 0x3fc00000; ++ *((int*)& __m128_op2[0]) = 0x3fc00000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000020afefb1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f350104f7ebffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003fffc1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x005c0003fff9ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000fe6a021; ++ *((unsigned long*)& __m256i_result[1]) = 0x2000000020000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000b8000; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x23); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00feff0000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00feff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ffff0000000000; ++ __m128i_out = __lsx_vslti_b(__m128i_op0,0); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00010001; ++ *((int*)& __m256_op0[6]) = 0x00010000; ++ *((int*)& __m256_op0[5]) = 0x020afefb; ++ *((int*)& __m256_op0[4]) = 0x08140000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000001; ++ *((int*)& __m256_op0[1]) = 0x0003fffc; ++ *((int*)& __m256_op0[0]) = 0x00060000; ++ *((int*)& __m256_op1[7]) = 0x80000000; ++ *((int*)& __m256_op1[6]) = 0x40000000; ++ *((int*)& __m256_op1[5]) = 0x40000000; ++ *((int*)& __m256_op1[4]) = 0x10000010; ++ *((int*)& __m256_op1[3]) = 0x80000000; ++ *((int*)& __m256_op1[2]) = 0x40000000; ++ *((int*)& __m256_op1[1]) = 0x80000000; ++ *((int*)& __m256_op1[0]) = 0x40000010; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x000000ff; ++ *((int*)& __m256_op2[4]) = 0x0001ffff; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x0000ffff; ++ *((int*)& __m256_op2[0]) = 0x00010000; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x80020000; ++ *((int*)& __m256_result[5]) = 0x828aff0b; ++ *((int*)& __m256_result[4]) = 0x8001ffff; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000002; ++ *((int*)& __m256_result[1]) = 0x8000ffff; ++ *((int*)& __m256_result[0]) = 0x800d0002; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000300000003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffc0003fffa0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fb010201f900ff; ++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x020afefb08140000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0001ff02; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff020afefc; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000003fefd; ++ __m256i_out = 
__lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xfffffffe; ++ *((int*)& __m256_op0[5]) = 0xfffffffe; ++ *((int*)& __m256_op0[4]) = 0xfffffefc; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xfffffffe; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xfffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0209fefb08140000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256d_result[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256d_result[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x5ff00007fff9fff3; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0001ff02; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff020afefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000003fefd; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefff7fff7; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7ffffffbfffb; ++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7f800000; ++ *((int*)& __m128_op0[2]) = 0x7f800000; ++ *((int*)& __m128_op0[1]) = 0x7f800000; ++ *((int*)& __m128_op0[0]) = 0x7f800000; ++ *((int*)& __m128_op1[3]) = 0x7f800000; ++ *((int*)& __m128_op1[2]) = 0x7f800000; ++ *((int*)& __m128_op1[1]) = 0x7f800000; ++ *((int*)& __m128_op1[0]) = 0x7f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007f7f80807f7f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff0000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f80000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f80000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0701000007010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0701000000000000; ++ __m128i_out = __lsx_vpcnt_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000020afefb1; ++ *((unsigned long*)& __m256d_op0[2]) = 0x7f350104f7ebffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000003fffc1; ++ *((unsigned long*)& __m256d_op0[0]) = 0x005c0003fff9ffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffff80cb; ++ *((int*)& __m256_op1[6]) = 0xfffffdf8; ++ *((int*)& __m256_op1[5]) = 0x00000815; ++ *((int*)& __m256_op1[4]) = 0x00000104; ++ *((int*)& __m256_op1[3]) = 0xffffffa4; ++ *((int*)& __m256_op1[2]) = 0xfffffffd; ++ *((int*)& __m256_op1[1]) = 0x00000007; ++ *((int*)& __m256_op1[0]) = 0x00000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du(__m128i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff80cbfffffdf8; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000081500000104; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffa4fffffffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff80cbfffffdf8; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffa4fffffffd; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00080000000cc916; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000006fff3; ++ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ffff00ff000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff8080000004000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff8080000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000005f000000f0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000f9; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000f3; ++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0001ff02; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff020afefc; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000003fefd; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0001ff04; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff02a0fefc; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000cfefd; ++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x807f7f8000ffff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00feff00; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; ++ 
__m128i_out = __lsx_vslti_h(__m128i_op0,4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0001ff04; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff02a0fefc; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000cfefd; ++ *((unsigned long*)& __m256i_op1[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long*)& __m256i_op2[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op2[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffbfefa; ++ *((unsigned long*)& __m256i_result[2]) = 0xff1eff1902a0fea4; ++ *((unsigned long*)& __m256i_result[1]) = 0xff10000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff10fff9ff13fd17; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00080000000cc916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000006fff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00f8000000f41bfb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fa0106; ++ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x56); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x807f7f8000ffff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00feff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0107070100080800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000080800070800; ++ __m128i_out = __lsx_vpcnt_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ffff00ff000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00080005c073c916; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000100000007fff3; ++ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000000010000; ++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000bf6e0000c916; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000030000fff3; ++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f78787f00f7f700; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000f7f700f7f700; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000bf6e0000c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000030000fff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000bf6e0000c916; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000030000fff3; ++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xbea2e127; ++ *((int*)& __m256_op1[6]) = 0xc046721f; ++ *((int*)& __m256_op1[5]) = 0x1729c073; ++ *((int*)& __m256_op1[4]) = 0x816edebe; ++ *((int*)& __m256_op1[3]) = 0xde91f010; ++ *((int*)& __m256_op1[2]) = 
0x000006f9; ++ *((int*)& __m256_op1[1]) = 0x5ef1f90e; ++ *((int*)& __m256_op1[0]) = 0xfefaf30d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010102; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010201010204; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010102; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010102; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbea2e127c046721f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1729c073816edebe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xde91f010000006f9; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000060000108; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001060005; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fef0001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xbfa3e127c147721f; ++ *((unsigned long*)& __m256i_result[2]) = 0x1729c173836edfbe; ++ *((unsigned long*)& __m256i_result[1]) = 0xdf91f111808007fb; ++ *((unsigned long*)& __m256i_result[0]) = 0x5ff1f90ffffbf30f; ++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffc500000002d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000034; ++ *((unsigned long*)& __m256i_op1[3]) = 0xbfa3e127c147721f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1729c173836edfbe; ++ *((unsigned long*)& __m256i_op1[1]) = 0xdf91f111808007fb; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5ff1f90ffffbf30f; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff280016; ++ *((unsigned long*)& __m256i_result[2]) = 0xd193a30f94b9b7df; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x000000000001001a; ++ *((unsigned long*)& __m256i_result[0]) = 0xc88840fdf887fd87; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000bea20000e127; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000c0460000721f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000de910000f010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000006f9; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000bea20; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000c0460; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000de910; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x37); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000bf6e0000c916; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000030000fff3; 
++ *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000e00ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01480000052801a2; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffdcff64; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbea2e127c046721f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1729c073816edebe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xde91f010000006f9; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long*)& __m256i_result[3]) = 0x515f93f023600fb9; ++ *((unsigned long*)& __m256i_result[2]) = 0x948b39e0b7405f6f; ++ *((unsigned long*)& __m256i_result[1]) = 0x48ef087800007c83; ++ *((unsigned long*)& __m256i_result[0]) = 0x78af877c7d7f86f9; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010102; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0101010201010204; ++ 
*((unsigned long*)& __m256d_op0[1]) = 0x0101010101010102; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010102; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000e00ff00ff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010201010204; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010102; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x515f93f0; ++ *((int*)& __m256_op0[6]) = 0x23600fb9; ++ *((int*)& __m256_op0[5]) = 0x948b39e0; ++ *((int*)& __m256_op0[4]) = 0xb7405f6f; ++ *((int*)& __m256_op0[3]) = 0x48ef0878; ++ *((int*)& __m256_op0[2]) = 0x00007c83; ++ *((int*)& __m256_op0[1]) = 0x78af877c; ++ *((int*)& __m256_op0[0]) = 0x7d7f86f9; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000df93f0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000077843; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256d_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256d_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256d_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01480000052801a2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffdcff64; ++ *((unsigned long*)& __m256i_op1[3]) = 0xbea2e127c046721f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1729c073816edebe; ++ *((unsigned long*)& __m256i_op1[1]) = 0xde91f010000006f9; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00170000028500de; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fd02f20d; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010203; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x01480000052801a2; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffdcff64; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000060000108; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001060005; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fef0001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256i_result[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256i_result[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffff8d9ffa7103d; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x39); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256i_result[3]) = 0x001151510a431048; ++ *((unsigned long*)& __m256i_result[2]) = 0x5b0b08425b09011a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x5b5b58595b031019; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x5b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01480000052801a2; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffdcff64; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000df93f0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000077843; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000003800000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010203; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fff80000; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_result[1]) = 
0xffffffff00070007; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0007ffff; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff0000; ++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128d_result[1]) = 0x80000000fff8fff8; ++ *((unsigned long*)& __m128d_result[0]) = 0x80000000fff80000; ++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff8fffffff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff8fc000000; ++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x80000000fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x60000108; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x01060005; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x7fef0001; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000001; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xfffffff8; ++ *((int*)& __m256_op1[4]) = 0xfffffff8; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0xfffffff8; ++ *((int*)& __m256_op1[0]) = 0xfc000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0xfff8fff8; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0xfff80000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0xfff8fff8; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0xfff80000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x6d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffe2ffe2ffe2ffe2; ++ *((unsigned long*)& __m128i_result[0]) = 0xffe2ffe2ffe2ffe2; ++ __m128i_out = 
__lsx_vsubi_hu(__m128i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000102; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000102; ++ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xfffffff8fffffff8; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xfffffff8fc000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfafafafafafafafa; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fefefe; ++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
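/* ------------------------------------------------------------------
   A minimal, self-contained sketch of the pattern repeated throughout
   this generated test file: fill an operand vector lane by lane, fill
   the expected vector with precomputed lane values, run one intrinsic,
   and compare the output 64 bits at a time against the expectation.
   The real ASSERTEQ_64 macro is defined elsewhere in the deleted file;
   the ASSERTEQ_64_SKETCH stand-in, the v128 union, and check_vslti_w()
   below are illustrative assumptions, not code from this patch.
   Build for loongarch64 with -mlsx.
   ------------------------------------------------------------------ */
#include <stdio.h>
#include <lsxintrin.h>

/* View an LSX vector as two 64-bit lanes, mirroring the
   "*((unsigned long *) &op[i])" accesses used in the cases above.  */
typedef union { __m128i v; unsigned long u64[2]; } v128;

/* Hypothetical stand-in for ASSERTEQ_64: compare expected and actual
   vectors one 64-bit chunk at a time, reporting the failing line.  */
#define ASSERTEQ_64_SKETCH(line, exp, got)                             \
  do {                                                                 \
    if ((exp).u64[0] != (got).u64[0] || (exp).u64[1] != (got).u64[1])  \
      printf ("line %d: expected %016lx %016lx, got %016lx %016lx\n",  \
              (line), (exp).u64[1], (exp).u64[0],                      \
              (got).u64[1], (got).u64[0]);                             \
  } while (0)

static void
check_vslti_w (void)
{
  v128 op0, expect, out;

  op0.u64[1] = 0xffffffffffffffffUL;   /* two .w lanes holding -1 */
  op0.u64[0] = 0x0000000000000000UL;   /* two .w lanes holding  0 */

  /* vslti.w sets each 32-bit lane to all-ones when the signed lane
     is smaller than the immediate, else to zero; -1 < 4 and 0 < 4,
     so every lane of the expected result is 0xffffffff.  (The zero
     cases above, e.g. __lsx_vslti_w(op0, -10) yielding zero, follow
     the same rule.)  */
  expect.u64[1] = 0xffffffffffffffffUL;
  expect.u64[0] = 0xffffffffffffffffUL;

  out.v = __lsx_vslti_w (op0.v, 4);
  ASSERTEQ_64_SKETCH (__LINE__, expect, out);
}

int
main (void)
{
  check_vslti_w ();
  return 0;
}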
*((unsigned long*)& __m128i_op0[1]) = 0x80000000fff8fff8;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7f800000fff8fff8;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7f800000fff80000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m128i_result[0]) = 0x80000000fff80000;
++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff7f0000ff7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff7f0000ff7f;
++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfafafafafafafafa;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000fefefe;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010100000102;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010100000102;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffefd;
++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x80000000fff80000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000004000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfff8004000000000;
++ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[3]) = 0xc08f780000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256d_result[1]) = 0xc08f780000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvflogb_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfafafafafafafafa;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fefefe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvclo_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8282828282828282;
++ *((unsigned long*)& __m128i_result[0]) = 0x8282828282828282;
++ __m128i_out = __lsx_vori_b(__m128i_op0,0x82);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfafafafafafafafa;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fefefe;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xf9fbf9fbf9fbf9fb;
++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_result[1]) = 0xfdfffdfffdfffdff;
++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01fffffdff;
++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,-10);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xc08f7800;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xfffffefd;
++ *((int*)& __m256_op0[3]) = 0xc08f7800;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000101;
++ *((int*)& __m256_op1[4]) = 0x00000102;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000101;
++ *((int*)& __m256_op1[0]) = 0x00000102;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000a0a08000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5350a08000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000;
++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffefd;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fd;
++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x007f017f807f017d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x007f017f807f017f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000017f0000017d;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000017f0000017f;
++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00007dfd;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00007dfd;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_b(__m256i_op0,4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000001;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000001;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f0000017d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000017f0000017f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000017f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000017f;
++ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000017f0000017d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000017f0000017f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000017f0000017d;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000017f0000017f;
++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000;
++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x2e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000010001;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000017f0000017d;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000010001;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000017f0000017f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100;
++ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000017f;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffc002000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000;
++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[2]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[0]) = 0x1717171717171717;
++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000002000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfffc002000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000;
++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff000607f7;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010017e7d1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff000607f7;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000001001807f1;
++ *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[2]) = 0x000607f700000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[0]) = 0x000607f700000001;
++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x19);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffc001fffffffff;
++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff000607f7;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010017e7d1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff000607f7;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000001001807f1;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f0000017d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000017f0000017f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000002e0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000002e0000fffe;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000002e0000ffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000002e0000fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000f7bc0001f7bd;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000f93b0000017c;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000f7bc0001f7bd;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000f93b0000017b;
++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffc001fffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200000;
++ *((unsigned long*)& __m128i_result[0]) = 0x001fff8004000000;
++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefdfffffefd;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((int*)& __m256_result[7]) = 0x4b808080;
++ *((int*)& __m256_result[6]) = 0x4b808080;
++ *((int*)& __m256_result[5]) = 0x4f800000;
++ *((int*)& __m256_result[4]) = 0x4f7fffff;
++ *((int*)& __m256_result[3]) = 0x4b808080;
++ *((int*)& __m256_result[2]) = 0x4b808080;
++ *((int*)& __m256_result[1]) = 0x4f800000;
++ *((int*)& __m256_result[0]) = 0x4f800000;
++ __m256_out = __lasx_xvffint_s_wu(__m256i_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x0000007f;
++ *((int*)& __m128_op0[2]) = 0x0000007f;
++ *((int*)& __m128_op0[1]) = 0x0000007f;
++ *((int*)& __m128_op0[0]) = 0x0000007f;
++ *((int*)& __m128_op1[3]) = 0x3ff00000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0xfffc0020;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffc001f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010202050120;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010102020202;
++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000f7bc0001f7bd;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000f93b0000017c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000f7bc0001f7bd;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000f93b0000017b;
++ *((unsigned long*)& __m256i_result[3]) = 0xfff2f7bcfff2f7bd;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff2f93bfff2fff2;
++ *((unsigned long*)& __m256i_result[1]) = 0xfff2f7bcfff2f7bd;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff2f93bfff2fff2;
++ __m256i_out = __lasx_xvmini_h(__m256i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010202050120;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010102020202;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000607f700000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000607f700000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffe81;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff;
++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff8000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0010000200020002;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfff2f7bcfff2f7bd;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff2f93bfff2fff2;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfff2f7bcfff2f7bd;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff2f93bfff2fff2;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcfffc;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffcfffc;
++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefdfffffefd;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ int_result = 0xfffffffffffffefd;
++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x4);
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000100;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000100;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfffffefdfffffefd;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000100;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffff7d80000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000100;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffe81;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe81;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0001ffff8002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0010000400020004;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff20ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc0020ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x07fff80000008000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000007ffe001;
++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00cf01fe01fe01fe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000301de01fe01fe;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f00000000000000;
++ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x17171717;
++ *((int*)& __m256_op0[6]) = 0x17171717;
++ *((int*)& __m256_op0[5]) = 0x000607f7;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x17171717;
++ *((int*)& __m256_op0[2]) = 0x17171717;
++ *((int*)& __m256_op0[1]) = 0x000607f7;
++ *((int*)& __m256_op0[0]) = 0x00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
++ __m128i_out = __lsx_vclo_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00003ff000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fffc00000000;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcf800;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000607f700000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000607f700000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000002e0000ffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000002e0000fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[2]) = 0x000607f700000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717;
++ *((unsigned long*)& __m256i_result[0]) = 0x000607f700000001;
++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00003ff000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffc00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000002e0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000002e0000002e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000002e0000fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000002e;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000002e;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000002e;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe;
++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00001ff800000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x7ffe800e80000000;
++ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000307fffe72e800;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00001ff800000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7ffe800e80000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvffint_s_wu(__m256i_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00005555;
++ *((int*)& __m256_op1[6]) = 0x00005555;
++ *((int*)& __m256_op1[5]) = 0x000307ff;
++ *((int*)& __m256_op1[4]) = 0xfe72e815;
++ *((int*)& __m256_op1[3]) = 0x00005555;
++ *((int*)& __m256_op1[2]) = 0x00005555;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000015;
++ *((int*)& __m256_result[7]) = 0x00005555;
++ *((int*)& __m256_result[6]) = 0x00005555;
++ *((int*)& __m256_result[5]) = 0x000307ff;
++ *((int*)& __m256_result[4]) = 0xfe72e815;
++ *((int*)& __m256_result[3]) = 0x00005555;
++ *((int*)& __m256_result[2]) = 0x00005555;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000015;
++ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x0000ffff;
++ *((int*)& __m128_op0[2]) = 0x0000ffff;
++ *((int*)& __m128_op0[1]) = 0x0000ffff;
++ *((int*)& __m128_op0[0]) = 0x0000fffe;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00003fee;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000004;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000002;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff100fffc;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffdf100fffc;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff100fffc;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000;
++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x21);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x30);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_d(__m256i_op0,-11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffcf800fffcf800;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcf800;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x7fffffff;
++ *((int*)& __m128_op0[2]) = 0x7fffffff;
++ *((int*)& __m128_op0[1]) = 0x7fffffff;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_result[3]) = 0x0008000800000003;
++ *((unsigned long*)& __m256i_result[2]) = 0x0806050008060500;
++ *((unsigned long*)& __m256i_result[1]) = 0x0008000800000003;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001fffe00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x0000ffff;
++ *((int*)& __m128_op0[2]) = 0x0000ffff;
++ *((int*)& __m128_op0[1]) = 0x0000ffff;
++ *((int*)& __m128_op0[0]) = 0x0000fffe;
++ *((int*)& __m128_op1[3]) = 0x0000ffff;
++ *((int*)& __m128_op1[2]) = 0x0000ffff;
++ *((int*)& __m128_op1[1]) = 0x0000ffff;
++ *((int*)& __m128_op1[0]) = 0x0000fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x16161616a16316b0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363;
++ *((unsigned long*)& __m128i_op1[1]) = 0x16161616a16316b0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x7c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[3]) = 0xf9f8f9f8f9f9f900;
++ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9e0;
++ *((unsigned long*)& __m256i_result[1]) = 0xf9f8f9f8f9f9f900;
++ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f900;
++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000a16316b0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000063636363;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000a1630000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080000;
++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff010000fff9;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff19;
++ *((unsigned long*)& __m256i_result[1]) = 0xff02ff020001fffa;
++ *((unsigned long*)& __m256i_result[0]) = 0x000100010001fffa;
++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000008;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000080000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000a16316b0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000063636363;
++ *((unsigned
long*)& __m128i_op1[1]) = 0x16161616a16316b0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000a16316b0; ++ *((unsigned long*)& __m128i_result[0]) = 0x16161616a16316b0; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xa7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfe82fe0200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfe82fe0200000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xc177d01fe0000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff010000fff9; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff19; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff02ff020001fffa; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000100010001fffa; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[3]) = 0x00fe01ff0006ffcf; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000e62f8f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00fe02fe0006ffd6; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000006ffd6; ++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff100fffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff100fffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff010000fff9; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff19; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff02ff020001fffa; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000100010001fffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x807f807f00000380; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long*)& __m256i_result[1]) = 0xc03fc03f000001c0; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001c0; ++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000a16316b0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x16161616a16316b0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ffffa10016; ++ *((unsigned long*)& __m128i_result[0]) = 0x01150115ffa10016; ++ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000a1630000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000a1630000; ++ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0001ffff0001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000a163000016b0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0303000103030001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000030300000303; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff7100fffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ffffa10016; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01150115ffa10016; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100fe000070a1; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000115ffffffa1; ++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000f9f900; ++ *((unsigned long*)& __m256i_op0[2]) = 0x79f9f9f900f9f9e0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f9f900; ++ *((unsigned long*)& __m256i_op0[0]) = 0x79f9f9f900f9f900; ++ *((unsigned long*)& __m256i_result[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long*)& __m256i_result[2]) = 0x79f9f9f900000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long*)& __m256i_result[0]) = 0x79f9f9f900000000; ++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x97); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000100fe000070a1; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000115ffffffa1; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe86ce7eb5e9ce950; ++ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m128i_result[0]) = 0xec68e3ef5a98ed54; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000008; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00080000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffe40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long*)& __m256i_op1[2]) = 0x79f9f9f900000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long*)& __m256i_op1[0]) = 0x79f9f9f900000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8c80; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe40; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xf436f3f5; ++ *((int*)& __m128_op0[0]) = 0x2f4ef4a8; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffe40; ++ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_w(__m128i_op0,1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0004000000040000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004000000040000; ++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff6fff6fff6fff6; ++ __m128i_out = __lsx_vmini_h(__m128i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvexth_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0004000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004000000040000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0004000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004000000040000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0100010001000000; ++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0100010001000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x807f807f00000380; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007380; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc03fc03f000001c0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001c0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m256i_result[3]) = 0x807f807f00000380; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long*)& __m256i_result[1]) = 0xc03fc03f000001c0; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001c0; ++ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100010001000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf436f3f52f4ef4a8; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long*)& __m256i_op1[2]) = 0x79f9f9f900000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long*)& __m256i_op1[0]) = 0x79f9f9f900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x2000200020002000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff80000000000000; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffc0; ++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffe40; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000040004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff8000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff8000000000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf436f3f52f4ef4a8; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xf4b6f3f52f4ef4a8; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xf436f3f5; ++ *((int*)& __m128_op0[0]) = 0x2f4ef4a8; ++ *((int*)& __m128_op1[3]) = 0xff800000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xff800000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0x2f4ef4a8; ++ __m128_out = 
__lsx_vfmaxa_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff8000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff8000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff8000000000; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001600000016; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001600000016; ++ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00800000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xf4b6f3f5; ++ *((int*)& __m128_op0[0]) = 0x2f4ef4a8; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1c00; ++ __m256i_out = __lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffbfffc; ++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001600000016; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001600000016; ++ *((int*)& __m128_result[3]) = 0x41b00000; ++ *((int*)& __m128_result[2]) = 0x41b00000; ++ *((int*)& __m128_result[1]) = 0x41b00000; ++ *((int*)& __m128_result[0]) = 0x41b00000; ++ __m128_out = __lsx_vffint_s_wu(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvmini_w(__m256i_op0,12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff8000002f4ef4a8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f4a8; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x00000000fff0e400; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000007380; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000f1c00; ++ *((unsigned long*)& __m256d_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op2[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256d_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op2[0]) = 0x00000000fff0e400; ++ *((unsigned long*)& __m256d_result[3]) = 0x80000000ffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0x80000000ffff8c80; ++ *((unsigned long*)& __m256d_result[1]) = 0x80000000ffffffff; ++ *((unsigned long*)& __m256d_result[0]) = 0x80000000fff0e400; ++ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffe40; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x000000ff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0000ff00; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_b(__m256i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000;
++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242070db;
++ *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa478;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40;
++ *((unsigned long*)& __m256i_op1[3]) = 0x80000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ffff8c80;
++ *((unsigned long*)& __m256i_op1[1]) = 0x80000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x80000000fff0e400;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1a40;
++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf4b6f3f52f4ef4a8;
++ *((unsigned long*)& __m128i_result[1]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xf4b6f3f52f4ef4a8;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xf4b6f3f52f4ef4a8;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400;
++ *((unsigned long*)& __m256i_op1[3]) = 0x80000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x80000000ffff8c80;
++ *((unsigned long*)& __m256i_op1[1]) = 0x80000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x80000000fff0e400;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff01ff01;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff01c000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff01ff01;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000f1000000;
++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff00fff0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007f7f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f007f78;
++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002ff5;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc2cf2471e9b7d7a4;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000027f5;
++ *((unsigned long*)& __m128i_result[0]) = 0xc2cf2471e9b7d7a4;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff01ff01;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff01c000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff01ff01;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f1000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000001341c4000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001000310000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000033e87ef1;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000002e2100;
++ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffff1739ffff48aa;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff2896ffff5b88;
++ *((unsigned long*)& __m128i_result[1]) = 0x3f3f17393f3f3f3f;
++ *((unsigned long*)& __m128i_result[0]) = 0x3f3f283f3f3f3f3f;
++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000033e87ef1;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000033007e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000021;
++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f7f7f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f007f78;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000033007e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000021;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007fff;
++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xff800000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xff800000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffff1739;
++ *((int*)& __m128_op1[2]) = 0xffff48aa;
++ *((int*)& __m128_op1[1]) = 0xffff2896;
++ *((int*)& __m128_op1[0]) = 0xffff5b88;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f7f7f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f007f78;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x007f00007f7f0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f00fffb7f78fffc;
++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001341c4000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000310000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000033e87ef1;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000002e2100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000011c00;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000e8f1;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000103100;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000002e00;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00007f7f00000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00007f7f00007fff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000f1a40;
++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m128_op0[3]) = 0x2757de72;
++ *((int*)& __m128_op0[2]) = 0x33d771a3;
++ *((int*)& __m128_op0[1]) = 0x166891d5;
++ *((int*)& __m128_op0[0]) = 0x1e8b7eff;
++ *((int*)& __m128_op1[3]) = 0x2757de72;
++ *((int*)& __m128_op1[2]) = 0x33d771a3;
++ *((int*)& __m128_op1[1]) = 0x166891d5;
++ *((int*)& __m128_op1[0]) = 0x1e8b7eff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001341c4000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000310000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00007f7f00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f00007fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000007f00340040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000007f000000ff;
++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000033e87ef1;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x80008000b3e8fef1;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x80008000802ea100;
++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002;
++ __m128i_out = __lsx_vfclass_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf4b6f3f52f4ef4a8;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000;
++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7ff77fff7ff7;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7ff77fff7ff7;
++ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x6a1a3fbb3c90260e;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x195f307a5d04acbb;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x195f307a5d04acbb;
++ *((unsigned long*)& __m128i_op1[0]) = 0x6a1a3fbb3c90260e;
++ *((unsigned long*)& __m128i_result[1]) = 0x19df307a5d04acbb;
++ *((unsigned long*)& __m128i_result[0]) = 0x5ed032b06bde1ab6;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfff5fff4002ffff5;
++ __m128i_out = __lsx_vsrari_h(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ff77fff7ff7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7ff77fff7ff7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000010;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000022;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000022;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004;
++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x3e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x19df307a5d04acbb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5ed032b06bde1ab6;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x19de307a5d04acba;
++ *((unsigned long*)& __m128i_result[0]) = 0x5ed032b06bde1ab6;
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000010;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000012e2110;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfff5fff4002ffff5;
++ *((unsigned long*)& __m128i_op1[1]) = 0xaa858644fb8b3d49;
++ *((unsigned long*)& __m128i_op1[0]) = 0x18499e2cee2cc251;
++ *((unsigned long*)& __m128i_result[1]) = 0x8644000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xaed495f03343a685;
++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000012e2110;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202;
++ *((unsigned long*)& __m256i_result[2]) = 0x0202020202020202;
++ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80008000b3e8fef1;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80008000802ea100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000012e2110;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x012e2110012e2110;
++ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffbfffc;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xfffffffe;
++ *((int*)& __m128_op0[0]) = 0xbe6ed565;
++ *((int*)& __m128_op1[3]) = 0x195f307a;
++ *((int*)& __m128_op1[2]) = 0x5d04acbb;
++ *((int*)& __m128_op1[1]) = 0x6a1a3fbb;
++ *((int*)& __m128_op1[0]) = 0x3c90260e;
++ *((int*)& __m128_op2[3]) = 0xffffffff;
++ *((int*)& __m128_op2[2]) = 0xffffffff;
++ *((int*)& __m128_op2[1]) = 0xfffffffe;
++ *((int*)& __m128_op2[0]) = 0xbe6ed565;
++ *((int*)& __m128_result[3]) = 0xffffffff;
++ *((int*)& __m128_result[2]) = 0xffffffff;
++ *((int*)& __m128_result[1]) = 0xfffffffe;
++ *((int*)& __m128_result[0]) = 0x3e730941;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x012e2110012e2110;
++ int_op1 = 0x00000000000000ac;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ac;
++ *((unsigned long*)& __m256i_result[0]) = 0x012e2110012e2110;
++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x6a1a3fbb3c90260e;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xe6a0cf86a2fb5345;
++ *((unsigned long*)& __m128i_result[0]) = 0x95e5c045c36fd9f2;
++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0202020202020202;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0202020202020202;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffebe6ed565;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffebe6ed565;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffbe6ed563;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00007f7f00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f00007fff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000007f00340040;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000007f000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020200008;
++ *((unsigned long*)& __m256i_result[1]) = 0x0008010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00007f7f00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f00007fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000040000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000040000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007fff;
++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x2a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_h(__m128i_op0,10);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x002e2100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0d1bffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd915e98e2d8df4d1;
++ *((unsigned long*)& __m128i_result[1]) = 0xd0b1ffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x9d519ee8d2d84f1d;
++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020200008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0008010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000;
++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8644000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xaed495f03343a685;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbe6ed563;
++ *((unsigned long*)& __m128i_result[1]) = 0x8644ffff0000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000fffe;
++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0080000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000;
++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f00340040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000007f000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbe6ed563;
++ *((unsigned long*)& __m128i_op1[1]) = 0xd0b1ffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x9d519ee8d2d84f1d;
++ *((unsigned long*)& __m128i_result[1]) = 0xfefd7f7f7f7f7f7e;
++ *((unsigned long*)& __m128i_result[0]) = 0xdffdbffeba6f5543;
++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbe6ed563;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xd0b1ffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9d519ee8d2d84f1d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8644ffff0000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x4a6d0000ffff0000;
++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000040002;
++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfefd7f7e7f7f7f7f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x9d519ee8d2d84f1d;
++ *((unsigned long*)& __m128i_op2[1]) = 0x8644ffff0000ffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff8080;
++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfefd7f7f7f7f7f7e;
++ *((unsigned long*)& __m128i_op0[0]) = 0xdffdbffeba6f5543;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ffffff000000ff;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0101000001010000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ffffff000000ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ffffff000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffff000000ff00;
++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000001010000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000;
++ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000001010000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000;
++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0080000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff000000ff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ff00;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00;
++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000040002;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x000000000000007f;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd83c8081ffff8080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f;
++ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x000000000000007f;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000020001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000040002;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000040002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xefefefefefefefef;
++ *((unsigned long*)& __m256i_result[2]) = 0xefefefefefefefef;
++ *((unsigned long*)& __m256i_result[1]) = 0xefefefefefefef6e;
++ *((unsigned long*)& __m256i_result[0]) = 0xeeeeeeeeeeeeeeee;
++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
++ __m128d_out = __lsx_vflogb_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0xefefefef;
++ *((int*)& __m256_op0[6]) = 0xefefefef;
++ *((int*)& __m256_op0[5]) = 0xefefefef;
++ *((int*)& __m256_op0[4]) = 0xefefefef;
++ *((int*)& __m256_op0[3]) = 0xefefefef;
++ *((int*)& __m256_op0[2]) = 0xefefef6e;
++ *((int*)& __m256_op0[1]) = 0xeeeeeeee;
++ *((int*)& __m256_op0[0]) = 0xeeeeeeee;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd83c8081ffff8080;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff8080;
++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2020000020200000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0008000001010000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101000001010000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff8080;
++ *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff808f;
++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0018001800180018;
++ *((unsigned long*)& __m128i_result[0]) = 0x0018001800180018;
++ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xefefefefefefefef;
++ *((unsigned long*)& __m256i_op0[2]) = 0xefefefefefefefef;
++ *((unsigned long*)& __m256i_op0[1]) = 0xefefefefefefef6e;
++ *((unsigned long*)& __m256i_op0[0]) = 0xeeeeeeeeeeeeeeee;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101012;
++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101012;
++ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101093;
++ *((unsigned long*)& __m256i_result[0]) = 0x1111111111111113;
++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xfefd7f7f7f7f7f7e;
++ *((unsigned long*)& __m128d_op0[0]) = 0xdffdbffeba6f5543;
++ *((unsigned long*)& __m128d_op1[1]) = 0xfefd7f7f7f7f7f7e;
++ *((unsigned long*)& __m128d_op1[0]) = 0xdffdbffeba6f5543;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff00fff0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffff00fffffff0;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xfefd7f7f;
++ *((int*)& __m128_op1[2]) = 0x7f7f7f7e;
++ *((int*)& __m128_op1[1]) = 0xdffdbffe;
++ *((int*)& __m128_op1[0]) = 0xba6f5543;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x7f7f7f7e;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m256_op0[7]) = 0x10101010;
++ *((int*)& __m256_op0[6]) = 0x10101012;
++ *((int*)& __m256_op0[5]) = 0x10101010;
++ *((int*)& __m256_op0[4]) = 0x10101012;
++ *((int*)& __m256_op0[3]) = 0x10101010;
++ *((int*)& __m256_op0[2]) = 0x10101093;
++ *((int*)& __m256_op0[1]) = 0x11111111;
++ *((int*)& __m256_op0[0]) = 0x11111113;
++ *((int*)& __m256_result[7]) = 0xc2be0000;
++ *((int*)& __m256_result[6]) = 0xc2be0000;
++ *((int*)& __m256_result[5]) = 0xc2be0000;
++ *((int*)& __m256_result[4]) = 0xc2be0000;
++ *((int*)& __m256_result[3]) = 0xc2be0000;
++ *((int*)& __m256_result[2]) = 0xc2be0000;
++ *((int*)& __m256_result[1]) = 0xc2ba0000;
++ *((int*)& __m256_result[0]) = 0xc2ba0000;
++ __m256_out = __lasx_xvflogb_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xe2ecd48adedc7c82;
++ *((unsigned long*)& __m128i_op0[0]) = 0x25d666472b01d18d;
++ *((unsigned long*)& __m128i_result[1]) = 0x0303020102020001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0002000000000201;
++ __m128i_out = __lsx_vclo_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0018001800180018;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0018001800180018;
++ *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff808f;
++ *((unsigned long*)& __m128i_result[1]) = 0xfff489b693120950;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffc45a851c40c18;
++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0018001800180018;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0018001800180018;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077;
++ *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998;
++ *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff808f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xd82480697f678077;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0303020102020001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000201;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xd82480697f678077;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0301020100000004;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1010101010101012;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1010101010101012;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1010101010101093;
++ *((unsigned long*)& __m256i_op1[0]) = 0x1111111111111113;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x1010101110101011;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x1111111211111112;
++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ long_int_result = 0x0000000000000000;
++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020001;
++ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010121011;
++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x10101011;
++ *((int*)& __m256_op1[4]) = 0x10101011;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x11111112;
++ *((int*)& __m256_op1[0]) = 0x11111112;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xfefd7f7f7f7f7f7e;
++ *((unsigned long*)& __m128d_op0[0]) = 0xdffdbffeba6f5543;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18;
++ *((unsigned long*)& __m128i_result[1]) = 0xe0d56a9774f3ea31;
++ *((unsigned long*)& __m128i_result[0]) = 0xe0dd268932a5edf9;
++ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x1f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd82480697f678077;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vslti_w(__m128i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00fffffff0;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808081;
++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808081;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1010101110101011;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1111111211111112;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004444;
++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x2e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xe0d56a9774f3ea31;
++ *((unsigned long*)& __m128i_op0[0]) = 0xe0dd268932a5edf9;
++ *((unsigned long*)& __m128i_op1[1]) = 0xe0d56a9774f3ea31;
++ *((unsigned long*)& __m128i_op1[0]) = 0xe0dd268932a5edf9;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xd8248069ffe78077;
++ *((unsigned long*)& __m128i_result[1]) = 0xe0d56a9774f3ea31;
++ *((unsigned long*)& __m128i_result[0]) = 0xbddaa86803e33c2a;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd82480697f678077;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)&
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808081; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8080808080808081; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000808000008080; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000808000008081; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x9f9f9f9f9f9f9f9f; ++ *((unsigned long*)& __m256i_result[2]) = 0x9f9f9f9fffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x9f9f9f9f9f9f9f9f; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff9fffffffff; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x9f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long*)& __m128i_op0[0]) = 0xbddaa86803e33c2a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long*)& __m128i_op1[0]) = 0xbddaa86803e33c2a; ++ *((unsigned long*)& __m128i_result[1]) = 0xff0600d50e9ef518; ++ *((unsigned long*)& __m128i_result[0]) = 0xffefffa8007c000f; ++ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000a; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000a; ++ __m128i_out = __lsx_vmaxi_d(__m128i_op0,10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff489b693120950; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc45a851c40c18; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4811fda96793b23a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8f10624016be82fd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfda9b23a624082fd; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = 
__lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xd8248069ffe78077; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xe31c86e90cda86f7; ++ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe31c86e90cda86f7; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000e3; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x38); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff489b693120950; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc45a851c40c18; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffc45a851c40c18; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x48); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff0600d50e9ef518; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffefffa8007c000f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c00000000; ++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000e3; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((int*)& __m128_result[3]) = 0x43630000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0xdc159371; ++ *((int*)& __m128_result[0]) = 0x4f7fff00; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmax_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000808000008080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000808000008081; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000808000008080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000808000008081; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000081; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x68); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,-9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xd8248069; ++ *((int*)& __m128_op0[0]) = 0x7f678077; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xd8248069; ++ *((int*)& __m128_op1[0]) = 0x7f678077; ++ *((int*)& __m128_result[3]) = 0x7fc00000; ++ *((int*)& __m128_result[2]) = 0x7fc00000; ++ *((int*)& __m128_result[1]) = 0x3f800000; ++ *((int*)& __m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x2002040404010420; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010180800101; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x2002040404010420; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0101010180800101; ++ *((unsigned long*)& __m128i_result[1]) = 0x2002040404010420; ++ *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c80800101; ++ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a03574e3a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, 
__m256d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x9c9c9c9c00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x03574e3a; ++ *((int*)& __m128_op1[2]) = 0x03574e3a; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffff00; ++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_result[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0202020202020202; ++ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e39e496cbc9; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,-9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7da9b23a624082fd; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0505050505050505; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000005050000; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e39e496cbc9; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x03574e38e496cbc9; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x05050505; ++ *((int*)& __m128_op0[2]) = 0x05050505; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x05050000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x03574e38; ++ *((int*)& __m128_op1[0]) = 0xe496cbc9; ++ *((int*)& __m128_result[3]) = 0x05050505; ++ *((int*)& __m128_result[2]) = 0x05050505; ++ *((int*)& __m128_result[1]) = 0x03574e38; ++ *((int*)& __m128_result[0]) = 0xe496cbc9; ++ 
__m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x3e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005050000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0505000005050505; ++ *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0028280000282800; ++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03574e3b94f2ca31; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000001f807b89; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000005050000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0505000005050505; ++ *((unsigned long*)& __m128i_result[1]) = 0x000d02540000007e; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001400140014; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005050000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0505000005050505; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000d02540000007e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001400140014; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0505050505050505; ++ *((unsigned long*)& __m128i_op2[0]) = 0x03574e38e496cbc9; ++ *((unsigned long*)& __m128i_result[1]) = 0x0005000400000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0400001001150404; ++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000006597cc3d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 
0x000000006595cc1d; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0505050505050505; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000005050000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0028280000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0028280000282800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000282800; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0005000400000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0400001001150404; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0005000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0400001001150404; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000000; ++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x23); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0028280000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0028280000282800; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op2[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x012927ffff272800; ++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcfb799f1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0282800002828282; ++ *((unsigned long*)& __m128i_op1[1]) = 0x5555001400005111; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffabbeab55110140; ++ *((unsigned long*)& __m128i_result[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long*)& __m128i_result[0]) = 0xfd293eab528e7ebe; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5555001400005111; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffabbeab55110140; ++ *((unsigned long*)& __m128i_op1[1]) = 0x5555001400005111; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffabbeab55110140; ++ *((unsigned long*)& __m128i_result[1]) = 0xaaaa00280000a222; ++ *((unsigned long*)& __m128i_result[0]) = 0xfe567c56aa220280; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0028280000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x012927ffff272800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0028280000000000; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 0x7545c57d6541c5f5; ++ *((unsigned long*)& __m128i_result[0]) = 0x41414040fefe4141; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x40); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x000d0254; ++ *((int*)& __m128_op0[2]) = 0x0000007e; ++ *((int*)& __m128_op0[1]) = 0x00000014; ++ *((int*)& __m128_op0[0]) = 0x00140014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfd293eab528e7ebe; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffeb48e03eab7ebe; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xaaaaffebcfb748e0; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long*)& __m128i_result[1]) = 0xf6e91c0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x51cfd7c000000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffeb48e03eab7ebe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffc0fac01200f800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0f80eac01f80ef80; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000001fc1a568; ++ *((unsigned long*)& __m128i_op0[0]) = 0x02693fe0e7beb077; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d(__m128i_op0,-6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000001fc1a568; ++ *((unsigned long*)& __m128i_op0[0]) = 0x02693fe0e7beb077; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000030000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcfb799f1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0282800002828282; ++ *((int*)& __m128_result[3]) = 0xffffe000; ++ *((int*)& __m128_result[2]) = 0xffffe000; ++ *((int*)& __m128_result[1]) = 0xc1f6e000; ++ *((int*)& __m128_result[0]) = 0xbb3e2000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m128_op0[3]) = 0xf6e91c00; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x51cfd7c0; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x880c91b8; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x2d1da85b; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x80008000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x80008000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& 
__m256_op1[3]) = 0x80008000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x80008000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_result[1]) = 0x7404443064403aec; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000d6eefefc0498; ++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf6e91c0000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x51cfd7c000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffd000700000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0014fff500000000; ++ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x7f800000; ++ *((int*)& __m128_op0[1]) = 0x2d1da85b; ++ *((int*)& __m128_op0[0]) = 0x7f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000007fffffff; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7404443064403aec; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000d6eefefc0498; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff7f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2d1da85b7f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x002d001dd6a8ee5b; ++ *((unsigned long*)& __m128i_result[0]) = 0xfe7ffc8004009800; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_result[1]) = 0xb71289fdfbea3f69; ++ *((unsigned long*)& __m128i_result[0]) = 0x4e17c2ffb4851a40; ++ __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xda4643d5301c4000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc1fc0d3bf55c4000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100010000; ++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff7f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2d1da85b7f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000013d; ++ __m128i_out = __lsx_vmskltz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x40); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000700000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0014fff500000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f03000780000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f15000a7f010101; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000013d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000030000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0006000200000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00100010; ++ *((int*)& __m128_op0[2]) = 0x00030000; ++ *((int*)& __m128_op0[1]) = 0x00060002; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& 
__m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100010000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100010080; ++ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000750500006541; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000100fffffefd; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0110000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0110000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0110000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0110000000000080; ++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0110000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0110000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0110000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0110000000000080; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0110000000000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0110000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0110000000000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0110000000000080; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000030000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0006000200000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7505445465593af1; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000030000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff0000000000080; ++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0000001a; ++ *((int*)& __m128_op0[2]) = 0xfffffff7; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001afffffff7; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000750500006541; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000100fffffefd; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0xfff0000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000080; ++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002400180004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000024; ++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x7fffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff00000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x7fffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7e00fe0000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000030000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00060001fffe8003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00001000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00001000; ++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505445465593af1; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xf000000000000000; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0x00001000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0x00001000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x1000000000000000; ++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; ++ *((int*)& __m256_result[7]) = 0xc6000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0xc6000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000024; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000024; ++ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc0ff81000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000600000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffc0ff81000000; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc600000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc600000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6300000000000000; ++ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x6300000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xc600000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xc600000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000750500006541; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000100fffffefd; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6300000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6300000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6300000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x6300000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ 
*((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1c80780000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1c80780000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7f80780000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x22); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x6b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc0ff81000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff0ffe04000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1090918800000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1090918800000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1c80780000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1c80780000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1c80780000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1c80780000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000400000004000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000400000204010; ++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x1fe01e0100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xf000f00000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xf000f00000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xf000f00000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xf000f00000000001;
++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000f0000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000f0000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xfffffff0ffe04000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001fc0000;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1fe01e0100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x1fe01e0100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000400000204010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000020000010200;
++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff0ffe04000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x3f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf000f00000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf000f00000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x6300000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0xf000f00000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x6300000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0xf000f00000000001;
++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x41);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000001;
++ *((int*)& __m256_op0[6]) = 0x00000001;
++ *((int*)& __m256_op0[5]) = 0x00000001;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x00000001;
++ *((int*)& __m256_op0[2]) = 0x00000001;
++ *((int*)& __m256_op0[1]) = 0x00000001;
++ *((int*)& __m256_op0[0]) = 0x00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x1f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x39);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffff0ffe04000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_w(__m128i_op0,0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000040004000100;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001fc0000;
++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffc00;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffc00;
++ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffc00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000;
++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xce7ffffffffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xce7ffffffffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0x6300000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002010;
++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000002010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001fbdff0;
++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010;
++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000;
++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400;
++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xce7ffffffffffffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xce7ffffffffffffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x327f010101010102;
++ *((unsigned long*)& __m256i_result[2]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x327f010101010102;
++ *((unsigned long*)& __m256i_result[0]) = 0x6300000000000000;
++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x22);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x7fc00000;
++ *((int*)& __m128_result[2]) = 0x7fc00000;
++ *((int*)& __m128_result[1]) = 0x7fc00000;
++ *((int*)& __m128_result[0]) = 0x7fc00000;
++ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m256_op0[7]) = 0x327f0101;
++ *((int*)& __m256_op0[6]) = 0x01010102;
++ *((int*)& __m256_op0[5]) = 0x63000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x327f0101;
++ *((int*)& __m256_op0[2]) = 0x01010102;
++ *((int*)& __m256_op0[1]) = 0x63000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xce7fffff;
++ *((int*)& __m256_op1[6]) = 0xfffffffe;
++ *((int*)& __m256_op1[5]) = 0x63000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0xce7fffff;
++ *((int*)& __m256_op1[2]) = 0xfffffffe;
++ *((int*)& __m256_op1[1]) = 0x63000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x327f010101010102;
++ *((unsigned long*)& __m256i_op0[2]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x327f010101010102;
++ *((unsigned long*)& __m256i_op0[0]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff4;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff4;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff4;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff4;
++ __m256i_out = __lasx_xvmini_d(__m256i_op0,-12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xce7ffffffffffffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xce7ffffffffffffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff39ffffff;
++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x5e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0400040004000400;
++ *((unsigned long*)& __m128d_result[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128d_result[0]) = 0x0400040004000400;
++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x7f800000;
++ *((int*)& __m128_result[2]) = 0x7f800000;
++ *((int*)& __m128_result[1]) = 0x7f800000;
++ *((int*)& __m128_result[0]) = 0x7f800000;
++ __m128_out = __lsx_vfrsqrt_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000040004000100;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0x39ffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0x39ffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x7f800000;
++ *((int*)& __m128_op0[2]) = 0x7f800000;
++ *((int*)& __m128_op0[1]) = 0x7f800000;
++ *((int*)& __m128_op0[0]) = 0x7f800000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
++ __m256i_out = __lasx_xvclz_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400;
++ unsigned_int_result = 0x0000000000000400;
++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
++ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b;
++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
++ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_hu(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x6300000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x6300000000000001;
++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000001000000010;
++ *((unsigned long*)& __m256d_op1[3]) = 0x45d5555545d55555;
++ *((unsigned long*)& __m256d_op1[2]) = 0x74555555e8aaaaaa;
++ *((unsigned long*)& __m256d_op1[1]) = 0x45d5555545d55555;
++ *((unsigned long*)& __m256d_op1[0]) = 0x74555555e8aaaaaa;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010;
++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404;
++ *((unsigned long*)& __m128i_result[0]) = 0x0404040404040404;
++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_op0[1]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_result[2]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_result[1]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_result[0]) = 0x55555555aaaaaaac;
++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_op1[2]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_op1[1]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_op1[0]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010;
++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_op0[1]) = 0x5555555536aaaaac;
++ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff39ffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x2b2b2b2b1bd5d5d6;
++ *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2af2d5d5d6;
++ *((unsigned long*)& __m256i_result[1]) = 0x2b2b2b2b1bd5d5d6;
++ *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2af2d5d5d6;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256d_op2[3]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256d_op2[2]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256d_op2[1]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256d_op2[0]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000aaaa0000aaaa;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000aaaa00008bfe;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000aaaa0000aaaa;
++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2b2b2b2b1bd5d5d6;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2a2a2a2af2d5d5d6;
++ *((unsigned long*)& __m256i_op0[1]) = 0x2b2b2b2b1bd5d5d6;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2a2a2a2af2d5d5d6;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002a0000002a;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002a0000002a;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff2ffffffd5;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffd5ffffffd6;
++ __m256i_out = __lasx_vext2xv_w_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x55555555;
++ *((int*)& __m256_op0[6]) = 0x36aaaaac;
++ *((int*)& __m256_op0[5]) = 0x55555555;
++ *((int*)& __m256_op0[4]) = 0xaaaaaaac;
++ *((int*)& __m256_op0[3]) = 0x55555555;
++ *((int*)& __m256_op0[2]) = 0x36aaaaac;
++ *((int*)& __m256_op0[1]) = 0x55555555;
++ *((int*)& __m256_op0[0]) = 0xaaaaaaac;
++ *((unsigned long*)& __m256i_result[3]) = 0x555555553f800000;
++ *((unsigned long*)& __m256i_result[2]) = 0x5555555580000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x555555553f800000;
++ *((unsigned long*)& __m256i_result[0]) = 0x5555555580000000;
++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000010; ++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000001fffe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000001fffe; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5555555580000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5555555580000000; ++ int_result = 0x0000000055555555; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x5); ++ *((int*)& __m128_op0[3]) = 0xc1bdceee; ++ *((int*)& __m128_op0[2]) = 0x242070db; ++ *((int*)& __m128_op0[1]) = 0xe8c7b756; ++ *((int*)& __m128_op0[0]) = 0xd76aa478; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& 
__m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff800000000000; ++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5555555580000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5555555580000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x555555553f800000; ++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x55555555; ++ *((int*)& __m256_op0[6]) = 0x3f800000; ++ *((int*)& __m256_op0[5]) = 0x55555555; ++ *((int*)& __m256_op0[4]) = 0x80000000; ++ *((int*)& __m256_op0[3]) = 0x55555555; ++ *((int*)& __m256_op0[2]) = 0x3f800000; ++ *((int*)& __m256_op0[1]) = 0x55555555; ++ *((int*)& __m256_op0[0]) = 0x80000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x0001fffe; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x0001fffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x0000aaaa; ++ *((int*)& __m256_op0[6]) = 0x00008bfe; ++ *((int*)& __m256_op0[5]) = 0x0000aaaa; ++ *((int*)& __m256_op0[4]) = 0x0000aaaa; ++ *((int*)& __m256_op0[3]) = 0x0000aaaa; ++ *((int*)& __m256_op0[2]) = 0x00008bfe; ++ *((int*)& __m256_op0[1]) = 0x0000aaaa; ++ *((int*)& __m256_op0[0]) = 0x0000aaaa; ++ *((unsigned long*)& __m256d_result[3]) = 0x3795554000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x37917fc000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x3795554000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x37917fc000000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff5556aaaa; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff5556aaaa; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0006ffff0004ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0006ffff0004ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0006ffff0004ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00020000aaa95556; ++ *((unsigned long*)& __m256i_result[1]) = 0x0006ffff0004ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00020000aaa95556; ++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vslli_h(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x2b2b2b2b1bd68080; ++ *((unsigned long*)& __m256i_op1[2]) = 0x2a2ad4d4f2d8807e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x2b2b2b2b1bd68080; ++ *((unsigned long*)& __m256i_op1[0]) = 0x2a2ad4d4f2d8807e; ++ *((unsigned long*)& __m256i_result[3]) = 0xd4d5d4d5e42a7f80; ++ *((unsigned long*)& __m256i_result[2]) = 0xd5d62b2c0d287f82; ++ *((unsigned long*)& __m256i_result[1]) = 0xd4d5d4d5e42a7f80; ++ *((unsigned long*)& __m256i_result[0]) = 0xd5d62b2c0d287f82; ++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xfffffffc; ++ *((int*)& __m256_op0[4]) = 0x5556aaa8; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xfffffffc; ++ *((int*)& __m256_op0[0]) = 0x5556aaa8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000055; ++ *((int*)& __m256_op0[6]) = 0x36aaaaac; ++ *((int*)& __m256_op0[5]) = 0x55555555; ++ *((int*)& __m256_op0[4]) = 0xaaaaaaac; ++ *((int*)& __m256_op0[3]) = 0x00000055; ++ *((int*)& __m256_op0[2]) = 0x36aaaaac; ++ *((int*)& __m256_op0[1]) = 0x55555555; ++ *((int*)& __m256_op0[0]) = 0xaaaaaaac; ++ *((int*)& __m256_op1[7]) = 0x00060000; 
++ *((int*)& __m256_op1[6]) = 0x00040000; ++ *((int*)& __m256_op1[5]) = 0x00025555; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00060000; ++ *((int*)& __m256_op1[2]) = 0x00040000; ++ *((int*)& __m256_op1[1]) = 0x00025555; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffc5556aaa8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffc5556aaa8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000007070205; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000002020100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000007070205; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000002020100; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffdaaaaffffffff; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0002555400000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0002555400000000; ++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long*)& __m256d_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long*)& __m256d_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0002555400000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0002555400000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000005554; ++ *((unsigned long*)& __m256i_op1[2]) = 0xaaaa0000aaacfffe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000005554; ++ *((unsigned long*)& __m256i_op1[0]) = 0xaaaa0000aaacfffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000054; ++ *((unsigned long*)& __m256i_result[2]) = 0x00aa000000ac00fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000054; ++ *((unsigned long*)& __m256i_result[0]) = 0x00aa000000ac00fe; ++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000005536aaaaac; ++ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000060102150101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000060102150101; ++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x6664666466646664; ++ *((unsigned long*)& __m128i_result[0]) = 0x6664666466646664; ++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x66); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000054; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00aa000000ac00fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000054; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00aa000000ac00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0002a80000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0002b0000003f800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0002a80000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0002b0000003f800; ++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000005136aaaaa8; ++ *((unsigned long*)& __m256i_result[2]) = 0x55515551aaaaaaa8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000005136aaaaa8; ++ *((unsigned long*)& __m256i_result[0]) = 0x55515551aaaaaaa8; ++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00aa000000ac00fe; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00aa000000ac00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xff00000000000000; ++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128d_op0[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vfclass_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff5556aaaa; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0xffffffff5556aaaa; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfe7ffffffeffffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfe7ffffffeffffc0; ++ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1818181818181818; ++ *((unsigned long*)& __m256i_result[2]) = 0x1818181818181818; ++ *((unsigned long*)& __m256i_result[1]) = 0x1818181818181818; ++ *((unsigned long*)& __m256i_result[0]) = 0x1818181818181818; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffe0000fffe0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffe0000fffe0000; ++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0002555500000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x3b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0007000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0007000000000000; ++ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x40000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x40000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000060000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000060000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000ff00fe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00ff; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00ff; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe1; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x5980000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000016600000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000016600000000; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000016600000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000016600000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000016600000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000016600000000; ++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000060000; ++ *((unsigned long*)& __m256i_op1[2]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000060000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000060000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000060000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x0000fffe; ++ *((int*)& __m128_op0[0]) = 0x0000ffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x5980000000000000; ++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00060000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00060000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000166; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000166; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00555555553f8000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00555555553f8000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001fffe; ++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x59800000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x59800000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x59800000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x59800000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x2c27000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x2c27000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ 
*((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00fe00ff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x5900000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x5900000000000000; ++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x59800000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x59800000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x0eb7aaaa; ++ *((int*)& __m256_op1[6]) = 0xa6e6ac80; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x0eb7aaaa; ++ *((int*)& __m256_op1[2]) = 0xa6e6ac80; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x353bb67af686ad9b; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x353bb67af686ad9b; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0002000200020002; ++ *((unsigned long*)& __m256i_result[2]) = 0x5982000200020002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m256i_result[0]) = 0x5982000200020002; ++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x353bb67af686ad9b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x353bb67af686ad9b; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0200000200000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x2c27000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0200000200000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x2c27000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1cfd000000000000; ++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0200000200000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x2c27000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0200000200000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x2c27000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x41d6600000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x41d6600000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1cfd000000000000; ++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x59800000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x59800000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x41d66000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x41d66000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x41d6600000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x41d6600000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x41d6600000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x41d6600000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7fffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1cfd000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x56a09e662ab46b31; ++ *((unsigned long*)& __m128i_op0[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x56a09e662ab46b31; ++ *((unsigned long*)& __m128i_result[0]) = 0xb4b8122ef4054bb3; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000400000001; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000400000001; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long*)& __m128i_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x02b504f305a5c091; ++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_result[2]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_result[0]) = 0x6aeaeaeaeaeaeaea; ++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x02b504f305a5c091; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x02b504f305a5c091; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000005602d2; ++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_op1[2]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,-3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x3c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x0000ffff; ++ *((int*)& __m256_op0[4]) = 0x0000ffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x0000ffff; ++ *((int*)& __m256_op0[0]) = 0x0000ffff; ++ *((int*)& __m256_op1[7]) = 0x0eb7aaaa; ++ *((int*)& __m256_op1[6]) = 0xa6e6ac80; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x0eb7aaaa; ++ *((int*)& __m256_op1[2]) = 0xa6e6ac80; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff01ff01; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000101fd01fe; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0020000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0020000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x4b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ 
*((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x73);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000020;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0020000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0020000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffff01ff01;
++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0020000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe;
++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff02;
++ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff02;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000101fd01fe;
++ *((unsigned long*)& __m128i_result[1]) = 0xff80ff80ff80ff80;
++ *((unsigned long*)& __m128i_result[0]) = 0xff80ff8080008000;
++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000101fd01fe;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff80ff80ff80ff80;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff80ff8080008000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000101fd01fe;
++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe;
++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000001fe;
++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_result[3]) = 0xff800000;
++ *((int*)& __m128_result[2]) = 0xff800000;
++ *((int*)& __m128_result[1]) = 0xffffffff;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vflogb_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001400000014;
++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001fe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128d_op1[1]) = 0xff800000ff800000;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x3c600000ff800000;
++ *((unsigned long*)& __m128d_result[0]) = 0xfffffffffffffffe;
++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001400000014;
++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000001;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xfffffffe;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0xffffff02;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0011001100110011;
++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vneg_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000014;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128i_result[0]) = 0x04000400fbfffb02;
++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x3c600000ff800000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000;
++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3c600000ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0f180000ffe00000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0f180000ffe00000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x3c5fffffff7fffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffefffeff00feff;
++ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xff01ff01;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0xffffffff;
++ *((int*)& __m128_op2[2]) = 0xffffffff;
++ *((int*)& __m128_op2[1]) = 0xffffffff;
++ *((int*)& __m128_op2[0]) = 0xff01ff01;
++ *((int*)& __m128_result[3]) = 0xffffffff;
++ *((int*)& __m128_result[2]) = 0xffffffff;
++ *((int*)& __m128_result[1]) = 0xffffffff;
++ *((int*)& __m128_result[0]) = 0x7f01ff01;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0xc39fffff007fffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00fd;
++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x7f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3c5fffffff7fffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffeff00feff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x03fc03fc03f803f8;
++ *((unsigned long*)& __m256i_result[2]) = 0x03fc03fc03f803f8;
++ *((unsigned long*)& __m256i_result[1]) = 0x03fc03fc03f803f8;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000013ffffffec;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000013ffffebd8;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000013ffffffec;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000013ffffebd8;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x03fc03fc03f803f8;
++ *((unsigned long*)& __m256d_op0[2]) = 0x03fc03fc03f803f8;
++ *((unsigned long*)& __m256d_op0[1]) = 0x03fc03fc03f803f8;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256d_result[2]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256d_result[1]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xc39fffff007fffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00fd;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff0e700000000;
++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x32);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xc39fffff007fffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00fd;
++ *((unsigned long*)& __m128i_result[1]) = 0x0e7ffffc01fffffc;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000003f803f4;
++ __m128i_out = __lsx_vslli_w(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0e7ffffc01fffffc;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000003f803f4;
++ *((unsigned long*)& __m128i_result[1]) = 0x1000000010000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100100000;
++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0e7ffffc01fffffc;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000003f803f4;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0e7ffffc01fffffc;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000003f803f4;
++ *((unsigned long*)& __m128i_result[1]) = 0x0e7ffffc01fffffc;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000001003f803f4;
++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256i_result[2]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256i_result[1]) = 0x7be2468acf15f39c;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xc39fffff007fffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00fd;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x61cf003f0000007f;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000003c607f80;
++ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000013ffffffec;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000013ffffebd8;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000013ffffffec;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000013ffffebd8;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffec;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffebd8;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffec;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffebd8;
++ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff7f01ff01;
++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x36);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x85);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffec;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffebd8;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffec;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffebd8;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffec;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffec;
++ __m256i_out = __lasx_xvexth_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x78c00000ff000000;
++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffec;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffec;
++ *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100100000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff1;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff1;
++ __m128i_out = __lsx_vmini_d(__m128i_op0,-15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff1;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff1;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000;
++ int_op1 = 0x0000000000000400;
++ *((unsigned long*)& __m128i_result[1]) = 0xff000000ff000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000;
++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff000000ff000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff000000ff000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xff000000ff000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000;
++ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000003ff000003ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d;
++ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000400;
++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400;
++ __m128i_out = __lsx_vreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000078c00000;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffec;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffec;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0010000000100000;
++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_b(__m128i_op0,-2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff7f01ff01;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe03;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe03;
++ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x0000000d;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xfffffe03;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xfffffe03;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000;
++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x1d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100100000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x2000000020000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200200000;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80;
++ *((unsigned long*)& __m128i_op1[1]) = 0x2000000020000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200200000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x6a57a30ff0000000;
++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d;
++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x37);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x4f800000;
++ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000078c00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000078c00000;
++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000078c00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d;
++ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xf7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) =
0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000000; ++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x00000000b5207f80; ++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x00000000b5207f80; ++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000180100100000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000b5207f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001801b5307f80; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ 
*((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000078c00000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6a57a30ff0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000f0000000; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff01fffffffeff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff01fffffffeff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff01fffffffeff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff01fffffffeff; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000f0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000b5207f80; ++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x6a57a30ff0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x6a57a30ff0000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001801f0307f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001801f0307f80; ++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000f0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6a57a30ff0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe01fe01fe; ++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ 
*((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000d000d000d000d; ++ *((unsigned long*)& __m128i_result[0]) = 0x000d000d000d000d; ++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000d000d000d000d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000d000d000d000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000680000006800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x2d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe00000000; ++ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x040004000400040d; ++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_op0[0]) = 0x040004000400040d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0501050105010501; ++ *((unsigned long*)& __m128i_result[0]) = 0x050105010501050c; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_op0[0]) 
= 0x040004000400040d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x040004000400040d; ++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x05010501; ++ *((int*)& __m128_op1[2]) = 0x05010501; ++ *((int*)& __m128_op1[1]) = 0x05010501; ++ *((int*)& __m128_op1[0]) = 0x0501050c; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffc0000fffc0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0002000200020002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000400; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000400; ++ __m128i_out = __lsx_vreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x6a57a30ff0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000400; ++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1410141014101410; ++ *((unsigned long*)& __m256i_result[2]) = 0x1410141014101410; ++ *((unsigned long*)& __m256i_result[1]) = 0x1410141014101410; ++ *((unsigned long*)& __m256i_result[0]) = 0x1410141014101410; ++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000040d; ++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000040d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000040d; ++ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000040d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000400; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op1[0]) = 0x000000000000040d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xcc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000040d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x33); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fff3; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000006; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21f32eafa486fd38; ++ *((unsigned long*)& __m128i_op0[0]) = 0x407c2ca3d3430357; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x21f32eaf5b7a02c8; ++ *((unsigned long*)& __m128i_result[0]) = 0x407c2ca32cbd0357; ++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00010001; ++ *((int*)& __m256_op1[6]) = 0x00010001; ++ *((int*)& __m256_op1[5]) = 0x00010001; ++ *((int*)& __m256_op1[4]) = 0x00010001; ++ *((int*)& __m256_op1[3]) = 0x00010001; ++ *((int*)& __m256_op1[2]) = 0x00010001; ++ *((int*)& __m256_op1[1]) = 0x00010001; ++ *((int*)& __m256_op1[0]) = 0x00010001; ++ *((int*)& __m256_result[7]) = 0x00010001; ++ *((int*)& __m256_result[6]) = 0x00010001; ++ 
*((int*)& __m256_result[5]) = 0x00010001; ++ *((int*)& __m256_result[4]) = 0x00010001; ++ *((int*)& __m256_result[3]) = 0x00010001; ++ *((int*)& __m256_result[2]) = 0x00010001; ++ *((int*)& __m256_result[1]) = 0x00010001; ++ *((int*)& __m256_result[0]) = 0x00010001; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21f32eaf5b7a02c8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x407c2ca32cbd0357; ++ *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fff3; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000040d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010400; ++ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00010400; ++ *((int*)& __m128_op1[3]) = 0x10f917d7; ++ *((int*)& __m128_op1[2]) = 0x2d3d01e4; ++ *((int*)& __m128_op1[1]) = 0x203e16d1; ++ *((int*)& __m128_op1[0]) = 0x16de012b; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x21f32eaf; ++ *((int*)& __m128_op0[2]) = 0x5b7a02c8; ++ *((int*)& __m128_op0[1]) = 0x407c2ca3; ++ *((int*)& __m128_op0[0]) = 0x2cbd0357; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00010400; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long*)& __m128i_result[1]) = 0x887c8beb969e00f2; ++ *((unsigned long*)& __m128i_result[0]) = 0x101f8b680b6f8095; ++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000040d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff0008ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff0008ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000017d7000001e4; ++ *((unsigned long*)& __m128i_result[0]) = 0x000016d10000012b; ++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x887c8beb; ++ *((int*)& __m128_op0[2]) = 0x969e00f2; ++ *((int*)& __m128_op0[1]) = 0x101f8b68; ++ *((int*)& __m128_op0[0]) = 0x0b6f8095; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_result[0]) = 
0x203e16d116de012b; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000800080008000; ++ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfa31dfa21672e711; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1304db85e468073a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80008000; ++ *((int*)& __m256_op0[6]) = 0x80008000; ++ *((int*)& __m256_op0[5]) = 0x80008000; ++ *((int*)& __m256_op0[4]) = 0x80008000; ++ *((int*)& __m256_op0[3]) = 0x80008000; ++ *((int*)& __m256_op0[2]) = 0x80008000; ++ *((int*)& __m256_op0[1]) = 0x80008000; ++ *((int*)& __m256_op0[0]) = 0x80008000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfa31dfa21672e711; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1304db85e468073a; ++ *((unsigned long*)& __m128i_op2[1]) = 0x887c8beb969e00f2; ++ *((unsigned long*)& __m128i_op2[0]) = 0x101f8b680b6f8095; ++ *((unsigned long*)& __m128i_result[1]) = 0x7582ed22cb1c6e12; ++ *((unsigned long*)& __m128i_result[0]) = 0x35aaa61c944f34c2; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000800080008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fff; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff51cf8da; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffd6040188; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000101fffff8b68; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000b6fffff8095; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffff51cffffd604; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0000101f; ++ *((int*)& __m128_op0[2]) = 0xffff8b68; ++ *((int*)& __m128_op0[1]) = 0x00000b6f; ++ *((int*)& __m128_op0[0]) = 0xffff8095; ++ *((int*)& __m128_op1[3]) = 0x10f917d7; ++ *((int*)& __m128_op1[2]) = 0x2d3d01e4; ++ *((int*)& __m128_op1[1]) = 0x203e16d1; ++ *((int*)& __m128_op1[0]) = 0x16de012b; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x00f900d7003d00e4; ++ *((unsigned long*)& __m128i_result[0]) = 0x003e00d100de002b; ++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long*)& __m128i_result[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_result[0]) = 0x0982eadaf234ed87; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00f900d7003d00e4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003e00d100de002b; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f4000007f040000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f0200007f020000; ++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000101fffff8b68; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000b6fffff8095; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000b6fffff8095; ++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff51cf8da; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffd6040188; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff01018888; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x50); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff51cf8da; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffd6040188; ++ *((unsigned long*)& __m128i_result[1]) = 0x00020002000d0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000020f2300ee; ++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00020002000d0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000020f2300ee; ++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7f4000007f040000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7f0200007f020000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128d_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffff01018888; ++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7f400000; ++ *((int*)& __m128_op0[2]) = 0x7f040000; ++ *((int*)& __m128_op0[1]) = 0x7f020000; ++ *((int*)& __m128_op0[0]) = 0x7f020000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0x0014002c; ++ *((int*)& __m128_op1[1]) = 0xfffefffe; ++ *((int*)& __m128_op1[0]) = 0x003b0013; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0x3ea5016b; ++ *((int*)& __m128_result[1]) = 0xfffefffe; ++ *((int*)& __m128_result[0]) = 0x3f6fb04d; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff01018888; ++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3ea5016b; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffe3f6fb04d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000d96f; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffd83b; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000002aaad555; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000002aaad555; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007fff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff00000000; ++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010108082626; ++ *((unsigned long*)& __m128i_result[0]) = 0x01010101ffff7878; ++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000145ad; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000300003e6e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8da00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long*)& 
__m128i_result[1]) = 0xffffffffff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00ffff00; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x73); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0982e2daf234ed87; ++ *((unsigned long*)& __m128i_result[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_result[0]) = 0x0982e2daf234ed87; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00007fff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0202810102020202; ++ *((unsigned long*)& __m256i_result[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0202810102020202; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffff8f8da00; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffff01018888; ++ *((unsigned long*)& __m128d_op1[1]) 
= 0x000000003ea5016c; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfffefefd3f7027c5; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0ae3072529fbfe78; ++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0202020202020202; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0202810102020202; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0202810102020202; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x00007fff00000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x00007fff00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x00007fff00000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x00007fff00000000; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0202810102020202; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0202810102020202; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ 
__m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000003f00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xf51df8dbd6050189; ++ *((unsigned long*)& __m128i_result[0]) = 0x0983e2dbf235ed87; ++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000003f00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000003f00000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000003f00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000;
++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xf51df8db;
++ *((int*)& __m128_op0[2]) = 0xd6050189;
++ *((int*)& __m128_op0[1]) = 0x0983e2db;
++ *((int*)& __m128_op0[0]) = 0xf235ed87;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0x3ea5016b;
++ *((int*)& __m128_op1[1]) = 0xfffefffe;
++ *((int*)& __m128_op1[0]) = 0x3f6fb04d;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x4000400000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000040004000;
++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffff00ff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00ffff00;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff7;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffe000000f6;
++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe000000f6;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x01010101ffffff00;
++ *((unsigned long*)& __m128i_result[0]) = 0x01010101000000f6;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0202020202020202;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0202810102020202;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0202020202020202;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0202810102020202;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fefe0000fefe;
++ *((unsigned long*)& __m256i_result[2]) = 0x00007fff0000fefe;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fefe0000fefe;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff0000fefe;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f;
++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfe00fe00fe00fd01;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fffefe0100f6;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0100010000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0100010000010000;
++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000003f0000;
++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0100010000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0100010000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff0000010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfe00fe00fe00fd01;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fffefe0100f6;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff0000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff0001ffffff0a;
++ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff700000009;
++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f;
++ int_op1 = 0x0000000000000400;
++ *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[2]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[0]) = 0x003f003f003f003f;
++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000003f0;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000003f0;
++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f003f;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[2]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[0]) = 0x003f003f003f003f;
++ __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xfffffff7;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d5d55;
++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x5d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_op2[2]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_op2[1]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_op2[0]) = 0x003f003f003f003f;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000017e;
++ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000008;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000008;
++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x8f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01;
++ *((unsigned long*)& __m128i_result[1]) = 0xfc01fd13fc02fe0c;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe00fd14fe01fd16;
++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000003f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000003f0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x30);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd13fc02fe0c;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd14fe01fd16;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe00fd1400010000;
++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001f;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001f;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f;
++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff0001ffffff0a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000101;
++ *((unsigned long*)& __m128i_result[0]) = 0x000100ff010101f6;
++ __m128i_out = __lsx_vneg_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003f0000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd1400010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d5d55;
++ *((unsigned long*)& __m128i_result[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe00fcfffe21fd01;
++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfe00fcfffe01fd01;
++ *((unsigned long*)& __m128d_op1[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128d_op1[0]) = 0xfe00fd1400010000;
++ *((unsigned long*)& __m128d_op2[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128d_op2[0]) = 0xfe00fcfffe01fd01;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff0000010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xabff54f1ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa5f7458b000802ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fff7fc01;
++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5d5d5d5d5d5d5d55;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d005d5d5d55;
++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000017e;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000005e02;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfffffff700000009;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xfffffff700000009;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xabff54e911f71b07;
++ *((unsigned long*)& __m128i_op0[0]) = 0xa9ec4882f216ea11;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fcfffe01fd01;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xaa0051e90ff91808;
++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd1400010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff7fc01;
++ *((unsigned long*)& __m128i_result[1]) = 0xfe00fe8980000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xff007e8a7ffc7e00;
++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x01ff000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x01ff000000000000;
++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x80000000fff7fc01;
++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003effe1;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003effe1;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000003effe1;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000003effe1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_result[3]) = 0xc2c2c2c2c2c2c2c2;
++ *((unsigned long*)& __m256i_result[2]) = 0xc2c2c2c2c2c29cc0;
++ *((unsigned long*)& __m256i_result[1]) = 0xc2c2c2c2c2c2c2c2;
++ *((unsigned long*)& __m256i_result[0]) = 0xc2c2c2c2c2c29cc0;
++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xc2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000005e02;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c29cc0;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c29cc0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000;
++ *((unsigned long*)& __m128i_result[1]) = 0xc72ef153fc02fdf7;
++ *((unsigned long*)& __m128i_result[0]) = 0xca31bf15fd010000;
++ __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7f0000007f000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8080000180800100;
++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff7fc01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x80000000fff6fc00;
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000080000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x80000000fff6fc00;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000080000000;
++ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff6fc00;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7f0000007f000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8080000180800100;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff00ffff;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff7;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffc01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff7;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffc01;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe;
++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xc2c2c2c2c2c2c2c2;
++ *((unsigned long*)& __m256d_op0[2]) = 0xc2c2c2c2c2c29cc0;
++ *((unsigned long*)& __m256d_op0[1]) = 0xc2c2c2c2c2c2c2c2;
++ *((unsigned long*)& __m256d_op0[0]) = 0xc2c2c2c2c2c29cc0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op2[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_op2[1]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op2[0]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128d_op1[1]) = 0xb55ccf30f52a6a68;
++ *((unsigned long*)& __m128d_op1[0]) = 0x4e0018eceb82c53a;
++ *((unsigned long*)& __m128d_result[1]) = 0x355ccf30f52a6a68;
++ *((unsigned long*)& __m128d_result[0]) = 0xce0018eceb82c53a;
++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xff000000fefb0000;
++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002;
++ *((unsigned long*)& __m128i_op1[1]) = 0x82c53a0000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc72ef153fc02fdf7;
++ *((unsigned long*)& __m128i_result[1]) = 0x007d00c500ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0038000e0003ff03;
++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x80000000;
++ *((int*)& __m256_result[6]) = 0x80000000;
++ *((int*)& __m256_result[5]) = 0x80000000;
++ *((int*)& __m256_result[4]) = 0x80000000;
++ *((int*)& __m256_result[3]) = 0x80000000;
++ *((int*)& __m256_result[2]) = 0x80000000;
++ *((int*)& __m256_result[1]) = 0x80000000;
++ *((int*)& __m256_result[0]) = 0x80000000;
++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002;
++ *((unsigned long*)& __m128i_op1[1]) = 0x82c53a0000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc72ef153fc02fdf7;
++ *((unsigned long*)& __m128i_result[1]) = 0x82c539ffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xc72df14afbfafdf9;
++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1716151416151413;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1514131214131211;
++ *((unsigned long*)& __m128i_result[1]) = 0xfff3fff3fff3fff3;
++ *((unsigned long*)& __m128i_result[0]) = 0xfff3fff3fff3fff3;
++ __m128i_out = __lsx_vmini_h(__m128i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_result[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_result[1]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_result[0]) = 0xe161616161614e60;
++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000aaaaaaaa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000aaab555b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000aaaaaaaa;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000aaab555b;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x007d00c50177ac5b;
++ *((unsigned long*)& __m128i_op0[0]) = 0xac82aa88a972a36a;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000c5ac01015b;
++ *((unsigned long*)& __m128i_result[0]) = 0xaaacac88a3a9a96a;
++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x7c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslli_h(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7d3ac60000000000;
++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_op1[1]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000061;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000061;
++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f;
++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffff800fffff800;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffff800fffff800;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffff800fffff800;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffff800fffff800;
++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x82c539ffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc72df14afbfafdf9;
++ *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x23);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fbf83468;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fbf83468;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7d3ac60000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000007d3ac600;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xfffefff6;
++ *((int*)& __m128_op0[0]) = 0xfff80002;
++ *((int*)& __m128_op1[3]) = 0x000000c5;
++ *((int*)& __m128_op1[2]) = 0xac01015b;
++ *((int*)& __m128_op1[1]) = 0xaaacac88;
++ *((int*)& __m128_op1[0]) = 0xa3a9a96a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256d_result[3]) = 0xc1be9e9e9f000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x41d8585858400000;
++ *((unsigned long*)& __m256d_result[1]) = 0xc1be9e9e9f000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x41d8585858400000;
++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256d_op0[2]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256d_op0[1]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256d_op0[0]) = 0xe161616161614e60;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007d3ac600;
++ int_result = 0x0000000000000000;
++ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x7);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffefff6fff80002;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x08fdc221bfdb1927;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4303c67e9b7fb213;
++ *((unsigned long*)& __m128i_op1[1]) = 0x08fdc221bfdb1927;
++ *((unsigned long*)& __m128i_op1[0]) = 0x4303c67e9b7fb213;
++ *((unsigned long*)& __m128i_result[1]) = 0x00100184017e0032;
++ *((unsigned long*)& __m128i_result[0]) = 0x0086018c01360164;
++ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000;
++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f;
++ *((unsigned
long*)& __m128i_op1[1]) = 0x000000c5ac01015b; ++ *((unsigned long*)& __m128i_op1[0]) = 0xaaacac88a3a9a96a; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00100184017e0032; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0086018c01360164; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffff33c4b1e67; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000800c0004300c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x08fdc221; ++ *((int*)& __m128_op0[2]) = 0xbfdb1927; ++ *((int*)& __m128_op0[1]) = 0x4303c67e; ++ *((int*)& __m128_op0[0]) = 0x9b7fb213; ++ *((int*)& __m128_op1[3]) = 0x0000800c; ++ *((int*)& __m128_op1[2]) = 0x0004300c; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d(__m256i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x009500b10113009c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x009500b10113009c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000005d5d; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x41d8585858400000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x41d8585858400000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long*)& __m128d_op1[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long*)& __m128d_op1[0]) = 0x4303c67e9b7fb213; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w(__m256i_op0,15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffefff6fff80002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x41d8585858400000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x41d8585858400000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x41d8585858400000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x41d8585858400000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1076000016160000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1610000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1076000016160000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1610000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000000000000; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x31); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000005d5d; ++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x014200c200c200ae; ++ *((unsigned long*)& __m256i_op0[2]) = 0x014200c200c200ae; ++ *((unsigned long*)& __m256i_op0[1]) = 0x014200c200c200ae; ++ *((unsigned long*)& __m256i_op0[0]) = 0x014200c200c200ae; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h(__m256i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe161616161614e60; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long*)& __m256i_op0[1]) = 0xe161616161614e60; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_result[2]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_result[1]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_result[0]) = 0xe161616161614f61; ++ __m256i_out = 
__lasx_xvbitset_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x41); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00ff00ff00ff00fe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xffffffff; ++ *((int*)& __m128_result[1]) = 0xffffffff; ++ *((int*)& __m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op1[1]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000616100004f61; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x1086658a18ba3594; ++ *((unsigned long*)& __m256i_op1[2]) = 0x160fe9f000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1086658a18ba3594; ++ *((unsigned long*)& __m256i_op1[0]) = 0x160fe9f000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x07a232640bfc1a73; ++ *((unsigned long*)& __m256i_result[2]) = 0x0a66f497ff9effa9; ++ *((unsigned long*)& __m256i_result[1]) = 0x07a232640bfc1a73; ++ *((unsigned long*)& __m256i_result[0]) = 0x0a66f497ff9effa9; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_result[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_result[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_result[0]) = 0xffc0ffc0ffc0ffc0; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1086658a18ba3594; ++ *((unsigned long*)& __m256i_op0[2]) = 0x160fe9f000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1086658a18ba3594; ++ *((unsigned long*)& __m256i_op0[0]) = 0x160fe9f000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op1[1]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614f61; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000616100004f61; ++ *((unsigned long*)& __m256i_result[3]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_result[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_result[1]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_result[0]) = 0x4df5b1a3ed5e02c1; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000c5ac01015b; ++ *((unsigned long*)& __m128i_op0[0]) = 0xaaacac88a3a9a96a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff9e9eb09e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff9e9eb09e; ++ *((unsigned long*)& __m256i_result[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_result[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_result[0]) = 0xffc00000ffc0ffc0; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256d_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256d_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256d_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffc00000ffc0ffc0; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff90000fff9fff9; ++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000001300000013; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff0004ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff0004ff; ++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000e13; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000e13; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff9e9eb09e; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff9e9eb09e; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x66); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffc00000ffc0ffc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffcfee0fe00ffe0; ++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b(__m128i_op0,11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffffffffff; ++ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_b(__m128i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000001fff9fff8; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned 
long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long*)& __m256i_result[3]) = 0x081abb9d36ee1037; ++ *((unsigned long*)& __m256i_result[2]) = 0x1617eb17129bfd38; ++ *((unsigned long*)& __m256i_result[1]) = 0x081abb9d36ee1037; ++ *((unsigned long*)& __m256i_result[0]) = 0x1617eb17129bfd38; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_h(__m256i_op0,6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x80000000; ++ *((int*)& __m128_result[2]) = 0x80000000; ++ *((int*)& __m128_result[1]) = 0x80000000; ++ *((int*)& __m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fff9fff8; ++ *((unsigned long*)& __m256i_op1[3]) = 0xe1616161e1614e60; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe1616161e1614e60; ++ *((unsigned long*)& __m256i_op1[1]) = 0xe1616161e1614e60; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe1616161e1614e60; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[0]) = 
0x8000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffc0000fee0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fe000000ffe0; ++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0080001300000013; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0080001300000013; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0080001300000013; ++ *((unsigned long*)& __m128i_result[0]) = 0x0080001300000013; ++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001300000013; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vfcmp_cune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff900000003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff900000003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x3f3f3f3900000003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x3f3f3f3900000003; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x3f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffc0000fee0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe000000ffe0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff900000003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7ffe00007f000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303;
++ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303;
++ __m128i_out = __lsx_vpcnt_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0052005200520052;
++ *((unsigned long*)& __m128i_result[0]) = 0x0052005200520052;
++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff900000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff900000003;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000;
++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0xffff0000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0xffff0000;
++ *((int*)& __m256_op1[7]) = 0xfffefffe;
++ *((int*)& __m256_op1[6]) = 0xfffefffe;
++ *((int*)& __m256_op1[5]) = 0xfffefffe;
++ *((int*)& __m256_op1[4]) = 0xfffefffe;
++ *((int*)& __m256_op1[3]) = 0xfffefffe;
++ *((int*)& __m256_op1[2]) = 0xfffefffe;
++ *((int*)& __m256_op1[1]) = 0xfffefffe;
++ *((int*)& __m256_op1[0]) = 0xfffefffe;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0xffff0000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0xffff0000;
++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ffe00007f000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff;
++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000;
++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x1616161616161616;
++ *((unsigned long*)& __m256i_result[2]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ffe16167f161616;
++ *((unsigned long*)& __m256i_result[0]) = 0x161616167fffffff;
++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0000ffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0xfff9fff9fff9fff9;
++ *((unsigned long*)& __m256d_op1[2]) = 0xfff90000fff9fff9;
++ *((unsigned long*)& __m256d_op1[1]) = 0xfff9fff9fff9fff9;
++ *((unsigned long*)& __m256d_op1[0]) = 0xfff90000fff9fff9;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000800000000;
++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff000100000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff7fff00007f00;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff000100007fff;
++ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xcd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x79);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff017e01fe;
++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7fff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7f007f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7fff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xbfbfbfbfbfbfbfbf;
++ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfff807f;
++ *((unsigned long*)& __m256i_result[1]) = 0xbf803fbfbfbfbfbf;
++ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfff807f;
++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x5252525252525252;
++ *((unsigned long*)& __m128i_result[0]) = 0x5252525252525252;
++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00;
++ __m256i_out = __lasx_xvmskgez_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3a2a3a2a3a2a3a2a;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3a2a3a2a3aaa45aa;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3a553f7f7a2a3a2a;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3a2a3a2a3aaa45aa;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x1d949d949d949d95;
++ *((unsigned long*)& __m256i_result[2]) = 0x1d949d949e1423d4;
++ *((unsigned long*)& __m256i_result[1]) = 0x1de9a03f3dd41d95;
++ *((unsigned long*)& __m256i_result[0]) = 0x1d949d949e1423d4;
++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000003fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000003fffffff;
++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x1f3d2101;
++ *((int*)& __m256_op0[6]) = 0x1f3d2101;
++ *((int*)& __m256_op0[5]) = 0x1f3d2101;
++ *((int*)& __m256_op0[4]) = 0xd07dbf01;
++ *((int*)& __m256_op0[3]) = 0x9f1fd080;
++ *((int*)& __m256_op0[2]) = 0x1f3d2101;
++ *((int*)& __m256_op0[1]) = 0x1f3d2101;
++ *((int*)& __m256_op0[0]) = 0xd07dbf01;
++ *((int*)& __m256_op1[7]) = 0x1d949d94;
++ *((int*)& __m256_op1[6]) = 0x9d949d95;
++ *((int*)& __m256_op1[5]) = 0x1d949d94;
++ *((int*)& __m256_op1[4]) = 0x9e1423d4;
++ *((int*)& __m256_op1[3]) = 0x1de9a03f;
++ *((int*)& __m256_op1[2]) = 0x3dd41d95;
++ *((int*)& __m256_op1[1]) = 0x1d949d94;
++ *((int*)& __m256_op1[0]) = 0x9e1423d4;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x8001b72e;
++ *((int*)& __m256_result[6]) = 0x0001b72e;
++ *((int*)& __m256_result[5]) = 0x8001b72e;
++ *((int*)& __m256_result[4]) = 0xaf12d5f0;
++ *((int*)& __m256_result[3]) = 0x00024763;
++ *((int*)& __m256_result[2]) = 0x9d9cb530;
++ *((int*)& __m256_result[1]) = 0x8001b72e;
++ *((int*)& __m256_result[0]) = 0xaf12d5f0;
++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((int*)& __m256_op0[7]) = 0xfffefffe;
++ *((int*)& __m256_op0[6]) = 0xfffefffe;
++ *((int*)& __m256_op0[5]) = 0xfffefffe;
++ *((int*)& __m256_op0[4]) = 0xfffefffe;
++ *((int*)& __m256_op0[3]) = 0xfffefffe;
++ *((int*)& __m256_op0[2]) = 0xfffefffe;
++ *((int*)& __m256_op0[1]) = 0xfffefffe;
++ *((int*)& __m256_op0[0]) = 0xfffefffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
++ __m128i_out = __lsx_vneg_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x017e017e01dd61de;
++ *((unsigned long*)& __m256d_op0[2]) = 0x5d637d043bc4fc43;
++ *((unsigned long*)& __m256d_op0[1]) = 0x01dcc2dce31bc35d;
++ *((unsigned long*)& __m256d_op0[0]) = 0x5e041d245b85fc43;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x5d637d043bc4fc43;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x5e041d245b85fc43;
++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[3]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_op1[2]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_op1[1]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_op1[0]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_result[3]) = 0x1f9d9f9d1f9db29f;
++ *((unsigned long*)& __m256i_result[2]) = 0x1f9d9f9d201cb39e;
++ *((unsigned long*)& __m256i_result[1]) = 0x201c9f9d201cb29f;
++ *((unsigned long*)& __m256i_result[0]) = 0x1f9d9f9d201cb39e;
++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1616161616161616;
++ *((unsigned long*)& __m256i_op0[2]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe16167f161616;
++ *((unsigned long*)& __m256i_op0[0]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x2c2c2c2c2c2c2c2c;
++ *((unsigned long*)& __m256i_result[2]) = 0x2c2c2c2cfefefefe;
++ *((unsigned long*)& __m256i_result[1]) = 0xfefc2c2cfe2c2c2c;
++ *((unsigned long*)& __m256i_result[0]) = 0x2c2c2c2cfefefefe;
++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1616161616161616;
++ *((unsigned long*)& __m256i_op0[2]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe16167f161616;
++ *((unsigned long*)& __m256i_op0[0]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xc7c7c7c7c7c7c7c7;
++ *((unsigned long*)& __m256i_result[2]) = 0xc7c7c7c7ae2e2e2e;
++ *((unsigned long*)& __m256i_result[1]) = 0xae2fc7c7aec7c7c7;
++ *((unsigned long*)& __m256i_result[0]) = 0xc7c7c7c7ae2e2e2e;
++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xd1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1f9d9f9d1f9db29f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1f9d9f9d201cb39e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x201c9f9d201cb29f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1f9d9f9d201cb39e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007773;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003373;
++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
++ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1dffbfff00000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0200400000000001;
++ unsigned_int_result = 0x0000000000000001;
++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2);
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007773;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003373;
++ *((unsigned long*)& __m256i_result[3]) = 0xbbbbbbbbbbbbbbbb;
++ *((unsigned long*)& __m256i_result[2]) = 0xbbbbbbbbbbbb8888;
++ *((unsigned long*)& __m256i_result[1]) = 0xbbbbbbbbbbbbbbbb;
++ *((unsigned long*)& __m256i_result[0]) = 0xbbbbbbbbbbbb8888;
++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x44);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000007773;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000003373;
++ *((unsigned long*)& __m256d_op1[3]) = 0x1616161616161616;
++ *((unsigned long*)& __m256d_op1[2]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x7ffe16167f161616;
++ *((unsigned long*)& __m256d_op1[0]) = 0x161616167fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x2c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007773;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003373;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0800000008000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0800000008000000;
++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_du(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000;
++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x000100010001fffe;
++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffffff;
++ int_result = 0x0000000000000000;
++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x1);
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff2fffffff2;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff2fffffff2;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff2fffffff2;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff2fffffff2;
++ __m256i_out = __lasx_xvmini_w(__m256i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000;
++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000020002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000020002;
++ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_op0[1]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe1616161e1614e60;
++ *((unsigned long*)& __m256i_result[3]) = 0x0703030307030203;
++ *((unsigned long*)& __m256i_result[2]) = 0x0703030307030203;
++ *((unsigned long*)& __m256i_result[1]) = 0x0703030307030203;
++ *((unsigned long*)& __m256i_result[0]) = 0x0703030307030203;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x003fffffff000000;
++ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000020002;
++ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303;
++ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303;
++ __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00005555aaabfffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x003fffffff000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ab;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff;
++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x18);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0003000300030003;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000700020005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0003000300030003;
++ *((unsigned long*)& __m128i_result[0]) = 0x0003000700020005;
++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfrint_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff000100000000;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0003000300030003;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0003000700020005;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfrint_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplgr2vr_w(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000017e007ffe02;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000100010001fffd;
++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000008000000080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0080000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xf6f6f6f6f6f6f6f6;
++ *((unsigned long*)& __m256i_result[2]) = 0xf6f6f6f6f6f6f6f6;
++ *((unsigned long*)& __m256i_result[1]) = 0xf6f6f6f6f6f6f6f6;
++ *((unsigned long*)& __m256i_result[0]) = 0xf6f6f6f6f6f6f6f6;
++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00ff00ff;
++ *((int*)& __m256_op0[6]) = 0x00ff00ff;
++ *((int*)& __m256_op0[5]) = 0x00ff00ff;
++ *((int*)& __m256_op0[4]) = 0x017e01fe;
++ *((int*)& __m256_op0[3]) = 0x017e00ff;
++ *((int*)& __m256_op0[2]) = 0x017e00ff;
++ *((int*)& __m256_op0[1]) = 0x00ff00ff;
++ *((int*)& __m256_op0[0]) = 0x017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe8001b72e0001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xb72e8001b72eaf12;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe000247639d9c;
++ *((unsigned long*)& __m256i_op0[0]) = 0xb5308001b72eaf12;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0002ff80ffb70000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffb7ff80ffd0ffd8;
++ *((unsigned long*)& __m256i_result[1]) = 0x00010000002fff9e;
++ *((unsigned long*)& __m256i_result[0]) = 0xffb5ff80ffd0ffd8;
++ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffd;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x38);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vexth_du_wu(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8001b72e0001b72e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8001b72eaf12d5f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000247639d9cb530;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8001b72eaf12d5f0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0xff81ffe50001ffe5;
++ *((unsigned long*)& __m256i_result[2]) = 0xff81ffe5ffa6ffc6;
++ *((unsigned long*)& __m256i_result[1]) = 0x000200aafe9affe5;
++ *((unsigned long*)& __m256i_result[0]) = 0xff81ffe5ffa6ffc6;
++ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x01fe8001b72e0001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xb72e8001b72eaf12;
++ *((unsigned long*)& __m256i_op1[1]) = 0x01fe000247639d9c;
++ *((unsigned long*)& __m256i_op1[0]) = 0xb5308001b72eaf12;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff017e00ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x017e00ff017e01fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff017e00ff;
++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffff000100000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffff000100000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff000100000000;
++ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e00ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e01fe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xb70012c4b714fc1e;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff017e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fe02b71c199d;
++ *((unsigned long*)& __m256i_result[0]) = 0x017e017e00ff017e;
++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe8001b72e0001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xb72e8001b72eaf12;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe000247639d9c;
++ *((unsigned long*)& __m256i_op0[0]) = 0xb5308001b72eaf12;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x26);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00007fffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00007fffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x000100010001fffd;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000100010;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000100010;
++ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000100010;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000100010;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((int*)& __m128_op2[3]) = 0x00307028;
++ *((int*)& __m128_op2[2]) = 0x003f80b0;
++ *((int*)& __m128_op2[1]) = 0x0040007f;
++ *((int*)& __m128_op2[0]) = 0xff800000;
++ *((int*)& __m128_result[3]) = 0x80307028;
++ *((int*)& __m128_result[2]) = 0xffffffff;
++ *((int*)& __m128_result[1]) = 0x8040007f;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)&
__m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xb70036db; ++ *((int*)& __m256_op1[6]) = 0x12c4007e; ++ *((int*)& __m256_op1[5]) = 0xb7146213; ++ *((int*)& __m256_op1[4]) = 0xfc1e0049; ++ *((int*)& __m256_op1[3]) = 0x000000fe; ++ *((int*)& __m256_op1[2]) = 0xfe02fffe; ++ *((int*)& __m256_op1[1]) = 0xb71c413b; ++ *((int*)& __m256_op1[0]) = 0x199d04b5; ++ *((int*)& __m256_op2[7]) = 0xb70036db; ++ *((int*)& __m256_op2[6]) = 0x12c4007e; ++ *((int*)& __m256_op2[5]) = 0xb7146213; ++ *((int*)& __m256_op2[4]) = 0xfc1e0049; ++ *((int*)& __m256_op2[3]) = 0x000000fe; ++ *((int*)& __m256_op2[2]) = 0xfe02fffe; ++ *((int*)& __m256_op2[1]) = 0xb71c413b; ++ *((int*)& __m256_op2[0]) = 0x199d04b5; ++ *((int*)& __m256_result[7]) = 0x370036db; ++ *((int*)& __m256_result[6]) = 0x92c4007e; ++ *((int*)& __m256_result[5]) = 0x37146213; ++ *((int*)& __m256_result[4]) = 0x7c1e0049; ++ *((int*)& __m256_result[3]) = 0x800000fe; ++ *((int*)& __m256_result[2]) = 0x7e02fffe; ++ *((int*)& __m256_result[1]) = 0x371c413b; ++ *((int*)& __m256_result[0]) = 0x999d04b5; ++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0x80307028; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x8040007f; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0048007f002f0028; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 
0x004a007f002f0028; ++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0049ffd2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01620133004b0032; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_result[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long*)& __m256i_result[2]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00010000002fff9e; ++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000020302030; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000020302030; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x43); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000020302030; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000020302030; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000100010; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000100010; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0049ffd2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00630064004bffd0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x80307028ffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x8040007fffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00307028003f80b0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0040007fff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000003f80b0; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000003f80b0; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff00ff00ffff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff000000ff00ff00; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff00ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000ff00ff; ++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xb327b9363c99d32e; ++ *((unsigned long*)& __m128i_op0[0]) = 0xa1e7b475d925730f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003f80b0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m128i_result[1]) = 0xb327b9363c992b2e; ++ *((unsigned long*)& __m128i_result[0]) = 0xa1e7b475d925730f; ++ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8001b72e0001b72e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8001b72eaf12d5f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000247639d9cb530; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8001b72eaf12d5f0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe056fd9d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffceba70; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003f80b0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xb327b9363c992b2e; ++ *((unsigned long*)& __m128i_op1[0]) = 0xa1e7b475d925730f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000001ff00; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x370036db92c4007e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x371462137c1e0049; ++ *((unsigned long*)& __m256i_op0[1]) = 0x800000fe7e02fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x371c413b999d04b5; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long*)& 
__m256i_op2[3]) = 0xffff00ff00ffff00; ++ *((unsigned long*)& __m256i_op2[2]) = 0xff000000ff00ff00; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffff00ffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xff00000000ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x37fe365b920d007e; ++ *((unsigned long*)& __m256i_result[2]) = 0x381462137d1e0149; ++ *((unsigned long*)& __m256i_result[1]) = 0x80ff00fe7e020060; ++ *((unsigned long*)& __m256i_result[0]) = 0x381c413b99cd04dd; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xb327b9363c992b2e; ++ *((unsigned long*)& __m128i_op1[0]) = 0xa1e7b475d925730f; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff3c992b2e; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x80307028ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8040007fffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0101ff010101; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff00ff00ffff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff000000ff00ff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffff00ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000180000000; ++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe5; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00307028003f80b0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0040007fff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffc0ffffff81; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff008000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0060e050007f0160; ++ *((unsigned long*)& __m128i_result[0]) = 0x0040007fff800000; ++ __m128i_out = 
__lsx_vsll_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007f8000007f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000003fc; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000003fc; ++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvclo_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffff81ffffeb2f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f6ee0570b4e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000018de; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffb4ffcec0f1; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffff81ffffeb2f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f6ee0570b4e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000018de; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffb4ffcec0f1; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000001ffffeab0; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000e0574abc; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000018de; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000001ffcec0a5; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0060e050007f0160; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0040007fff800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00010000002fff9e; ++ *((int*)& __m256_result[7]) = 0x34000000; ++ *((int*)& __m256_result[6]) = 0xfff00000; ++ *((int*)& __m256_result[5]) = 0xfff6e000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x33800000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ 
*((int*)& __m256_result[1]) = 0x363c0000; ++ *((int*)& __m256_result[0]) = 0xfff3c000; ++ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003fc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000003fc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x3c992b2e; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffff730f; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vfrintrz_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101017f0101017f; ++ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00010000002fff9e; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long*)& __m256i_result[1]) = 0x00010000002fff9e; ++ *((unsigned long*)& __m256i_result[0]) = 0xffb5ff80ffd0ffd8; ++ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000180000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0002ff80ffb70000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00010000002fff9e; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffd2; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff8000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000080000000; ++ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x34000000fff00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3380000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffb7146213; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffc1e0049; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffb71c413b; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf3317da580000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x34000000fff00000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff6e00000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x3380000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x363c0000fff3c000; ++ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x34000000fff00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3380000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000030000000c; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001100000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000500000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000010; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x1268f057137a0267; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0048137ef886fae0; ++ *((unsigned long*)& __m128i_result[1]) = 0xff000000ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xff00ff0000000000; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff946c; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff946b; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3c992b2e; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff730f; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffff946c; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffff946b; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff946c; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffdffff946c; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1268f057137a0267; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0048137ef886fae0; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000490000004d; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffffe2; ++ __m128i_out = __lsx_vsrai_w(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000f3; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000f3; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long*)& __m256i_op2[3]) = 0xb70036db12c4007e; ++ *((unsigned long*)& __m256i_op2[2]) = 0xb7146213fc1e0049; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000fefe02fffe; ++ *((unsigned long*)& __m256i_op2[0]) = 0xb71c413b199d04b5; ++ *((unsigned long*)& __m256i_result[3]) = 0xd100645944100004; ++ *((unsigned long*)& __m256i_result[2]) = 0xd1908469108400d1; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000404040104; ++ *((unsigned long*)& __m256i_result[0]) = 0xd1108199714910f9; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3c992b2e; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff730f; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff3c992b2e; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x34000000fff00000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x3380000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned 
long*)& __m256d_op1[3]) = 0x000000030000000c; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000001100000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000500000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000800000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff000000ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0100000001000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100010000000000; ++ __m128i_out = __lsx_vneg_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xd100645944100004; ++ *((unsigned long*)& __m256i_op0[2]) = 0xd1908469108400d1; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000404040104; ++ *((unsigned long*)& __m256i_op0[0]) = 0xd1108199714910f9; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000004040104; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffd1108199; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000714910f9; ++ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff732a; ++ *((unsigned long*)& __m128i_result[1]) = 0x807f7fff807f807f; ++ *((unsigned long*)& __m128i_result[0]) = 0x807f807f7fff3995; ++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; ++ long_int_result = 0x00000001ffffffff; ++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000004040104; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffd1108199; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000714910f9; ++ *((unsigned long*)& __m256d_op1[3]) = 0x000000030000000c; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000001100000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000500000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000800000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff2; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff2; ++ __m128i_out = __lsx_vavgr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff000000ff00ff00; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff00ff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000049ffffff4d; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff01ffffffff; ++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000001faea9ec; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000003; ++ *((int*)& __m256_op1[6]) = 0x0000000c; ++ *((int*)& __m256_op1[5]) = 0x00000011; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000005; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000008; ++ *((int*)& __m256_op1[0]) = 0x00000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000004040104; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffd1108199; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000714910f9; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffd10000006459; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000441000000004; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000040400000104; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffd10000000004; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffd1108199; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000104; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffe5; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffe5; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff732a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0100000001000100; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0100010000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000490000004d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ffffffffff; ++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1268f057137a0267; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0048137ef886fae0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ffffffffff; ++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000002a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000015; ++ __m128i_out = __lsx_vavgr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffd10000006459; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000441000000004; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000040400000104; ++ *((unsigned long*)& __m256i_result[3]) = 0x0f0f0f0f0f0f6459; ++ *((unsigned long*)& __m256i_result[2]) = 0x0f0f44100f0f0f0f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0f0f0f0f0f0f0f0f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0f0f0f0f0f0f0f0f; ++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000010001000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff00ff00ffffff; ++ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000049; ++ *((int*)& __m128_op0[2]) = 0x0000004d; ++ *((int*)& __m128_op0[1]) = 0x00000001; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& 
__m128_op1[1]) = 0x00000001;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000001;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x80000000;
++ *((int*)& __m128_result[2]) = 0x80000000;
++ *((int*)& __m128_result[1]) = 0x80000001;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000;
++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000490000004d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000490000004d;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffff9;
++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000006;
++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000073;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000002a;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffff29;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff7f00ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff007f0101017f;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000006;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffd10000006459;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000441000000004;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000040400000104;
++ *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f;
++ *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000007fff01ffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xdb8e209d0cce025a;
++ *((unsigned long*)& __m256i_result[3]) = 0x88888a6d0962002e;
++ *((unsigned long*)& __m256i_result[2]) = 0xdb8a3109fe0f0020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000007fff01fffb;
++ *((unsigned long*)& __m256i_result[0]) = 0xdb8e20990cce025a;
++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x88);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000002a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00ffffff00ff00ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xdb801b6d0962003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0024;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000007fff01ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xdb8e209d0cce025a;
++ *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e;
++ *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffcc8000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000007dfdff4b;
++ *((unsigned long*)& __m256i_result[3]) = 0xdb801b6d0962003f;
++ *((unsigned long*)& __m256i_result[2]) = 0xdb8a3109fe0f0024;
++ *((unsigned long*)& __m256i_result[1]) = 0x9a7f997fff01ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xbe632a4f1c3c5653;
++ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00b7003600120000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00b7006200fc0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000fe00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00b7004100190004;
++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00b7003600120000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00b7006200fc0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000fe00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00b7004100190004;
++ *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f;
++ *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9a7f997fff01ffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbe632a4f1c3c5653;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffe54affffffd3;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffcfae000000d8;
++ *((unsigned long*)& __m256i_result[1]) = 0x00006681000000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffd668ffffa9c6;
++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffcc8000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007dfdff4b;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x003ffff300000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000001f7f7f;
++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000015;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff29;
++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00000049000000c0;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffff29;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000100000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000c0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000020000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000183fffffe5;
++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff7f00ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff007f0101017f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000020000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000183fffffe5;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000073;
++ *((unsigned long*)& __m128i_op2[0]) = 0x000000000000002a;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff7f00ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff007f0101017f;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000002b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000400000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007dfdff4b;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff3400000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff83ff01;
++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff3400000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff83ff01;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000183fffffe5;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000400000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000400000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000;
++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[1]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[0]) = 0xbabababababababa;
++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xba);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x88888a6d0962002e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000007fff01fffb;
++ *((unsigned long*)& __m256i_op0[0]) = 0xdb8e20990cce025a;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff3400000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff83ff01;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0962002efe0f0020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xff01fffb8667012d;
++ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_op1[1]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xbabababababababa;
++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000c0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffff29;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000000000c0;
++ *((unsigned long*)& __m128i_op2[0]) = 0x00000001ffffff29;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff2900000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xdb801b6d0962003f;
++ *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0024;
++ *((unsigned long*)& __m256i_op0[1]) = 0x9a7f997fff01ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xbe632a4f1c3c5653;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00ff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xbabababababababa;
++ *((unsigned long*)& __m256d_op0[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256d_op0[1]) = 0xbabababababababa;
++ *((unsigned long*)& __m256d_op0[0]) = 0xbabababababababa;
++ *((unsigned long*)& __m256d_op1[3]) = 0x88888a6d0962002e;
++ *((unsigned long*)& __m256d_op1[2]) = 0xdb8a3109fe0f0020;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000007fff01fffb;
++ *((unsigned long*)& __m256d_op1[0]) = 0xdb8e20990cce025a;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000400000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff2900000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000401000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff2900000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xa41aa42e;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xa41aa42e;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0xffffcc80;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x7dfdff4b;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000005be55bd2;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000401000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000800;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000800;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000800;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000800;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000401000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000401000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0080200000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000401000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000080000000000;
++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f;
++ *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9a7f997fff01ffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbe632a4f1c3c5653;
++ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_result[2]) = 0x2475cef801f0ffdd;
++ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_result[0]) = 0x419cd5b11c3c5654;
++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000401000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000;
++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040;
++ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000;
++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2475cef801f0ffdd;
++ *((unsigned long*)& __m256i_op1[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x419cd5b11c3c5654;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffcc8000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000000007dfdff4b;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0xbabababababababa;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0xbabababababababa;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000005be55bd2;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007dfdff4b;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2475cef801f0ffdd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x419cd5b11c3c5654;
++ *((unsigned long*)& __m256i_op1[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2475cef801f0ffdd;
++ *((unsigned long*)& __m256i_op1[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x419cd5b11c3c5654;
++ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_result[2]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_result[0]) = 0x6580668200fe0002;
++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff6;
++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2475cef801f0ffdd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x419cd5b11c3c5654;
++ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_result[2]) = 0x2475cef801f0ffdd;
++ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_result[0]) = 0x419cd5b11c3c5654;
++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x3f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffff82037dfd0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000;
++ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xbf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256d_op1[1]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff6;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_result[2]) = 0x247fe49409620040;
++ *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002;
++ *((unsigned long*)& __m256i_result[0]) = 0x6580668200fe0002;
++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x3f3f3f3f3f3f3f3f;
++ *((unsigned long*)& __m256i_result[2]) = 0x3f3f3f3f3f3f3f3f;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000003f3f3f3f;
++ *((unsigned long*)& __m256i_result[0]) = 0x3f3f3f3f00000000;
++ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_h(__m128i_op0,3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffff6;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffff6;
++ *((unsigned long*)& __m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f;
++ *((unsigned long*)& __m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f;
++ *((unsigned long*)& __m256i_op2[1]) = 0x000000003f3f3f3f;
++ *((unsigned long*)& __m256i_op2[0]) = 0x3f3f3f3f00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_result[2]) = 0xc6c6c6c68787878a;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_result[0]) = 0x8787878a00000000;
++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0019081900190019;
++ *((unsigned long*)& __m128i_result[0]) = 0x0019081900190019;
++ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffff0000;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f7f7f0000;
++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080;
++ int_op1 = 0x00000000000000ac;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080;
++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe17cec8fe08008ac;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe0801f41e0800168;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9240f24a84b18025;
++ *((unsigned long*)& __m256i_op1[2]) = 0x9240f24a84b18025;
++ *((unsigned long*)& __m256i_op1[1]) = 0xb2c0b341807f8006;
++ *((unsigned long*)& __m256i_op1[0]) = 0xb2c0b341807f8006;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000012481e4950;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001658166830;
++ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x5b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f7f7f0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080;
++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xf6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c0;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00190819;
++ *((int*)& __m128_op1[2]) = 0x00190019;
++ *((int*)& __m128_op1[1]) = 0x00190819;
++ *((int*)& __m128_op1[0]) = 0x00190019;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000c0;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000012481e4950;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000001658166830;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040;
++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000;
++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000080;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000080;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff;
++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x39);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x3f3f3f3c;
++ *((int*)& __m256_op0[5]) = 0xc6c6c6c6;
++ *((int*)& __m256_op0[4]) = 0x8787878a;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x3f3f3f3c;
++ *((int*)& __m256_op0[1]) = 0x8787878a;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff9c9d00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfff7fff7fff7fff7;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff7fff7fff7fff7;
++ *((unsigned long*)& __m256i_result[1]) = 0xfff7fff7fff7fff7;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff7fff7fff7fff7;
++ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x9240f24a84b18025;
++ *((unsigned long*)& __m256i_op0[2]) = 0x9240f24a84b18025;
++ *((unsigned long*)& __m256i_op0[1]) = 0xb2c0b341807f8006;
++ *((unsigned long*)& __m256i_op0[0]) = 0xb2c0b341807f8006;
++ *((unsigned long*)& __m256i_result[3]) = 0x009200f200840080;
++ *((unsigned long*)& __m256i_result[2]) = 0x009200f200840080;
++ *((unsigned long*)& __m256i_result[1]) = 0x00b200b300800080;
++ *((unsigned long*)& __m256i_result[0]) = 0x00b200b300800080;
++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc6c6c6c68787878a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8787878a00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_d(__m256i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffff800;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000;
++ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0008000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffff800;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffff800;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000;
++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffff6;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffff6;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op2[3]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_op2[2]) = 0xc6c6c6c68787878a;
++ *((unsigned long*)& __m256i_op2[1]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_op2[0]) = 0x8787878a00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe3;
++ *((unsigned long*)& __m256i_result[2]) = 0x63636344c3c3c4f6;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffc3;
++ *((unsigned long*)& __m256i_result[0]) = 0xc3c3c500fffffff6;
++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x009200f200840080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x009200f200840080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00b200b300800080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00b200b300800080;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x009200f200840080;
++ *((unsigned long*)& __m256i_result[2]) = 0x009200f200840080;
++ *((unsigned long*)& __m256i_result[1]) = 0x00b200b300800080;
++ *((unsigned long*)& __m256i_result[0]) = 0x00b200b300800080;
++ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvneg_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc6c6c6c68787878a;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000003f3f3f3c;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8787878a00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003f3fc6c68787;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f87870000;
++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vfclass_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffff0000;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00003f3f;
++ *((int*)& __m256_op1[4]) = 0xc6c68787;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00003f3f;
++ *((int*)& __m256_op1[0]) = 0x87870000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out =
__lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffff0000; ++ *((int*)& __m128_op0[1]) = 0x00ff0000; ++ *((int*)& __m128_op0[0]) = 0x00ff0000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000800; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0xffffffff; ++ *((int*)& __m128_op2[2]) = 0xfffff800; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xfffff800; ++ *((int*)& __m128_result[1]) = 0x80000000; ++ *((int*)& __m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe15; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe15; ++ __m128i_out = __lsx_vldi(3605); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x9240000000008025; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffff24affff8025; ++ *((unsigned long*)& __m256i_op0[1]) = 0xb2c0000000008006; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffb341ffff8006; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9240000000008025; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffff24affff8025; ++ *((unsigned long*)& __m256i_op1[1]) = 0xb2c0000000008006; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffb341ffff8006; ++ *((unsigned long*)& __m256i_result[3]) = 0xff2400000000ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffeffe4fffeff00; ++ *((unsigned long*)& __m256i_result[1]) = 0xff6400000000ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffeff66fffeff00; ++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffe15; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffe15; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 
0xffffffff00000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x009200f200840080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x009200f200840080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00b200b300800080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00b200b300800080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffc0000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000000; ++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x83); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000e00000080; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000e00000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000e00000080; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000e00000080; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xff240000; ++ *((int*)& __m256_op0[6]) = 0x0000ff00; ++ *((int*)& __m256_op0[5]) = 0xfffeffe4; ++ *((int*)& __m256_op0[4]) = 0xfffeff00; ++ *((int*)& __m256_op0[3]) = 0xff640000; ++ *((int*)& __m256_op0[2]) = 0x0000ff00; ++ *((int*)& __m256_op0[1]) = 0xfffeff66; ++ *((int*)& __m256_op0[0]) = 0xfffeff00; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ 
*((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff0000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000080; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe; ++ __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x4b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3fc6c68787; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f87870000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003f3fc6c68787; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f87870000; ++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3fc6c68787; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f87870000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffefffffffeff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003e3ec6c68686; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffeff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003e3e87870000; ++ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff2400000000ff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffeffe4fffeff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff6400000000ff00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffeff66fffeff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0080808080808080; ++ *((unsigned long*)& __m256i_result[2]) = 0x0080808080808080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0080808100808080; ++ *((unsigned long*)& __m256i_result[0]) = 0x0080808000808080; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& 
__m128d_op1[1]) = 0xffffffff00000080; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01fe04; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01fe04; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x01010101; ++ *((int*)& __m128_op0[2]) = 0x01010101; ++ *((int*)& __m128_op0[1]) = 0x01010101; ++ *((int*)& __m128_op0[0]) = 0x01010101; ++ *((int*)& __m128_result[3]) = 0xc2fa0000; ++ *((int*)& __m128_result[2]) = 0xc2fa0000; ++ *((int*)& __m128_result[1]) = 0xc2fa0000; ++ *((int*)& __m128_result[0]) = 0xc2fa0000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = 
__lasx_xvsat_d(__m256i_op0,0x21); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000edff00fffd; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000fff10000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000cdff00fffd; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff320000ffff; ++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x47000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; 
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x01010101010000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffef800; ++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080807; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080807; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffef; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffef; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x5f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x01010101010000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffef; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffef; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0100feff0100eeef; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0100feff00feef11; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001010; ++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffef; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffef; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0404ffff00000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0404040800000010; ++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfffefffe; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xfffefffe; ++ *((int*)& __m256_op0[2]) = 0xfffefffd; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffdfffffffe0; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffdfffffffe0; ++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000100da000100fd; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0001ffe20001fefd; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0001009a000100fd; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0001ff640001fefd; ++ *((unsigned long*)& __m256i_result[3]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000c2f90000bafa; ++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000002020000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000201eff0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000002020000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fef010; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0002000400000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020006; ++ unsigned_int_result = 0x0000000000020006; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x0); ++ *((unsigned long*)& __m256d_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01010101010000ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x8080808280808082; ++ *((unsigned long*)& __m256i_result[2]) = 0x8080808280808082; ++ *((unsigned long*)& __m256i_result[1]) = 0x8080808280808080; ++ *((unsigned long*)& __m256i_result[0]) = 0x8080808280808082; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80808082; ++ *((int*)& __m256_op0[6]) = 0x80808082; ++ *((int*)& __m256_op0[5]) = 0x80808082; ++ *((int*)& __m256_op0[4]) = 0x80808082; ++ *((int*)& __m256_op0[3]) = 0x80808082; ++ *((int*)& __m256_op0[2]) = 0x80808080; ++ *((int*)& __m256_op0[1]) = 0x80808082; ++ *((int*)& __m256_op0[0]) = 0x80808082; ++ *((int*)& __m256_op1[7]) = 0x55555555; ++ *((int*)& __m256_op1[6]) = 0x55555555; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x55555555; ++ *((int*)& __m256_op1[2]) = 0x55555555; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000100da000100fd; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001ffe20001fefd; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001009a000100fd; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001ff640001fefd; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[2]) = 0x000000007ff90000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000001ff60000; ++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x8080808280808082; ++ *((unsigned long*)& __m256d_op0[2]) = 0x8080808280808082; ++ *((unsigned long*)& __m256d_op0[1]) = 0x8080808280808080; ++ *((unsigned long*)& __m256d_op0[0]) = 0x8080808280808082; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff3d06ffff4506; ++ *((unsigned long*)& __m128i_result[0]) = 0x7ffffffe7ffff800; ++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffff3d06ffff4506; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7ffffffe7ffff800; ++ *((unsigned long*)& 
__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffe00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffff800; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffff800; ++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x8a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff3d06ffff4506; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe7ffff800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xc2f9bafac2fac2fa; ++ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x7ff90000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x1ff60000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xfffffffe; ++ *((int*)& __m256_op1[4]) = 0x00000001; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0xfffffffe; ++ *((int*)& __m256_op1[0]) = 0x00000001; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000001; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xfffebd06fffe820c; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000003f7e3f; ++ *((unsigned long*)& 
__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbdf077eee7e20468; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe3b1cc6953e7db29; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000e7e20468; ++ *((unsigned long*)& __m128i_result[0]) = 0xc2fac2fa53e7db29; ++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256d_op0[0]) = 0xff874dc687870000; ++ *((unsigned long*)& __m256d_result[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff8001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; ++ long_int_result = 0x1f0fdf7f3e3b31d4; ++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1); ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000e7e20468; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc2fac2fa53e7db29; ++ *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_result[0]) = 0x00a6ffceffb60052; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffebd06fffe820c; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x7fff7ffe7fff3506; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffebd06fffe820c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0cffffff18; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefffefffeff6a0c; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00a6ffceffb60052; ++ unsigned_int_result = 0x0000000000000084; ++ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xa); ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff0cffffff18; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefffefffeff6a0c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffefefe6a; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op2[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op2[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x61f1000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0108000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x61f1a18100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0108000000000000; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fdf000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x7fdf000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fdf7fff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fdf7fff00000000; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x35); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0x7474f6fd7474fefe; ++ *((unsigned long*)& __m128i_result[0]) = 0xf474f6fef474f6fe; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x74); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7474f6fd7474fefe; ++ *((unsigned long*)& __m128d_op0[0]) = 0xf474f6fef474f6fe; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x01fc03e000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x01fc03e000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00a6ffceffb60052; ++ *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffefe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffc2ba; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xff84fff4; ++ *((int*)& __m128_op0[2]) = 0xff84fff4; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xfffffff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x41dfffc000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x41dfffdfffc00000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffff0c8000c212; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfefffeff7f002d06; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x01fc03e000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x01fc03e000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00fffb0402fddf20; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00fffb0402fddf20; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001fbf9fbe29f52; ++ *((unsigned long*)& __m256i_result[2]) = 0x5b409c0000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001fbf9fbe29f52; ++ *((unsigned long*)& __m256i_result[0]) = 0x5b409c0000000000; ++ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x41dfffc000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x41dfffdfffc00000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0001fbf9fbe29f52; ++ *((unsigned long*)& __m256i_op2[2]) = 0x5b409c0000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0001fbf9fbe29f52; ++ *((unsigned long*)& __m256i_op2[0]) = 0x5b409c0000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_result[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00a6ffceffb60052; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xff84fff4ff84fff4; ++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xfffffffffefefe6a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff874dc687870000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_result[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00fffb04; ++ *((int*)& __m256_op0[6]) = 0x02fddf20; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00fffb04; ++ *((int*)& __m256_op0[2]) = 0x02fddf20; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x41dfffc0; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x41dfffdf; ++ *((int*)& __m256_op1[2]) = 0xffc00000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xff84fff4; ++ *((int*)& __m128_op0[2]) = 0xff84fff4; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xfffffff0; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xff84fff4; ++ *((int*)& __m128_op0[2]) = 0xff84fff4; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ 
*((int*)& __m128_op0[0]) = 0xfffffff0; ++ *((int*)& __m128_op1[3]) = 0xff84fff4; ++ *((int*)& __m128_op1[2]) = 0xff84fff4; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xfffffff0; ++ *((int*)& __m128_result[3]) = 0xffc4fff4; ++ *((int*)& __m128_result[2]) = 0xffc4fff4; ++ *((int*)& __m128_result[1]) = 0xffffffff; ++ *((int*)& __m128_result[0]) = 0xfffffff0; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d(__m128i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff874dc687870000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe367cc82f8989a; ++ *((unsigned long*)& __m256i_result[2]) = 0x4f90000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffe367cc82f8989a; ++ *((unsigned long*)& __m256d_op0[2]) = 0x4f90000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& 
__m256d_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x1f0fdf7f; ++ *((int*)& __m256_op0[6]) = 0x3e3b31d4; ++ *((int*)& __m256_op0[5]) = 0x7ff80000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x1f0fdf7f; ++ *((int*)& __m256_op0[2]) = 0x3e3b31d4; ++ *((int*)& __m256_op0[1]) = 0x7ff80000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x80000000; ++ *((int*)& __m256_result[5]) = 0x7ff80000; ++ *((int*)& __m256_result[4]) = 0x80000000; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000000; ++ *((int*)& __m256_result[1]) = 0x7ff80000; ++ *((int*)& __m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x30); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 
0x00000000fefefe68; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000001; ++ *((int*)& __m128_op0[2]) = 0xfffffffe; ++ *((int*)& __m128_op0[1]) = 0x00000001; ++ *((int*)& __m128_op0[0]) = 0xfffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x2a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long*)& __m256i_result[2]) = 0x8008000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long*)& __m256i_result[0]) = 0x8008000000000000; ++ __m256i_out = 
__lasx_xvsub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffe367cc82f8989a; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4f90000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000082f8989a; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000d58f43c8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x1f0fdf7f; ++ *((int*)& __m256_op0[6]) = 0x3e3b31d4; ++ *((int*)& __m256_op0[5]) = 0x7ff80000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x1f0fdf7f; ++ *((int*)& __m256_op0[2]) = 0x3e3b31d4; ++ *((int*)& __m256_op0[1]) = 0x7ff80000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x002a5429; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x002a5429; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc7418a023680; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff8845bb954b00; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffc7418a023680; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff8845bb954b00; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000002a5429; ++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000082f8989a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000d58f43c8; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010183f9999b; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x01010101d58f43c9; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffc741; ++ *((int*)& __m256_op0[6]) = 0x8a023680; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffff8845; ++ *((int*)& __m256_op0[2]) = 0xbb954b00; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffc74180000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff884580000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc74180000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff884580000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0xbf800000; ++ *((int*)& __m256_result[6]) = 0xbf800000; ++ *((int*)& __m256_result[5]) = 0xd662fa00; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0xbf800000; ++ *((int*)& __m256_result[2]) = 0xbf800000; ++ *((int*)& __m256_result[1]) = 0xd6ef7500; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; 
++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xbf800000bf800000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xd662fa0000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xbf800000bf800000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xd6ef750000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x417e01f040800000; ++ *((unsigned long*)& __m256i_result[2]) = 0x299d060000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x417e01f040800000; ++ *((unsigned long*)& __m256i_result[0]) = 0x29108b0000000000; ++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8008000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8008000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000b8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000b8; ++ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x60f02081c1c4ce2c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8008000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x60f02081c1c4ce2c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8008000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010183f9999b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01010101d58f43c9; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010183f9999b; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x01010101d58f43c9; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000002a54290; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000002a54290; ++ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000002a54290; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000002a54290; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010183f9999b; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[1]) = 0x01010101d58f43c9; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long*)& __m256i_op1[3]) = 0x417e01f040800000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x299d060000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x417e01f040800000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x29108b0000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000001000000010; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe6a; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x7c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fefefe6a; ++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fefefe6a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fbf9; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fbf9; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000000; ++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long*)& __m128i_result[0]) = 0x5a5a5a5a5b5a5b5a; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x5a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x00000001fffffffe;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x5a5a5a5a5b5a5b5a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x5a5a5a5a5b5a5b5a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x00000001494b494a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00000001494b494a;
-+  __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0x00000000;
-+  *((int*)& __m128_op0[2]) = 0x00000000;
-+  *((int*)& __m128_op0[1]) = 0x00000000;
-+  *((int*)& __m128_op0[0]) = 0x00000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000007070700;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000002010202;
-+  __m128i_out = __lsx_vclo_b(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000055;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000055;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xf);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000100;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000100;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0xffffffff;
-+  *((int*)& __m256_op0[6]) = 0xffffffff;
-+  *((int*)& __m256_op0[5]) = 0xffffffff;
-+  *((int*)& __m256_op0[4]) = 0xffffffff;
-+  *((int*)& __m256_op0[3]) = 0xffffffff;
-+  *((int*)& __m256_op0[2]) = 0xffffffff;
-+  *((int*)& __m256_op0[1]) = 0xffffffff;
-+  *((int*)& __m256_op0[0]) = 0xffffffff;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x00000000;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x002a542a;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x00000000;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x002a542a;
-+  *((int*)& __m256_result[7]) = 0xffffffff;
-+  *((int*)& __m256_result[6]) = 0xffffffff;
-+  *((int*)& __m256_result[5]) = 0xffffffff;
-+  *((int*)& __m256_result[4]) = 0xffffffff;
-+  *((int*)& __m256_result[3]) = 0xffffffff;
-+  *((int*)& __m256_result[2]) = 0xffffffff;
-+  *((int*)& __m256_result[1]) = 0xffffffff;
-+  *((int*)& __m256_result[0]) = 0xffffffff;
-+  __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000100;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000100;
-+  *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
-+  *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe;
-+  __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000007070700;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000002010202;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000007070700;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000002010202;
-+  __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0101010183f95466;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x01010101d58efe94;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0101010183f95466;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0x01010101d58efe94;
-+  __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xa7);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0x00000000;
-+  *((int*)& __m128_op0[2]) = 0x00000000;
-+  *((int*)& __m128_op0[1]) = 0xffffffff;
-+  *((int*)& __m128_op0[0]) = 0xffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftintrne_w_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000055;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000055;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffff01010101;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffff00d6acd7;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffff01010101;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffff00d6acd7;
-+  __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0101010183f95466;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x01010101d58efe94;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000101000083f95;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010;
-+  *((unsigned long*)& __m256i_result[1]) = 0x00001010000d58f0;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010;
-+  __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0xc);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xfffffffefffffffe;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000007f00000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0x7ffffffeffffffff;
-+  __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vslei_h(__m128i_op0,10);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
-+  __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256d_op0[0]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001;
-+  *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[3]) = 0x001f001f001f001f;
-+  *((unsigned long*)& __m256i_result[2]) = 0x001f001f02c442af;
-+  *((unsigned long*)& __m256i_result[1]) = 0x001f001f001f001f;
-+  *((unsigned long*)& __m256i_result[0]) = 0x001f001f02c442af;
-+  __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1f);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0xffffffff;
-+  *((int*)& __m128_op0[2]) = 0xffffffff;
-+  *((int*)& __m128_op0[1]) = 0x7ffffffe;
-+  *((int*)& __m128_op0[0]) = 0xffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftintrneh_l_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  unsigned_long_int_result = 0x0000000000000000;
-+  unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3);
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00005a5a00005a5a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x00005b5a00005b5a;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+  __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x5a5a5a5a5b5a5b5a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x5a5a5a5a5b5a5b5a;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000005400;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000005400;
-+  __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x00000000fefefe6a;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x00000000c2bac2c2;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe6a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2;
-+  __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
-+  __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vneg_b(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x00fe01f000000000;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x00fe01f000000000;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x00000000000007f8;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x2d);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x001f001f02c442af;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x001f001f02c442af;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[3]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op2[2]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op2[0]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_result[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000c40086;
-+  __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x7ffffffeffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x01ff01ff01ff01ff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x01ff01ff01ff01ff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff;
-+  __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x1b);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vexth_du_wu(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00000000002a542a;
-+  __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0xffffffff;
-+  *((int*)& __m128_op0[2]) = 0xffffffff;
-+  *((int*)& __m128_op0[1]) = 0x7ffffffe;
-+  *((int*)& __m128_op0[0]) = 0xffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftintrml_l_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0xffffffff;
-+  *((int*)& __m256_op0[6]) = 0xfefefeff;
-+  *((int*)& __m256_op0[5]) = 0xffffffff;
-+  *((int*)& __m256_op0[4]) = 0xff295329;
-+  *((int*)& __m256_op0[3]) = 0xffffffff;
-+  *((int*)& __m256_op0[2]) = 0xfefefeff;
-+  *((int*)& __m256_op0[1]) = 0xffffffff;
-+  *((int*)& __m256_op0[0]) = 0xff295329;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
-+  __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000004290;
-+  __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00000000002a96ba;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00000000002a96ba;
-+  __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
-+  __m128i_out = __lsx_vclz_w(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x4080808080808080;
-+  __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[3]) = 0xfffe00f7ffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff629d7;
-+  *((unsigned long*)& __m256i_result[1]) = 0xfffe00f7ffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff629d7;
-+  __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x001e001ea1bfa1bf;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x001e001e83e5422e;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x001e001ea1bfa1bf;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x011f011f0244420e;
-+  *((unsigned long*)& __m256d_op1[3]) = 0xfffe00f7ffffffff;
-+  *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffff629d7;
-+  *((unsigned long*)& __m256d_op1[1]) = 0xfffe00f7ffffffff;
-+  *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffff629d7;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0x01ff01ff;
-+  *((int*)& __m128_op0[2]) = 0x01ff01ff;
-+  *((int*)& __m128_op0[1]) = 0x01ff01ff;
-+  *((int*)& __m128_op0[0]) = 0x01ff01ff;
-+  *((int*)& __m128_op1[3]) = 0x00000000;
-+  *((int*)& __m128_op1[2]) = 0x00000000;
-+  *((int*)& __m128_op1[1]) = 0x00000000;
-+  *((int*)& __m128_op1[0]) = 0x00000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0x01ff01ff;
-+  *((int*)& __m128_op0[2]) = 0x01ff01ff;
-+  *((int*)& __m128_op0[1]) = 0x01ff01ff;
-+  *((int*)& __m128_op0[0]) = 0x01ff01ff;
-+  *((int*)& __m128_result[3]) = 0xc2f80000;
-+  *((int*)& __m128_result[2]) = 0xc2f80000;
-+  *((int*)& __m128_result[1]) = 0xc2f80000;
-+  *((int*)& __m128_result[0]) = 0xc2f80000;
-+  __m128_out = __lsx_vflogb_s(__m128_op0);
-+  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x7ffffffeffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x4080808080808080;
-+  *((unsigned long*)& __m128i_result[1]) = 0xff80ffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0x7ffffffeffffffff;
-+  __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xe6);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0xff80ffff;
-+  *((int*)& __m128_op0[2]) = 0xffffffff;
-+  *((int*)& __m128_op0[1]) = 0x7ffffffe;
-+  *((int*)& __m128_op0[0]) = 0xffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftintrp_w_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x001e001ea1bfa1bf;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x001e001e83e5422e;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x001e001ea1bfa1bf;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x011f011f0244420e;
-+  *((unsigned long*)& __m256i_result[3]) = 0x000f000fd0dfd0df;
-+  *((unsigned long*)& __m256i_result[2]) = 0x000f000f83ef4b4a;
-+  *((unsigned long*)& __m256i_result[1]) = 0x000f000fd0dfd0df;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0110011001224b07;
-+  __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00000000;
-+  *((int*)& __m256_op0[6]) = 0xffffffff;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x83f95466;
-+  *((int*)& __m256_op0[3]) = 0x00000000;
-+  *((int*)& __m256_op0[2]) = 0xffffffff;
-+  *((int*)& __m256_op0[1]) = 0x01010101;
-+  *((int*)& __m256_op0[0]) = 0x00005400;
-+  *((int*)& __m256_op1[7]) = 0xffffffff;
-+  *((int*)& __m256_op1[6]) = 0xfefefeff;
-+  *((int*)& __m256_op1[5]) = 0xffffffff;
-+  *((int*)& __m256_op1[4]) = 0xff295329;
-+  *((int*)& __m256_op1[3]) = 0xffffffff;
-+  *((int*)& __m256_op1[2]) = 0xfefefeff;
-+  *((int*)& __m256_op1[1]) = 0xffffffff;
-+  *((int*)& __m256_op1[0]) = 0xff295329;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x00000000002a96ba;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x00000000002a96ba;
-+  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[2]) = 0x0000000083f95466;
-+  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256d_op1[0]) = 0x0101010100005400;
-+  *((unsigned long*)& __m256d_result[3]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256d_result[2]) = 0x0000000083f95466;
-+  *((unsigned long*)& __m256d_result[1]) = 0x0000000000004290;
-+  *((unsigned long*)& __m256d_result[0]) = 0x0101010100005400;
-+  __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1);
-+  ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00000000000002a5;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00000000000002a5;
-+  __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefeff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff295329;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[3]) = 0x007f00f8ff7fff80;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00000000fff6a9d8;
-+  *((unsigned long*)& __m256i_result[1]) = 0x007f00f8ff7fff80;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00000000fff6a9d8;
-+  __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x01ff01ff01ff01ff;
-+  *((unsigned long*)& __m128d_op0[0]) = 0x01ff01ff01ff01ff;
-+  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128d_result[1]) = 0x01ff01ff01ff01ff;
-+  *((unsigned long*)& __m128d_result[0]) = 0x01ff01ff01ff01ff;
-+  __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1);
-+  ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x007f00f8ff7fff80;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff6a9d8;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x007f00f8ff7fff80;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff6a9d8;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x007f00f8ff7fff80;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff6a9d8;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x007f00f8ff7fff80;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff6a9d8;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+  __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00000000;
-+  *((int*)& __m256_op0[6]) = 0x00000000;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x02a54290;
-+  *((int*)& __m256_op0[3]) = 0x00000000;
-+  *((int*)& __m256_op0[2]) = 0x00000000;
-+  *((int*)& __m256_op0[1]) = 0x00000000;
-+  *((int*)& __m256_op0[0]) = 0x02a54290;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x02a54290;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x0154dc84;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x02a54290;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x00000089;
-+  *((int*)& __m256_result[7]) = 0x00000000;
-+  *((int*)& __m256_result[6]) = 0x82a54290;
-+  *((int*)& __m256_result[5]) = 0x00000000;
-+  *((int*)& __m256_result[4]) = 0x028aa700;
-+  *((int*)& __m256_result[3]) = 0x00000000;
-+  *((int*)& __m256_result[2]) = 0x82a54290;
-+  *((int*)& __m256_result[1]) = 0x00000000;
-+  *((int*)& __m256_result[0]) = 0x02a54287;
-+  __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000001ff000001ff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000001ff000001ff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x000001ff000001ff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x000001ff000001ff;
-+  *((unsigned long*)& __m128i_op2[1]) = 0xff80ffffffffffff;
-+  *((unsigned long*)& __m128i_op2[0]) = 0x7ffffffeffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000002fe800000ff;
-+  *((unsigned long*)& __m128i_result[0]) = 0x7ffffe0100000000;
-+  __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+  __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a;
-+  __m128i_out = __lsx_vaddi_hu(__m128i_op0,0xa);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00fe01f0;
-+  *((int*)& __m256_op0[6]) = 0x00010000;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x00c40086;
-+  *((int*)& __m256_op0[3]) = 0x00fe01f0;
-+  *((int*)& __m256_op0[2]) = 0x00010000;
-+  *((int*)& __m256_op0[1]) = 0x00000000;
-+  *((int*)& __m256_op0[0]) = 0x00c40086;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x82a54290;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x028aa700;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x82a54290;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x02a54287;
-+  *((int*)& __m256_result[7]) = 0x00000000;
-+  *((int*)& __m256_result[6]) = 0x00010000;
-+  *((int*)& __m256_result[5]) = 0x00000000;
-+  *((int*)& __m256_result[4]) = 0x00c40086;
-+  *((int*)& __m256_result[3]) = 0x00000000;
-+  *((int*)& __m256_result[2]) = 0x00010000;
-+  *((int*)& __m256_result[1]) = 0x00000000;
-+  *((int*)& __m256_result[0]) = 0x00c40086;
-+  __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x19);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x9);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000082a54290;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x00000000028aa700;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000082a54290;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54287;
-+  *((unsigned long*)& __m256i_result[3]) = 0x007f00f841532148;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000001a753c3;
-+  *((unsigned long*)& __m256i_result[1]) = 0x007f00f841532148;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000001b52187;
-+  __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a;
-+  __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00fe01f0;
-+  *((int*)& __m256_op0[6]) = 0x00010000;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x00c40086;
-+  *((int*)& __m256_op0[3]) = 0x00fe01f0;
-+  *((int*)& __m256_op0[2]) = 0x00010000;
-+  *((int*)& __m256_op0[1]) = 0x00000000;
-+  *((int*)& __m256_op0[0]) = 0x00c40086;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x00000000;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x00000000;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x00000000;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x00000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
-+  __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+  __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0x98ff98ff220e220d;
-+  *((unsigned long*)& __m128d_op0[0]) = 0xa2e1a2601ff01ff0;
-+  *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
-+  __m128i_out = __lsx_vfrintrz_d(__m128d_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000082a54290;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000028aa700;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000082a54290;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54287;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x803f800080000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xe0404041c0404040;
-+  int_op1 = 0x0000000000000001;
-+  *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_result[0]) = 0xe0404041e0404041;
-+  __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  int_op1 = 0x0000000000000400;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a542a;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000ffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000242;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000242;
-+  __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x4);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000c40086;
-+  __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x803f800080000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xe0404041c0404040;
-+  *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_result[0]) = 0x803f800080000000;
-+  __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128d_op0[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128d_op0[0]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_result[0]) = 0xe0404041e0404041;
-+  __m128i_out = __lsx_vfrintrne_d(__m128d_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000002a54290;
-+  __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x0);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+  *((int*)& __m128_result[3]) = 0x35200000;
-+  *((int*)& __m128_result[2]) = 0x35200000;
-+  *((int*)& __m128_result[1]) = 0x35200000;
-+  *((int*)& __m128_result[0]) = 0x35200000;
-+  __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
-+  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
-+  __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256d_op0[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256d_op0[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256d_op0[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
-+  __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
-+  ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x803f800080000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x000000000000000e;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000009;
-+  __m128i_out = __lsx_vpcnt_d(__m128i_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x803f800080000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m128_op0[3]) = 0xff80ffff;
-+  *((int*)& __m128_op0[2]) = 0xffffffff;
-+  *((int*)& __m128_op0[1]) = 0x7ffffffe;
-+  *((int*)& __m128_op0[0]) = 0xffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vftinth_l_s(__m128_op0);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xe0404041e0404041;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x803f800080000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((int*)& __m256_op0[7]) = 0x00000000;
-+  *((int*)& __m256_op0[6]) = 0x02a54290;
-+  *((int*)& __m256_op0[5]) = 0x00000000;
-+  *((int*)& __m256_op0[4]) = 0x0154dc84;
-+  *((int*)& __m256_op0[3]) = 0x00000000;
-+  *((int*)& __m256_op0[2]) = 0x02a54290;
-+  *((int*)& __m256_op0[1]) = 0x00000000;
-+  *((int*)& __m256_op0[0]) = 0x00000089;
-+  *((int*)& __m256_op1[7]) = 0x00000000;
-+  *((int*)& __m256_op1[6]) = 0x02a54290;
-+  *((int*)& __m256_op1[5]) = 0x00000000;
-+  *((int*)& __m256_op1[4]) = 0x0154dc84;
-+  *((int*)& __m256_op1[3]) = 0x00000000;
-+  *((int*)& __m256_op1[2]) = 0x02a54290;
-+  *((int*)& __m256_op1[1]) = 0x00000000;
-+  *((int*)& __m256_op1[0]) = 0x00000089;
-+  *((int*)& __m256_result[7]) = 0x00000000;
-+  *((int*)& __m256_result[6]) = 0x02a54290;
-+  *((int*)& __m256_result[5]) = 0x00000000;
-+  *((int*)& __m256_result[4]) = 0x0154dc84;
-+  *((int*)& __m256_result[3]) = 0x00000000;
-+  *((int*)& __m256_result[2]) = 0x02a54290;
-+  *((int*)& __m256_result[1]) = 0x00000000;
-+  *((int*)& __m256_result[0]) = 0x00000089;
-+  __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
-+  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x59);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x7);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff;
-+  *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089;
-+  __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_result[1]) = 0x0a00000000000000;
-+  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
-+  __m128i_out = __lsx_vbsll_v(__m128i_op0,0xf);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00007fff00000089;
-+  __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010;
-+  __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x10);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
-+  __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000;
-+  __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x10);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
-+  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
-+  __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3f);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
-+  *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a;
-+  *((unsigned long*)& __m128i_op2[1]) = 0x000000004c7f4c7f;
-+  *((unsigned long*)& __m128i_op2[0]) = 0xe0c0c0c0d1c7d1c6;
-+  *((unsigned long*)& __m128i_result[1]) = 0x061006100613030c;
-+  *((unsigned long*)& __m128i_result[0]) = 0x4d6814ef9c77ce46;
-+  __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_result[3]) = 0xfebcfebcfebcfebc;
-+  *((unsigned long*)& __m256i_result[2]) = 0xfebcfebcfebcfebc;
-+  *((unsigned long*)& __m256i_result[1]) = 0xfebcfebcfebcfebc;
-+  *((unsigned long*)& __m256i_result[0]) = 0xfebcfebcfebcfebc;
-+  __m256i_out = __lasx_xvldi(1724);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op1[1]) = 0x061006100613030c;
-+  *((unsigned long*)& __m128i_op1[0]) = 0x4d6814ef9c77ce46;
-+  *((unsigned long*)& __m128i_result[1]) = 0x010f010f0112010b;
-+  *((unsigned long*)& __m128i_result[0]) = 0x016701ee01760145;
-+  __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
-+  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x00000000000001fe;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fe;
-+  __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000;
-+  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[2]) = 0x0000000000fd0000;
-+  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
-+  *((unsigned long*)& __m256i_result[0]) = 0x0000000000fd0000;
-+  __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x11);
-+  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
-+
-+  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
-+  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
*((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x29); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a000a000a000a00; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff6fff6fff6fff6; ++ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x010f00000111fffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x016700dc0176003a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0a000a000a000a00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long*)& __m256d_op1[2]) = 0xa5a5a5a5a5a5a5ff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long*)& __m256d_op1[0]) = 0xa5a5a5a5a5a5a5ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fd0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fd0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001b00fd0000; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long*)& __m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long*)& __m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long*)& __m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fd0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fd0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007f0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007f0000; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000007f0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f0000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000a0000000a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000a00000009; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x000a000a0000000a; ++ *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xaf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long*)& __m256d_op1[2]) = 0xa5a5a5a5a5a99e03; ++ *((unsigned long*)& __m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long*)& __m256d_op1[0]) = 0xa5a5a5a5a5a99e03; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000a0000000a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000a00000009; ++ *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a000a0a0a00; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0009090900; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000a000a00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000a000a00000000; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000feb60000b7d0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000feb60000c7eb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000feb60000b7d0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000feb60000c7eb; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0a0a0a000a0a0a00; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0a0a0a0009090900; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000feb60000b7d0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000feb60000c7eb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000feb60000b7d0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000feb60000c7eb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0707feb60707b7d0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0707feb60707c7eb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0707feb60707b7d0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0707feb60707c7eb; ++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0040000000400000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0040000000400000; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001900000019; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long*)& __m256d_op1[3]) = 
0x0000001900000019; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000001900000019; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000001900000019; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0040000000400000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0040000000400000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0141010101410101; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_result[1]) = 0xfebffefffebffeff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfebffefffebffeff; ++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0707feb60707b7d0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0707feb60707b7d0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0707feb60707b7d0; ++ *((unsigned long*)& __m256i_result[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long*)& __m256i_result[1]) = 0x0707feb60707b7d0; ++ *((unsigned long*)& __m256i_result[0]) = 0x45baa7ef6a95a985; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x6420e0208400c4c4; ++ *((unsigned long*)& __m128i_op0[0]) = 0x20c4e0c4e0da647a; ++ *((unsigned long*)& __m128i_result[1]) = 0x6420e0208400c4e3; ++ *((unsigned long*)& __m128i_result[0]) = 0x20c4e0c4e0da6499; ++ __m128i_out = __lsx_vaddi_du(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x6420e020; ++ *((int*)& __m128_op0[2]) = 0x8400c4e3; ++ *((int*)& __m128_op0[1]) = 0x20c4e0c4; ++ *((int*)& __m128_op0[0]) = 0xe0da6499; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfebffefffebffeff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfebffefffebffeff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000019; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000019; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0707feb608c9328b; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc237bd65fc892985; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0707feb608c9328b; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc237bd65fc892985; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00150015003a402f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x333568ce26dcd055; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00150015003a402f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x333568ce26dcd055; ++ *((unsigned long*)& __m256i_result[3]) = 0x0e0f1192846ff912; ++ *((unsigned long*)& __m256i_result[2]) = 0x002a0074666a4db9; ++ *((unsigned long*)& __m256i_result[1]) = 0x0e0f1192846ff912; ++ *((unsigned long*)& __m256i_result[0]) = 0x002a0074666a4db9; ++ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00150015003a402f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x333568ce26dcd055; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00150015003a402f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x333568ce26dcd055; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000007d0d0d0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000007d0d0d0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000007d0d0d0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000007d0d0d0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000007d0d0d00000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000007d0d0d00000; ++ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001b00fd0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001b; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001b; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000fd00000000; ++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000be00be; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x1f1b917c9f3d5e05; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0e0f1192846ff912; ++ *((unsigned long*)& __m256i_op0[2]) = 0x002a0074666a4db9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0e0f1192846ff912; ++ *((unsigned long*)& __m256i_op0[0]) = 0x002a0074666a4db9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000018; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000018; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000100000018; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000100000018; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x1f60000000c00000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x1f60000000c00000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x1f1b917c; ++ *((int*)& __m128_op0[0]) = 0x9f3d5e05; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x4fa432d6; ++ *((int*)& __m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1f60000000c00000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1f60000000c00000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x60000000c0000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x60000000c0000000; ++ 
__m256i_out = __lasx_xvslli_h(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff003f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff003f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000627; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000627; ++ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x400040003abf4000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x400040003abf4000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000627; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000627; ++ *((unsigned long*)& __m256i_op2[3]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff003f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff003f; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fff; ++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000627; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000627; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1f60000000c00000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1f60000000c00000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x7fff7fff05407fff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000627; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000627; ++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4180418041804180; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x4fa432d67fc00000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0141010101410101; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0141010101410101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0408040800000004;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0408040800000004;
++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000084;
++ *((unsigned long*)& __m256i_result[3]) = 0x0084008400840084;
++ *((unsigned long*)& __m256i_result[2]) = 0x0084008400840084;
++ *((unsigned long*)& __m256i_result[1]) = 0x0084008400840084;
++ *((unsigned long*)& __m256i_result[0]) = 0x0084008400840084;
++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff05407fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff05407fff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00001fff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00001fff;
++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000800;
++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100001;
++ __m256i_out = __lasx_xvclz_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3e1f321529232736;
++ *((unsigned long*)& __m128i_op1[0]) = 0x161d0c373c200826;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000082020201;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000820200000201;
++ __m128i_out = __lsx_vexth_wu_hu(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
++ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x65b780a3ae3bf8cb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x161d0c363c200826;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x65b780a2ae3bf8ca;
++ *((unsigned long*)& __m128i_result[0]) = 0x161d0c373c200827;
++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x3fff3fff;
++ *((int*)& __m256_op0[6]) = 0x3fff3fff;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x3fff3fff;
++ *((int*)& __m256_op0[3]) = 0x3fff3fff;
++ *((int*)& __m256_op0[2]) = 0x3fff3fff;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x3fff3fff;
++ *((int*)& __m256_op1[7]) = 0x017e01fe;
++ *((int*)& __m256_op1[6]) = 0x01fe01fe;
++ *((int*)& __m256_op1[5]) = 0x05860606;
++ *((int*)& __m256_op1[4]) = 0x01fe0202;
++ *((int*)& __m256_op1[3]) = 0x017e01fe;
++ *((int*)& __m256_op1[2]) = 0x01fe0000;
++ *((int*)& __m256_op1[1]) = 0x05860606;
++ *((int*)& __m256_op1[0]) = 0x01fe0004;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x017e01fe01fe01fe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0586060601fe0202;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017e01fe01fe0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0586060601fe0004;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffbfffafffffffe;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffbfffaffff0000;
++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x65b780a2ae3bf8ca;
++ *((unsigned long*)& __m128i_op1[0]) = 0x161d0c373c200827;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000001ff;
++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x017e01fe01fe01fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0586060601fe0202;
++ *((unsigned long*)& __m256i_op0[1]) = 0x017e01fe01fe0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0586060601fe0004;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100001;
++ *((unsigned long*)& __m256i_result[3]) = 0x017f01fe01ff01fe;
++ *((unsigned long*)& __m256i_result[2]) = 0x05960616020e0203;
++ *((unsigned long*)& __m256i_result[1]) = 0x017f01fe01ff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x05960616020e0005;
++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x017f01fe01ff01fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x05960616020e0203;
++ *((unsigned long*)& __m256i_op0[1]) = 0x017f01fe01ff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x05960616020e0005;
++ *((unsigned long*)& __m256i_op1[3]) = 0x017f01fe01ff01fe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x05960616020e0203;
++ *((unsigned long*)& __m256i_op1[1]) = 0x017f01fe01ff0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x05960616020e0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x00fe01fc01fe01fc;
++ *((unsigned long*)& __m256i_result[2]) = 0x012c002c001c0006;
++ *((unsigned long*)& __m256i_result[1]) = 0x00fe01fc01fe0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x012c002c001c000a;
++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff4000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000403f3fff;
++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffbfffafffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffbfffaffff0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00fe01fc01fe01fc;
++ *((unsigned long*)& __m256i_op1[2]) = 0x012c002c001c0006;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00fe01fc01fe0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x012c002c001c000a;
++ *((unsigned long*)& __m256i_result[3]) = 0x807e80fd80fe80fd;
++ *((unsigned long*)& __m256i_result[2]) = 0x80938013800d8002;
++ *((unsigned long*)& __m256i_result[1]) = 0x807e80fd80fe0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x80938013800d0005;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00fe01fc01fe01fc;
++ *((unsigned long*)& __m256i_op0[2]) = 0x012c002c001c0006;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00fe01fc01fe0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x012c002c001c000a;
++ long_int_result = 0x00fe01fc01fe0000;
++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1);
++ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x807e80fd80fe80fd;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80938013800d8002;
++ *((unsigned long*)& __m256i_op0[1]) = 0x807e80fd80fe0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80938013800d0005;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00001fff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00001fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x807e80fd80fe80fd;
++ *((unsigned long*)& __m256i_result[2]) = 0x80938013800d8002;
++ *((unsigned long*)& __m256i_result[1]) = 0x807e80fd80fe0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x80938013800d0005;
++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_result[3]) = 0x38f7414938f7882f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x38f7414938f78830;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x64b680a2ae3af8ca;
++ *((unsigned long*)& __m128i_op0[0]) = 0x161c0c363c200826;
++ *((unsigned long*)& __m128i_result[1]) = 0x64b680a2ae3af8c8;
++ *((unsigned long*)& __m128i_result[0]) = 0x161c0c363c200824;
++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x64b680a2ae3af8c8;
++ *((unsigned long*)& __m128i_op1[0]) = 0x161c0c363c200824;
++ *((unsigned long*)& __m128i_result[1]) = 0x23b57fa16d39f7c8;
++ *((unsigned long*)& __m128i_result[0]) = 0x161c0c363c200824;
++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000001ff;
++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x807e80fd80fe80fd;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80938013800d8002;
++ *((unsigned long*)& __m256i_op0[1]) = 0x807e80fd80fe0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80938013800d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x8091811081118110;
++ *((unsigned long*)& __m256i_result[2]) = 0x80a6802680208015;
++ *((unsigned long*)& __m256i_result[1]) = 0x8091811081110013;
++ *((unsigned long*)& __m256i_result[0]) = 0x80a6802680200018;
++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x0707feb6;
++ *((int*)& __m256_op0[6]) = 0x0707b7d0;
++ *((int*)& __m256_op0[5]) = 0x45baa7ef;
++ *((int*)& __m256_op0[4]) = 0x6a95a985;
++ *((int*)& __m256_op0[3]) = 0x0707feb6;
++ *((int*)& __m256_op0[2]) = 0x0707b7d0;
++ *((int*)& __m256_op0[1]) = 0x45baa7ef;
++ *((int*)& __m256_op0[0]) = 0x6a95a985;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000017547fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000017547fffffff;
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0408040800008003;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0408040800008002;
++ *((unsigned long*)& __m256i_result[0]) = 0xfbf7fbf7ffff7ffd;
++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x23b57fa16d39f7c8;
++ *((unsigned long*)& __m128i_op1[0]) = 0x161c0c363c200824;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000;
++ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x34);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000017547fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000017547fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x807e80fd80fe80fd;
++ *((unsigned long*)& __m256i_op1[2]) = 0x80938013800d8002;
++ *((unsigned long*)& __m256i_op1[1]) = 0x807e80fd80fe0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x80938013800d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000801380f380fe;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000801380f300fb;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0020808100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x29);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x38f7414938f7882f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x38f7414938f78830;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000801380f380fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000801380f300fb;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008;
++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x2c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0408040800008003;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff80800;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0408040800008003;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x04080408fff87803;
++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000800;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff7fedffffff05;
++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x0000fffd;
++ *((int*)& __m128_op1[3]) = 0x7fffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00;
++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_result[3]) = 0x0707b7cff8f84830;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000354ad4c28;
++ *((unsigned long*)& __m256i_result[1]) = 0x0707b7cff8f84830;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000354ad4c28;
++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff82bb9784;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc6bb97ac;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000007ffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x04080408fff87803;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0707b7cff8f84830;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000354ad4c28;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0707b7cff8f84830;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000354ad4c28;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffd5a98;
++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffd5a98;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffd5a98;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007f3a40;
++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0020808100000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x3fff3fff3fff4000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000403f3fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007ffe7ffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ffe7ffe7ffe8000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000807e7ffe;
++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fff;
++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffd5a98;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000101ff01;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fffd;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff000000ff;
++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8091811081118110;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80a6802680208015;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8091811081110013;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80a6802680200018;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8091811081118110;
++ *((unsigned long*)& __m256i_op1[2]) = 0x80a6802680208015;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8091811081110013;
++ *((unsigned long*)& __m256i_op1[0]) = 0x80a6802680200018;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000101ff01;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_b(__m256i_op0,9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff6fffffff6;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff6fffffff6;
++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xff00000000000001;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
++ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff00000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe80000000000001;
++ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000101ff01;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001;
++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8091811081118110;
++ *((unsigned long*)& __m256i_op1[2]) = 0x80a6802680208015;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8091811081110013;
++ *((unsigned long*)& __m256i_op1[0]) = 0x80a6802680200018;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffe0000feff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffeff0000007e7f;
++ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7ffe7ffe7ffe7ffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007ffe7ffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ffe7ffe7ffe8000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000807e7ffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007ffe7ffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ffe7ffe7ffe8000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000807e7ffe;
++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008013;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000080f3;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fb;
++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0707feb60707b7d0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x45baa7ef6a95a985;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7ffe7ffd7ffe7fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ffe7ffd7ffe8001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0707feb70707b7d1;
++ *((unsigned long*)& __m256i_result[2]) = 0x65baa7efea95a985;
++ *((unsigned long*)& __m256i_result[1]) = 0x0707feb70707b7d1;
++ *((unsigned long*)& __m256i_result[0]) = 0x65baa7ef6a95a987;
++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff000000ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000000;
++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f3a40;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff82bb9784;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffc6bb97ac;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff82bb9784;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc6bb97ac;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000004000000;
++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff82bb9784;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc6bb97ac;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x7fffffff82bb9784;
++ *((unsigned long*)& __m128i_op2[0]) = 0x7fffffffc6bb97ac;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff82bb9784;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc6bb97ac;
++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xfe800000;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((int*)& __m128_op1[3]) = 0x7fffffff;
++ *((int*)& __m128_op1[2]) = 0x82bb9784;
++ *((int*)& __m128_op1[1]) = 0x7fffffff;
++ *((int*)& __m128_op1[0]) = 0xc6bb97ac;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe80000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe80000000000001;
++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000027f000000fe;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000018000000000;
++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_d(__m128i_op0,5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f3a40;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f3a40;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000d24;
++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f3a40;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x42);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00fe000000000000;
++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x7f7e7f7e7f7e7f7e;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f7e7f7e;
++ *((unsigned long*)& __m256i_result[1]) = 0x7f7e7f7e7f7e0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007e7f7e;
++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x027e0000000000ff;
++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00fdffffffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe80ffffffffff02;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe80;
++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x30);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x027e0000000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe80ffffffffff02;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000007fffffff;
++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000d24;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000d24;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe80ff80ffff0000;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-16);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000013;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001000000fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000013;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001000000fb;
++ *((unsigned long*)& __m256i_result[3]) = 0x8080808180808093;
++ *((unsigned long*)& __m256i_result[2]) = 0x80808081808080fe;
++ *((unsigned long*)& __m256i_result[1]) = 0x8080808180808093;
++ *((unsigned long*)& __m256i_result[0]) = 0x80808081808080fb;
++ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000d24;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8080808180808093;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80808081808080fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8080808180808093;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80808081808080fb;
++ *((unsigned long*)& __m256i_result[3]) = 0xf5f5f5f5f5f5f5f5;
++ *((unsigned long*)& __m256i_result[2]) = 0xf5f5f5f5f5f5f5fe;
++ *((unsigned long*)& __m256i_result[1]) = 0xf5f5f5f5f5f5f5f5;
++ *((unsigned long*)& __m256i_result[0]) = 0xf5f5f5f5f5f5f5fb;
++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009;
++ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
++ __m128i_out = __lsx_vslli_h(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0505050505050505;
++ *((unsigned long*)& __m128i_result[0]) = 0x0505050504040404;
++ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00010013000100fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00010013000100fb;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_b(__m256i_op0,-11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400;
++ *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400;
++ *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400;
++ *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400;
++ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f3f018000000000;
++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf5f5f5f5f5f5f5f5;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf5f5f5f5f5f5f5f5;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000004000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m256i_result[2]) = 0xff04ff00ff00ff00;
++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m256i_result[0]) = 0xff04ff00ff00ff00;
++ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ long_int_result = 0x0000000000000000;
++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1);
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x7f3f0180;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000800000098;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000000040000ffca;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000800000098;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000000040000ff79;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x04000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x04000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000098;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000040000ffca;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800000098;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000040000ff79;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff04ff00ff00ff00;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff04ff00ff00ff00;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000008000000a;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000008000000a;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x44);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f;
++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7f007f007f007f00;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f;
++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1])
= 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xf9f5f9f5f9f5f9f5; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xf9f5f9f5f9f5f9f5; ++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x8000000a; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x8000000a; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000010000003f; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000010000003f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ 
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff04ff00ff00ff00;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff04ff00ff00ff00;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe;
++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f;
++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x7fff7fff;
++ *((int*)& __m128_op0[2]) = 0x7fff7fff;
++ *((int*)& __m128_op0[1]) = 0x00000001;
++ *((int*)& __m128_op0[0]) = 0x0000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000010000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f;
++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f;
++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7f007f007f007f00;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0003003f;
++ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f;
++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xfffffffe;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x6a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000002;
++ *((int*)& __m256_op0[6]) = 0x00000002;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000002;
++ *((int*)& __m256_op0[2]) = 0x00000002;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020;
++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000003;
++ *((int*)& __m128_op0[0]) = 0x0000003f;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000003;
++ *((int*)& __m128_op1[0]) = 0x0000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe00000004;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff01010105;
++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x80000000;
++ *((int*)& __m128_result[2]) = 0x80000000;
++ *((int*)& __m128_result[1]) = 0x80000000;
++ *((int*)& __m128_result[0]) = 0x80000000;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0000fffe;
++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff00;
++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009;
++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe0000fffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff00ffffff00;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0000fffe;
++ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242070db;
++ *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa478;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x001800390049ffaa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0029ff96005cff88;
++ *((unsigned long*)& __m128i_result[1]) = 0x001800390049ffaa;
++ *((unsigned long*)& __m128i_result[0]) = 0x0029ff96005cff88;
++ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000009;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x001800390049ffaa;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0029ff96005cff88;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff88;
++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080;
++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001800000039;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000049ffffffaa;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000060000000e;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000127fffffea;
++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x22);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x001800390049ffaa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0029ff96005cff88;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00060012000e002b;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000049ffffffaa;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000e002b;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffaa;
++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff88;
++ *((unsigned long*)& __m128i_result[1]) = 0xe5e5e5e5e5e5e5e5;
++ *((unsigned long*)& __m128i_result[0]) = 0xe5e5e5e5e4e4e46d;
++ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x1b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
++ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00060012000e002b;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000049ffffffaa;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000127fffffea;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000060000000e;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000001201fe01e9;
++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000060000000e;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001201fe01e9;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001201fe01e9;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000c0000001c;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000002403fc03d2;
++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000127fffffea;
++ *((unsigned long*)& __m128i_result[1]) = 0x7f0101070101010f;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000127f010116;
++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xe5e5e5e5e5e5e5e5;
++ *((unsigned long*)& __m128i_op0[0]) = 0xe5e5e5e5e4e4e46d;
++ *((unsigned long*)& __m128i_result[1]) = 0xe5e5e5e5e5e5e5e5;
++ *((unsigned long*)& __m128i_result[0]) = 0xe5e5e5e5e4e4e46d;
++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff8fff8fff8fff8;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff8fff8fff8fff8;
++ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf2f2e5e5e5e5e5e5;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xf2f2e5e5e5e5e5dc;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7;
++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xf2f2e5e5;
++ *((int*)& __m128_op0[2]) = 0xe5e5e5e5;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xe5e5e5e5;
++ *((int*)& __m128_op1[2]) = 0xe5e5e5e5;
++ *((int*)& __m128_op1[1]) = 0xe5e5e5e5;
++ *((int*)& __m128_op1[0]) = 0xe4e4e46d;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff8fff8fff8fff8;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff8fff8fff8fff8;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf2f2e5e5e5e5e5dc;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfff8fff8fff8fff8;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfff8fff8fff8fff8;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++
*((unsigned long*)& __m256i_result[2]) = 0xff80ff80ff80ff80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff80ff80ff80ff80; ++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f0101070101010f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000127f010116; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffffff; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000ffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x120e120dedf1edf2; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x120e120dedf1edf2; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x8001800180018001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x8001800180018001; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff80000001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x120e120dedf1edf2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x120e120dedf1edf2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000120e120d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000120e120d; ++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000000; ++ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x29); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000120e120d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000120e120d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x67); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000200; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000200; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000200; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000200; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x89); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_long_int_result = 0xffffffffffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007e007e007e007e; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3fffffffc0000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vexth_w_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000907; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x3fffffffc0000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000200; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000200; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000200; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000200; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000009; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000009; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000009; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; ++ int_result = 0xffffffffffffffff; ++ int_out = __lsx_vpickve2gr_b(__m128i_op0,0xc); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op2[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4000400040004002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ long_int_result = 0xffffffffffffffff; ++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; 
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[3]) = 
0x000000000045f3fb; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000045f3fb; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000008080809; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000008080809; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000008080809; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000008080809; ++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x5353535353535353; ++ *((unsigned long*)& __m256i_result[2]) = 0x5353535353535353; ++ *((unsigned long*)& __m256i_result[1]) = 0x5353535353535353; ++ *((unsigned long*)& __m256i_result[0]) = 0x5353535353535353; ++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x53); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000045f3fb; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000045f3fb; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffba0c05; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffba0c05; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128d_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256d_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256d_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256d_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffba0c05; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffba0c05; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000483800; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffba0c05; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000483800; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffba0c05; ++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x37); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long*)& __m128i_result[0]) = 0xfe01fe01fe01fe01; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfe01fe01fe01fe01; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfe01fe01fe01fe01; ++ *((unsigned long*)& __m128i_op2[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& 
__m128i_op2[0]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_result[1]) = 0xf10cf508f904fd01;
++ *((unsigned long*)& __m128i_result[0]) = 0xf10cf508f904fd01;
++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffba0c05;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffba0c05;
++ *((unsigned long*)& __m256i_op1[3]) = 0x5353535353535353;
++ *((unsigned long*)& __m256i_op1[2]) = 0x5353535353535353;
++ *((unsigned long*)& __m256i_op1[1]) = 0x5353535353535353;
++ *((unsigned long*)& __m256i_op1[0]) = 0x5353535353535353;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0303030303020000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0303030303020000;
++ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x1d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff00ff00ff00ff00;
++ *((unsigned long*)& __m128i_result[1]) = 0xff807f807f807f80;
++ *((unsigned long*)& __m128i_result[0]) = 0xff807f807f807f80;
++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff807f807f807f80;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff807f807f807f80;
++ *((unsigned long*)& __m128i_result[1]) = 0xfb807b807b807b80;
++ *((unsigned long*)& __m128i_result[0]) = 0xfb807b807b807b80;
++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf10cf508f904fd01;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf10cf508f904fd01;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf10cf508f904fd01;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf10cf508f904fd01;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02;
++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02;
++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[2]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[0]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op2[3]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op2[2]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op2[1]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op2[0]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0303030303020000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0303030303020000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007f017f01;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007f017f01;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007f017f01;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007f017f01;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f017f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f017f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[2]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[0]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[2]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[0]) = 0x03f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[3]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[2]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[1]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[0]) = 0x07efefefefefefee;
++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000045f3fb;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000045f3fb;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000004500f300fb;
++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_d(__m128i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff9;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff9;
++ __m128i_out = __lsx_vmini_d(__m128i_op0,-7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_result = 0x0000000000000000;
++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001fbfbfc;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001fbfbfc;
++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x62);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010000000100000;
++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_result[1]) = 0x5959595959595959;
++ *((unsigned long*)& __m128i_result[0]) = 0x5959595959595959;
++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x59);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02;
++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffb80000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffb80000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xfffff208fffffa02;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02;
++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0xffb80000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0xffb80000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x07efefefefefefee;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x07efefefefefefee;
++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf8f8e018f8f8e810;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf8f8f008f8f8f800;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000045000d0005;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000045000d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf8f8e018f8f8e810;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf8f8f008f8f8f800;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000e0180000e810;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000f0080000f800;
++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[3]) = 0x1010000010100000;
++ *((unsigned long*)& __m256i_result[2]) = 0x1010000010100000;
++ *((unsigned long*)& __m256i_result[1]) = 0x1010000010100000;
++ *((unsigned long*)& __m256i_result[0]) = 0x1010000010100000;
++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000004800000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000004800000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000004800000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000004800000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000004500f300fb;
++ *((unsigned long*)& __m256i_result[3]) = 0x7b7b7b7b80000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xcacacb1011040500;
++ *((unsigned long*)& __m256i_result[1]) = 0x7b7b7b7b80000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xcacacb1011040500;
++ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000483800;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000483800;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x41cc5bb8a95fd1eb;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x41cc5bb8a95fd1eb;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x41cc5bb8a95fd1eb;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x41cc5bb8a95fd1eb;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7b7b7b7b80000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xcacacb1011040500;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7b7b7b7b80000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xcacacb1011040500;
++ *((unsigned long*)& __m256i_result[3]) = 0x49cc5bb8a95fd1eb;
++ *((unsigned long*)& __m256i_result[2]) = 0x7ff4080102102001;
++ *((unsigned long*)& __m256i_result[1]) = 0x49cc5bb8a95fd1eb;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ff4080102102001;
++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000e0180000e810;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000f0080000f800;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000e0180000e810;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000f0080000f800;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010146;
++ *((unsigned long*)& __m256i_result[2]) = 0x01010101010e0106;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010146;
++ *((unsigned long*)& __m256i_result[0]) = 0x01010101010e0106;
++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000e0180000e810;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f0080000f800;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000e0180000e810;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000f0080000f800;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000f0f800;
++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010000000100000;
++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000000;
++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfrint_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00100000;
++ *((int*)& __m256_op0[6]) = 0x00100000;
++ *((int*)& __m256_op0[5]) = 0x00100000;
++ *((int*)& __m256_op0[4]) = 0x00100000;
++ *((int*)& __m256_op0[3]) = 0x00100000;
++ *((int*)& __m256_op0[2]) = 0x00100000;
++ *((int*)& __m256_op0[1]) = 0x00100000;
++ *((int*)& __m256_op0[0]) = 0x00100000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
++ __m128i_out = __lsx_vclz_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00080000002c0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0008000000080000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00080000002c0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0008000000080000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00080000002c0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00080000002c0000;
++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslli_w(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0005;
++ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010;
++ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x50);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1211100f11100f0e;
++ *((unsigned long*)& __m128i_op0[0]) = 0x100f0e0d0f0e0d0c;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20;
++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000483800;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000583800;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000583800;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100000;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0f000d200e000c20;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x11000f200f000d20;
++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20;
++ *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20;
++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x11000f20;
++ *((int*)& __m128_op0[2]) = 0x10000e20;
++ *((int*)& __m128_op0[1]) = 0x0f000d20;
++ *((int*)& __m128_op0[0]) = 0x0e000c20;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000d000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000d000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000583800;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000583800;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0000;
++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_b(__m128i_op0,-6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000013b13380;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000013b13380;
++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20;
++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x11000f20;
++ *((int*)& __m128_op0[2]) = 0x10000e20;
++ *((int*)& __m128_op0[1]) = 0x0f000d20;
++ *((int*)& __m128_op0[0]) = 0x0e000c20;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x11000f2000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f000d2000000000;
++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xe3e3e3e3e3e3e3e3;
++ *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3;
++ __m128i_out = __lsx_vxori_b(__m128i_op0,0xe3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xff00ffffff00ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xff00ffffff00ffff;
++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xff00ffffff00ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xff00ffffff00ffff;
++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0000;
++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_result[2]) = 0x4000404040004040;
++ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_result[0]) = 0x4000404040004040;
++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x40);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) =
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0008000000080000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0008000000080000; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((int*)& __m128_result[3]) = 0x4f800000; ++ *((int*)& __m128_result[2]) = 0x4f800000; ++ *((int*)& __m128_result[1]) = 0x4f800000; ++ *((int*)& __m128_result[0]) = 0x4f800000; ++ __m128_out = __lsx_vffint_s_wu(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe01fe01f; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe01fe01f; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe01fe01f; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe01fe01f; ++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xff00ffff; ++ *((int*)& __m256_op0[6]) = 0xff00ffff; ++ *((int*)& __m256_op0[5]) = 0xff00ffff; ++ *((int*)& __m256_op0[4]) = 0xff00ffff; ++ *((int*)& __m256_op0[3]) = 0xff00ffff; ++ *((int*)& __m256_op0[2]) = 0xff00ffff; ++ *((int*)& __m256_op0[1]) = 0xff00ffff; ++ *((int*)& __m256_op0[0]) = 0xff00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fe01020b0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fe01020b0001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f8000004f800000; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x64); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long*)& __m128d_result[1]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128d_result[0]) = 0x43d3e0000013e000; ++ __m128d_out = __lsx_vffint_d_l(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xff00d5007f00ffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xff00d5007f00ffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x7f00d5007f00ffff; ++ *((unsigned long*)& __m256d_result[2]) = 0x7f00ffffff00ffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x7f00d5007f00ffff; ++ *((unsigned long*)& __m256d_result[0]) = 0x7f00ffffff00ffff; ++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffb080ffffb080; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffb080ffffb080; ++ *((unsigned long*)& __m128i_op2[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long*)& __m128i_op2[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3504b5fd2dee1f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x4676f70fc0000000; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fe01020b0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fe01020b0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0fff0fff00000020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0fff0fff00000020; ++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0fff0fff00000020; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0fff0fff00000020; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00d5007f00ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00d5007f00ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_result[1]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_result[0]) = 0x43d3e0000013e000; ++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff3fffffff3; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff3fffffff3; ++ __m128i_out = __lsx_vmini_w(__m128i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x004f1fcfd01f9f9f; ++ *((unsigned long*)& __m128i_result[0]) = 0x9f4fcfcfcf800000; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xda); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc14eef7fc14ea000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000ea000010fa101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x43d3e0000013e000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffd3000000130000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffd3000000130000; ++ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffff3fffffff3; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff3fffffff3; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff3fffffff4; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff3fffffff4; ++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffd3000000130000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffd3000000130000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffd3000000130000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffd3000000130000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffd3000000130000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffd3000000130000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x004f1fcfd01f9f9f; ++ *((unsigned long*)& __m128d_op0[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000004f804f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000004f804f80; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffd30000; ++ *((int*)& __m128_op0[2]) = 0x00130000; ++ *((int*)& __m128_op0[1]) = 0xffd30000; ++ *((int*)& __m128_op0[0]) = 0x00130000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00d5007f00ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00d5007f00ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000055ff01f90ab5; ++ *((unsigned long*)& __m256i_op0[2]) = 0xaa95eafffec6e01f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000055ff01f90ab5; ++ *((unsigned long*)& __m256i_op0[0]) = 0xaa95eafffec6e01f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfec6e01ffec6e01f; ++ *((unsigned long*)& __m256i_result[2]) = 0xfec6e01ffec6e01f; ++ *((unsigned long*)& __m256i_result[1]) = 0xfec6e01ffec6e01f; ++ *((unsigned long*)& __m256i_result[0]) = 0xfec6e01ffec6e01f; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000003f00390035; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8015003f0006001f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000003f00390035; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8015003f0006001f; ++ *((unsigned long*)& __m256i_result[3]) = 0x000b004a00440040; ++ *((unsigned long*)& __m256i_result[2]) = 0x8020004a0011002a; ++ *((unsigned long*)& __m256i_result[1]) = 0x000b004a00440040; ++ *((unsigned long*)& __m256i_result[0]) = 0x8020004a0011002a; ++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x80000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x80000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x80000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x0000003f; ++ *((int*)& __m256_op0[6]) = 0x00390035; ++ *((int*)& __m256_op0[5]) = 0x8015003f; ++ *((int*)& __m256_op0[4]) = 0x0006001f; ++ *((int*)& __m256_op0[3]) = 0x0000003f; ++ *((int*)& __m256_op0[2]) = 0x00390035; ++ *((int*)& __m256_op0[1]) = 0x8015003f; ++ *((int*)& __m256_op0[0]) = 0x0006001f; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000b004a00440040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8020004a0011002a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000b004a00440040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8020004a0011002a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000004a00000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000004a0000002a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000004a00000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000004a0000002a; ++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000b004a00440040; ++ *((unsigned long*)& __m256d_op0[2]) = 0x8020004a0011002a; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000b004a00440040; ++ *((unsigned long*)& __m256d_op0[0]) = 0x8020004a0011002a; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0fff0fff00000020; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0fff0fff00000020; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x000055ff; ++ *((int*)& __m256_op0[6]) = 0x01f90ab5; ++ *((int*)& __m256_op0[5]) = 0xaa95eaff; ++ *((int*)& __m256_op0[4]) = 0xfec6e01f; ++ *((int*)& __m256_op0[3]) = 0x000055ff; ++ *((int*)& __m256_op0[2]) = 0x01f90ab5; ++ *((int*)& __m256_op0[1]) = 0xaa95eaff; ++ *((int*)& __m256_op0[0]) = 0xfec6e01f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 
0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000003f00390035; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8015003f0006001f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000003f00390035; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8015003f0006001f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x80000000001529c1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x80007073cadc3779; ++ *((unsigned long*)& __m256i_op1[1]) = 0x80000000001529c1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x80007073cadc3779; ++ *((unsigned long*)& __m256i_result[3]) = 0x00008000003f0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00390015003529c1; ++ *((unsigned long*)& __m256i_result[1]) = 0x00008000003f0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00390015003529c1; ++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x004f0080004f0080; ++ *((unsigned long*)& __m128i_result[0]) = 0x004f0080004f0080; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x80000000001529c1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x80007073cadc3779; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80000000001529c1; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x80007073cadc3779; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d(__m256i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x9f009f009f009f00; ++ *((unsigned long*)& __m128i_result[0]) = 0x9f009f009f009f00; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff8001ffff8001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x0000ffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x0000ffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000fffffffefffe; ++ *((unsigned long*)& __m256i_result[1]) = 0xff7fffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fffffffefffe; ++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x004f0080; ++ *((int*)& __m128_op0[2]) = 0x004f0080; ++ *((int*)& __m128_op0[1]) = 0x004f0080; ++ *((int*)& __m128_op0[0]) = 0x004f0080; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x7fff7fff; ++ *((int*)& __m128_op2[2]) = 0x7fff7fff; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7fff7fff; ++ *((int*)& __m128_result[2]) = 0x7fff7fff; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff8001ffff8001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x4b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000004a00000040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000004a0000002a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000004a00000040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000004a0000002a; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fffffffefffe; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff7fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fffffffefffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000002500000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x00008024ffff8014; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc0002500000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x00008024ffff8014; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000000; ++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff00000000; ++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_op1[2]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_op1[0]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_result[2]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_result[0]) = 0x556caad9aabbaa88; ++ __m256i_out = 
__lasx_xvmax_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_op1[2]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_op1[0]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_result[2]) = 0x556caad9aabbaa88; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000004a557baac4; ++ *((unsigned long*)& __m256i_result[0]) = 0x556caad9aabbaa88; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000010100000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 
0x0000010100000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00008000003f0000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00390015003529c1; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00008000003f0000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00390015003529c1; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x32); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[2]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[1]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[0]) = 0x0909090909090909; ++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& 
__m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010003; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x0000004a; ++ *((int*)& __m256_op0[6]) = 0x557baac4; ++ *((int*)& __m256_op0[5]) = 0x556caad9; ++ *((int*)& __m256_op0[4]) = 0xaabbaa88; ++ *((int*)& __m256_op0[3]) = 0x0000004a; ++ *((int*)& __m256_op0[2]) = 0x557baac4; ++ *((int*)& __m256_op0[1]) = 0x556caad9; ++ *((int*)& __m256_op0[0]) = 0xaabbaa88; ++ *((int*)& __m256_op1[7]) = 0x09090909; ++ *((int*)& __m256_op1[6]) = 0x09090909; ++ *((int*)& __m256_op1[5]) = 0x09090909; ++ *((int*)& __m256_op1[4]) = 0x09090909; ++ *((int*)& __m256_op1[3]) = 0x09090909; ++ *((int*)& __m256_op1[2]) = 0x09090909; ++ *((int*)& __m256_op1[1]) = 0x09090909; ++ *((int*)& __m256_op1[0]) = 0x09090909; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& 
__m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op2[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x8009700478185812; ++ *((unsigned long*)& __m128i_result[0]) = 0xe009f00ee7fb0800; ++ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ff00ff00; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ffffff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ff00ff00; ++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00007ffe00007ffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0080000200000003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010002; ++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00009f0000009f00; ++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& 
__m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x4f804f80; ++ *((int*)& __m128_op0[0]) = 0x4f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00007fff; ++ *((int*)& __m128_op1[2]) = 0x00007fff; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00007fff; ++ *((int*)& __m128_result[2]) = 0x00007fff; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128d_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010002; ++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000200000003; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0080000200000003; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x75b043c4d17db125; ++ *((unsigned long*)& __m128i_op0[0]) = 0xeef8227b596117b1; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x75b043c4d17db125; ++ *((unsigned long*)& __m128i_result[0]) = 0xeef8227b4f8017b1; ++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffefffef; ++ __m128i_out = __lsx_vmini_w(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x75b043c4d17db125; ++ *((unsigned long*)& __m128i_op1[0]) = 0xeef8227b4f8017b1; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000de32400; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x77); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00000000; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); 
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000070700000707; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000009091b1b1212; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000070700000707; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000009091b1b1212; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000fffefffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_result[0]) = 0x028c026bfff027af; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x028c026bfff027af; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000003fc03fc00; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffc00a3009b000; ++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x55555555; ++ *((int*)& __m256_op0[5]) = 0x00000001; ++ *((int*)& __m256_op0[4]) = 0x00000004; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x55555555; ++ *((int*)& __m256_op0[1]) = 0x00000001; ++ *((int*)& __m256_op0[0]) = 0x00000004; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00007fff00000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0040000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff00000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000055555555; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000004; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000055555555; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000004; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x2aaaaaaa2aaaaaab; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x2aaaaaaa2aaaaaab; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_result[2]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_result[1]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_result[0]) = 0x1111111111111111; ++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0888888888888888; ++ *((unsigned long*)& __m256i_result[2]) = 0x0888888888888888; ++ *((unsigned long*)& __m256i_result[1]) = 0x0888888888888888; ++ *((unsigned long*)& __m256i_result[0]) = 0x0888888888888888; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x75b043c4d17db125; ++ *((unsigned long*)& __m128i_op0[0]) = 0xeef8227b4f8017b1; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_result[1]) = 0x75b043c4007db125; ++ *((unsigned long*)& __m128i_result[0]) = 0xeef8227b4f8017b1; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x004f0080004f0080; ++ *((unsigned long*)& __m128i_result[0]) = 0x004f0080004f0080; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001000fbff9; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002ff9afef; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000004f804f81; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000004f804f80; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001020202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001020202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-6); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1111111111111111; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1111111111111111; ++ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_result[2]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_result[1]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_result[0]) = 0xdededededededede; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x21); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000004f804f81; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000004f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001400000014; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63996399; ++ *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363797c63996399; ++ *((unsigned long*)& __m128i_op1[0]) = 0x171f0a1f6376441f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000001; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x4f804f81; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x4f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000004f804f81; ++ *((unsigned long*)& __m128i_op0[0]) 
= 0x000000004f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000004fc04f81; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000004fc04f80; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_op1[2]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_op1[1]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_op1[0]) = 0xdededededededede; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8888888808888888; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0888888888888888; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8888888808888888; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0888888888888888; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x77777777f7777777; ++ *((unsigned long*)& __m256i_result[2]) = 0xf777777777777777; ++ *((unsigned long*)& __m256i_result[1]) = 0x77777777f7777777; ++ *((unsigned long*)& __m256i_result[0]) = 0xf777777777777777; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xdededede; ++ *((int*)& __m256_op0[6]) = 0xdededede; ++ *((int*)& __m256_op0[5]) = 0xdededede; ++ *((int*)& __m256_op0[4]) = 0xdededede; ++ *((int*)& __m256_op0[3]) = 0xdededede; ++ *((int*)& __m256_op0[2]) = 0xdededede; ++ *((int*)& __m256_op0[1]) = 0xdededede; ++ *((int*)& __m256_op0[0]) = 0xdededede; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63996399; ++ *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x6363797c63990099; ++ *((unsigned long*)& __m128i_result[0]) = 0x171f0a1f6376441f; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x94); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63990099; ++ *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363797c63990099; ++ *((unsigned long*)& __m128i_op1[0]) = 0x171f0a1f6376441f; ++ *((unsigned long*)& __m128i_result[1]) = 0x181e180005021811; ++ *((unsigned long*)& __m128i_result[0]) = 0x181e180005021811; ++ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0xa); 
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x77777777; ++ *((int*)& __m256_op0[6]) = 0xf7777777; ++ *((int*)& __m256_op0[5]) = 0xf7777777; ++ *((int*)& __m256_op0[4]) = 0x77777777; ++ *((int*)& __m256_op0[3]) = 0x77777777; ++ *((int*)& __m256_op0[2]) = 0xf7777777; ++ *((int*)& __m256_op0[1]) = 0xf7777777; ++ *((int*)& __m256_op0[0]) = 0x77777777; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x80000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x80000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x77777777f7777777; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf777777777777777; ++ *((unsigned long*)& __m256i_op0[1]) = 0x77777777f7777777; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf777777777777777; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000000004fc04f81; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000004fc04f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000004fc04f81; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000004fc04f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000004fc04f81; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000004fc04f80; ++ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_result[1]) = 0x9292929292929292; ++ *((unsigned long*)& __m128i_result[0]) = 0x8090808280909002; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x6d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ 
*((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000010;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000010;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000002b902b3e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000002b902b3e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000002a102a3a;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000002a102a3a;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x3a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_result[3]) = 0x1000100054445443;
++ *((unsigned long*)& __m256i_result[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_result[1]) = 0x1000100054445443;
++ *((unsigned long*)& __m256i_result[0]) = 0x7bbbbbbbf7777778;
++ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff;
++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000004fc04f81;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000004fc04f80;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f7f;
++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x19);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x3e035e51522f0799;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x3e035e51522f0799;
++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128d_op1[1]) = 0x000000004fc04f81;
++ *((unsigned long*)& __m128d_op1[0]) = 0x000000004fc04f80;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001c00ffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m128i_result[1]) = 0x000001000f00fe00;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000017fff00fe7f;
++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10;
++ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007bbbbbbb;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007bbbbbbb;
++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x8d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007bbbbbbb;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007bbbbbbb;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000073333333;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000073333333;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000001000f00fe00;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000017fff00fe7f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_w(__m128i_op0,9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m256i_result[2]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m256i_result[1]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m256i_result[0]) = 0xf8f8f8f8f8f8f8f8;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,-8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001c00ffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010201808040;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010280808040;
++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000073333333;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000073333333;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000073333333;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000073333333;
++ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_op2[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000001000f00fe00;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000017fff00fe7f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000f00;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff00;
++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000100;
++ *((int*)& __m128_op0[2]) = 0x0f00fe00;
++ *((int*)& __m128_op0[1]) = 0x0000017f;
++ *((int*)& __m128_op0[0]) = 0xff00fe7f;
++ *((unsigned long*)& __m128d_result[1]) = 0x3727f00000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xc7e01fcfe0000000;
++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf7fdd5ffebe1c9e3;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf7fdd5ffebe1c9e3;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000002467db99;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000003e143852;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000002467db99;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000003e143852;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffdb982466;
++ *((unsigned long*)& __m256i_result[2]) = 0xf7fdd5ffadcd9191;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffdb982466;
++ *((unsigned long*)& __m256i_result[0]) = 0xf7fdd5ffadcd9191;
++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ffa7f8ff81;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000003f0080ffc0;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000007fff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000a7f87fffff81;
++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_result[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461;
++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00003ff000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000002467db99;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003e143852;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000002467db99;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003e143852;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000246700003e14;
++ *((unsigned long*)& __m256i_result[2]) = 0x000044447bbbf777;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000246700003e14;
++ *((unsigned long*)& __m256i_result[0]) = 0x000044447bbbf777;
++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000073333333;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000073333333;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x56);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000007fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461;
++ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ffa7f8ff81;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000003f0080ffc0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000007fff00ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000a7f87fffff81;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffd400000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000004000000040;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000004444;
++ *((unsigned long*)& __m256i_result[2]) = 0x00007bbb0000f777;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000004444;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007bbb0000f777;
++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004444;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00007bbb0000f777;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004444;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00007bbb0000f777;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003dde00007bbc;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003dde00007bbc;
++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000f00;
++ *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461;
++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3727f00000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc7e01fcfe0000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3727112c00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x39201f7120000040;
++ *((unsigned long*)& __m128i_op2[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xe5b9012c00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc7e01fcfe0000000;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022222221;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3dddddddfbbb3bbc;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022222221;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3dddddddfbbb3bbc;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00007fff;
++ *((int*)& __m128_op0[2]) = 0x00007fff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x2bfd9461;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x2bfd9461;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000f00;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x1ff800000000477f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000015fec9b0;
++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00070000;
++ *((int*)& __m128_op0[2]) = 0x00040000;
++ *((int*)& __m128_op0[1]) = 0x00030000;
++ *((int*)& __m128_op0[0]) = 0x00010000;
++ *((int*)& __m128_op1[3]) = 0x00070000;
++ *((int*)& __m128_op1[2]) = 0x00040000;
++ *((int*)& __m128_op1[1]) = 0x00030000;
++ *((int*)& __m128_op1[0]) = 0x00010000;
++ *((int*)& __m128_result[3]) = 0x3f800000;
++ *((int*)& __m128_result[2]) = 0x3f800000;
++ *((int*)& __m128_result[1]) = 0x3f800000;
++ *((int*)& __m128_result[0]) = 0x3f800000;
++ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000400400004004;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000015ff4a31;
++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000008;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000f00;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000;
++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004444;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00007bbb0000f777;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004444;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00007bbb0000f777;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003ddd80007bbb;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003ddd80007bbb;
++ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800001;
++ *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800001;
++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0007000000040000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000;
++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461;
++ *((unsigned long*)& __m128i_op2[1]) = 0x3f8000003f800001;
++ *((unsigned long*)& __m128i_op2[0]) = 0x3f8000003f800001;
++ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800000;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000780000007800;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0007000000040000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003ddd80007bbb;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003ddd80007bbb;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001700170017;
++ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00003ddd80007bbb;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000002222;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00003ddd80007bbb;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x31);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017;
++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0002000000010000;
++ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000017;
++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000000010001;
++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00030000;
++ *((int*)& __m128_op0[2]) = 0x00010000;
++ *((int*)& __m128_op0[1]) = 0x00020000;
++ *((int*)& __m128_op0[0]) = 0x00010000;
++ *((int*)& __m128_op1[3]) = 0x3f800000;
++ *((int*)& __m128_op1[2]) = 0x3f800000;
++ *((int*)& __m128_op1[1]) = 0x3f800000;
++ *((int*)& __m128_op1[0]) = 0x3f800000;
++ *((int*)& __m128_op2[3]) = 0x00030000;
++ *((int*)& __m128_op2[2]) = 0x00010000;
++ *((int*)& __m128_op2[1]) = 0x00020000;
++ *((int*)& __m128_op2[0]) = 0x00010000;
++ *((int*)& __m128_result[3]) = 0x80060000;
++ *((int*)& __m128_result[2]) = 0x80020000;
++ *((int*)& __m128_result[1]) = 0x80040000;
++ *((int*)& __m128_result[0]) = 0x80020000;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017;
++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017;
++ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x000000ff;
++ *((int*)& __m128_op0[2]) = 0x808000ff;
++ *((int*)& __m128_op0[1]) = 0x000000ff;
++ *((int*)& __m128_op0[0]) = 0x808000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ unsigned_int_result = 0x0000000000000000;
++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3);
++ *((unsigned long*)& __m128i_op0[1]) = 0x8006000080020000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8004000080020000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff8fffffff8;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff8fffffff8;
++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001700170017;
++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8006000080020000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8004000080020000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8006000080020000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8004000080020000;
++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00003f8000000000;
++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
++ unsigned_long_int_result = 0x3f8000003f800000;
++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ long_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1fc000001fc00000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1fc000001fc00000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000;
++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x8000ffff00000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x8000ffff00000000;
++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x28);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
++ __m128i_out = __lsx_vclz_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000ffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000ffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000fefe00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fefe00000000;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000ffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000080003f80ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x28);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
++ __m128d_out = __lsx_vfrecip_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000d;
++ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0010000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x3ff0010000000000;
++ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00003f8000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x003f800000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x003f800000000000;
++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xd2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000080003f80ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_op2[1]) = 0x3ff0010000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x3ff0010000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000080003f80ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00008000;
++ *((int*)& __m128_op1[2]) = 0x3f80ffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x003f800000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x003f800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xff0000ffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xff0000ffffffffff;
++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff;
++ __m256i_out = 
__lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f800000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003f800000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000080003f80ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000001fc00000000; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0010000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fffff0000000000; ++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000001fc00000000; ++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100007f01; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00020000; ++ *((int*)& __m128_op0[2]) = 0x00020000; ++ *((int*)& __m128_op0[1]) = 0x000001fc; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100007f01; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000001; ++ *((int*)& __m128_op0[2]) = 0x00007f01; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000000020000; ++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010100000000; ++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800000; ++ __m128i_out = __lsx_vreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x3c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x000000fe; ++ *((int*)& __m128_op0[2]) = 0x808000ff; ++ *((int*)& __m128_op0[1]) = 0x000000fe; ++ *((int*)& __m128_op0[0]) = 0x808000fe; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ 
*((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x80000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x80000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x80000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x0000ffff; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x0000ffff; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000001; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000001; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000001; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000001; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x80000001; ++ *((int*)& __m256_result[5]) = 0x80000000; ++ *((int*)& __m256_result[4]) = 0x80000001; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000001; ++ *((int*)& __m256_result[1]) = 0x80000000; ++ *((int*)& __m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ffff000000ff00; ++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f7fff003f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f7fff003f800000; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3f80000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3f80000000000000; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3f80000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3f80000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x1fc0000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1fc07f8000007f80; ++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0f0000000f000000; ++ __m128i_out = 
__lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffc1000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffcc000b000b000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000b000b010a000b; ++ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvclo_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffcc000b000b000b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000b000b010a000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f7f000b000b000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000b000b010a000b; ++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fffff0000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fffff0000000000; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x3ffffeffffffffe5; ++ *((unsigned long*)& __m128i_result[0]) = 0x3ffffeffffffffe5; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffc1000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffc1000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff000000007fff; ++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03c0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03c0038000000380; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ffff000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ffff000000ff00; ++ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000001; ++ __m256i_out = 
__lasx_xvreplve0_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03c0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03c0038000000380; ++ *((unsigned long*)& __m128i_result[1]) = 0x000003c000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000010a000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00ffff0000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ffff000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000010a000b; ++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f000b000b000b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000b000b010a000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101080408040804; ++ *((unsigned long*)& __m128i_result[0]) = 0x0804080407040804; ++ __m128i_out = __lsx_vclz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = 
__lasx_xvsrlr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[2]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[1]) = 0x0909090909090909; ++ *((unsigned long*)& __m256i_result[0]) = 0x0909090909090909; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x66); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff000000007fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0101080408040804; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0804080407040804; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000010a000b; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0101080408040804; ++ *((unsigned long*)& __m128i_result[0]) = 0x000100810080e081; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000010a000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003fc0; ++ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x22); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000800000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x0000ffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0000ffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x0000ffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0000ffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe50000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffe020; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fc00000010a000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x00001b0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x4d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0101080408040804; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0804080407040804; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0101080408040804; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0804080407040804; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0101080408040804; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0804080407040804; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[3]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_result[2]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_result[1]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_result[0]) = 0x00c200c200c200bb; ++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100089bde; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x80044def00000001; ++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x80044def00000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x00007f8449a19084; ++ *((unsigned long*)& __m128i_result[0]) = 0x49a210000000ff00; ++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x003100310031002f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x003100310031002f; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_result[3]) = 0x007fffff007fffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x007fffff007fffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x007fffff007fffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x007fffff007fffff; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_op1[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op1[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00070000; ++ *((int*)& __m128_op0[2]) = 0x00050000; ++ *((int*)& __m128_op0[1]) = 0x00030000; ++ *((int*)& __m128_op0[0]) = 0x00010000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0xff81007c; ++ *((int*)& __m128_op1[1]) = 0xffb7005f; ++ *((int*)& __m128_op1[0]) = 0x0070007c; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000805; ++ *((unsigned long*)& __m128i_op0[0]) = 0x978d95ac768d8784; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000408; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long*)& __m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long*)& __m128i_result[0]) = 0xf7f7f7f7f7f7fbff; ++ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007fffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007fffff007fffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007fffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007fffff007fffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00c200c200c200c2; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00c200c200c200bb; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffbdff3cffbdff44; ++ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf7f7f7f7f7f7fbff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long*)& __m128i_result[0]) = 0xf7f7f7f7f7f7fbff; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000408; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000100; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001dc; ++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000008; ++ *((int*)& __m128_op0[2]) = 0x97957687; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000408; ++ *((int*)& __m128_op1[3]) = 0x00000008; ++ *((int*)& __m128_op1[2]) = 0x97957687; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000408; ++ *((int*)& __m128_op2[3]) = 0x00010001; ++ *((int*)& __m128_op2[2]) = 0x00010001; ++ *((int*)& __m128_op2[1]) = 0x00010001; ++ *((int*)& __m128_op2[0]) = 0x04000800; ++ *((int*)& __m128_result[3]) = 0x80010001; ++ *((int*)& __m128_result[2]) = 0x80010001; ++ *((int*)& __m128_result[1]) = 0x80010001; ++ *((int*)& __m128_result[0]) = 0x84000800; ++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8001000180010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8001000184000800; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff80007e028401; ++ *((unsigned long*)& __m128i_result[0]) = 0x9a10144000400000; ++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000007ae567a3e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000700ff00000000; ++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000040004000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010002000000000; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff81007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffb7005f0070007c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000007c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000005f0003e000; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff7effffff46; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff7effffff46; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x42); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x00000000000001dc; ++ *((unsigned long*)& 
__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x80000000000001dc; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x80000000000001dc; ++ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfe8bfe0efe8bfe12; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op1[3]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op1[2]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op1[1]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; ++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffe000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0xfffe000000000000; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x31); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffff7e; ++ *((int*)& __m256_op0[4]) = 0xffffff46; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffff7e; ++ *((int*)& __m256_op0[0]) = 0xffffff46; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001dc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff24; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff24; ++ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff81007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffb7005f0070007c; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff80007e028401; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9a10144000400000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001ffff00010; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x5b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x80010001b57fc565; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8001000184000be0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x80010001b57fc565; ++ *((unsigned long*)& __m128i_result[0]) = 0x8001000184000be0; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000700ff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000040004000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0010002000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000700ff00000000; ++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000005f0003e000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000408; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff24; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff24; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long*)& __m128i_op1[1]) = 0x80010001b57fc565; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8001000184000be0; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000080001fffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040004000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0010002000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000ed0e0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004080; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; ++ __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff00ffff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfc00fcfc00fc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcfcfcfcfc00; ++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0007e215b122; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ffeffff7bfff828; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80010001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff80010001; ++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfefefefe; ++ *((int*)& __m256_op0[6]) = 0xfefefefe; ++ *((int*)& __m256_op0[5]) = 0xfe8bfe0e; ++ *((int*)& 
__m256_op0[4]) = 0xfe8bfe12; ++ *((int*)& __m256_op0[3]) = 0xfefefefe; ++ *((int*)& __m256_op0[2]) = 0xfefefefe; ++ *((int*)& __m256_op0[1]) = 0xfe8bfe0e; ++ *((int*)& __m256_op0[0]) = 0xfe8bfe12; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x80010009816ac5de; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8001000184000bd8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0bd80bd80bd80bd8; ++ *((unsigned long*)& __m128i_result[0]) = 0x0bd80bd80bd80bd8; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000007; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ed0e0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000004080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000ed0e0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004080; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x80000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x80000000; ++ *((int*)& 
__m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x80000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x80000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x80000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x80000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x80000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_result[2]) = 0x6161616100000018; ++ *((unsigned long*)& __m256i_result[1]) = 0x6161616161616161; ++ *((unsigned long*)& __m256i_result[0]) = 0x6161616100000018; ++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7fff0007; ++ *((int*)& __m128_op0[2]) = 0xe215b122; ++ *((int*)& __m128_op0[1]) = 0x7ffeffff; ++ *((int*)& __m128_op0[0]) = 0x7bfff828; ++ *((int*)& __m128_op1[3]) = 0x80010009; ++ *((int*)& __m128_op1[2]) = 0x816ac5de; ++ *((int*)& __m128_op1[1]) = 0x80010001; ++ *((int*)& __m128_op1[0]) = 0x84000bd8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bd80bd8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80bd8; ++ unsigned_long_int_result = 0x0bd80bd80bd80bd8; ++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffa; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x59); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_h(__m256i_op0,13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x80000000b57ec564; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000083ff0be0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0014000000140014; ++ *((unsigned long*)& __m128i_result[0]) = 0x0014000000140014; ++ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x80000000b57ec564; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000083ff0be0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001b57ec563; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000183ff0bdf; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5b35342c979955da; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m128i_result[0]) = 0x5b35342c970455da; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x0); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000003397dd140; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000004bd7cdd20; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0016ffb00016ffb0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0016ffb00016ffb0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000004a294b; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000006d04bc; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x2a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000004a294b; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x00000000006d04bc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0016001600160016; ++ *((unsigned long*)& __m256i_result[2]) = 0x0016001600160016; ++ *((unsigned long*)& __m256i_result[1]) = 0x0016001600160016; ++ *((unsigned long*)& __m256i_result[0]) = 0x0016001600160016; ++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7fff0007e215b122; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7ffeffff7bfff828; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ 
*((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0bef0b880bd80bd8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000017b017b01; ++ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x5b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
[... interior of the deleted insn_correctness_check.c hunk elided: thousands of
machine-generated test cases, all of the same shape. Each case stores known 64-bit
hex patterns into the halves of __m128i/__m128/__m128d (LSX) or
__m256i/__m256/__m256d (LASX) operands, invokes a single __lsx_*/__lasx_* builtin
(__lsx_vadd_b, __lsx_vfmadd_s, __lasx_xvssrarni_bu_h, __lasx_xvavgr_d, ...), and
compares the produced vector against the expected bit pattern with ASSERTEQ_64 or
ASSERTEQ_32, keyed by __LINE__. Scalar extractions (__lsx_vpickve2gr_b/_bu) are
checked the same way through int_out/unsigned_int_out. The hunk continues past
this excerpt. ...]
= 0x00000080; ++ *((int*)& __m256_op0[2]) = 0x00000080; ++ *((int*)& __m256_op0[1]) = 0x00000080; ++ *((int*)& __m256_op0[0]) = 0x00000080; ++ *((int*)& __m256_op1[7]) = 0x00000001; ++ *((int*)& __m256_op1[6]) = 0x00000001; ++ *((int*)& __m256_op1[5]) = 0x00000001; ++ *((int*)& __m256_op1[4]) = 0x00000001; ++ *((int*)& __m256_op1[3]) = 0x00000001; ++ *((int*)& __m256_op1[2]) = 0x00000001; ++ *((int*)& __m256_op1[1]) = 0x00000001; ++ *((int*)& __m256_op1[0]) = 0x00000001; ++ *((int*)& __m256_result[7]) = 0x00000001; ++ *((int*)& __m256_result[6]) = 0x00000001; ++ *((int*)& __m256_result[5]) = 0x00000001; ++ *((int*)& __m256_result[4]) = 0x00000001; ++ *((int*)& __m256_result[3]) = 0x00000001; ++ *((int*)& __m256_result[2]) = 0x00000001; ++ *((int*)& __m256_result[1]) = 0x00000001; ++ *((int*)& __m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000016fff9d3d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000bd0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000007f0; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000916c; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000010000954d; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000f0009d3c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000016fff9d3d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000c000000060003; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000100c6ffef00d; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000016fff9d3d; ++ *((unsigned long*)& __m128i_op1[1]) = 
0xfffff000f0008d3c; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000003c3c; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff0101ffff3d3d; ++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffff00010000fff; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x800000001d64c200; ++ *((unsigned long*)& __m128d_op0[0]) = 0x800000001d881120; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000016fff9dff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d(__m256i_op0,14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff01; ++ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf0000000f0000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf0000000f0000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff07effffe; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0001000100010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100110002; ++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffff00010000fff; ++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& 
__m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000200; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000200; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000200; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000200; ++ *((int*)& __m256_op2[7]) = 0xffffffa0; ++ *((int*)& __m256_op2[6]) = 0x00000001; ++ *((int*)& __m256_op2[5]) = 0xffffffe0; ++ *((int*)& __m256_op2[4]) = 0x00000001; ++ *((int*)& __m256_op2[3]) = 0xffffffa0; ++ *((int*)& __m256_op2[2]) = 0x00000001; ++ *((int*)& __m256_op2[1]) = 0xffffffe0; ++ *((int*)& __m256_op2[0]) = 0x00000001; ++ *((int*)& __m256_result[7]) = 0xffffffa0; ++ *((int*)& __m256_result[6]) = 0x80000001; ++ *((int*)& __m256_result[5]) = 0xffffffe0; ++ *((int*)& __m256_result[4]) = 0x80000001; ++ *((int*)& __m256_result[3]) = 0xffffffa0; ++ *((int*)& __m256_result[2]) = 0x80000001; ++ *((int*)& __m256_result[1]) = 0xffffffe0; ++ *((int*)& __m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffa080000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffe080000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffa080000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffe080000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 
0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000010000f00; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000010000f01; ++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000100f8100002; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff0ff8006f0f950; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x006f0efe258ca851; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffff00; ++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00002f0a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958aefff895e; ++ *((unsigned long*)& __m128i_result[1]) = 0xfafafafafafafafa; ++ *((unsigned long*)& __m128i_result[0]) = 0xfafa958aeffa89fa; ++ __m128i_out = __lsx_vmini_b(__m128i_op0,-6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x24); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ffff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000100c6ffef10c; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffff01; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffeff400000df4; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000100c6ffef10c; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff70; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff9001a47e; ++ __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000067400002685; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000100c6ffef10c; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff01; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffeff400000df4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ff91fffffff5; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff00650001ffb0; ++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff91fffffff5; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff00650001ffb0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000067400002685; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ff91fffffff5; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff00650000ff85; ++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x24); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff01; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffeff400000df4; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff03fe; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe9df0000e81b; ++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x006f0efe258ca851; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff90ffffe0f5; ++ *((unsigned long*)& __m128i_result[0]) = 0x006e7973258d0ef4; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b(__m128i_op0,12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00060000; ++ *((int*)& __m256_op0[6]) = 0x00040000; ++ *((int*)& __m256_op0[5]) = 0x00020000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00060000; ++ *((int*)& __m256_op0[2]) = 0x00040000; ++ *((int*)& __m256_op0[1]) = 0x00020000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000c000ffffc000; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff91fffffff5; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff00650001ffb0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffff0001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffff0001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& 
__m128d_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000c000ffffc000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000c000ffffc000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000958affff995d; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000958affff995d; ++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvpcnt_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000c000ffffc000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000006f00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000c00000000000;
++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffefffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010401;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010401;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010401;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010401;
++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020;
++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fdfc0000fd03;
++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000040404040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020;
++ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000404040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000202020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000404040;
++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x68);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000404040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000202020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000404040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000404040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000202020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000404040;
++ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000040404040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404240;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000040404040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404240;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000040404040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404240;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000040404040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404240;
++ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007f7f;
++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffefffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000095896a760000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x006f0efe258ca851;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffff7fc8ffff8000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffff200000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000015516a768038;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffff9ed2e1c000;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x23);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_b(__m128i_op0,13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x40404040;
++ *((int*)& __m256_op0[6]) = 0x40404040;
++ *((int*)& __m256_op0[5]) = 0x40404040;
++ *((int*)& __m256_op0[4]) = 0x40404040;
++ *((int*)& __m256_op0[3]) = 0x40404040;
++ *((int*)& __m256_op0[2]) = 0x40404040;
++ *((int*)& __m256_op0[1]) = 0x40404040;
++ *((int*)& __m256_op0[0]) = 0x40404040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d;
++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000015516a768038;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff9ed2e1c000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000c00000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000bfffffffe0f6;
++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000bfffffffe0f6;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff7a53;
++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f00007f7f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000007f0000007f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000007f0000007f;
++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff80ff01ff80;
++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff800000007e;
++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000001;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000001;
++ *((int*)& __m256_op0[1]) = 0x80000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000007f0000007f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f0000007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff80ff01ff80;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff800000007e;
++ *((unsigned long*)& __m256i_result[3]) = 0x003f8000003f8000;
++ *((unsigned long*)& __m256i_result[2]) = 0x003f8000003f8000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffc07f80ffc07f80;
++ *((unsigned long*)& __m256i_result[0]) = 0xffc07f80003f0000;
++ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x36de0000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x3be14000;
++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0xffffffff;
++ *((int*)& __m256_result[6]) = 0xffffffff;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0xffffffff;
++ *((int*)& __m256_result[2]) = 0xffffffff;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfsqrt_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000030000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000030000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x24);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000036de0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003be14000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000007e8a60;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000001edde;
++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000007e8a60;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001edde;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003;
++ __m128i_out = __lsx_vclo_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001;
++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000036de0000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x000000003be14000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000030000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000030000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000018002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000018002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff7a53;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00018002;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000002;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00018002;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000002;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00030000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00030000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000007e8a60;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001edde;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000de00003e14;
++ *((unsigned long*)& __m128i_result[0]) = 0x00012b15ffff32ba;
++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x3f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff7a53;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff7a53;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
++ __m256i_out = __lasx_xvneg_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a;
++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,-12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000036de0000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000003be14000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffff7a53;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000001f0000;
++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a;
++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000000;
++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003be14000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000003bfb4000;
++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x55);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000000000002;
++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000;
++ __m256i_out = __lasx_xvabsd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x36);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000bfffffffe0f6;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000010001000a;
++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000;
++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003bfb4000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000003bfb4000;
++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000003bfb4000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x000000003bfb4000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000;
++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000de0000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a;
++ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000006f00000000;
++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x0000006f;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000037;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x2f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000de0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000006f00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a;
++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000007b;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000037;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000036;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000002;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x80000000;
++ *((int*)& __m256_result[6]) = 0x80000000;
++ *((int*)& __m256_result[5]) = 0x80000000;
++ *((int*)& __m256_result[4]) = 0x80000000;
++ *((int*)& __m256_result[3]) = 0x80000000;
++ *((int*)& __m256_result[2]) = 0x80000000;
++ *((int*)& __m256_result[1]) = 0x80000000;
++ *((int*)& __m256_result[0]) = 0x80000000;
++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned
long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000020000000200; ++ __m128i_out = __lsx_vfclass_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000050000007b; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000500000005; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ int_op1 = 0x3f8000003f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff9fff9fff9fff9; ++ __m256i_out = __lasx_xvmini_h(__m256i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x007b01ec007b3a9e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010000fe7c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000100010000fe01; ++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000060; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000050000007b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000500000005; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffbffffff85; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffc0000fdfc; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff3fff3fff3fff3; ++ __m256i_out = __lasx_xvmini_h(__m256i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff1000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff1000100010001; ++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000070; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff5; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00010001; ++ *((int*)& __m128_op0[2]) = 0x00010001; ++ *((int*)& __m128_op0[1]) = 0x00010001; ++ *((int*)& __m128_op0[0]) = 0x00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010000fe7c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long*)& __m128i_result[1]) = 0x000f000f00100000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000f000f00100000; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00010002; ++ *((int*)& __m128_op0[2]) = 0x0000fe7d; ++ *((int*)& __m128_op0[1]) = 0x00010002; ++ *((int*)& __m128_op0[0]) = 0x0000fe02; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x0000007b; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00f300ff00f3; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00f300ff00f3; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00f300ff00f3; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00f300ff00f3; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffbffffff85; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffc0000fdfc; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff8fff8fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x0000007b; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d(__m256i_op0,1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x35); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff8fff8fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0003000300030004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003000300030004; ++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0204; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x32); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001007c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00010001; ++ *((int*)& __m128_op1[2]) = 0x0001007c; ++ *((int*)& __m128_op1[1]) = 0x00010001; ++ *((int*)& __m128_op1[0]) = 0x00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4429146a7b4c88b2; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe22b3595efa4aa0c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000442900007b4c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000e22b0000efa4; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0204; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000442900007b4c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000e22b0000efa4; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000442800007b50; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0204; ++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000442800007b50; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0204; ++ *((int*)& __m128_result[3]) = 0x46885000; ++ *((int*)& __m128_result[2]) = 0x46f6a000; ++ *((int*)& __m128_result[1]) = 0x4f800000; ++ *((int*)& __m128_result[0]) = 0x4f7fff02; ++ __m128_out = __lsx_vffint_s_wu(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4688500046f6a000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f7fff02; 
++ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff03ffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00013fff; ++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff03ffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00013fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000088500000f6a0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffd00000407; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000442900007b4c; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000e22b0000efa4; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffffff03ffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00013fff; ++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000100010001007c; ++ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100000001007c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000000010000; ++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000020000007d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000001f400000; ++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128d_result[1]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x40f0001000000000; ++ __m128d_out = 
__lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000020000007d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000746400016388; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000586100015567; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0800000200000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000020000007d; ++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00800000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x1f400000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffa8ff9f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffabff99; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000100000002007d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000020001; ++ *((unsigned long*)& __m128i_result[1]) = 0x00010000ffab001c; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffffffadff9a; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& 
__m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0xffa8ff9f; ++ *((int*)& __m128_op1[1]) = 0x0000ffff; ++ *((int*)& __m128_op1[0]) = 0xffabff99; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long*)& __m256i_result[2]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long*)& __m256i_result[0]) = 0x6d6d6d6d6d6d6d6d; ++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x6d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00010001; ++ *((int*)& __m128_op1[2]) = 0x0001007c; ++ *((int*)& __m128_op1[1]) = 0x00010001; ++ *((int*)& __m128_op1[0]) = 0x00010001; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00010000ffab001c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffffffadff9a; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[6]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[5]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[4]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[3]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[2]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[1]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[0]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[7]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[6]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[5]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[4]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[3]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[2]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[1]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[0]) = 0x6d6d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[6]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[5]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[4]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[3]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[2]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[1]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[0]) = 0x6d6d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100010001; ++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128d_result[0]) = 0xfffcfffcfffcfffc; ++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00003fff00003fff; ++ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[6]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[5]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[4]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[3]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[2]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[1]) = 0x6d6d6d6d; ++ *((int*)& __m256_op0[0]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[7]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[6]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[5]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[4]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[3]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[2]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[1]) = 0x6d6d6d6d; ++ *((int*)& __m256_op1[0]) = 0x6d6d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x000000000000ffff; ++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x1e0200001e020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffdfffcfffd; ++ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff7f7f7fff7fffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff7f7f7fff7fffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3f7f7f7eff800000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3f7f7f7eff800000; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1e0200001e020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffcfffffffd; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffdfffffffd; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffdfffcfffd; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x3f7f7f7e; ++ *((int*)& __m256_op1[4]) = 0xff800000; ++ *((int*)& 
__m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x3f7f7f7e; ++ *((int*)& __m256_op1[0]) = 0xff800000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x7fffffff; ++ *((int*)& __m256_op2[4]) = 0xff7fffff; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x7fffffff; ++ *((int*)& __m256_op2[0]) = 0xff7fffff; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x7fffffff; ++ *((int*)& __m256_result[4]) = 0x7fc00000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x7fffffff; ++ *((int*)& __m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8080808000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8080808000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3f7f7f7eff800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3f7f7f7eff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007efeff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007efeff00; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffff7fffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffff7fffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007efeff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007efeff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffd017d00; ++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007efeff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007efeff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000008e7c00; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000067751500; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000008e7c00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000067751500; ++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fff9fff9; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001fff9fffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007ffe7ffe400000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x2a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x007ffe7ffe400000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007ffd0001400840; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000008e7c00; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000067751500; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000008e7c00; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000067751500; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000007a00f8; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff01640092; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000007a00f8; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff01640092; ++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000000010000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000100640000ff92; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000100640000ff92; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007c0100007c01; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007c0100007c00; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007c0100007c01; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007c0100007c00; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x30); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_result[3]) = 0x7aff7c0000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfd017d0000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7aff7c0000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfd017d0000000000; ++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xb3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007ffd0001400840; ++ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000c7aff7c00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000c7aff7c00; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffd017d00; ++ __m256i_out = 
__lasx_xvmaxi_wu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3fffffffff7f0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3fffffffff7f0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000c7aff7c00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000c7aff7c00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffd017d00; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000002030000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x030303670101fd90; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000002030000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x030303670101fd90; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3ffffffffc7bfc99; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3ffffffffc7bfc99; ++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000027d00f8; ++ *((unsigned long*)& __m256i_op1[2]) = 0x040204660265fe22; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000027d00f8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x040204660265fe22; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffd000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x3a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x007ffd00; ++ *((int*)& __m128_op2[0]) = 
++ *((int*)& __m128_result[3]) = 0x80000000;
++ *((int*)& __m128_result[2]) = 0x80000000;
++ *((int*)& __m128_result[1]) = 0x007ffd00;
++ *((int*)& __m128_result[0]) = 0x01400840;
++ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfefa000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x007ffd0001400840;
++ *((unsigned long*)& __m128i_result[1]) = 0x3fffffff80000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00003ffd000a4000;
++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3fffffff80000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00003ffd000a4000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffcffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fffd000a0000;
++ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xfefa0000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfefa000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfefa000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_result[3]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_result[2]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_result[1]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_result[0]) = 0xc3f0c3f0c3f0c3f0;
++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x3c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xc3f0c3f0c3f0c3f0;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xc3f0c3f0c3f0c3f0;
++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000007a00f8;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00ff00ff01640092;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000007a00f8;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00ff00ff01640092;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3fffffff80000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00003ffd000a4000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffcffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffd000a0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xf000800080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000a00028004000;
++ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xfffcffff00000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000fffd000a0000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xf0fd800080000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000a00028004000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf0fd800080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000a00028004000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363;
++ *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf0fd800080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000a00028004000;
++ *((unsigned long*)& __m128i_result[1]) = 0x6b9fe3649c9d6363;
++ *((unsigned long*)& __m128i_result[0]) = 0x6363bc9e8b696363;
++ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363;
++ *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffe3636363;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000063692363;
++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[3]) = 0x0007000000fb00ef;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ea005600f90090;
++ *((unsigned long*)& __m256i_result[1]) = 0x0007000000fb00ef;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ea005600f90090;
++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363;
++ *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000005c9c9c9c;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe3636363;
++ __m128i_out = __lsx_vexth_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256d_op0[1]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00;
++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf0fd800080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000a00028004000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000f000800000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x000f000000000000;
++ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffe4ffffffe4;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffe4ffffffe4;
++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0002ffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x6b9fe3649c9d6363;
++ *((unsigned long*)& __m128i_op0[0]) = 0x6363bc9e8b696363;
++ *((unsigned long*)& __m128i_op1[1]) = 0x6b9fe3649c9d6363;
++ *((unsigned long*)& __m128i_op1[0]) = 0x6363bc9e8b696363;
++ *((unsigned long*)& __m128i_result[1]) = 0xb9fe3640e4eb1b18;
++ *((unsigned long*)& __m128i_result[0]) = 0x800000005b4b1b18;
++ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18;
++ *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffb9fe00003640;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffe4eb00001b18;
++ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00;
++ *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00;
++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0002ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x67b7cf643c9d636a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x39d70e366f547977;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0002ffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x66b34f643c9c626a;
++ *((unsigned long*)& __m128i_result[0]) = 0x38d60e366e547876;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18;
++ *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffb9fe00003640;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffe4eb00001b18;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x80001b155b4b0000;
++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xe273e273e273e273;
++ *((unsigned long*)& __m256i_op0[2]) = 0xe273e273e273e273;
++ *((unsigned long*)& __m256i_op0[1]) = 0xe273e273e273e273;
++ *((unsigned long*)& __m256i_op0[0]) = 0xe273e273e273e273;
++ *((unsigned long*)& __m256i_op1[3]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op1[1]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op1[0]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001c4e8ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001c4e8ffffffff;
++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x01fb16ef98f97e90;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x01fb16ef98f97e90;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18;
++ *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xdcfe1b20f2f60e0c;
++ *((unsigned long*)& __m128i_result[0]) = 0xc00000002e260e0c;
++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x0001c4e8;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x0001c4e8;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001c4e8ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001c4e8ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0080000000800000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0081c4e8ff7fffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0080000000800000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0081c4e8ff7fffff;
++ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00002df900001700;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe05ffffe911;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00002df900001700;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe05ffffe911;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000300000003;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffcfffffffc;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000300000003;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffcfffffffc;
++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x66b34f643c9c626a;
++ *((unsigned long*)& __m128d_op0[0]) = 0x38d60e366e547876;
++ *((unsigned long*)& __m128d_op1[1]) = 0x66b34f643c9c626a;
++ *((unsigned long*)& __m128d_op1[0]) = 0x38d60e366e547876;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x80008000b70fb810;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3c0f3c0f3911b910;
++ *((unsigned long*)& __m256i_op0[1]) = 0x80008000b70fb810;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3c0f3c0f3911b910;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff6f20;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000781e0000f221;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff6f20;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000781e0000f221;
++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ff010000ff01;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff010000ff01;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ff010000ff01;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff010000ff01;
++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00006c82;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00009b140000917b;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffff00006c82;
++ *((unsigned long*)& __m128d_result[0]) = 0x00009b140000917b;
++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0xffff6f20;
++ *((int*)& __m256_op0[5]) = 0x0000781e;
++ *((int*)& __m256_op0[4]) = 0x0000f221;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0xffff6f20;
++ *((int*)& __m256_op0[1]) = 0x0000781e;
++ *((int*)& __m256_op0[0]) = 0x0000f221;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0xffff6f20;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0xffff6f20;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfrint_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff6f20;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000781e0000f221;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff6f20;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000781e0000f221;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80001b155b4b0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00006c82;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00009b140000917b;
++ *((unsigned long*)& __m128i_result[1]) = 0x80000000fffffffc;
++ *((unsigned long*)& __m128i_result[0]) = 0xb150000000000000;
++ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe;
++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80001b155b4b0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x80001b155b4b0000;
++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff994cb09c;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc3639d96;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff6f20;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff6f20;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x0000ffff;
++ *((int*)& __m256_op0[6]) = 0x0000ffff;
++ *((int*)& __m256_op0[5]) = 0x0000ffff;
++ *((int*)& __m256_op0[4]) = 0x0000ffff;
++ *((int*)& __m256_op0[3]) = 0x0000ffff;
++ *((int*)& __m256_op0[2]) = 0x0000ffff;
++ *((int*)& __m256_op0[1]) = 0x0000ffff;
++ *((int*)& __m256_op0[0]) = 0x0000ffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x20);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff994cb09c;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc3639d96;
++ *((unsigned long*)& __m128i_op1[1]) = 0x20de27761210386d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x34632935195a123c;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff994db09c;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc7639d96;
++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xdbc80000;
++ *((int*)& __m256_op1[6]) = 0x00003fff;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0xdbc80000;
++ *((int*)& __m256_op1[2]) = 0x00003fff;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff994db09c;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc7639d96;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xdbc8000000003fff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((int*)& __m256_result[7]) = 0xff800000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0xff800000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x27); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000e0000000e; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 
0xecececececececec; ++ *((unsigned long*)& __m128i_result[0]) = 0xecececececececec; ++ __m128i_out = __lsx_vldi(1004); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d(__m256i_op0,11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x86); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff3e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff3e; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x70); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00c100c100c100c1; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00c100c100c100c1; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00c100c100c100c1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00c100c100c100c1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x003f003f003f003f; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000500000005; ++ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000500000005; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000500000005; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000500000005; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000500000005; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000500000005; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000500000005; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000500000005; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x80000000; ++ *((int*)& __m256_result[5]) = 0x80000000; ++ *((int*)& __m256_result[4]) = 0x80000000; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000000; ++ *((int*)& __m256_result[1]) = 0x80000000; ++ *((int*)& __m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_result[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0xbf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_op1[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_op1[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffb79fb74; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffb79fb74; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000010486048c; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000006; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000010486048c; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000006; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 
[Hunk body flattened during extraction; summarized here rather than
reproduced line-by-line.  The removed lines are part of the
auto-generated LoongArch LSX/LASX instruction-correctness testcase.
Every block has the same shape: 128-bit (__m128i/__m128/__m128d) or
256-bit (__m256i/__m256/__m256d) operands and the expected result are
written lane-by-lane through unsigned long (or int) views, one
__lsx_*/__lasx_* builtin is invoked, and its output is compared against
the pre-recorded vector with ASSERTEQ_32/ASSERTEQ_64, e.g.:

++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074;
++ ...
++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);

LASX builtins exercised in this hunk, in order of first appearance:
xvfrstp_h, xvfcmp_seq_d, xvssrlrni_d_q, xvftinth_l_s, xvaddwev_d_wu_w,
xvfcvt_h_s, xvabsd_w, xvffinth_d_w, xvsadd_bu, xvshuf4i_d, xvsubi_wu,
xvabsd_bu, xvsrln_w_d, xvmulwev_q_du, xvaddwod_h_bu, xvsub_d,
xvbitseti_w, xvsrlni_h_w, xvbitseti_d, xvavg_d, xvslti_du,
xvaddwod_w_hu, xvsrlni_w_d, xvfcmp_sune_d, xvmaxi_hu, xvreplve_h,
xvflogb_s, xvsle_du, xvsubwod_q_du, xvfcvt_s_d, xvsrlrn_w_d,
xvinsgr2vr_w, vext2xv_d_h, xvpickve_d, xvssrani_wu_d, xvmuh_hu,
xvfcmp_slt_d, xvhaddw_h_b, xvmaddwod_d_w, xvmsub_b, xvshuf4i_w,
xvsubwod_d_wu, xvssrlrni_bu_h, xvbitclr_b, xvssrln_wu_d, xvavg_hu,
xvandn_v, xvslti_w, xvmaxi_b, xvfcmp_sle_d, xvrotri_h, xvrotri_d,
xvdiv_bu, xvaddi_wu, xvssrarni_wu_d, xvmini_du, xvaddwev_d_w,
xvpickev_w, xvmaddwod_w_hu_h.

LSX builtins exercised: vreplvei_h, vmaxi_hu, vexth_hu_bu, vfcmp_sne_s,
vsll_h, vreplve_b, vfmadd_s, vpackod_b, vftintrzl_l_s, vssub_w,
vssran_w_d, vhaddw_wu_hu, vsrarn_b_h, vsubi_du, vsran_h_w, vhsubw_q_d,
vmulwod_w_hu, vssrlni_hu_w, vssrlrni_wu_d, vavg_du, vsle_du, vpackev_h,
vmuh_h, vftintrp_w_d, vfcmp_cult_d, vadda_b, vsigncov_h, vsllwil_hu_bu,
vfcmp_sult_s, vreplgr2vr_b, vffint_d_l, vbitseti_h, vslt_h, vfcvt_h_s,
vfcmp_cle_d, vneg_h, vsigncov_d, vsrar_h, vsadd_wu, vaddwev_h_b,
vpickod_w, vftintrm_w_d, vsle_hu, vsrani_w_d, vseq_w, vfcmp_cule_d,
vftintrne_w_d, vsat_d, vslti_h, vssrlrn_h_w, vsrli_w.]
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_result[1]) = 0x9a9a9a9a9a9a9a9a; ++ *((unsigned long*)& __m128i_result[0]) = 0x9aba9aba9aba9aba; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x9a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; ++ *((unsigned long*)& __m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; ++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000020000; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0040004000400040; ++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x55555555; ++ *((int*)& __m256_op0[6]) = 0x55555555; ++ *((int*)& __m256_op0[5]) = 0x5d5d5d5d; ++ *((int*)& __m256_op0[4]) = 0x5d555d55; ++ *((int*)& __m256_op0[3]) = 0x55555555; ++ *((int*)& __m256_op0[2]) = 0x55555555; ++ *((int*)& __m256_op0[1]) = 0x5d5ca2a3; ++ *((int*)& __m256_op0[0]) = 0x5d54aaab; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 
0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0100000001000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffee; ++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x12); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fc03fc01fc03fc; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x3e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256d_op0[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256d_op0[0]) = 0x01fc03fc01fc03fc; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100000001000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long*)& __m128i_result[0]) = 0xffc0ffc0ffc0ffc0; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffdffd; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffdffd; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffdffd; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffdffd; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffee; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffee; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffee; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffee; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fc03fc01fc03fc; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000200000001e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000200000001e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0081000100810001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0081000100810001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0081000100810001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0081000100810001; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffffdd; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffdc; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff80ff00ff80ff01; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff80ff00ff80ff01; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x007f00ff007f00fe; ++ *((unsigned long*)& __m256i_op2[2]) = 0xf711ee11f711ee91; ++ *((unsigned long*)& __m256i_op2[1]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xf711ee11f711ee11; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff80ff00ff80ff01; ++ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000080000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000002affaa; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff002affaa; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000002affaa; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffd50055; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x002affaa00000000; ++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00820082ff81ff81; ++ *((unsigned long*)& __m128d_op0[0]) = 0xff81ff81ff81ff81; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000820000ff81; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff810000ff81; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000820000ff81; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff810000ff81; ++ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffeffffffdd; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x002affaa00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffffdd; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffdc; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x000000000000ffff; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ee; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ee; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f00ff007f00ff; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffffdd; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; 
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m128i_result[0]) = 0x001f001f001f001f; ++ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000808; ++ __m256i_out = __lasx_xvclo_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000808; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1010100fefefeff0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0f8f0e8df676f778; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ef32; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ee; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ee; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffce; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fc7c; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffce; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fc7c; ++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x28); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; ++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xff81ff82ff810081; ++ *((unsigned long*)& __m128i_op2[0]) = 0xff82ff810081ff81; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; ++ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op1[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xe6e8e6e8e6e8d719; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xe6e8e6e8e6e8d719; ++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x0000ffce; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0000fc7c; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x0000ffce; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0000fc7c; ++ *((int*)& __m256_op1[7]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[6]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[5]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[4]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[3]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[2]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[1]) = 0xe7e7e7e7; ++ *((int*)& __m256_op1[0]) = 0xe7e7e7e7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ffce20; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x00ff00ff00ffce20;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ee1100;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000004560408;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ee1100;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000004560408;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff1100;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000004560420;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff1100;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000004560420;
++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
[... roughly 120 further apparently auto-generated test blocks elided; each follows the same pattern as the block above: preload the 64-bit words of the __m128i/__m256i (or __m128/__m256, __m128d/__m256d) operands with fixed constants, invoke a single __lsx_* or __lasx_xv* builtin, and compare the output vector against the precomputed result via ASSERTEQ_64/ASSERTEQ_32(__LINE__, expected, actual) ...]
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x35); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0003030300000300; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0003030300000300; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0003030300000100; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0003030300000100; ++ *((unsigned long*)& __m256d_result[3]) = 0x1febc46085090ea0; ++ *((unsigned long*)& __m256d_result[2]) = 0x1febc46085090ea0; ++ *((unsigned long*)& __m256d_result[1]) = 0x1febc46085090567; ++ *((unsigned long*)& __m256d_result[0]) = 0x1febc46085090567; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe6; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe6; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0003030300000300; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0003030300000300; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0003030300000100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0003030300000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x00f800f800f800f8; ++ *((unsigned long*)& __m256i_result[2]) = 0x0018181800181818; ++ *((unsigned long*)& __m256i_result[1]) = 0x00f800f800f800f8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0018181800181818; ++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0008; ++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0003030300000300; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0003030300000300; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0003030300000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0003030300000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0043030300400300; ++ *((unsigned long*)& __m256i_result[2]) = 0x0043030300400300; ++ *((unsigned long*)& __m256i_result[1]) = 0x0043030300400100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0043030300400100; ++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffff0008; ++ *((int*)& __m128_op1[3]) = 0xffc2ffe0; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x0000ffc1; ++ *((int*)& __m128_op1[0]) = 0x00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_result[3]) = 0x04080c1014182d35; ++ *((unsigned long*)& __m256i_result[2]) = 0x716d696573765161; ++ *((unsigned long*)& __m256i_result[1]) = 0x04080c1014182d35; ++ *((unsigned long*)& __m256i_result[0]) = 0x716d696573765161; ++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00f800f800f800f8; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0018181800181818; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00f800f800f800f8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0018181800181818; ++ *((unsigned long*)& __m256i_result[3]) = 0x001f1f3e3e1f1f00; ++ *((unsigned long*)& __m256i_result[2]) = 0x0003060909060300; ++ *((unsigned long*)& __m256i_result[1]) = 0x001f1f3e3e1f1f00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0003060909060300; ++ __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[1]) = 0x1111113111111131; ++ *((unsigned long*)& __m128i_result[0]) = 0x1111113111111131; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; 
++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0043030300400300; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0043030300400300; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0043030300400100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0043030300400100; ++ *((unsigned long*)& __m256i_result[3]) = 0xffdd001dffe00020; ++ *((unsigned long*)& __m256i_result[2]) = 0xffdd001dffe00031; ++ *((unsigned long*)& __m256i_result[1]) = 0xffdd001dffe00020; ++ *((unsigned long*)& __m256i_result[0]) = 0xffdd001dffe00031; ++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x001f1f3e; ++ *((int*)& __m256_op1[6]) = 0x3e1f1f00; ++ *((int*)& __m256_op1[5]) = 0x00030609; ++ *((int*)& __m256_op1[4]) = 0x09060300; ++ *((int*)& __m256_op1[3]) = 0x001f1f3e; ++ *((int*)& __m256_op1[2]) = 0x3e1f1f00; ++ *((int*)& __m256_op1[1]) = 0x00030609; ++ *((int*)& __m256_op1[0]) = 0x09060300; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff000200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& 
__m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111131; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111131; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffff0008; ++ *((unsigned long*)& __m128i_result[1]) = 0x1111113111111141; ++ *((unsigned long*)& __m128i_result[0]) = 0x1111113111111121; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f8000007f7fffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f8000007f7fffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7f8000007f7fffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f8000007f7fffff; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,-5); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x41dfbe1f41e0ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x41dfbe1f41e0ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xec); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3f77aab500000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long*)& __m128i_op2[1]) = 0x3f77aab500000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffc100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0fbc1df53c1ae3f9; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff820f81; ++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1111113111111141; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1111113111111121; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000111111312; ++ *((unsigned long*)& __m128i_result[0]) = 0x2222272111111410; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0fbc1df53c1ae3f9; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff820f81; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xf144e32bc4e61d27; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000020017ef19f; ++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffdd001dffe00020; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffdd001dffe00031; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffdd001dffe00020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffdd001dffe00031; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256d_op2[2]) = 0x2020202020206431; ++ *((unsigned long*)& __m256d_op2[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256d_op2[0]) = 0x2020202020206431; ++ *((unsigned long*)& __m256d_result[3]) = 0xa020202020202020; ++ *((unsigned long*)& __m256d_result[2]) = 0xa020202020206431; ++ *((unsigned long*)& __m256d_result[1]) = 0xa020202020202020; ++ *((unsigned long*)& __m256d_result[0]) = 0xa020202020206431; ++ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x33); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe700000007; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fffffffe000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fffffffe000000; ++ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0100000008080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffc2ffe700000007; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffc100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xbde2ffe800000007; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x403be000; ++ *((int*)& __m128_result[2]) = 0xffffe000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000020; ++ *((int*)& __m128_op0[2]) = 0x00000020; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x0000ffc1; ++ *((int*)& __m128_op1[0]) = 0x00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000021ffffffdf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000e60; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001ff85ffdc0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000332ae5d97330; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1ff85ffe2ae5d973; ++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_w_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x01ffffff; ++ *((int*)& __m256_op1[4]) = 0xfe000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x01ffffff; ++ *((int*)& __m256_op1[0]) = 0xfe000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000021ffffffdf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000e60; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long*)& __m128i_result[1]) = 0x00010020fffeffde; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100400100200e68; ++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00010020fffeffde; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100400100200e68; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00010020fffeffde; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0100400100200e68; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long*)& __m128i_result[1]) = 0x00010020fffeffde; ++ *((unsigned long*)& __m128i_result[0]) = 0x011f57c100201a46; ++ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long*)& __m128i_op1[1]) = 0x403be000ffffe000; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000ffc2f; ++ *((unsigned long*)& __m128i_result[0]) = 0x00201df000000000; ++ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x29); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffc2ffe7; ++ *((int*)& __m128_op0[2]) = 0x00000007; ++ *((int*)& __m128_op0[1]) = 0x0000ffc1; ++ *((int*)& __m128_op0[0]) = 0x00010001; ++ *((int*)& __m128_op1[3]) = 0xffc2ffe7; ++ *((int*)& __m128_op1[2]) = 0x00000007; ++ *((int*)& __m128_op1[1]) = 0x0000ffc1; ++ *((int*)& __m128_op1[0]) = 0x00010001; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x000ffc2f; ++ *((int*)& __m128_op2[1]) = 0x00201df0; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xffc2ffe7; ++ *((int*)& __m128_result[2]) = 0x800ffc2f; ++ *((int*)& __m128_result[1]) = 0x80201df0; ++ *((int*)& __m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3838383838300010; ++ *((unsigned long*)& __m128i_result[0]) = 0x3818200838383838; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xc7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2222272011111410; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2222272011111410; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xa020202020202020; ++ *((unsigned long*)& __m256i_op0[2]) = 0xa020202020206431; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa020202020202020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xa020202020206431; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xa020202020202020; ++ *((unsigned long*)& __m256i_result[2]) = 0xa020202020206431; ++ *((unsigned long*)& __m256i_result[1]) = 0xa020202020202020; ++ *((unsigned long*)& __m256i_result[0]) = 0xa020202020206431; ++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fffffffe000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fffffffe000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x01fffffffe000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x01fffffffe000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfe00000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000017f7f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000017f7f7f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f00000000000000; ++ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xa020202020202020; ++ *((unsigned long*)& __m256i_op1[2]) = 0xa020202020206431; ++ *((unsigned long*)& __m256i_op1[1]) = 0xa020202020202020; ++ *((unsigned long*)& __m256i_op1[0]) = 0xa020202020206431; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202031; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202031; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe700000007; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00010020fffeffde; ++ *((unsigned long*)& __m128i_op2[0]) = 0x011f57c100201a46; ++ *((unsigned long*)& __m128i_result[1]) = 0x001ffce00016fb41; ++ *((unsigned long*)& __m128i_result[0]) = 0x57cb857100001a46; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000017f7f7f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000017f7f7f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f00000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000017f00007f7f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007f0000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x1111113111111141; ++ 
*((unsigned long*)& __m128d_op1[0]) = 0x1111113111111121; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffc2ffe7; ++ *((int*)& __m128_op0[2]) = 0x00000007; ++ *((int*)& __m128_op0[1]) = 0x0000ffc1; ++ *((int*)& __m128_op0[0]) = 0x00010001; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0xfffff1a0; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfbffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfbffffffffffffff; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x3a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f00007f7f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00007f0000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fd;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff810000000000;
++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202031;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202031;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xa020202020202020;
++ *((unsigned long*)& __m256i_op1[2]) = 0xa020202020206431;
++ *((unsigned long*)& __m256i_op1[1]) = 0xa020202020202020;
++ *((unsigned long*)& __m256i_op1[0]) = 0xa020202020206431;
++ *((unsigned long*)& __m256i_result[3]) = 0xa020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0xa020202020206431;
++ *((unsigned long*)& __m256i_result[1]) = 0xa020202020202020;
++ *((unsigned long*)& __m256i_result[0]) = 0xa020202020206431;
++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf0800320fff1fa20;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0032000000000000;
++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xfbffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x7bffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xfbffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x7bffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020;
++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x1111113111111141;
++ *((unsigned long*)& __m128d_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x001ffce00016fb41;
++ *((unsigned long*)& __m128i_op0[0]) = 0x57cb857100001a46;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000150000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffeffff001effff;
++ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x1a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000150000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffeffff001effff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffff1a0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f00f;
++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xfbffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x7bffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000020;
++ *((int*)& __m128_op0[0]) = 0x00000020;
++ *((unsigned long*)& __m128d_result[1]) = 0x36f0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x36f0000000000000;
++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xa020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0xa020202020206431;
++ *((unsigned long*)& __m256i_op0[1]) = 0xa020202020202020;
++ *((unsigned long*)& __m256i_op0[0]) = 0xa020202020206431;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xd010101010101010;
++ *((unsigned long*)& __m256i_result[2]) = 0xd010101010103218;
++ *((unsigned long*)& __m256i_result[1]) = 0xd010101010101010;
++ *((unsigned long*)& __m256i_result[0]) = 0xd010101010103218;
++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f00f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000f00f;
++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfbffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7bffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xf7ffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xf7feffffffffffff;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xd010101010101010;
++ *((unsigned long*)& __m256i_op0[2]) = 0xd010101010103218;
++ *((unsigned long*)& __m256i_op0[1]) = 0xd010101010101010;
++ *((unsigned long*)& __m256i_op0[0]) = 0xd010101010103218;
++ *((unsigned long*)& __m256i_op1[3]) = 0xd010101010101010;
++ *((unsigned long*)& __m256i_op1[2]) = 0xd010101010103218;
++ *((unsigned long*)& __m256i_op1[1]) = 0xd010101010101010;
++ *((unsigned long*)& __m256i_op1[0]) = 0xd010101010103218;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431;
++ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010002000100020;
++ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000;
++ __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff77777807777775;
++ *((unsigned long*)& __m128i_op0[0]) = 0xe6eeef00eeeeeebf;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f00f;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff007700070077;
++ *((unsigned long*)& __m128i_result[0]) = 0x00e600ef00ee01de;
++ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f00f;
++ *((unsigned long*)& __m128i_result[1]) = 0x111110ff11111141;
++ *((unsigned long*)& __m128i_result[0]) = 0x1111113111111100;
++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x060808ff08080820;
++ *((unsigned long*)& __m128i_result[0]) = 0x4608081808080810;
++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f00f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007fff;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfrint_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0010002000100020;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe;
++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x3e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111100;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmini_d(__m128i_op0,-1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128i_result[1]) = 0x1111311111114111;
++ *((unsigned long*)& __m128i_result[0]) = 0x1111311111112111;
++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffe0000000000;
++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x111110ff11111141;
++ *((unsigned long*)& __m128i_op1[0]) = 0x11111131111116a6;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff8000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff8000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_h(__m128i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1111311111114111;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111311111112111;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x1111311111114111;
++ *((unsigned long*)& __m128i_result[0]) = 0x1111311111110000;
++ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00080008000801ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0008000800080008;
++ *((unsigned long*)& __m256i_result[1]) = 0x00080008000801ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0008000800080008;
++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff8000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff8000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_du(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000002000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000800000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000002000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000800000;
++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x28);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_result[0]) = 0x2020202020207fff;
++ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000002000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000800000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000002000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000800000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207fff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_result[0]) = 0x2020202020207f7f;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00080008000801ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0008000800080008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00080008000801ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0008000800080008;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x3f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00ff0077;
++ *((int*)& __m128_op0[2]) = 0x00070077;
++ *((int*)& __m128_op0[1]) = 0x00e600ef;
++ *((int*)& __m128_op0[0]) = 0x00ee01de;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00007fff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431;
++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020643100000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[0]) = 0x2020643100000000;
++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000009c400000000;
++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op2[0]) = 0x2020202020207f7f;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000;
++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff0000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431;
++ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431;
++ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1111311111114111;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1111311111110000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000001ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe0000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x20202020;
++ *((int*)& __m128_op0[2]) = 0x20202020;
++ *((int*)& __m128_op0[1]) = 0x20202020;
++ *((int*)& __m128_op0[0]) = 0x20207fff;
++ *((int*)& __m128_op1[3]) = 0x32d3f35e;
++ *((int*)& __m128_op1[2]) = 0xcd509d13;
++ *((int*)& __m128_op1[1]) = 0x3e081b3c;
++ *((int*)& __m128_op1[0]) = 0x93f6b356;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f;
++ unsigned_int_result = 0x0000000020202020;
++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x1);
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op1[0]) = 0x2020202020207fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x01010101010101ff;
++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207fff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x20202020;
++ *((int*)& __m128_op0[2]) = 0x20202020;
++ *((int*)& __m128_op0[1]) = 0x20202020;
++ *((int*)& __m128_op0[0]) = 0x20207fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrne_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffff02;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f;
++ *((unsigned long*)& __m128i_result[1]) = 0x5d5d5d5d5d5d5d5d;
++ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d0000;
++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc000c000c000ff81;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xa2a2a2a3a2a2a2a3;
++ *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000;
++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0ba00ba00ba00ba0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0ba00ba00ba011eb;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000a0000000a;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000a0000000d;
++ __m128i_out = __lsx_vpcnt_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_w(__m256i_op0,-2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000;
++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x1b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xa2a2a2a3a2a2a2a3;
++ *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000;
++ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf1819b7c0732a6b6;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffb9917a6e7fffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslei_d(__m128i_op0,12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0020002000200020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0020002000200020;
++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0ba00ba00ba00ba0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0ba00ba00ba011eb;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf1819b7c0732a6b6;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffb9917a6e7fffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x05d0ba0002e8802e;
++ *((unsigned long*)& __m128i_result[0]) = 0xd005e802174023d6;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7f417f417f027e03;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m128i_result[0]) = 0x2020202020207e03;
++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x05d0ba0002e8802e;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd005e802174023d6;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc000c000c000ff81;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0ba00ba00ba00ba0;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0ba00ba00ba011eb;
++ *((unsigned long*)& __m128i_result[1]) = 0x05d0ae6002e8748e;
++ *((unsigned long*)& __m128i_result[0]) = 0xcd1de80217374041;
++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000005151515;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000006302e00;
++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x25);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005151515;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000006302e00;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7f417f417f027e03;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001fd0;
++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x32);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000001;
++ *((int*)& __m256_op0[6]) = 0x00000001;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000001;
++ *((int*)& __m256_op0[2]) = 0x00000001;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x7f800000;
++ *((int*)& __m256_result[6]) = 0x7f800000;
++ *((int*)& __m256_result[5]) = 0x7fc00000;
++ *((int*)& __m256_result[4]) = 0x7fc00000;
++ *((int*)& __m256_result[3]) = 0x7f800000;
++ *((int*)& __m256_result[2]) = 0x7f800000;
++ *((int*)& __m256_result[1]) = 0x7fc00000;
++ *((int*)& __m256_result[0]) = 0x7fc00000;
++ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0080010000800100;
++ *((unsigned long*)& __m256i_result[2]) = 0x00c0000000c00000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0080010000800100;
++ *((unsigned long*)& __m256i_result[0]) = 0x00c0000000c00000;
++ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_op2[7]) = 0x7f800000;
++ *((int*)& __m256_op2[6]) = 0x7f800000;
++ *((int*)& __m256_op2[5]) = 0x7fc00000;
++ *((int*)& __m256_op2[4]) = 0x7fc00000;
++ *((int*)& __m256_op2[3]) = 0x7f800000;
++ *((int*)& __m256_op2[2]) = 0x7f800000;
++ *((int*)& __m256_op2[1]) = 0x7fc00000;
++ *((int*)& __m256_op2[0]) = 0x7fc00000;
++ *((int*)& __m256_result[7]) = 0x7f800000;
++ *((int*)& __m256_result[6]) = 0x7f800000;
++ *((int*)& __m256_result[5]) = 0x7fc00000;
++ *((int*)& __m256_result[4]) = 0x7fc00000;
++ *((int*)& __m256_result[3]) = 0x7f800000;
++ *((int*)& __m256_result[2]) = 0x7f800000;
++ *((int*)& __m256_result[1]) = 0x7fc00000;
++ *((int*)& __m256_result[0]) = 0x7fc00000;
++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9795698585057dec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x87f82867431a1d08;
++ *((unsigned long*)& __m128i_result[1]) = 0x9780697084f07dd7;
++ *((unsigned long*)& __m128i_result[0]) = 0x87e3285243051cf3;
++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001fd0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001fd0;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7f417f417f027e03;
++ *((unsigned long*)& __m128i_op1[1]) = 0x9780697084f07dd7;
++ *((unsigned long*)& __m128i_op1[0]) = 0x87e3285243051cf3;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) =
0x9780697084f07dd7; ++ *((unsigned long*)& __m128i_op0[0]) = 0x87e3285243051cf3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000cdc1; ++ __m128i_out = __lsx_vmskltz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x9795698585057dec; ++ *((unsigned long*)& __m128i_op0[0]) = 0x87f82867431a1d08; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1149a96eb1a08000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000cdc1; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long*)& __m128i_op2[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long*)& __m128i_op2[0]) = 0xcd1de80217374041; ++ *((unsigned long*)& __m128i_result[1]) = 0xf490ee600180ce20; ++ *((unsigned long*)& __m128i_result[0]) = 0x063bff74fb46e356; ++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1149a96eb1a08000; ++ *((unsigned long*)& __m128i_result[1]) = 0xb1a08000b1a08000; ++ *((unsigned long*)& __m128i_result[0]) = 0xb1a08000b1a08000; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001fd0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001fd0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcd1de80217374041; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000065a0; ++ __m128i_out = __lsx_vmskltz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcd1de80217374041; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vclo_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f417f417f027e03; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x60); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000001; ++ *((int*)& __m256_op0[6]) = 0x00000001; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000001; ++ *((int*)& __m256_op0[2]) = 0x00000001; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long*)& __m128i_result[1]) = 0x5237c1baffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x7d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000065a0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9941d1d5f4ba9d08; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x9941d155f43a9d08; ++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xdfffffffdfffffff; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xdfffffffdfffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9941d155f43a9d08; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00008bf700017052; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f841000091aa; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000f8410000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long*)& __m128d_op0[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long*)& __m128d_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long*)& __m128d_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long*)& __m128d_result[1]) = 0xe93d0bd19ff07013; ++ *((unsigned 
long*)& __m128d_result[0]) = 0x65017c2ac9ca9fd0; ++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fc00000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fc00000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fc00000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fc00000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xdfffffffdfffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0xdfffffffdfffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xbff0000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long*)& __m128d_op0[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00008bf700017052; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000f841000091aa; ++ *((unsigned long*)& __m128d_result[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long*)& __m128d_result[0]) = 0x65017c2ac9ca9fd0; ++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000080000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long*)& __m128d_op1[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; 
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d(__m256i_op0,-3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long*)& __m128d_op0[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffcafff8ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000a0; ++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00d3012b015700bb; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001002affca0070; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012b015700bb; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001002affca0070; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00d3012b015700bb; ++ *((unsigned long*)& __m128i_result[0]) = 0x00010000ffca0070; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffcafff8ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a0; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x00d3012b015700bb; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00010000ffca0070; ++ *((unsigned long*)& __m128i_result[1]) = 0xff2cfed4fea8ff44; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffeffff0035ff8f; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff2cfed4fea8ff44; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffeffff0035ff8f; ++ *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000a0; ++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x9c9c9c9c; ++ *((int*)& __m128_op1[2]) = 0x9c9c9c9c; ++ *((int*)& __m128_op1[1]) = 0x9c9c9c9c; ++ *((int*)& __m128_op1[0]) = 0x9c9c9c9c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128i_result[0]) = 
0x2020202020202020; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000900013fa0; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x23); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x80000000; ++ *((int*)& __m256_op0[6]) = 0x80000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x80000000; ++ *((int*)& __m256_op0[2]) = 0x80000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000001; ++ *((int*)& __m128_op0[2]) = 0xca02f854; ++ *((int*)& __m128_op0[1]) = 0x00000001; ++ *((int*)& __m128_op0[0]) = 0x00013fa0; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0xca02f854; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ca02f854; ++ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fea8ff44; ++ *((unsigned long*)& __m128d_op1[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128d_op1[0]) = 0x2020202020202020; ++ *((unsigned long*)& __m128d_result[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128d_result[0]) = 0x2020202020202020; ++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ca02f854; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ca02f854; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ca0200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ca0200000000; ++ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xbff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xbff00000bff00000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xbff00000bff00000; ++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbff00000bff00000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbff00000bff00000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffbff1ffffbff1; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffbff1ffffbff1; ++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020202020; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 
0x2020202020202020; ++ *((unsigned long*)& __m128i_result[0]) = 0x202020202020ff20; ++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ca02f854; ++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x202020202020ff20; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x2000200020002000; ++ *((unsigned long*)& __m128i_result[0]) = 0x2000200020002000; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fea8ff44; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fea8ff44; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000008000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000001ca02f854; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvmax_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffbff1ffffbff1;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffbff1ffffbff1;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffeffc4000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffeffc4000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffeffc4000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffeffc4000000;
++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x2000200020002000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x2000200020002000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000004b01;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000004b01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a0;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000004b01;
++ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004b01;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffb4ff;
++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffb4ff;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffb4ff;
++ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001021;
++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000401000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000401000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000401000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000401000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200020;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000003f;
++ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000001;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000016;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffff98dea;
++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001021;
++ *((unsigned long*)& __m128i_result[1]) = 0x0108020410400208;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010102;
++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128d_result[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x40f3fa0000000000;
++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvneg_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffff98dea;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xc00fffffffffb4ff;
++ *((unsigned long*)& __m128i_result[0]) = 0xbf0c05fffff98dea;
++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_result[1]) = 0x2000200000013fa0;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000013fa0;
++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0020000000200000;
++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0020000000200000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffdfffffffdfffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffdfffffffdfffff;
++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_result[2]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_result[1]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_result[0]) = 0x0606060606060606;
++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0606060606060606;
++ *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9;
++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x2000200000013fa0;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000013fa0;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffdfffffffdfffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffdfffffffdfffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000001021;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x2000200000013fa0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000013fa0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000001000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000120002000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000100013fa0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffdfffffffdfffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffdfffffffdfffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200001;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0020000000200001;
++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_result = 0x0000000000000000;
++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x5);
++ *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001021;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001021;
++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffe000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffe000;
++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x54);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_d(__m128i_op0,5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc;
++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000;
++ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000006a9a5c;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000092444;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000006a9a5c;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000092444;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000d4ccb8;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000124888;
++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00d4ccb8;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00124888;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffbd994889;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000a092444;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000890000000000;
++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x58);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffe000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffe000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000e000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000e000;
++ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe;
++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffb4ff;
++ *((unsigned long*)& __m128i_result[1]) = 0xc110000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc00d060000000000;
++ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xda);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xc110000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc00d060000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xf047ef0000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf047ef0000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xbd994889;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x0a092444;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x3941248880000000;
++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe;
++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x39412488;
++ *((int*)& __m128_op0[0]) = 0x80000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x18171615;
++ *((int*)& __m128_op0[2]) = 0x17161514;
++ *((int*)& __m128_op0[1]) = 0x16151413;
++ *((int*)& __m128_op0[0]) = 0x151d3756;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x39412488;
++ *((int*)& __m128_op1[0]) = 0x80000000;
++ *((int*)& __m128_op2[3]) = 0x3ff00000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x40f3fa00;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0xbff00000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0xc0f3fa00;
++ *((int*)& __m128_result[0]) = 0x80000000;
++ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xbff0000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xc0f3fa0080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffec060;
++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0008000800080;
++ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa8000800080;
++ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3941248880000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x3941248880000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x76f4248880000000;
++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x76f42488;
++ *((int*)& __m128_op0[0]) = 0x80000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000;
++ __m128i_out = __lsx_vftint_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fff00003fff;
++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x32);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000;
++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff0000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff0000ff;
++ int_op1 = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff0000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff0000ff;
++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc485edbcc0000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x003f000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x007c000d00400000;
++ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff0000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff0000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff00000000ff;
++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xc110000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc00d060000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xc110000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff7fffffff;
++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc485edbcc0000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000c485;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000;
++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x30);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x003f000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x007c000d00400000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000003f00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000007c00000040;
++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x31);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fd;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fd;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x18171615;
++ *((int*)& __m128_op0[2]) = 0x17161514;
++ *((int*)& __m128_op0[1]) = 0x16151413;
++ *((int*)& __m128_op0[0]) = 0x15141312;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x1817161517161514;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1615141315141312;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x76f424887fffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000017161515;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000095141311;
++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0003000300030003;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0003000300030003;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[3]) = 0x0600060000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0600060000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfff082f000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000f7d1000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x773324887fffffff;
++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x059a35ef139a8e00;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000017161515;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000095141311;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x34);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff0000ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff0000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00007f0200007f02;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00007f0200007f02;
++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000002;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000002;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffff00000002;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffff00000002;
++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000;
++ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xa7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000002;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000002;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000002;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000002;
++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfff082f000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
++ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x17);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000f7d1000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x773324887fffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfff082efffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x88cbdb7780000001;
++ __m128i_out = __lsx_vsub_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffff000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x1817161517161514;
++ *((unsigned long*)& __m128d_op1[0]) = 0x1615141315141312;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfff082f000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x003f000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xc04d600d3aded151;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x004cff8fffde0051;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000f7d1000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x773324887fffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000017161515;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000095141311;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x1716151595141311;
++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x004cff8fffde0051;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)&
__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000100fe000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000100fe00010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x000100fe000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000100fe00010001; ++ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xb4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000001fdfffffe02; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff01fefffeff02; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000001fdfffffe02; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000001fefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff01fefffeff02; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00000000ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff80fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff80fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff80007ffe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff007fff80fe; ++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x18171615; ++ *((int*)& __m128_op0[2]) = 0x17161514; ++ *((int*)& __m128_op0[1]) = 0x16151413; ++ *((int*)& __m128_op0[0]) = 0x15141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x0c0c8b8a8b8b0b0a; ++ *((unsigned long*)& __m128i_result[0]) = 0x8b8a8a898a8a8909; ++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000017161515; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000095141311; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x76f424887fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000170014; ++ *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000002; ++ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x3f800000; ++ *((int*)& __m128_result[1]) = 0x4eede849; ++ *((int*)& __m128_result[0]) = 0x4f000000; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000170014; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vextl_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c0c8b8a8b8b0b0a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8b8a8a898a8a8909; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007f7f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000000007fff80fe; ++ *((unsigned long*)& __m256d_op0[2]) = 0x000000007fff80fe; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff80007ffe; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ff007fff80fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4eede8494f000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff8607db959f; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000008a0000008a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000008900000009; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000043c5ea7b6; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000008fc4ef7b4; ++ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f00007f7f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f00007f7f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned 
long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000001fdfffffe02; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff01fefffeff02; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001fffeff; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fffeff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff81ffffff00; ++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000043c5ea7b6; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000008fc4ef7b4; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000fea0000fffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000007fff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000007fff8; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffff8607db959f; ++ *((unsigned long*)& __m128d_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff900000800; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000008a0000008a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000008900000009; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffffff; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000007fff8; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000007fff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0007fff8000ffff0; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000008a0000008a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000008900000009; ++ *((unsigned long*)& __m128i_op1[1]) = 0x63637687636316bb; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x6363771163631745; ++ *((unsigned long*)& __m128i_result[0]) = 0x636363ec6363636c; ++ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363771163631745; ++ *((unsigned long*)& __m128i_op1[0]) = 0x636363ec6363636c; ++ *((unsigned long*)& __m128i_result[1]) = 0x006300fb00630143; ++ *((unsigned long*)& __m128i_result[0]) = 0x0063ffec0063006c; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff8607db959f; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000fea0000fffe; ++ *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xc2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f7f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007f7f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f00; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x87); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fd00ffff02fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x00007f7f00007f00; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00007f7f00007fff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0100; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000007fff8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000007fff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f00ff00000000; ++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000c6c6ee22; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6c62e8a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6ee22; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c62e8a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffc000400780087; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe80fffc0183; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffc000400f8ff87; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff80ff00ff7c0183; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffc00000078; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffc; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffc000000f8; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff790000077c; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff02ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0100; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffff9cff05; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff9cfebd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xc0fffff000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffe0001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00003a247fff7fff; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000500000005; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000005fffe0006; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc0fffff000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xc0fffff000000000; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffe02; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000300000005fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffff02; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000300000005fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0007fd00000f02ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ffffffff00; ++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00080000fffe0001; ++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ffffffff00; ++ *((unsigned long*)& __m256d_result[3]) = 0x40efffe000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x40efffe000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0fffff000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe00000; ++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x63637687; ++ *((int*)& __m128_op0[2]) = 0x636316bb; ++ *((int*)& __m128_op0[1]) = 0x63636363; ++ *((int*)& __m128_op0[0]) = 0x63636363; ++ *((unsigned long*)& __m128d_result[1]) = 0x446c6ed0e0000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x446c62d760000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc0fffff000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned 
long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ long_int_result = 0x00000000ffff0100; ++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; ++ int_result = 0x0000000000003a24; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0007fff8000ffff0; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000003f0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000030007; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000003f0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000030007; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x40cd120000000000; ++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff7fff7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff7f027f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff7f0100; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00fe7f027f; ++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fefffeff02ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000100; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00feff00000000; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0007fd00000f02ff; ++ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000001fffeff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00fe00feff02fe; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00fe00feff027f; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00fe00feff02fe; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff027f; ++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff01fe0400000006; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005fffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe01fc0005fff4; ++ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005fffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000002f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000029; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fefffeff02ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00030006fa05f20e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00030081bd80f90e; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x00010003fc827a86; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f7f7f0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f017fc0ddbf7d86; ++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x000000000000002f; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000029; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff01fe0400000006; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000500000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff01fe0400000005; ++ __m128i_out = __lsx_vmini_w(__m128i_op0,5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe01fc0005fff4; ++ int_op1 = 0x0000000020202020; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000820202020; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe01fc0005fff4; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000820202020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe01fc0005fff4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000003a24; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003dbe88077c78c1; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000820205a44; ++ *((unsigned long*)& __m128i_result[0]) = 0x013bc084078278b5; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long*)& __m128i_result[1]) = 0xfbfbfb17fbfb3919; ++ *((unsigned long*)& __m128i_result[0]) 
= 0xfbfb47fbfbfb042d;
++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000002f;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000029;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000003a24;
++ *((unsigned long*)& __m128d_op1[0]) = 0x003dbe88077c78c1;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x40effc0000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x40effc0000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00010003fc827a86;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f7f7f0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f017fc0ddbf7d86;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00153f1594ea02ff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffffffff0100;
++ *((unsigned long*)& __m256i_op2[0]) = 0xff15c1ea95ea02ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xc06e7c817f7e8081;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000bd3f016f177a;
++ *((unsigned long*)& __m256i_result[1]) = 0xc06e7c8100000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x60c485800178147a;
++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffbe20fc;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000001cc7ee87;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000010bb83239;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000c409ed87;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0100020001bf1efd;
++ *((unsigned long*)& __m256i_result[2]) = 0x010002001ec8ec88;
++ *((unsigned long*)& __m256i_result[1]) = 0x010002010db9303a;
++ *((unsigned long*)& __m256i_result[0]) = 0x01000200c60aeb88;
++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00153f1594ea02ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff15c1ea95ea02ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00030006fa05f20e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00030081bd80f90e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000018;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000018;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x2d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x010101010101012f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010129;
++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
++ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffff00;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00;
++ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffd700;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ff7fff7f;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ff7f027f;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ff7f0100;
++ *((unsigned long*)& __m256i_op2[0]) = 0xff00fe00fe7f027f;
++ *((unsigned long*)& __m256i_result[3]) = 0x40efffe09fa88260;
++ *((unsigned long*)& __m256i_result[2]) = 0x6b07ca8e013fbf01;
++ *((unsigned long*)& __m256i_result[1]) = 0x40efffe09fa7e358;
++ *((unsigned long*)& __m256i_result[0]) = 0x80ce32be3e827f00;
++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00153f1594ea02ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffff0100;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff15c1ea95ea02ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000153f15;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff15c1ea;
++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x9ff87f7f7f807f7f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x9ff87f7f7f807f7f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
++ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000018;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000018;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000018;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000018;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[3]) = 0x4ffc3f783fc040c0;
++ *((unsigned long*)& __m256i_result[2]) = 0x3fc03f803fc040c0;
++ *((unsigned long*)& __m256i_result[1]) = 0x4ffc3f783fc040c0;
++ *((unsigned long*)& __m256i_result[0]) = 0x3fc03f803fc040c0;
++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe09fa88260;
++ *((unsigned long*)& __m256i_op0[2]) = 0x6b07ca8e013fbf01;
++ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe09fa7e358;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80ce32be3e827f00;
++ *((unsigned long*)& __m256d_result[3]) = 0x43d03bfff827ea21;
++ *((unsigned long*)& __m256d_result[2]) = 0x43dac1f2a3804ff0;
++ *((unsigned long*)& __m256d_result[1]) = 0x43d03bfff827e9f9;
++ *((unsigned long*)& __m256d_result[0]) = 0x43e019c657c7d050;
++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000018;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000018;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff30000000b;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff3fffffff3;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff30000000b;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff3fffffff3;
++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000003a24;
++ *((unsigned long*)& __m128i_op2[0]) = 0x003dbe88077c78c1;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000002f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000029;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff30000000b;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff3fffffff3;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff30000000b;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff3fffffff3;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007f7f817f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007f7f817f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f;
++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007f7f817f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007f7f817f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f783fc040c0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x3fc03f803fc040c0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f783fc040c0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x3fc03f803fc040c0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0003fbfc0bfbfc03;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0003fbfc0bfbfc03;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x2d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21;
++ *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050;
++ *((unsigned long*)& __m256i_result[3]) = 0xbc30c40107d915df;
++ *((unsigned long*)& __m256i_result[2]) = 0xbc263e0e5c80b010;
++ *((unsigned long*)& __m256i_result[1]) = 0xbc30c40107d91607;
++ *((unsigned long*)& __m256i_result[0]) = 0xbc20e63aa8392fb0;
++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24;
++ *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0020002000200020;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000008;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0020002000200020;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000003a24;
++ *((unsigned long*)& __m128i_result[0]) = 0x003dc288077c7cc1;
++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000008;
++ *((int*)& __m128_op0[1]) = 0x00200020;
++ *((int*)& __m128_op0[0]) = 0x00200020;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f79d20bf257;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffec6f90604bf;
++ *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f79d20bf257;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffec6f90604bf;
++ *((unsigned long*)& __m256i_result[3]) = 0x4ffc3f79d20bf257;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffec6f90604bf;
++ *((unsigned long*)& __m256i_result[1]) = 0x4ffc3f79d20bf257;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffec6f90604bf;
++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21;
++ *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xe8001411edf9c0f8;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xe80014fdf0e3e428;
++ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_result[2]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_result[1]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_result[0]) = 0x007f007f0081007f;
++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff7fff7f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff7f027f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff7f0100;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00fe7f027f;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f;
++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000020000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000020000000;
++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x23);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x43d03bfff827ea21;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43dac1f2a3804ff0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x43d03bfff827e9f9;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43e019c657c7d050;
++ *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21;
++ *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050;
++ *((unsigned long*)& __m256i_result[3]) = 0x86ff76ffff4eff42;
++ *((unsigned long*)& __m256i_result[2]) = 0x86ffffffffff9eff;
++ *((unsigned long*)& __m256i_result[1]) = 0x86ff76ffff4effff;
++ *((unsigned long*)& __m256i_result[0]) = 0x86ff32ffaeffffa0;
++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0020002000200020;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffef8;
++ *((unsigned long*)& __m128i_result[0]) = 0xffdfffdfffdffee0;
++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ff00;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x40efffe09fa88260;
++ *((unsigned long*)& __m256i_op0[2]) = 0x6b07ca8e013fbf01;
++ *((unsigned long*)& __m256i_op0[1]) = 0x40efffe09fa7e358;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80ce32be3e827f00;
++ *((unsigned long*)& __m256i_op1[3]) = 0x86ff76ffff4eff42;
++ *((unsigned long*)& __m256i_op1[2]) = 0x86ffffffffff9eff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x86ff76ffff4effff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x86ff32ffaeffffa0;
++ *((unsigned long*)& __m256i_result[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x3870ca8d013e76a0;
++ *((unsigned long*)& __m256i_result[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_result[0]) = 0x43ec0a1b2aba7ed0;
++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x4ffc3f783fc040c0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3fc03f803fc040c0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x4ffc3f783fc040c0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3fc03f803fc040c0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ff00;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
++ __m128i_out = __lsx_vmskgez_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x1b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ff00;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffef8;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffdfffdfffdffee0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffdfffdf;
++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffefffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffefefffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000018;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000019;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000200000001e;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000019;
++ __m256i_out = __lasx_xvclz_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffefefffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0400000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffefefffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xbc30c40108a45423;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbc263e0e5d00e69f;
++ *((unsigned long*)& __m256i_op1[1]) = 0xbc30c40108a4544b;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbc20e63aa8b9663f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffdf;
++ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffdf;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000021;
++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffefefffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffefefffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000021;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
++ *((unsigned long*)& __m256i_result[3]) = 0xdec38a1061c87f01;
++ *((unsigned long*)& __m256i_result[2]) = 0xc8903673ffc28a60;
++ *((unsigned long*)& __m256i_result[1]) = 0xdec38a1061c91da9;
++ *((unsigned long*)& __m256i_result[0]) = 0xbd14f6e5d6468230;
++ __m256i_out = __lasx_xvneg_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000018;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000019;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000200000001e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000019;
++ *((unsigned long*)& __m256i_op1[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x3870ca8d013e76a0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43ec0a1b2aba7ed0;
++ *((unsigned long*)& __m256i_result[3]) = 0x223d771060c77e19;
++ *((unsigned long*)& __m256i_result[2]) = 0x3870caad013e76b9;
++ *((unsigned long*)& __m256i_result[1]) = 0x223d771060c81cc7;
++ *((unsigned long*)& __m256i_result[0]) = 0x43ec0a3b2aba7ee9;
++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f;
++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000002;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x0000007f;
++ *((int*)& __m256_op1[7]) = 0xfffffff3;
++ *((int*)& __m256_op1[6]) = 0x0000000b;
++ *((int*)& __m256_op1[5]) = 0xfffffff3;
++ *((int*)& __m256_op1[4]) = 0xfffffff3;
++ *((int*)& __m256_op1[3]) = 0xfffffff3;
++ *((int*)& __m256_op1[2]) = 0x0000000b;
++ *((int*)& __m256_op1[1]) = 0xfffffff3;
++ *((int*)& __m256_op1[0]) = 0xfffffff3;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000018;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000019;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000200000001e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000019;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0004000000030000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000400000003c000;
++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x33);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0ea85f60984a8555;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00a21ef3246995f3;
++ *((unsigned long*)& __m256i_op1[1]) = 0x1189ce8000fa14ed;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0e459089665f40f3;
++ *((unsigned long*)& __m256i_result[3]) = 0x000100f800000008;
++ *((unsigned long*)& __m256i_result[2]) = 0x0020001000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000f800000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0004000000000010;
++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff8910ffff7e01;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff3573ffff8960;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff8910ffff1ca9;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffff5e5ffff8130;
++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
++ __m128d_out = __lsx_vfrecip_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff30000000b;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff3fffffff3;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff30000000b;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff3fffffff3;
++ *((unsigned long*)& __m256i_op1[3]) = 0xbc30c40108a45423;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbc263e0e5d00e69f;
++ *((unsigned long*)& __m256i_op1[1]) = 0xbc30c40108a4544b;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbc20e63aa8b9663f;
++ *((unsigned long*)& __m256i_result[3]) = 0x71860bf35f0f9d81;
++ *((unsigned long*)& __m256i_result[2]) = 0x720ed94a46f449ed;
++ *((unsigned long*)& __m256i_result[1]) = 0x71860bf35f0f9f39;
++ *((unsigned long*)& __m256i_result[0]) = 0x72544f0e6e95cecd;
++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x71860bf35f0f9d81;
++ *((unsigned long*)& __m256i_op0[2]) = 0x720ed94a46f449ed;
++ *((unsigned long*)& __m256i_op0[1]) = 0x71860bf35f0f9f39;
++ *((unsigned long*)& __m256i_op0[0]) = 0x72544f0e6e95cecd;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffff8910ffff7e01;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff3573ffff8960;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffff8910ffff1ca9;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffff5e5ffff8130;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffcb423a587053;
++ *((unsigned long*)& __m256i_result[2]) = 0x6d46f43e71141b81;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffcb423a584528;
++ *((unsigned long*)& __m256i_result[0]) = 0x9bdf36c8d78158a1;
++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x3870ca8d013e76a0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43ec0a1b2aba7ed0;
++ *((unsigned long*)& __m256i_result[3]) = 0x111ebb784f9c4100;
++ *((unsigned long*)& __m256i_result[2]) = 0x1c386546809f3b50;
++ *((unsigned long*)& __m256i_result[1]) = 0x111ebb784f9bf1ac;
++ *((unsigned long*)& __m256i_result[0]) = 0x21f6050d955d3f68;
++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
++ __m128d_out = __lsx_vfrint_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7ff0000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x7f800000;
++ *((int*)& __m128_result[0]) = 0x7f800000;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000ffffffdfffdf;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffdfffdf;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xbc74c3d108e05422;
++ *((unsigned long*)& __m256i_op0[2]) = 0xbc1e3e6a5cace67c;
++ *((unsigned long*)& __m256i_op0[1]) = 0xbc74c3d108e0544a;
++ *((unsigned long*)& __m256i_op0[0]) = 0xbc18e696a86565f4;
++ *((unsigned long*)& __m256i_op1[3]) = 0xbc74c3d108e05422;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbc1e3e6a5cace67c;
++ *((unsigned long*)& __m256i_op1[1]) = 0xbc74c3d108e0544a;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbc18e696a86565f4;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x48);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xa5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffff8910ffff7e01;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff3573ffff8960;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffff8910ffff1ca9;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffff5e5ffff8130;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff8910ffff7e01;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff3573ffff8960;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff8910ffff1ca9;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffff5e5ffff8130;
++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
++ *((unsigned long*)& __m256i_result[3]) = 0x223d76f09f3881ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x3870ca9d013e76b0;
++ *((unsigned long*)& __m256i_result[1]) = 0x223d76f09f37e357;
++ *((unsigned long*)& __m256i_result[0]) = 0x43ec0a1b2aba7ed0;
++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffcb423a587053;
++ *((unsigned long*)& __m256i_op0[2]) = 0x6d46f43e71141b81;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffcb423a584528;
++ *((unsigned long*)& __m256i_op0[0]) = 0x9bdf36c8d78158a1;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000007fffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000036a37;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000007fffe;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000004def9;
++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x2d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffbfffffffbf;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffbfffffffbf;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffcb423a587053;
++ *((unsigned long*)& __m256d_op0[2]) = 0x6d46f43e71141b81;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffcb423a584528;
++ *((unsigned long*)& __m256d_op0[0]) = 0x9bdf36c8d78158a1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x223d76f0;
++ *((int*)& __m256_op0[6]) = 0x9f3881ff;
++ *((int*)& __m256_op0[5]) = 0x3870ca8d;
++ *((int*)& __m256_op0[4]) = 0x013e76a0;
++ *((int*)& __m256_op0[3]) = 0x223d76f0;
++ *((int*)& __m256_op0[2]) = 0x9f37e357;
++ *((int*)& __m256_op0[1]) = 0x43ec0a1b;
++ *((int*)& __m256_op0[0]) = 0x2aba7ed0;
++ *((int*)& __m256_op1[7]) = 0x111ebb78;
++ *((int*)& __m256_op1[6]) = 0x4f9c4100;
++ *((int*)& __m256_op1[5]) = 0x1c386546;
++ *((int*)& __m256_op1[4]) = 0x809f3b50;
++ *((int*)& __m256_op1[3]) = 0x111ebb78;
++ *((int*)& __m256_op1[2]) = 0x4f9bf1ac;
++ *((int*)& __m256_op1[1]) = 0x21f6050d;
++ *((int*)& __m256_op1[0]) = 0x955d3f68;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x111ebb784f9c4100;
++ *((unsigned long*)& __m256i_op1[2]) = 0x1c386546809f3b50;
++ *((unsigned long*)& __m256i_op1[1]) = 0x111ebb784f9bf1ac;
++ *((unsigned long*)& __m256i_op1[0]) = 0x21f6050d955d3f68;
++ *((unsigned long*)& __m256i_result[3]) = 0x088f5dbc27ce2080;
++ *((unsigned long*)& __m256i_result[2]) = 0x161c32a2c04f9da7;
++ *((unsigned long*)& __m256i_result[1]) = 0x088f5dbc27cdf8d6;
++ *((unsigned long*)& __m256i_result[0]) = 0x10fb02864aae9fb4;
++ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x111ebb78;
++ *((int*)& __m256_op1[6]) = 0x4f9c4100;
++ *((int*)& __m256_op1[5]) = 0x1c386546;
++ *((int*)& __m256_op1[4]) = 0x809f3b50;
++ *((int*)& __m256_op1[3]) = 0x111ebb78;
++ *((int*)& __m256_op1[2]) = 0x4f9bf1ac;
++ *((int*)& __m256_op1[1]) = 0x21f6050d;
++ *((int*)& __m256_op1[0]) = 0x955d3f68;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_op1[1]) = 0xff7ffffef77fffdd;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf77edf9cffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff;
++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvabsd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned
long*)& __m256i_op0[3]) = 0x111ebb784f9c4100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1c386546809f3b50; ++ *((unsigned long*)& __m256i_op0[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long*)& __m256i_op0[0]) = 0x21f6050d955d3f68; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xbab0c4b000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xaa0ac09800000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000007fffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000036a37; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000007fffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000004def9; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff7ffffef77fffdd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf77edf9cffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000008800022; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000001; ++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x29); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xb8ec43be; ++ *((int*)& __m128_op1[2]) = 0xfe38e64b; ++ *((int*)& __m128_op1[1]) = 0x6477d042; ++ *((int*)& __m128_op1[0]) = 0x343cce24; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vfcmp_saf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000008800022; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff00000001; ++ *((unsigned long*)& __m128d_op2[1]) = 0xb8ec43befe38e64b; ++ *((unsigned long*)& __m128d_op2[0]) = 0x6477d042343cce24; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffbfffffffbf; ++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0097011900f4009f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x003200d4010f0144; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0097011900f301cd; ++ *((unsigned long*)& __m256i_op0[0]) = 0x010b008800f80153; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4ffc3f7800000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3fc03f6400000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4ffc3f7800000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3fc03f6400000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x4eb13ec100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3ec13ec100000000; 
++ *((unsigned long*)& __m256i_result[1]) = 0x4eb13ec100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3ec13ec100000000; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h(__m256i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffff7f; ++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x5f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0004040404000000; ++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; ++ long_op1 = 0x0000000000003a24; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003a24; ++ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffbfffffffbe; ++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f7800000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3fc03f6400000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f7800000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3fc03f6400000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000050fd00000101; ++ *((unsigned long*)& __m256i_result[2]) = 0x000040c100000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x000050fd00000101; ++ *((unsigned long*)& __m256i_result[0]) = 0x000040c100000101; ++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0004040404000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000050fd00000101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000040c100000101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000050fd00000101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000040c100000101; ++ *((unsigned long*)& __m256i_result[3]) = 0x000050fd00000101; ++ *((unsigned long*)& __m256i_result[2]) = 0x000040c100000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x000050fd00000101; ++ *((unsigned long*)& __m256i_result[0]) = 0x000040c100000101; ++ 
__m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7e00000081; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000008000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff5fffffff5; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff5fffffff5; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff5fffffff5; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff5fffffff5; ++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vmskltz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7e00000081; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0404000004040000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& 
__m256d_op0[1]) = 0x0404000004040000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op1[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256d_op1[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256d_op2[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256d_op2[2]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256d_op2[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256d_op2[0]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256d_result[2]) = 0x80003fc00000428a; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256d_result[0]) = 0x80003fc00000428a; ++ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x01010101; ++ *((int*)& __m256_op0[6]) = 0x01010101; ++ *((int*)& __m256_op0[5]) = 0x01010101; ++ *((int*)& __m256_op0[4]) = 0x01010101; ++ *((int*)& __m256_op0[3]) = 0x01010101; ++ *((int*)& __m256_op0[2]) = 0x01010101; ++ *((int*)& __m256_op0[1]) = 0x01010101; ++ *((int*)& __m256_op0[0]) = 0x01010101; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff0000007f800000; ++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffee0000004c0000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff050000ff3c0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00f9000000780000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffa80000ff310000; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffee0000004c0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff050000ff3c0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00f9000000780000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffa80000ff310000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffbfc0ffffbfc0; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffbfc0ffffbfc0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000032; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff000000ff000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff000000ff000000; ++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff000000ff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff000000ff000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000ff050000ff3c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000fff90000ff78; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffa80000ff31; ++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff050000ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fff90000ff78; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffa80000ff31; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff050000ff3c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000fff90000ff78; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffa80000ff31; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256i_result[3]) = 0x8011ffae800c000c; ++ *((unsigned long*)& __m256i_result[2]) = 0x00baff050083ff3c; ++ *((unsigned long*)& __m256i_result[1]) = 0x80b900b980380038; ++ *((unsigned long*)& __m256i_result[0]) = 0x0017ffa8008eff31; ++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op2[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op2[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0fff0fff0fff0fff; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x0fff0fff0fff0fff; ++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff000000010000; ++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000032; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000032; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff000000010000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8011ffae800c000c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00baff050083ff3c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x80b900b980380038; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0017ffa8008eff31; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff800c000c; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000084ff3c; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff80380038; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000008fff31; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000ff050000ff3c; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000fff90000ff78; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffa80000ff31; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffff0000; ++ *((int*)& __m256_op0[4]) = 0xffff0000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffff0000; ++ *((int*)& __m256_op0[0]) = 0xffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0b085bfc00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0b004bc000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0b085bfc00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0b004bc000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0404010008080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0408010008080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0404010008080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0408010008080808; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8011ffae800c000c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00baff050083ff3c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80b900b980380038; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0017ffa8008eff31; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff0000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010001; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x00003fc00000428a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0fff0fff0fff0fff; ++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x0000ff00; ++ *((int*)& __m128_op1[3]) = 0x40404040; ++ *((int*)& __m128_op1[2]) = 0x40404040; ++ *((int*)& __m128_op1[1]) = 0x40404040; ++ *((int*)& __m128_op1[0]) = 0x40404040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff050000ff3c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000fff90000ff78; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffa80000ff31; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0b085bfc00000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0b004bc000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0b085bfc00000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0b004bc000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long*)& __m128i_op1[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0fff0fff7f800fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xf001f0010101f002; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x35); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0404010008080808; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0408010008080808; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0404010008080808; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0408010008080808; ++ 
*((int*)& __m256_result[7]) = 0x38808000; ++ *((int*)& __m256_result[6]) = 0x37800000; ++ *((int*)& __m256_result[5]) = 0x39010000; ++ *((int*)& __m256_result[4]) = 0x39010000; ++ *((int*)& __m256_result[3]) = 0x38808000; ++ *((int*)& __m256_result[2]) = 0x37800000; ++ *((int*)& __m256_result[1]) = 0x39010000; ++ *((int*)& __m256_result[0]) = 0x39010000; ++ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3880800037800000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3901000039010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3880800037800000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3901000039010000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003fc00000428a; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0006ffff0004ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002ffff0000ffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff7f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002fffefffd0001; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; ++ 
++  __m128i_out = __lsx_vmini_h(__m128i_op0,2);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0xffffffffffffffff;
++  int_out = __lsx_vpickve2gr_w(__m128i_op0,0x1);
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0xe6e6e6e6e6e6e6e6;
++  *((unsigned long*)& __m128i_result[0]) = 0xe6e6e6e6e6e6e6e6;
++  __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x19);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0xf001f0010101f002;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0404010008080808;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0408010008080808;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0404010008080808;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0408010008080808;
++  *((unsigned long*)& __m256i_result[3]) = 0x0505070804040404;
++  *((unsigned long*)& __m256i_result[2]) = 0x0504070804040404;
++  *((unsigned long*)& __m256i_result[1]) = 0x0505070804040404;
++  *((unsigned long*)& __m256i_result[0]) = 0x0504070804040404;
++  __m256i_out = __lasx_xvclz_b(__m256i_op0);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_op1[0]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_result[1]) = 0x1202120212021202;
++  *((unsigned long*)& __m128i_result[0]) = 0x1202120212021202;
++  __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0002fffefffd0001;
++  *((unsigned long*)& __m128i_op1[1]) = 0x1202120212021202;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1202120212021202;
++  *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long*)& __m128i_result[0]) = 0x0202fe02fd020102;
++  __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000ff000000ffff;
++  *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000ff000000ffff;
++  *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x1202120212021202;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1202120212021202;
++  *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x2);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0505070804040404;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0504070804040404;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0505070804040404;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0504070804040404;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0283038402020202;
++  *((unsigned long*)& __m256i_result[2]) = 0x0282038402020202;
++  *((unsigned long*)& __m256i_result[1]) = 0x0283038402020202;
++  *((unsigned long*)& __m256i_result[0]) = 0x0282038402020202;
++  __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0202fe02fd020102;
++  *((unsigned long*)& __m128i_result[1]) = 0xfefcfefcfefcfefc;
++  *((unsigned long*)& __m128i_result[0]) = 0xfcfc00fc01fcfdfc;
++  __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xf001f0010101f002;
++  *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0202fe02fd020102;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x000000000202fe02;
++  __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x78);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0505070804040404;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0504070804040404;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0505070804040404;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0504070804040404;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000ff000000ffff;
++  *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000ff000000ffff;
++  *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long*)& __m256i_result[3]) = 0x0504080804030405;
++  *((unsigned long*)& __m256i_result[2]) = 0x0504060904040305;
++  *((unsigned long*)& __m256i_result[1]) = 0x0504080804030405;
++  *((unsigned long*)& __m256i_result[0]) = 0x0504060904040305;
++  __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long*)& __m128i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long*)& __m128i_op1[0]) = 0xfffefffefffefffe;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m128_op0[3]) = 0xfffefffe;
++  *((int*)& __m128_op0[2]) = 0xfffefffe;
++  *((int*)& __m128_op0[1]) = 0xfffefffe;
++  *((int*)& __m128_op0[0]) = 0xfffefffe;
++  *((int*)& __m128_op1[3]) = 0x00000000;
++  *((int*)& __m128_op1[2]) = 0x00000000;
++  *((int*)& __m128_op1[1]) = 0xf001f001;
++  *((int*)& __m128_op1[0]) = 0x0101f002;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0202fe02fd020102;
++  *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400;
++  __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x000000000202fe02;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff00ff;
++  __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1c);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0504080804030405;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0504060904040305;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0504080804030405;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0504060904040305;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000141020;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000141020;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x66);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op2[0]) = 0x1000100012030e02;
++  *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe;
++  __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_op0[0]) = 0x000000000202fe02;
++  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001;
++  *((unsigned long*)& __m128d_op1[0]) = 0xffff00fc0000ff02;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m256_op0[7]) = 0x0000ff00;
++  *((int*)& __m256_op0[6]) = 0x0000ffff;
++  *((int*)& __m256_op0[5]) = 0x000000ff;
++  *((int*)& __m256_op0[4]) = 0x000000ff;
++  *((int*)& __m256_op0[3]) = 0x0000ff00;
++  *((int*)& __m256_op0[2]) = 0x0000ffff;
++  *((int*)& __m256_op0[1]) = 0x000000ff;
++  *((int*)& __m256_op0[0]) = 0x000000ff;
++  *((int*)& __m256_op1[7]) = 0x0000ffee;
++  *((int*)& __m256_op1[6]) = 0x0000ff4c;
++  *((int*)& __m256_op1[5]) = 0x0000ff05;
++  *((int*)& __m256_op1[4]) = 0x0000ff3c;
++  *((int*)& __m256_op1[3]) = 0x0000fff9;
++  *((int*)& __m256_op1[2]) = 0x0000ff78;
++  *((int*)& __m256_op1[1]) = 0x0000ffa8;
++  *((int*)& __m256_op1[0]) = 0x0000ff31;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010100;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010100;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff00fc0000ff02;
++  *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xff01ff040000fffe;
++  __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x000000000202fe02;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000101;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x11);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff3c;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff31;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x5e5e5e5e5e5e5e1c;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x5e5e5e5e5e5e5e10;
++  __m256i_out = __lasx_xvandi_b(__m256i_op0,0x5e);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x00000000fffffeff;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w(__m128i_op0);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab;
++  *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0400040004000400;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0400040004000400;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff;
++  __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000fffffeff;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x00000009ffffff08;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009;
++  __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab;
++  *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8;
++  *((unsigned long*)& __m128i_result[1]) = 0x55aa55c355aa55c4;
++  *((unsigned long*)& __m128i_result[0]) = 0xaa55556f55aaaac1;
++  __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x19);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000141020;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000141020;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_result[2]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_result[1]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_result[0]) = 0x1020102010201020;
++  __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x4);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffc00fd;
++  *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc;
++  __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x4);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h(__m128i_op0,-16);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op0[2]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op0[1]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op0[0]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_result[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xefdfefdfefdfefdf;
++  __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsknz_b(__m128i_op0);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab;
++  *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8;
++  *((unsigned long*)& __m128i_op1[1]) = 0x7ef4002d21fc7001;
++  *((unsigned long*)& __m128i_op1[0]) = 0x28bf02d1ec6a35b2;
++  *((unsigned long*)& __m128i_result[1]) = 0x2a7b7c9260f90ee2;
++  *((unsigned long*)& __m128i_result[0]) = 0x1b1c6cdfd57f5736;
++  __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d;
++  __m256i_out = __lasx_xvmini_bu(__m256i_op0,0xd);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x6c);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op0[2]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op0[1]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op0[0]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long*)& __m256i_op2[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op2[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op2[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op2[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_result[3]) = 0x1031146010201020;
++  *((unsigned long*)& __m256i_result[2]) = 0x1020102010201020;
++  *((unsigned long*)& __m256i_result[1]) = 0x1031146010201020;
++  *((unsigned long*)& __m256i_result[0]) = 0x1020102010201020;
++  __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvori_b(__m256i_op0,0x2c);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128d_op0[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128d_op0[0]) = 0x1000100010001000;
++  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_result[1]) = 0x1000100010001000;
++  *((unsigned long*)& __m128d_result[0]) = 0x1000100010001000;
++  __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
++  ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++  *((int*)& __m128_op0[3]) = 0x00000000;
++  *((int*)& __m128_op0[2]) = 0x00000000;
++  *((int*)& __m128_op0[1]) = 0x00000000;
++  *((int*)& __m128_op0[0]) = 0x00000000;
++  *((int*)& __m128_op1[3]) = 0x00000000;
++  *((int*)& __m128_op1[2]) = 0x00000000;
++  *((int*)& __m128_op1[1]) = 0x00000000;
++  *((int*)& __m128_op1[0]) = 0x00000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x55aa55c3d5aa55c4;
++  *((unsigned long*)& __m128i_op0[0]) = 0xaa55556fd5aaaac1;
++  *((unsigned long*)& __m128i_result[1]) = 0x000000000000000c;
++  *((unsigned long*)& __m128i_result[0]) = 0xaa55556fd5aaaac1;
++  __m128i_out = __lsx_vmini_d(__m128i_op0,12);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x1a);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0d0d0d0d00000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0d0d0d0d00000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long*)& __m256i_result[3]) = 0x02407a3c00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0d0cf2f30d0cf2f3;
++  *((unsigned long*)& __m256i_result[1]) = 0x02407a3c00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0d0cf2f30d0cf2f3;
++  __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h(__m128i_op0);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du(__m128i_op0,0x25);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b(__m256i_op0,0x86);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x2a7b7c9260f90ee2;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1b1c6cdfd57f5736;
++  *((unsigned long*)& __m128i_result[1]) = 0x153e3e49307d0771;
++  *((unsigned long*)& __m128i_result[0]) = 0x0d8e36706ac02b9b;
++  __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((int*)& __m128_op0[3]) = 0x153e3e49;
++  *((int*)& __m128_op0[2]) = 0x307d0771;
++  *((int*)& __m128_op0[1]) = 0x0d8e3670;
++  *((int*)& __m128_op0[0]) = 0x6ac02b9b;
++  *((int*)& __m128_op1[3]) = 0x55aa55c3;
++  *((int*)& __m128_op1[2]) = 0xd5aa55c4;
++  *((int*)& __m128_op1[1]) = 0xaa55556f;
++  *((int*)& __m128_op1[0]) = 0xd5aaaac1;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128d_op0[1]) = 0x0000100000000000;
++  *((unsigned long*)& __m128d_op0[0]) = 0x1000100000001000;
++  *((unsigned long*)& __m128d_op1[1]) = 0x0000100000000000;
++  *((unsigned long*)& __m128d_op1[0]) = 0x1000100000001000;
++  *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1);
++  ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x02407a3c00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0d0cf2f30d0cf2f3;
++  *((unsigned long*)& __m256i_op0[1]) = 0x02407a3c00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0d0cf2f30d0cf2f3;
++  *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0xd);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0010100000100000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1000100000101000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0010001000000010;
++  __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m256_op0[7]) = 0x00000000;
++  *((int*)& __m256_op0[6]) = 0x00000000;
++  *((int*)& __m256_op0[5]) = 0x00000000;
++  *((int*)& __m256_op0[4]) = 0x00000000;
++  *((int*)& __m256_op0[3]) = 0x00000000;
++  *((int*)& __m256_op0[2]) = 0x00000000;
++  *((int*)& __m256_op0[1]) = 0x00000000;
++  *((int*)& __m256_op0[0]) = 0x00000000;
++  *((int*)& __m256_op1[7]) = 0xefdfefdf;
++  *((int*)& __m256_op1[6]) = 0x00000000;
++  *((int*)& __m256_op1[5]) = 0xefdfefdf;
++  *((int*)& __m256_op1[4]) = 0xefdfefdf;
++  *((int*)& __m256_op1[3]) = 0xefdfefdf;
++  *((int*)& __m256_op1[2]) = 0x00000000;
++  *((int*)& __m256_op1[1]) = 0xefdfefdf;
++  *((int*)& __m256_op1[0]) = 0xefdfefdf;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0010001000000010;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000080000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x11);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef;
++  __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000ff050000ff3c;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000fff90000ff78;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000ffa80000ff31;
++  *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_result[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff0fff0;
++  __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x80000000307d0771;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0d8e36706ac02b9b;
++  *((unsigned long*)& __m128i_op1[1]) = 0x80000000307d0771;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0d8e36706ac02b9b;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0010100000100000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x1000100000101000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0010100000100000;
++  *((unsigned long*)& __m128i_result[0]) = 0x1000100000101000;
++  __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((int*)& __m256_op0[7]) = 0x00000000;
++  *((int*)& __m256_op0[6]) = 0x00000000;
++  *((int*)& __m256_op0[5]) = 0x00000000;
++  *((int*)& __m256_op0[4]) = 0x00000000;
++  *((int*)& __m256_op0[3]) = 0x00000000;
++  *((int*)& __m256_op0[2]) = 0x00000000;
++  *((int*)& __m256_op0[1]) = 0x00000000;
++  *((int*)& __m256_op0[0]) = 0x00000000;
++  *((int*)& __m256_op1[7]) = 0x00000000;
++  *((int*)& __m256_op1[6]) = 0x00000000;
++  *((int*)& __m256_op1[5]) = 0x00000000;
++  *((int*)& __m256_op1[4]) = 0x00000000;
++  *((int*)& __m256_op1[3]) = 0x00000000;
++  *((int*)& __m256_op1[2]) = 0x00000000;
++  *((int*)& __m256_op1[1]) = 0x00000000;
++  *((int*)& __m256_op1[0]) = 0x00000000;
++  *((int*)& __m256_op2[7]) = 0x00000000;
++  *((int*)& __m256_op2[6]) = 0x00000000;
++  *((int*)& __m256_op2[5]) = 0x00000000;
++  *((int*)& __m256_op2[4]) = 0x00000000;
++  *((int*)& __m256_op2[3]) = 0x00000000;
++  *((int*)& __m256_op2[2]) = 0x00000000;
++  *((int*)& __m256_op2[1]) = 0x00000000;
++  *((int*)& __m256_op2[0]) = 0x00000000;
++  *((int*)& __m256_result[7]) = 0x80000000;
++  *((int*)& __m256_result[6]) = 0x80000000;
++  *((int*)& __m256_result[5]) = 0x80000000;
++  *((int*)& __m256_result[4]) = 0x80000000;
++  *((int*)& __m256_result[3]) = 0x80000000;
++  *((int*)& __m256_result[2]) = 0x80000000;
++  *((int*)& __m256_result[1]) = 0x80000000;
++  *((int*)& __m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
++  ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffff80df00000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f7f;
++  __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256d_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256d_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256d_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256d_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000000d46cdc13;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x00000000d46cdc13;
++  __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_result[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long*)& __m256i_result[2]) = 0xdbcbdbcbdbcbdbcb;
++  *((unsigned long*)& __m256i_result[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long*)& __m256i_result[0]) = 0xdbcbdbcbdbcbdbcb;
++  __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x14);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((int*)& __m128_op0[3]) = 0xa5c4c774;
++  *((int*)& __m128_op0[2]) = 0x856ba83b;
++  *((int*)& __m128_op0[1]) = 0x8003caef;
++  *((int*)& __m128_op0[0]) = 0x54691124;
++  *((unsigned long*)& __m128i_result[1]) = 0xbf800000bf800000;
++  *((unsigned long*)& __m128i_result[0]) = 0xbf80000054691124;
++  __m128i_out = __lsx_vfrintrm_s(__m128_op0);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m256_op0[7]) = 0xfff0fff0;
++  *((int*)& __m256_op0[6]) = 0xff01ff01;
++  *((int*)& __m256_op0[5]) = 0xfff0fff0;
++  *((int*)& __m256_op0[4]) = 0xfff0fff0;
++  *((int*)& __m256_op0[3]) = 0xfff0fff0;
++  *((int*)& __m256_op0[2]) = 0xff01ff01;
++  *((int*)& __m256_op0[1]) = 0xfff0fff0;
++  *((int*)& __m256_op0[0]) = 0xfff0fff0;
++  *((int*)& __m256_op1[7]) = 0xffefffef;
++  *((int*)& __m256_op1[6]) = 0x00000000;
++  *((int*)& __m256_op1[5]) = 0xffefffef;
++  *((int*)& __m256_op1[4]) = 0xffefffef;
++  *((int*)& __m256_op1[3]) = 0xffefffef;
++  *((int*)& __m256_op1[2]) = 0x00000000;
++  *((int*)& __m256_op1[1]) = 0xffefffef;
++  *((int*)& __m256_op1[0]) = 0xffefffef;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef;
++  __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000060000000;
++  __m128i_out = __lsx_vslli_w(__m128i_op0,0x1d);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xa5c4c774856ba837;
++  *((unsigned long*)& __m128i_op1[0]) = 0x2a569f8081c3bbe9;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417;
++  __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m128_op0[3]) = 0x00000000;
++  *((int*)& __m128_op0[2]) = 0x00000000;
++  *((int*)& __m128_op0[1]) = 0x00000000;
++  *((int*)& __m128_op0[0]) = 0xd46cdc13;
++  *((int*)& __m128_result[3]) = 0xff800000;
++  *((int*)& __m128_result[2]) = 0xff800000;
++  *((int*)& __m128_result[1]) = 0xff800000;
++  *((int*)& __m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vflogb_s(__m128_op0);
++  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x7ef4002d21fc7001;
++  *((unsigned long*)& __m128i_op0[0]) = 0x28bf02d1ec6a35b2;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417;
++  *((unsigned long*)& __m128i_op2[1]) = 0xff800000ff800000;
++  *((unsigned long*)& __m128i_op2[0]) = 0xff8000007fc00000;
++  *((unsigned long*)& __m128i_result[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long*)& __m128i_result[0]) = 0x28bf0351ec69b5f2;
++  __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffff6080ffff4417;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417;
++  __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_result[3]) = 0xfff0fff0ff01ff14;
++  *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff10003;
++  *((unsigned long*)& __m256i_result[1]) = 0xfff0fff0ff01ff14;
++  *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff10003;
++  __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x13);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long*)& __m128i_op0[0]) = 0x28bf0351ec69b5f2;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417;
++  *((unsigned long*)& __m128i_result[1]) = 0x7ef3ddac21fc5a2c;
++  *((unsigned long*)& __m128i_result[0]) = 0x28bee9edec690869;
++  __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff14;
++  *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff10003;
++  *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff14;
++  *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff10003;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0xfefee0e3fefefe00;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0xfefee0e3fefefe00;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x3);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((int*)& __m128_op0[3]) = 0x7ef400ad;
++  *((int*)& __m128_op0[2]) = 0x21fc7081;
++  *((int*)& __m128_op0[1]) = 0x28bf0351;
++  *((int*)& __m128_op0[0]) = 0xec69b5f2;
++  *((int*)& __m128_op1[3]) = 0xff800000;
++  *((int*)& __m128_op1[2]) = 0xff800000;
++  *((int*)& __m128_op1[1]) = 0xff800000;
++  *((int*)& __m128_op1[0]) = 0x7fc00000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w(__m256i_op0);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((int*)& __m128_op0[3]) = 0x00000000;
++  *((int*)& __m128_op0[2]) = 0xdfa6e0c6;
++  *((int*)& __m128_op0[1]) = 0x00000000;
++  *((int*)& __m128_op0[0]) = 0xd46cdc13;
++  *((int*)& __m128_op1[3]) = 0x00000000;
++  *((int*)& __m128_op1[2]) = 0x00000000;
++  *((int*)& __m128_op1[1]) = 0x00000000;
++  *((int*)& __m128_op1[0]) = 0x00000000;
++  *((int*)& __m128_result[3]) = 0x00000000;
++  *((int*)& __m128_result[2]) = 0x00000000;
++  *((int*)& __m128_result[1]) = 0x00000000;
++  *((int*)& __m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1);
++  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_op0[0]) = 0x000000002c002400;
++  *((unsigned long*)& __m128d_op1[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long*)& __m128d_op1[0]) = 0x28bf0351ec69b5f2;
++  *((int*)& __m128_result[3]) = 0x00000000;
++  *((int*)& __m128_result[2]) = 0x00000000;
++  *((int*)& __m128_result[1]) = 0x7f800000;
++  *((int*)& __m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff0;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff0;
++  __m256i_out = __lasx_vext2xv_d_b(__m256i_op0);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long*)& __m128i_op1[0]) = 0x28bf0351ec69b5f2;
++  *((unsigned long*)& __m128i_result[1]) = 0x000000ad00007081;
++  *((unsigned long*)& __m128i_result[0]) = 0x000003510000b5f2;
++  __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long*)& __m128i_op1[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long*)& __m128i_op1[0]) = 0x28bf0351ec69b5f2;
++  *((unsigned long*)& __m128i_result[1]) = 0xdfa6e0c6d46cdc13;
++  *((unsigned long*)& __m128i_result[0]) = 0x21fc7081ec69b5f2;
++  __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xdfa6e0c6d46cdc13;
++  *((unsigned long*)& __m128i_op0[0]) = 0x21fc7081ec69b5f2;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x000000002c002400;
++  *((unsigned long*)& __m128i_op2[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_op2[0]) = 0xffff6080ffff4417;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0a0a0a0a0a;
++  __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0xa);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m128_op0[3]) = 0xffffb96b;
++  *((int*)& __m128_op0[2]) = 0xffff57c9;
++  *((int*)& __m128_op0[1]) = 0xffff6080;
++  *((int*)& __m128_op0[0]) = 0xffff4417;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417;
++  __m128i_out = __lsx_vfrintrp_s(__m128_op0);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m128_op0[3]) = 0x000000ad;
++  *((int*)& __m128_op0[2]) = 0x00007081;
++  *((int*)& __m128_op0[1]) = 0x00000351;
++  *((int*)& __m128_op0[0]) = 0x0000b5f2;
++  *((int*)& __m128_result[3]) = 0x00000000;
++  *((int*)& __m128_result[2]) = 0x00000000;
++  *((int*)& __m128_result[1]) = 0x00000000;
++  *((int*)& __m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrint_s(__m128_op0);
++  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++  *((int*)& __m128_op0[3]) = 0x00000000;
++  *((int*)& __m128_op0[2]) = 0x00000000;
++  *((int*)& __m128_op0[1]) = 0x00000000;
++  *((int*)& __m128_op0[0]) = 0x00000000;
++  *((int*)& __m128_op1[3]) = 0x00000000;
++  *((int*)& __m128_op1[2]) = 0x00000000;
++  *((int*)& __m128_op1[1]) = 0x00000000;
++  *((int*)& __m128_op1[0]) = 0x00000000;
++  *((int*)& __m128_op2[3]) = 0x00000000;
++  *((int*)& __m128_op2[2]) = 0x00000000;
++  *((int*)& __m128_op2[1]) = 0x7f800000;
++  *((int*)& __m128_op2[0]) = 0x00000000;
++  *((int*)& __m128_result[3]) = 0x80000000;
++  *((int*)& __m128_result[2]) = 0x80000000;
++  *((int*)& __m128_result[1]) = 0x7f800000;
++  *((int*)& __m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2);
++  ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020;
++  *((unsigned long*)& __m256i_result[3]) = 0xff00ff0000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long*)& __m256i_result[1]) = 0xff00ff0000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ff00;
++  __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffff6080ffff4417;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_w(__m128i_op0,-7);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x64);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_op1[3]) = 0xff00ff0000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long*)& __m256i_op1[1]) = 0xff00ff0000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef;
++  *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000;
++  *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef;
++  __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((int*)& __m256_op0[7]) = 0xfefee0e3;
++  *((int*)& __m256_op0[6]) = 0xfefefe00;
++  *((int*)& __m256_op0[5]) = 0x00000000;
++  *((int*)& __m256_op0[4]) = 0x00000000;
++  *((int*)& __m256_op0[3]) = 0xfefee0e3;
++  *((int*)& __m256_op0[2]) = 0xfefefe00;
++  *((int*)& __m256_op0[1]) = 0x00000000;
++  *((int*)& __m256_op0[0]) = 0x00000000;
++  *((int*)& __m256_op1[7]) = 0x00000000;
++  *((int*)& __m256_op1[6]) = 0x00000000;
++  *((int*)& __m256_op1[5]) = 0x00000000;
++  *((int*)& __m256_op1[4]) = 0x00000000;
++  *((int*)& __m256_op1[3]) = 0x00000000;
++  *((int*)& __m256_op1[2]) = 0x00000000;
++  *((int*)& __m256_op1[1]) = 0x00000000;
++  *((int*)& __m256_op1[0]) = 0x00000000;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  unsigned_int_result = 0x00000000000000ff;
++  unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x9);
++  *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x7f80000080000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x800080007f008000;
++  __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0x0a0aa9890a0ac5f3;
++  __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0x24342434ffff2435;
++  *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[0]) = 0x24342434ffff2435;
++  __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256d_op0[2]) = 0x24342434ffff2435;
++  *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256d_op0[0]) = 0x24342434ffff2435;
++  *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x15);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x800080007f008000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0a0aa9890a0ac5f3;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffff000;
++  __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x7);
++  ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffff000;
++  *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_op1[0]) = 0x0000000060000000;
++  *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long*)& __m128d_result[0]) = 0xfffffffffffff000;
++  __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++  ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long*)& __m256i_op0[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long*)& __m256i_op0[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long*)& __m256i_op0[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x2535253514141414; ++ *((unsigned long*)& __m256i_result[2]) = 0x2535253500002535; ++ *((unsigned long*)& __m256i_result[1]) = 0x2535253514141414; ++ *((unsigned long*)& __m256i_result[0]) = 0x2535253500002535; ++ 
__m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b(__m256i_op0,0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000fe; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000080000001000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000080000001000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000080000001000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000800; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000080000001000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long*)& __m256i_op0[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long*)& __m256i_op0[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long*)& __m256i_op0[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000f0000000f000; ++ __m256i_out = 
__lasx_xvrotri_w(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x009c3e201e39e7e3; ++ *((unsigned long*)& __m256i_op0[2]) = 0x87c1135043408bba; ++ *((unsigned long*)& __m256i_op0[1]) = 0x009c3e201e39e7e3; ++ *((unsigned long*)& __m256i_op0[0]) = 0x87c1135043408bba; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000f0000000f000; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x35); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0010000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0010000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0008000000000000; ++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x3b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128i_result[0]) = 0x3918371635143312; ++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000f0000000f000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[0]) = 0x6363636363636363; ++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128d_op0[0]) = 0x3918371635143312; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000af555555555; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000af555555555; ++ *((unsigned long*)& __m128d_result[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128d_result[0]) = 0x3918371635143312; ++ __m128d_out = 
__lsx_vfmaxa_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xcd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x10ff10ff10ff10ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010000f0000000f; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010000f0000000f; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x480f7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x3e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x21201f1e19181716; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000af555555555; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000af555555555; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000af5; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000af5; ++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; 
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x21201f1e1d001b25; ++ *((unsigned long*)& __m128i_result[0]) = 0x191817161514131d; ++ __m128i_out = __lsx_vaddi_du(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0003000900050007; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d001b25; ++ *((unsigned long*)& __m128i_op1[0]) = 0x191817161514131d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001e8e1d8; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000e400000001; ++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001e8e1d8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000e400000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001e8e1d8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000e400000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000e4e4; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000101; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000109000000c9; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000001d5d4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000150d707009; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x03f1e3d28b1a8a1a; ++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1e0000001e002000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1e0000001e002000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x03f1e3d28b1a8a1a; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000001d5d4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000150d707009; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffe2a2c; ++ *((unsigned long*)& __m128i_result[0]) = 0x03f1e3bd80000000; ++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001918000017160; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001514000013120; ++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3918371635143312; ++ *((unsigned long*)& __m128i_result[1]) = 0x21011f3f193d173b; ++ *((unsigned long*)& __m128i_result[0]) = 0xff39ff37ff35ff33; ++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00005dcbe7e830c0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x03f21e0114bf19da; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x18e2184858682868; ++ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000022;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000022;
++ __m256i_out = __lasx_xvmskltz_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x21011f3f193d173b;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff39ff37ff35ff33;
++ *((unsigned long*)& __m128i_result[1]) = 0x00fe008e009e0071;
++ *((unsigned long*)& __m128i_result[0]) = 0x001c006f00c4008d;
++ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe;
++ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fffc0000fffc;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffc0000fffc;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x21011f3f193d173b;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff39ff37ff35ff33;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff;
++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000;
++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000022;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000022;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000003f200001e01;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000014bf000019da;
++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c99aed5b88fcf;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7c3650c5f79a61a3;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00005dcbe7e830c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000005dcb;
++ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00005dcbe7e830c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000001fffff59;
++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x63);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00005dcbe7e830c0;
++ *((unsigned long*)& __m128i_op0[0]) = 0x03f21e0114bf19da;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000003f200001e01;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000014bf000019da;
++ *((unsigned long*)& __m128i_result[1]) = 0x0005fe0300010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100010001;
++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0xfffffff1;
++ *((int*)& __m256_op0[6]) = 0xfffffff1;
++ *((int*)& __m256_op0[5]) = 0xfffffff1;
++ *((int*)& __m256_op0[4]) = 0xfffffff1;
++ *((int*)& __m256_op0[3]) = 0xfffffff1;
++ *((int*)& __m256_op0[2]) = 0xfffffff1;
++ *((int*)& __m256_op0[1]) = 0xfffffff1;
++ *((int*)& __m256_op0[0]) = 0xfffffff1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000022;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000022;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000045ff740023;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000001fffc0001;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000045ff740023;
++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xacc8c794af2caf01;
++ *((unsigned long*)& __m128i_op0[0]) = 0xa91e2048938c40f0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00fd0101;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00fd0101;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00fd0101;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00fd0101;
++ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffff1fffffff1;
++ *((unsigned long*)& __m256d_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000001fffff59;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000aaabffff;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000abff0000abff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000abff0000abff;
++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_result[0]) = 0x1f5533a694f902c0;
++ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000023;
++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x15d926c7;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x0000e41b;
++ *((int*)& __m128_op1[3]) = 0xfffffacd;
++ *((int*)& __m128_op1[2]) = 0xb6dbecac;
++ *((int*)& __m128_op1[1]) = 0x1f5533a6;
++ *((int*)& __m128_op1[0]) = 0x94f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000015d926c7;
++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000e41b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff56ff55ff01ff01;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff56ff55ff01ff01;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f7f7f7f;
++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000001fffe;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000001fffe;
++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x1223dabf;
++ *((int*)& __m128_op0[2]) = 0x4c3b3549;
++ *((int*)& __m128_op0[1]) = 0x8e8f8626;
++ *((int*)& __m128_op0[0]) = 0xf15be124;
++ *((int*)& __m128_op1[3]) = 0xfffffacd;
++ *((int*)& __m128_op1[2]) = 0xb6dbecac;
++ *((int*)& __m128_op1[1]) = 0x1f5533a6;
++ *((int*)& __m128_op1[0]) = 0x94f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffadffedbfefe;
++ *((unsigned long*)& __m128i_result[0]) = 0x5f5f7bfedefb5ada;
++ __m128i_out = __lsx_vori_b(__m128i_op0,0x5a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000022ffdd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000022ffdd;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000f4b6ff23;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000f4b6ff23;
++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0005fe0300010101;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe03000101010000;
++ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x007f807f007e8080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x007f807f007e806f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x007f807f007e8080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x007f807f007e806f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000007e8080;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007e8092;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000007e8080;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007e8092;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xfffffadf;
++ *((int*)& __m128_op0[2]) = 0xfedbfefe;
++ *((int*)& __m128_op0[1]) = 0x5f5f7bfe;
++ *((int*)& __m128_op0[0]) = 0xdefb5ada;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000;
++ __m128i_out = __lsx_vftintrp_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xff56ff55;
++ *((int*)& __m256_op0[4]) = 0xff01ff01;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xff56ff55;
++ *((int*)& __m256_op0[0]) = 0xff01ff01;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x0000abff;
++ *((int*)& __m256_op1[4]) = 0x0000abff;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x0000abff;
++ *((int*)& __m256_op1[0]) = 0x0000abff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xff56ff55ff01ff01;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xff56ff55ff01ff01;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000023;
++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00aa00ab00ff00ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00aa00ab00ff00ff;
++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00aa00ab00ff00ff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00aa00ab00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007e8080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007e8092;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007e8080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007e8092;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffda6f;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffe3d7;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffda6e;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffe3d6;
++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffda6e;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffe3d6;
++ *((unsigned long*)& __m128i_op1[1]) = 0xeeb1e4f4bc3763f3;
++ *((unsigned long*)& __m128i_op1[0]) = 0x6f5edf5ada6fe3d7;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffe3d6;
++ *((unsigned long*)& __m128i_result[0]) = 0xeeb1e4f4bc3763f3;
++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x23);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000043cf26c7;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000e31d4cae8636;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000021e79364;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000718ea657431b;
++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x2b2a292827262524;
++ *((unsigned long*)& __m256i_op1[2]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_op1[1]) = 0x2b2a292827262524;
++ *((unsigned long*)& __m256i_op1[0]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xeeb1e4f43c3763f3;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff5a6fe3d7;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000021e79364;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000718ea657431b;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000006ca193ec;
++ *((unsigned long*)& __m128i_result[0]) = 0x00008e72b5b94cad;
++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffff60ca7104649;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff790a15db63d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffff60ca710464a;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff790a15db63e;
++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvffint_s_w(__m256i_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ int_op0 = 0x0000000020202020;
++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020;
++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2b2a292827262524;
++ *((unsigned long*)& __m256i_op0[2]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x2b2a292827262524;
++ *((unsigned long*)& __m256i_op0[0]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_result[2]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_result[0]) = 0x232221201f1e1d1c;
++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0xbd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007e8080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fdda7dc4;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007e8080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fdda7dc4;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff827f80;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0226823c;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff827f80;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0226823c;
++ __m256i_out = __lasx_xvneg_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_vext2xv_w_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffff60ca7104649;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff790a15db63d;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffc00ffde4000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfe857400fed8f400;
++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5a6f5c53ebed3faa;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa36aca4435b8b8e1;
++ *((unsigned long*)& __m128i_result[1]) = 0x5a6f61865d36d3aa;
++ *((unsigned long*)& __m128i_result[0]) = 0x7bea6962a0bfb621;
++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffda6f;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffe3d7;
++ *((unsigned long*)& __m128i_result[1]) = 0xfefffffffeffda6f;
++ *((unsigned long*)& __m128i_result[0]) = 0xfefffffffeffe3d7;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff827f80;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0226823c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff827f80;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0226823c;
++ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_op0[2]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_op0[0]) = 0x232221201f1e1d1c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_result[2]) = 0x23222120171e151c;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_result[0]) = 0x23222120171e151c;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5a6f5c53ebed3faa;
++ *((unsigned long*)& __m128i_op0[0]) = 0xa36aca4435b8b8e1;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5a6f5c53ebed3faa;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa36aca4435b8b8e1;
++ *((unsigned long*)& __m128i_result[1]) = 0x5c535c533faa3faa;
++ *((unsigned long*)& __m128i_result[0]) = 0xca44ca44b8e1b8e1;
++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x201fdfe0201fdfe0;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x201fdfe0201fdfe0;
++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000021e79364;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000718ea657431b;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfefffffffeffda6f;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfefffffffeffe3d7;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff0000ff86;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x201fdfe0201fdfe0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x201fdfe0201fdfe0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_op1[0]) = 0x2020202020202020;
++ *((unsigned long*)& __m256i_result[3]) = 0x1010101010101013;
++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_result[1]) = 0x1010101010101013;
++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010;
++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff0000ff86;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffa6ff91fdd8ef77;
++ *((unsigned long*)& __m128i_op1[0]) = 0x061202bffb141c38;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000005a00000228;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffff9ee000004ec;
++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_op0[2]) = 0x23222120171e151c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000027262524;
++ *((unsigned long*)& __m256i_op0[0]) = 0x23222120171e151c;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x201fdfe0201fdfe0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x201fdfe0201fdfe0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010127272525;
++ *((unsigned long*)& __m256i_result[2]) = 0x23a2a121179e951d;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010127272525;
++ *((unsigned long*)& __m256i_result[0]) = 0x23a2a121179e951d;
++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x62cbf96e4acfaf40;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf0bc9a5278285a4a;
++ *((int*)& __m128_result[3]) = 0xc6178000;
++ *((int*)& __m128_result[2]) = 0xbb4a4000;
++ *((int*)& __m128_result[1]) = 0x47050000;
++ *((int*)& __m128_result[0]) = 0x43494000;
++ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010127272525;
++ *((unsigned long*)& __m256d_op1[2]) = 0x23a2a121179e951d;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010127272525;
++ *((unsigned long*)& __m256d_op1[0]) = 0x23a2a121179e951d;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_b(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0101010127272525;
++ *((unsigned long*)& __m256i_op2[2]) = 0x23a2a121179e951d;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0101010127272525;
++ *((unsigned long*)& __m256i_op2[0]) = 0x23a2a121179e951d;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffff0001;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfffffffffdd97dc4;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffff0001;
++ *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffdd97dc4;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x1010100f10100fd4;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x1010100f10100fd4;
++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffa6ff91fdd8ef77;
++ *((unsigned long*)& __m128i_op0[0]) = 0x061202bffb141c38;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_b(__m128i_op0,13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff0000ff86;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x010101fe0101fe87;
++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000005a00000228;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffff9ee000004ec;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x1f54e0ab00000000;
++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffa6ff91fdd8ef77;
++ *((unsigned long*)& __m128d_op0[0]) = 0x061202bffb141c38;
++ *((unsigned long*)& __m128d_op1[1]) = 0xfefffffffed08f77;
++ *((unsigned long*)& __m128d_op1[0]) = 0x8160cdd2f365ed0d;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffa6ff91fdd8ef77;
++ *((unsigned long*)& __m128i_op0[0]) = 0x061202bffb141c38;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x010101fe0101fe87;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000004000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x3a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x01010101;
++ *((int*)& __m128_op0[2]) = 0x01010101;
++ *((int*)& __m128_op0[1]) = 0x010101fe;
++ *((int*)& __m128_op0[0]) = 0x0101fe87;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000004000000002;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x5555410154551515;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0004455501500540;
++ *((unsigned long*)& __m128d_result[1]) = 0xd555410154551515;
++ *((unsigned long*)& __m128d_result[0]) = 0x8004455501500540;
++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000023a20000a121;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000179e0000951d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000023a20000a121;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000179e0000951d;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100;
++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x62cbf96e;
++ *((int*)& __m128_op0[2]) = 0x4acfaf40;
++ *((int*)& __m128_op0[1]) = 0xf0bc9a52;
++ *((int*)& __m128_op0[0]) = 0x78285a4a;
++ *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40;
++ *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a;
++ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a;
++ *((unsigned long*)& __m128i_op2[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op2[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_result[1]) = 0x62cbf84c02cbac00;
++ *((unsigned long*)& __m128i_result[0]) = 0x1014120210280240;
++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff0001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffdd97dc4;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff0001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffdd97dc4;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0001;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffdd97dc4;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0001;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffdd97dc4;
++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
++ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000023a20000a121;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000179e0000951d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000023a20000a121;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000179e0000951d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000125100005111;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000c4f00004b0f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000125100005111;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000c4f00004b0f;
++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op2[0]) = 0x010101fe0101fe87;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101fe870101fe87;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101fe8700000000;
++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0101fe870101fe87;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0101fe8700000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0101fe870101fe87;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0101fe8700000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x62cbf96e4acfaf40;
++ *((unsigned long*)& __m128d_op1[0]) = 0xf0bc9a5278285a4a;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101fe870101fe87;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101fe8700000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000226200005111;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000165e0000480d;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000226200005111;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000165e0000480d;
++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x01010101;
++ *((int*)& __m256_op0[6]) = 0x27272525;
++ *((int*)& __m256_op0[5]) = 0x23a2a121;
++ *((int*)& __m256_op0[4]) = 0x179e951d;
++ *((int*)& __m256_op0[3]) = 0x01010101;
++ *((int*)& __m256_op0[2]) = 0x27272525;
++ *((int*)& __m256_op0[1]) = 0x23a2a121;
++ *((int*)& __m256_op0[0]) = 0x179e951d;
++ *((int*)& __m256_op1[7]) = 0x00001251;
++ *((int*)& __m256_op1[6]) = 0x00005111;
++ *((int*)& __m256_op1[5]) = 0x00000c4f;
++ *((int*)& __m256_op1[4]) = 0x00004b0f;
++ *((int*)& __m256_op1[3]) = 0x00001251;
++ *((int*)& __m256_op1[2]) = 0x00005111;
++ *((int*)& __m256_op1[1]) = 0x00000c4f;
++ *((int*)& __m256_op1[0]) = 0x00004b0f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
++ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00002262;
++ *((int*)& __m256_op0[6]) = 0x00005111;
++ *((int*)& __m256_op0[5]) = 0x0000165e;
++ *((int*)& __m256_op0[4]) = 0x0000480d;
++ *((int*)& __m256_op0[3]) = 0x00002262;
++ *((int*)& __m256_op0[2]) = 0x00005111;
++ *((int*)& __m256_op0[1]) = 0x0000165e;
++ *((int*)& __m256_op0[0]) = 0x0000480d;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40;
++ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x62cbf96e4acfaf40;
++ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x40);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffb6d01f5f94f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001f50000;
++ __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1010100f10100fd4;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1010100f10100fd4;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000051;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000101000000fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000051;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000101000000fff;
++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000051;
++ *((int*)& __m256_op1[5]) = 0x00001010;
++ *((int*)& __m256_op1[4]) = 0x00000fff;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000051;
++ *((int*)& __m256_op1[1]) = 0x00001010;
++ *((int*)& __m256_op1[0]) = 0x00000fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000051;
++ *((int*)& __m256_op0[5]) = 0x00001010;
++ *((int*)& __m256_op0[4]) = 0x00000fff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000051;
++ *((int*)& __m256_op0[1]) = 0x00001010;
++ *((int*)& __m256_op0[0]) = 0x00000fff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00220021004a007e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00220021004a007e;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffdfffffffdffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffddffdeffb5ff8d;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffdfffffffdffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffddffdeffb5ff8d;
++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00ff00ff;
++ *((int*)& __m128_op0[2]) = 0x00ff00ff;
++ *((int*)& __m128_op0[1]) = 0x62cbf96e;
++ *((int*)& __m128_op0[0]) = 0x4acfaf40;
++ *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x62cbf96e4acfaf40;
++ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001f50000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffe0b0000;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000002362;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000010000175d;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000002362;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000010000175d;
++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00ff00ff;
++ *((int*)& __m128_op0[2]) = 0x00ff00ff;
++ *((int*)& __m128_op0[1]) = 0x62cbf96e;
++ *((int*)& __m128_op0[0]) = 0x4acfaf40;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1010100f10100fd4;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1010100f10100fd4;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011;
++ *((unsigned long*)&
__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffeeffaf; ++ *((unsigned long*)& __m256i_result[2]) = 0x1010100f10100fd4; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffeeffaf; ++ *((unsigned long*)& __m256i_result[0]) = 0x1010100f10100fd4; ++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffdfffffffdffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffdfffffffdffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffddffdeffb5ff8d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffeeffaf; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1010100f10100fd4; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffeeffaf; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1010100f10100fd4; ++ *((unsigned long*)& __m256i_op2[3]) = 0xfffdfffffffdffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long*)& __m256i_op2[1]) = 0xfffdfffffffdffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffddffdeffb5ff8d; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffcffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0febedc9bb95dd8f; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffcffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0febedc9bb95dd8f; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x01f50000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128d_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000226200005111; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000165e0000480d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000226200005111; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000165e0000480d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000226200005111; ++ *((unsigned long*)& __m256i_result[2]) = 0x000016000000480d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000226200005111; ++ *((unsigned long*)& __m256i_result[0]) = 0x000016000000480d; ++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001f5400000000; ++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long*)& __m256i_result[3]) = 0x000007ff000007ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000007fffffff800; ++ *((unsigned long*)& __m256i_result[1]) = 0x000007ff000007ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000007fffffff800; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001f5400000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m256d_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x1010100f10100fd4; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000504f00002361; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff8f81000040e4; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000504f00002361; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff8f81000040e4; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x000007ff; ++ *((int*)& __m256_op0[6]) = 0x000007ff; ++ *((int*)& __m256_op0[5]) = 0x000007ff; ++ *((int*)& __m256_op0[4]) = 0xfffff800; ++ *((int*)& __m256_op0[3]) = 0x000007ff; ++ *((int*)& __m256_op0[2]) = 0x000007ff; ++ *((int*)& __m256_op0[1]) = 0x000007ff; ++ *((int*)& __m256_op0[0]) = 0xfffff800; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x000007ff; ++ *((int*)& __m256_result[6]) = 0x000007ff; ++ *((int*)& __m256_result[5]) = 0x000007ff; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& 
__m256_result[3]) = 0x000007ff; ++ *((int*)& __m256_result[2]) = 0x000007ff; ++ *((int*)& __m256_result[1]) = 0x000007ff; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000504f00002361; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff8f81000040e4; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000504f00002361; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff8f81000040e4; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000007ff000007ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000007ff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000007ff000007ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000007ff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000584e00002b60; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000787dffffbf1c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000584e00002b60; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000787dffffbf1c; ++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xffeeffaf; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000011; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xffeeffaf; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000011; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000695d00009b8f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000074f20000d272; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001f5400000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffb3b4; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff5ffff4738; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffb3b4; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff5ffff4738; ++ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf000f000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf000f000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xe800c0d8fffeeece; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff383efffedf0c; ++ *((unsigned long*)& __m256i_result[1]) = 0xe800c0d8fffeeece; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vaddi_du(__m128i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& 
__m256d_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op1[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256d_op1[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0xd0d8eecf383fdf0d; ++ __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x3f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000b8f81b8c850f4; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000b8f81b8c850f4; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op2[2]) = 0x000b8f81b8c850f4; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op2[0]) = 0x000b8f81b8c850f4; ++ *((unsigned long*)& __m256i_result[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_result[2]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_result[1]) = 0x000050504c4c2362; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x000b2673a90896a4; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001f5400000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001f00000000; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffb3b4; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff5ffff4738; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffb3b4; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff5ffff4738; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xee); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x3bcc5098; ++ *((int*)& __m128_op1[2]) = 0x703fa5f0; ++ *((int*)& __m128_op1[1]) = 0xab7b3134; ++ *((int*)& __m128_op1[0]) = 0x9703f605; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xffffb3b4; ++ *((int*)& __m256_op0[5]) = 0xfffffff5; ++ *((int*)& __m256_op0[4]) = 0xffff4738; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xffffb3b4; ++ *((int*)& __m256_op0[1]) = 0xfffffff5; ++ *((int*)& __m256_op0[0]) = 0xffff4738; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0xffffb3b4; ++ *((int*)& __m256_result[5]) = 0xfffffff5; ++ *((int*)& __m256_result[4]) = 0xffff4738; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0xffffb3b4; ++ *((int*)& __m256_result[1]) = 0xfffffff5; ++ *((int*)& __m256_result[0]) = 0xffff4738; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op0[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256d_op0[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xaf0489001bd4c0c3; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xaf0489001bd4c0c3; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000d0d8ffffeecf; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000d0d8ffffeecf; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffd8ffc7ffffdf0d; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffd8ffc7ffffdf0d; ++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000226200005111; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000016000000480d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000226200005111; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000016000000480d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1131288800000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1131288800000002; ++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000014; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffd8ffc7; ++ *((int*)& __m256_op0[4]) = 0xffdaff8a; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffd8ffc7; ++ *((int*)& __m256_op0[0]) = 0xffdaff8a; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0xffffb3b4; ++ *((int*)& __m256_op1[5]) = 0xfffffff5; ++ *((int*)& __m256_op1[4]) = 0xffff4738; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0xffffb3b4; ++ *((int*)& __m256_op1[1]) = 0xfffffff5; ++ *((int*)& __m256_op1[0]) = 0xffff4738; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe800c0d8fffeeece; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff383efffedf0c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xe800c0d8fffeeece; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff383efffedf0c; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xe800c000fffeeece; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff383efffedf0c; ++ *((unsigned long*)& __m256i_result[1]) = 0xe800c000fffeeece; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe800c000fffeeece; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff383efffedf0c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xe800c000fffeeece; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff383efffedf0c; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xe800c000fffeeece; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff383e000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0xe800c000fffeeece; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x26); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00220021004a007e; 
++ *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00220021004a007e; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ff00; ++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010000000000001; ++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffb3b4; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff5ffff4738; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffb3b4; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff5ffff4738; ++ 
*((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256d_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256d_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256d_op2[2]) = 0x7f7f7f5c8f374980; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256d_op2[0]) = 0x7f7f7f5c8f374980; ++ *((unsigned long*)& __m256d_result[3]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007f41; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f5c8f374980; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f5c8f374980; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100007f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100007f7f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x30); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007f41; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010000000000001; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfff00000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xfff00000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0010000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007f41; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffc7f7f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffc000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffc7f7f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffc000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000b0b100015d1e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001fffe0001bfff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000b0b100015d1e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001fffe0001bfff; ++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x58); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffafafb3b3dc9d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffafafb3b3dc9d; ++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7ff8000000000000; ++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000fffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0010000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001504f4c4b2361; ++ *((unsigned long*)& __m256i_result[2]) = 0x303338a48f374969; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001504f4c4b2361; ++ *((unsigned long*)& __m256i_result[0]) = 0x303338a48f374969; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffafaf; ++ *((int*)& __m256_op0[4]) = 0xb3b3dc9d; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffafaf; ++ *((int*)& __m256_op0[0]) = 0xb3b3dc9d; ++ *((int*)& __m256_op1[7]) = 0x00020000; ++ *((int*)& __m256_op1[6]) = 0x00020000; ++ *((int*)& __m256_op1[5]) = 0x00220021; ++ *((int*)& __m256_op1[4]) = 0x004a007e; ++ *((int*)& __m256_op1[3]) = 0x00020000; ++ *((int*)& __m256_op1[2]) = 0x00020000; ++ *((int*)& __m256_op1[1]) = 0x00220021; ++ *((int*)& __m256_op1[0]) = 0x004a007e; ++ *((int*)& __m256_op2[7]) = 0x00000001; ++ *((int*)& __m256_op2[6]) = 0x00007f7f; ++ *((int*)& __m256_op2[5]) = 0x00000001; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000001; ++ *((int*)& __m256_op2[2]) = 0x00007f7f; ++ *((int*)& __m256_op2[1]) = 0x00000001; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x80000001; ++ *((int*)& __m256_result[6]) = 0x80007f7f; ++ *((int*)& __m256_result[5]) = 0xffffafaf; ++ *((int*)& __m256_result[4]) = 0x80000000; ++ *((int*)& __m256_result[3]) = 0x80000001; ++ *((int*)& __m256_result[2]) = 0x80007f7f; ++ *((int*)& __m256_result[1]) = 0xffffafaf; ++ *((int*)& __m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; ++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0ef; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0ef; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0ef; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0ef; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000070f07170; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000070f0f0ef; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000070f07170; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000070f0f0ef; ++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000090909090; ++ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000090909090; ++ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x95); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001504f4c4b2361; ++ *((unsigned long*)& __m256i_op0[2]) = 0x303338a48f374969; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001504f4c4b2361; ++ *((unsigned long*)& __m256i_op0[0]) = 0x303338a48f374969; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff47b4ffff5879; ++ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x81); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000b2673a90896a4; ++ *((unsigned long*)& __m256i_result[3]) = 0xa90896a400000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xa90896a400000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x22); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00020421d7d41124; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00020421d7d41124; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x55550000; ++ *((int*)& __m256_op0[6]) = 0x55550000; ++ *((int*)& __m256_op0[5]) = 0x55550000; ++ *((int*)& __m256_op0[4]) = 0x55550000; ++ *((int*)& __m256_op0[3]) = 0x55550000; ++ *((int*)& __m256_op0[2]) = 0x55550000; ++ *((int*)& __m256_op0[1]) = 0x55550000; ++ *((int*)& __m256_op0[0]) = 0x55550000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000d5000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000d5000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000d5000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000d5000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvslei_w(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00020421d7d41124; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00020421d7d41124; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00220021004a007e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00220021004a007e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00220021004a007e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00220021004a007e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ff0fff0fff0f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ff0fff0fff0f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000ffb10001ff8f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001004c0001ff87; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffb10001ff8f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001004c0001ff87; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fe01ae00ff00ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff47b4ffff5879; ++ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x0000504f; ++ *((int*)& __m256_op0[6]) = 0xffff3271; ++ *((int*)& __m256_op0[5]) = 0xffff47b4; ++ *((int*)& __m256_op0[4]) = 0xffff5879; ++ *((int*)& __m256_op0[3]) = 0x0000504f; ++ *((int*)& __m256_op0[2]) = 0xffff3271; ++ *((int*)& __m256_op0[1]) = 0xffff47b4; ++ *((int*)& __m256_op0[0]) = 0xffff5879; ++ *((int*)& __m256_op1[7]) = 0x00000000; 
++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xa90896a400000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa90896a400000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f7f000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[1]) = 0x7f7f000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256d_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256d_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f7f000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff3225; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff3225; ++ *((unsigned long*)& __m256i_op1[3]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1a19181716151413; ++ *((unsigned long*)& __m256i_op1[1]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1a19181716151413; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000004442403; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000004442403; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x63); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f7f000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7f7f000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100010001; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff47b4ffff5878; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000b84b0000a787; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0xffff47b4ffff5878; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000b84b0000a787; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff47b4ffff5878; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000b84b0000a787; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff47b4ffff5878; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000b84b0000a787; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff07b4ffff0707; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000b8070000a787; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff07b4ffff0707; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000b8070000a787; ++ __m256i_out = __lasx_xvmini_b(__m256i_op0,7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x0000ffb1; ++ *((int*)& __m256_op1[6]) = 0x0001ff8f; ++ *((int*)& __m256_op1[5]) = 0x0001004c; ++ *((int*)& __m256_op1[4]) = 0x0001ff87; ++ *((int*)& __m256_op1[3]) = 0x0000ffb1; ++ *((int*)& __m256_op1[2]) = 0x0001ff8f; ++ *((int*)& __m256_op1[1]) = 0x0001004c; ++ *((int*)& __m256_op1[0]) = 0x0001ff87; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fe01ae00ff00ff; ++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x7f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffe1ffffffe1; ++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; ++ *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xec); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffe1ffffffe1; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffafffffffa; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffafffffffa; ++ __m128i_out = __lsx_vmini_w(__m128i_op0,-6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffafffffffa; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfffffffafffffffa; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfffffffa; ++ *((int*)& __m128_op0[2]) = 0xfffffffa; ++ *((int*)& __m128_op0[1]) = 0xfffffffa; ++ *((int*)& __m128_op0[0]) = 0xfffffffa; ++ *((int*)& __m128_result[3]) = 0xfffffffa; ++ *((int*)& __m128_result[2]) = 0xfffffffa; ++ *((int*)& __m128_result[1]) = 0xfffffffa; ++ *((int*)& __m128_result[0]) = 0xfffffffa; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0aa077b7054c9554; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40c7ee1f38e4c4e8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff07b4ffff0707; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000b8070000a787; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff07b4ffff0707; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000b8070000a787; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffb7650000d496; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001800000018000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffb7650000d496; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001800000018000; ++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000a00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000010000000a; ++ __m256i_out = __lasx_xvmini_w(__m256i_op0,10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256d_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256d_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256d_result[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256d_result[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000100010001; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000010000000a; ++ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000080008001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000080008001; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000001000b000b; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000001000b000b; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00feffff00fe81; ++ *((unsigned long*)& __m256i_result[2]) = 0xfe01fe51ff00ff40; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00feffff00fe81; ++ *((unsigned long*)& __m256i_result[0]) = 0xfe01fe51ff00ff40; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000001; ++ *((int*)& __m256_op0[4]) = 0x0000000a; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000001; ++ *((int*)& __m256_op0[0]) = 0x0000000a; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000040; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& 
__m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff7f80ffff7f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff7f80ffff7f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff7f80ffff7f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff7f80ffff7f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffeff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffeff00; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff010000ff017e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff017e6b803fc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff017e6b803fc0; ++ __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0aa077b7054c9554; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40c7ee1f38e4c4e8; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_b(__m128i_op0,8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000a00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000010000000a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffffe01fe52;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff01ff02;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffffe01fe52;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff01ff02;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000080008001;
++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_b(__m128i_op0,-5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000;
++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000080000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000080000000;
++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x33);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplgr2vr_w(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000080008001;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000080008001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000a00000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000fffff614;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000a00000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000fffff614;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000020202020;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x7ef8000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7ef8000000000000;
++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ef8000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7ef8000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7ef8000000000000;
++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vexth_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff600000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff000009ec;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff600000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff000009ec;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000180000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000180000001;
++ __m256i_out = __lasx_xvneg_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
++ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f6f7f7f7f6;
++ *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f6f7f7f7f6;
++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f6f7f7f7f6;
++ *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f6f7f7f7f6;
++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff80017fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff80017fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff;
++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000280000;
++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x30);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7ef8000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f;
++ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ int_result = 0x000000007ff00000;
++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x92);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_w(__m256i_op0,-12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002;
++ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000000f;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x000000000000000f;
++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000280000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000140001;
++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000000;
++ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x1f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100007fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100007fff;
++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7ef8000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8108000000000000;
++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a7f0a0a0a;
++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000001fffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000001fffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000001fffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000001fffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000001e;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100007fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100007fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100007fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100007fff;
++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100007fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100008000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100007fff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000140001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000140001;
++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x16);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010200000000;
++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x35);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800;
++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffe5ffffffe5;
++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010200000000;
++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ unsigned_long_int_result = 0x00000000ffffffff;
++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x0);
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_op2[7]) = 0xffffffe5;
++ *((int*)& __m256_op2[6]) = 0xffffffe5;
++ *((int*)& __m256_op2[5]) = 0xffffffe5;
++ *((int*)& __m256_op2[4]) = 0xffffffe5;
++ *((int*)& __m256_op2[3]) = 0xffffffe5;
++ *((int*)& __m256_op2[2]) = 0xffffffe5;
++ *((int*)& __m256_op2[1]) = 0xffffffe5;
++ *((int*)& __m256_op2[0]) = 0xffffffe5;
++ *((int*)& __m256_result[7]) = 0xffffffe5;
++ *((int*)& __m256_result[6]) = 0xffffffe5;
++ *((int*)& __m256_result[5]) = 0xffffffe5;
++ *((int*)& __m256_result[4]) = 0xffffffe5;
++ *((int*)& __m256_result[3]) = 0xffffffe5;
++ *((int*)& __m256_result[2]) = 0xffffffe5;
++ *((int*)& __m256_result[1]) = 0xffffffe5;
++ *((int*)& __m256_result[0]) = 0xffffffe5;
++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000f;
++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x61608654a2d4f6da;
++ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001e;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffe5ffffffe5;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0c0c0c0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0014000100000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x35);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x61608654a2d4f6da;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmskltz_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x61608654a2d4f6da;
++ *((unsigned long*)& __m128i_result[1]) = 0xfee0000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc2c00ca844a8ecb4;
++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((int*)& __m256_result[7]) = 0x5d20a0a1;
++ *((int*)& __m256_result[6]) = 0x5d20a0a1;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x5d20a0a1;
++ *((int*)& __m256_result[2]) = 0x5d20a0a1;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x5d20a0a15d20a0a1;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x5d20a0a15d20a0a1;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0014000100000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f807f807f807f80;
++ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfrint_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000003fbf3fbf;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7ff8;
++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000007f7f7f7f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000010;
++ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020;
++ *((unsigned long*)&
__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x5d20a0a1; ++ *((int*)& __m256_op1[6]) = 0x5d20a0a1; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x5d20a0a1; ++ *((int*)& __m256_op1[2]) = 0x5d20a0a1; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x7f7f7f7f; ++ *((int*)& __m128_op0[1]) = 0x00000001; ++ *((int*)& __m128_op0[0]) = 0x00000010; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x61608654a2d4f6da; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff0800080008000; ++ *((unsigned long*)& __m128i_result[0]) = 0xe160065422d476da; ++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff0800080008000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe160065422d476da; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000d00000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000b00000010; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x61608654a2d4f6da; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff08ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x5d20a0a1; ++ *((int*)& __m256_op0[6]) = 0x5d20a0a1; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x5d20a0a1; ++ *((int*)& __m256_op0[2]) = 0x5d20a0a1; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffeaffffffea; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffeaffffffea; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffeaffffffea; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffeaffffffea; ++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ff08ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x00000000ff08ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff08ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000002c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000002c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000002c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000002c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000002c0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000002c0000; ++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x5d20a0895d20a089; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long*)& __m256i_result[1]) = 0x5d20a0895d20a089; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffe8ffffffe8; ++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fbf3fbf00007fff; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff8007; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xfffffff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000003fbf3fbf; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000100; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff8fffffff8; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff8fffffff8; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff8fffffff8; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff8fffffff8; ++ __m256i_out = __lasx_xvmini_w(__m256i_op0,-8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x0000000f; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00077f88; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00077f97; ++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000077f97; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffeff7f0000; ++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000000003fbf3fbf; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007f7f7f01027f02; ++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x39); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x31); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00100010; ++ *((int*)& __m128_op0[2]) = 0x00100010; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000039; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000039; ++ __m128i_out = __lsx_vclz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00002000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x1fe02000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000003f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000003f800000; ++ __m128i_out = __lsx_vfrintrp_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x4050000000000000; ++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00003f80000000ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x5); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4050000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x2028000000000000; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000001fe02000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000001fe02000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4050000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00000000;
++ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4050000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x2028000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff;
++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x000000ff;
++ *((int*)& __m128_op0[2]) = 0x000000ff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x371fe00000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x371fe00000000000;
++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x371fe00000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x371fe00000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[0]) = 0x370bdfecffecffec;
++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x000000ff;
++ *((int*)& __m128_op0[0]) = 0x000000ff;
++ *((int*)& __m128_op1[3]) = 0x370bdfec;
++ *((int*)& __m128_op1[2]) = 0xffecffec;
++ *((int*)& __m128_op1[1]) = 0x370bdfec;
++ *((int*)& __m128_op1[0]) = 0xffecffec;
++ *((int*)& __m128_result[3]) = 0x370bdfec;
++ *((int*)& __m128_result[2]) = 0xffecffec;
++ *((int*)& __m128_result[1]) = 0x370bdfec;
++ *((int*)& __m128_result[0]) = 0xffecffec;
++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80;
++ *((unsigned long*)& __m128i_result[1]) = 0x0037ffdfffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0037ffdfffeb007f;
++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x371fe00000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x371fe00000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffffff;
++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003f3f;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslli_h(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00010001;
++ *((int*)& __m256_op1[6]) = 0x00010001;
++ *((int*)& __m256_op1[5]) = 0x00010001;
++ *((int*)& __m256_op1[4]) = 0x00010001;
++ *((int*)& __m256_op1[3]) = 0x00010001;
++ *((int*)& __m256_op1[2]) = 0x00010001;
++ *((int*)& __m256_op1[1]) = 0x00010001;
++ *((int*)& __m256_op1[0]) = 0x00010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000006e17bfd8;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000006e17bfd8;
++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000ffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffff0100000001;
++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x7f800000;
++ *((int*)& __m256_result[6]) = 0x7f800000;
++ *((int*)& __m256_result[5]) = 0x7f800000;
++ *((int*)& __m256_result[4]) = 0x7f800000;
++ *((int*)& __m256_result[3]) = 0x7f800000;
++ *((int*)& __m256_result[2]) = 0x7f800000;
++ *((int*)& __m256_result[1]) = 0x7f800000;
++ *((int*)& __m256_result[0]) = 0x7f800000;
++ __m256_out = __lasx_xvfrecip_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010;
++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
++ __m128i_out = __lsx_vclo_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x7f800000;
++ *((int*)& __m256_op0[6]) = 0x7f800000;
++ *((int*)& __m256_op0[5]) = 0x7f800000;
++ *((int*)& __m256_op0[4]) = 0x7f800000;
++ *((int*)& __m256_op0[3]) = 0x7f800000;
++ *((int*)& __m256_op0[2]) = 0x7f800000;
++ *((int*)& __m256_op0[1]) = 0x7f800000;
++ *((int*)& __m256_op0[0]) = 0x7f800000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000006e17bfd8;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000006e17bfd8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffff0100000001;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffff0100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000006e17bfd8;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000006e17bfd8;
++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80;
++ *((unsigned long*)& __m128i_result[1]) = 0x1f1f1f1f1f1f1f1f;
++ *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f27332b9f;
++ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x1f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x36fbdfdcffdcffdc;
++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000;
++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_d(__m256i_op0,-2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[1]) = 0x370bdfec00130014;
++ *((unsigned long*)& __m128i_result[0]) = 0x370bdfec00130014;
++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vori_b(__m128i_op0,0x38);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op2[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fefffffffffffff;
++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_w(__m256i_op0,-5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000008140c80;
++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x36fbdfdcffdcffdc;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80;
++ *((unsigned long*)& __m128i_op2[1]) = 0x1f1f1f1f1f1f1f00;
++ *((unsigned long*)& __m128i_op2[0]) = 0x1f1f1f27332b9f00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x36fbdfdcffdc0008;
++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x3ff0010000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x3ff0010000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000002050320;
++ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008130c7f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x1f1f1f1f1f1f1f00;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1f1f1f27332b9f00;
++ *((unsigned long*)& __m128i_op2[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op2[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[1]) = 0x06b1213ef1efa299;
++ *((unsigned long*)& __m128i_result[0]) = 0x8312f5424ca4a07f;
++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fef;
++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x06b1213ef1efa299;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8312f5424ca4a07f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x1f1f1f1f1f1f1f00;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1f1f1f27332b9f00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xa23214697fd03f7f;
++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f70000000000000;
++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x7f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fef;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fee;
++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000fedd;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000fedd;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000fedd;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000fedd;
++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f70000000000000;
++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfec00130014;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfec00130014;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000370bffffdfec;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000014;
++ __m128i_out = __lsx_vexth_w_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x800080008000ffee;
++ *((unsigned long*)& __m256i_result[2]) = 0x800080008000ffee;
++ *((unsigned long*)& __m256i_result[1]) = 0x800080008000ffee;
++ *((unsigned long*)& __m256i_result[0]) = 0x800080008000ffee;
++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001c88bf0;
++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000320;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007730;
++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7f70000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000dc300003ffb;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000dc300003ffb;
++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00000dc300003ffb;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00000dc300003ffb;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffff3fbfffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7fffffff7fffffff;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x7ffffffb;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000320;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007730;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa23214697fd03f7f;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007ffffffb;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x010101017f010101;
++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff810011;
++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfeca2eb9931;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00d3007c014e00bd;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200020002;
++ *((unsigned long*)& __m128i_result[0]) = 0x06e1000e00030005;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x010101017f010101;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000040600000406;
++ *((unsigned long*)& __m128i_result[0]) = 0x020202020202fe02;
++ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xfff70156;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xfff70156;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xfff70156;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xfff70156;
++ *((int*)& __m256_op1[7]) = 0x7fefffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0x7fefffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0x7fefffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0x7fefffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xa2321469;
++ *((int*)& __m128_op0[0]) = 0x7fd03f7f;
++ *((int*)& __m128_op1[3]) = 0x00000406;
++ *((int*)& __m128_op1[2]) = 0x00000406;
++ *((int*)& __m128_op1[1]) = 0x02020202;
++ *((int*)& __m128_op1[0]) = 0x0202fe02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040600000406;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020202020202fe02;
++ *((unsigned long*)& __m128i_result[1]) = 0xfff503fbfff503fb;
++ *((unsigned long*)& __m128i_result[0]) = 0x01f701f701f7fdf7;
++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x3fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x3fff7fffffc08008;
++ *((unsigned long*)& __m256i_result[1]) = 0x3fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x3fff7fffffc08008;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000040600000406;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020202020202fe02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0020200000202000;
++ *((unsigned long*)& __m128i_result[0]) = 0x002020000fe02000;
++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x7fefffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0x7fefffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0x7fefffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0x7fefffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x7fefffff;
++ *((int*)& __m256_result[6]) = 0xffffffff;
++ *((int*)& __m256_result[5]) = 0x7fefffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0x7fefffff;
++ *((int*)& __m256_result[2]) = 0xffffffff;
++ *((int*)& __m256_result[1]) = 0x7fefffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op1[3]) = 0x3fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x3fff8000ffa08004;
++ *((unsigned long*)& __m256i_op1[1]) = 0x3fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x3fff8000ffa08004;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01;
++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x13f9c5b60028a415;
++ *((unsigned long*)& __m128i_op0[0]) = 0x545cab1d7e57c415;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x13f9c5b60028a415;
++ *((unsigned long*)& __m128i_result[0]) = 0x545cab1d81a83bea;
++ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op0[0]) = 0x370bdfeca2eb9931;
++ *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec;
++ *((unsigned long*)& __m128i_op1[0]) = 0x370bdfeca2eb9931;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x370bdfec;
++ *((int*)& __m128_op0[2]) = 0xffecffec;
++ *((int*)& __m128_op0[1]) = 0x370bdfec;
++ *((int*)& __m128_op0[0]) = 0xa2eb9931;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff810011;
++ *((unsigned long*)& __m256i_op2[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_result[2]) = 0x817f11ed81800ff0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x817f11ed81800ff0;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000aaaa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000545cab1d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000081a83bea;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x00d3007c014e00bd;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000aaaa;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fef010000010100;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fef010000010100;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fef010000010100;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fef010000010100;
++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000aaaa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffff70156;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x74);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000545cab1d;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000081a83bea;
++ *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415;
++ *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea;
++ *((unsigned long*)& __m128i_result[1]) = 0x00400000547cab1d;
++ *((unsigned long*)& __m128i_result[0]) = 0x2000000081a83fea;
++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2;
++ *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000080;
++ __m128i_out = __lsx_vfclass_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x13f9c5b60028a415;
++ *((unsigned long*)& __m128d_op1[0]) = 0x545cab1d81a83bea;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned
long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long*)& __m128i_result[0]) = 0x685670d37e80682a; ++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff8180ffff8181; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff8180ffff8181; ++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long*)& __m128i_result[0]) = 0x685670d27e00682a; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffe05fc47b400; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffe06003fc000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffe05fc47b400; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffe06003fc000; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x685670d27e00682a; ++ *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long*)& __m128i_result[0]) = 0x685670d27e00682a; ++ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x9d9d9d9d9d9d9d9d; ++ *((unsigned long*)& __m128i_result[0]) = 0x9d9d9d9d9d9d9d9d; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x62); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff810011; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff810011; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff8180ffff8181; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff8180ffff8181; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000008000ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff81ff81; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000008000ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff81ff81; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0fff01800fff0181; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0fff01800fff0181; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0007ff800007ff80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0007ff800007ff80; ++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x14ccc632; ++ *((int*)& __m128_op0[2]) = 0x0076a4d2; ++ *((int*)& __m128_op0[1]) = 0x685670d2; ++ *((int*)& __m128_op0[0]) = 0x7e00682a; ++ *((int*)& __m128_op1[3]) = 0x14ccc632; ++ *((int*)& __m128_op1[2]) = 0x0076a4d2; ++ *((int*)& __m128_op1[1]) = 0x685670d2; ++ *((int*)& __m128_op1[0]) = 0x7e00682a; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000001; ++ *((int*)& __m256_op0[6]) = 0x00000001; ++ *((int*)& __m256_op0[5]) = 0x0fff0180; ++ *((int*)& __m256_op0[4]) = 0x0fff0181; ++ *((int*)& __m256_op0[3]) = 0x00000001; ++ *((int*)& __m256_op0[2]) = 0x00000001; ++ *((int*)& __m256_op0[1]) = 0x0fff0180; ++ *((int*)& __m256_op0[0]) = 0x0fff0181; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000545cffffab1d; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff81a800003bea; ++ *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000545cffff0001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff81a800003bea; ++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff800000003; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00197d3200197d56; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00197d3200197d56; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x685670d27e00682a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x14ccc631eb3339ce; ++ *((unsigned long*)& __m128i_result[0]) = 0x685670d197a98f2e; ++ __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x817f11ed81800ff0; ++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x817f11ed81800ff0; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000004fc480040; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000004fc480040; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000004fc480040; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000004fc480040; ++ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x13f9c5b60028a415; ++ *((unsigned long*)& __m128i_op0[0]) = 0x545cab1d81a83bea; ++ *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0015172b; ++ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x685670d27e00682a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long*)& 
__m128i_op1[0]) = 0x685670d27e00682a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00197d3200197d56; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00197d3200197d56; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff800000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001900000019; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000300000003; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000300000003; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffd; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffd; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffd; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffd; ++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0x0015172b; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xfffffffe; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xfffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff0015172b; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0015172b; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0015172b; ++ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffb00151727; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; 
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x00010000fffffffc; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x20fc000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x20fc000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffb00151727; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00010000fffffffc; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffb00151727; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00010000fffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00010000fffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffebeeaaefafb; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffebeeaaefafb; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x7fefffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x7fefffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x7fffffff; ++ *((int*)& __m256_op0[6]) = 0x7fffffff; ++ *((int*)& __m256_op0[5]) = 0x7fffffff; ++ *((int*)& __m256_op0[4]) = 0x7fffffff; ++ *((int*)& __m256_op0[3]) = 0x7fffffff; ++ *((int*)& __m256_op0[2]) = 0x7fffffff; ++ *((int*)& __m256_op0[1]) = 0x7fffffff; ++ *((int*)& __m256_op0[0]) = 0x7fffffff; ++ *((int*)& __m256_op1[7]) = 0x20fc0000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 
0x00000000; ++ *((int*)& __m256_op1[3]) = 0x20fc0000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffebeeaaefafb; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffebeeaaefafb; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x7fefffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x7fefffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x01ffbfff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x03ffffff03ffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x01ffbfff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x03ffffff03ffffff; ++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x26); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xfbffffff; ++ *((int*)& __m128_op0[0]) = 0x27001517; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x0000ffff; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x14ccc631eb3339ce; ++ *((unsigned long*)& __m128i_op0[0]) = 0x685670d197a98f2e; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe0045; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe0045; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x43f0000000000000; ++ __m256d_out = 
__lasx_xvffint_d_lu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe00fe0045; ++ *((unsigned long*)& __m128i_result[1]) = 0x007f007f007f007e; ++ *((unsigned long*)& __m128i_result[0]) = 0x007f007f007effc6; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x007f007f007f007e; ++ *((unsigned long*)& __m128d_op1[0]) = 0x007f007f007effc6; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff1fffffff1; ++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff46; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffe00000002; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffff46000000ba; ++ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff46000000ba; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffa30000005c; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x43f0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x001f001f001f001f; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xa3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x001f001f001f001f; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000001001f001e; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000001001f001e; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000fffe0000ff45; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff000000b9; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffd5002affffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x343d8dc6b0ed5a08; ++ *((unsigned long*)& __m128i_result[1]) = 0x012b012c01010246; ++ *((unsigned long*)& __m128i_result[0]) = 0x353e743b50135a4f; ++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffd5002affffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x343d8dc6b0ed5a08; ++ *((unsigned long*)& __m128i_result[1]) = 0x002affd600000001; ++ *((unsigned long*)& __m128i_result[0]) = 0xcbc2723a4f12a5f8; ++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe05fc47b400; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffe06003fc000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe05fc47b400; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffe06003fc000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000001; ++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x01ff020000ff03ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x01346b8d00b04c5a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x01ff020000ff03ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x01346b8d00b04c5a; ++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 
0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7eeefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x7eeefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x002affd600000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffd60001723aa5f8; ++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x343d8dc5b0ed5a08; ++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d(__m128i_op0,12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x343d8dc5b0ed5a08; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x353c8cc4b1ec5b09; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000010101010; ++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000010101010; ++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe05fc47b400; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffe06003fc000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe05fc47b400; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffe06003fc000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x353c8cc4b1ec5b09; ++ *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808000000035; ++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7eeefefefefefefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7eeefefefefefefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7e00ee00fe00fe00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfe00fe00fe00fe00; ++ *((unsigned long*)& __m256i_result[1]) = 0x7e00ee00fe00fe00; ++ *((unsigned long*)& __m256i_result[0]) = 0xfe00fe00fe00fe00; ++ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00f525682ffd27f2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00365c60317ff930; ++ *((unsigned long*)& __m128i_result[1]) = 0xe500c085c000c005; ++ *((unsigned long*)& __m128i_result[0]) = 0xe5c1a185c48004c5; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x61); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffd60001723aa5f8; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000007f007f7f; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff00ff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xe500c085c000c005; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe5c1a185c48004c5; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffe500ffffc085; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffc000ffffc005; ++ __m128i_out = __lsx_vexth_w_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000fefe0000fefe; ++ *((unsigned long*)& __m256i_result[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000fefe0000fefe; ++ *((unsigned long*)& __m256i_result[0]) = 0x00fe00fe00fe00fe; ++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_result[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_result[0]) = 0x00ff00fe00ff00fe; ++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808000000035; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200000000; ++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00fe00fe; ++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); 
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; ++ *((unsigned long*)& __m128i_result[1]) = 0x2f3626e7b637e6be; ++ *((unsigned long*)& __m128i_result[0]) = 0xee3ee6f77f6e76f7; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x26); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fef0000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fef0000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fef7fef7fef7fef; ++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe500ffffc085; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffc000ffffc005; ++ __m128i_out = __lsx_vextl_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe500ffffc085; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffc000ffffc005; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xe500c085; ++ *((int*)& __m128_op0[2]) = 0xc000c005; ++ *((int*)& __m128_op0[1]) = 0xe5c1a185; ++ *((int*)& __m128_op0[0]) = 0xc48004c5; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffc000; ++ *((int*)& __m128_op1[0]) = 0xffffc005; ++ *((int*)& __m128_op2[3]) = 0xff550025; ++ *((int*)& __m128_op2[2]) = 0x002a004b; ++ *((int*)& __m128_op2[1]) = 0x00590013; ++ *((int*)& __m128_op2[0]) = 0x005cffca; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xffffffff; ++ *((int*)& __m128_result[1]) = 0xffffc000; ++ *((int*)& __m128_result[0]) = 0xffffc005; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fef0000ffff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fef0000ffff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xde00fe0000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000fe010000fe01; ++ *((unsigned long*)& __m256i_result[1]) = 0xde00fe0000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fe010000fe01; ++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000080800000808; ++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000007070707; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff07070707; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000007070707; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff07070707; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fef7fef7fef7fef; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xde00fe00; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x0000fe01; ++ *((int*)& __m256_op0[4]) = 0x0000fe01; ++ *((int*)& __m256_op0[3]) = 0xde00fe00; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x0000fe01; ++ *((int*)& __m256_op0[0]) = 0x0000fe01; ++ *((int*)& __m256_op1[7]) = 0x0000ffff; ++ *((int*)& __m256_op1[6]) = 0x0000ffff; ++ *((int*)& __m256_op1[5]) = 0x00ff00fe; ++ *((int*)& __m256_op1[4]) = 0x00ff00fe; ++ *((int*)& __m256_op1[3]) = 0x0000ffff; ++ *((int*)& __m256_op1[2]) = 0x0000ffff; ++ *((int*)& __m256_op1[1]) = 0x00ff00fe; ++ *((int*)& __m256_op1[0]) = 0x00ff00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xde00fe0000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fe010000fe01; ++ *((unsigned long*)& __m256i_op0[1]) = 0xde00fe0000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fe010000fe01; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe500ffffc085; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001300000012; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001200000012; ++ __m128i_out = __lsx_vclo_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfc00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfc00000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbe8282a0793636d3; ++ *((unsigned long*)& __m128i_op0[0]) = 0x793636d3793636d3; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m128i_op0[0]) = 0x363d753d50155c0a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe500c085c000c005; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe5c1a185c48004c5; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002020002020200; ++ *((unsigned long*)& __m128i_result[0]) = 0x021f3b0205150600; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002020002020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x021f3b0205150600; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000300400002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000100010040fffb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000300400002; ++ *((unsigned long*)& __m128i_result[0]) = 0x000100010040fffb; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000545400; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000545400; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100fe04ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100fe04ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a3a3a3b3a3a3a3a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3a3a00003a3a0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x353c8cc4b1ec5b09; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080008000808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x1a9e466258f62d84; ++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0202020202020202; ++ *((unsigned long*)& __m128i_op1[0]) = 0x363d753d50155c0a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff400000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff400000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00fe00fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_result[1]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f8080007f007f; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x808080e280808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080636380806363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080638063; ++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffff040000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffff040000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
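++ /* Note on the pattern used throughout this generated file: each test
++    case fills the lanes of its __m128 and __m256 family operands 64 (or
++    32) bits at a time through a pointer cast, invokes the LSX/LASX
++    builtin under test once, and lets ASSERTEQ_64/ASSERTEQ_32 (helpers
++    presumably defined near the top of this file) compare the produced
++    vector against the precomputed expected vector.  A minimal standalone
++    sketch of one such case, reusing the __lsx_vavgr_d vectors from above
++    (rounded signed average per 64-bit lane, (a + b + 1) >> 1) with plain
++    assert() standing in for the harness helper:
++
++        #include <assert.h>
++        #include <lsxintrin.h>
++
++        void check_vavgr_d (void)
++        {
++          __m128i op0, op1, out;
++          ((unsigned long *) &op0)[1] = 0xfffffffffffffffe;
++          ((unsigned long *) &op0)[0] = 0xffffffffffffff46;
++          ((unsigned long *) &op1)[1] = 0x00fe00fe00fe00fe;
++          ((unsigned long *) &op1)[0] = 0x00fe00fe00fe0045;
++          out = __lsx_vavgr_d (op0, op1);
++          assert (((unsigned long *) &out)[1] == 0x007f007f007f007e);
++          assert (((unsigned long *) &out)[0] == 0x007f007f007effc6);
++        }  */
++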
*((int*)& __m256_op0[7]) = 0xffff0000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffff0000; ++ *((int*)& __m256_op0[4]) = 0xffff0000; ++ *((int*)& __m256_op0[3]) = 0xffff0000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffff0000; ++ *((int*)& __m256_op0[0]) = 0xffff0000; ++ *((int*)& __m256_op1[7]) = 0x007f8080; ++ *((int*)& __m256_op1[6]) = 0x007f007f; ++ *((int*)& __m256_op1[5]) = 0x007f8080; ++ *((int*)& __m256_op1[4]) = 0x007f007f; ++ *((int*)& __m256_op1[3]) = 0x007f8080; ++ *((int*)& __m256_op1[2]) = 0x007f007f; ++ *((int*)& __m256_op1[1]) = 0x007f8080; ++ *((int*)& __m256_op1[0]) = 0x007f007f; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x01ef013f01e701f8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x35bb8d32b2625c00; ++ *((unsigned long*)& __m128i_result[1]) = 0x00008d3200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xea); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x808080e280808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080636380806363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x808080e280808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080636380806363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080638063; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080638063; ++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x00000001; ++ *((int*)& __m128_op0[1]) = 0xffffffee; ++ *((int*)& __m128_op0[0]) = 0x00000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x007f8080; ++ *((int*)& __m256_op0[6]) = 0x007f007f; ++ *((int*)& __m256_op0[5]) = 0x007f8080; ++ *((int*)& __m256_op0[4]) = 0x007f007f; ++ *((int*)& __m256_op0[3]) = 0x007f8080; ++ 
*((int*)& __m256_op0[2]) = 0x007f007f; ++ *((int*)& __m256_op0[1]) = 0x007f8080; ++ *((int*)& __m256_op0[0]) = 0x007f007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007f3f7f007f1f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007f3f7f007f1f; ++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffee00000004; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x80808080; ++ *((int*)& __m128_op0[0]) = 0x80638063; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x03ff000003ff03ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x03ff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x03ff000003ff03ff; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x03ff000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007f8080007f007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffee00000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3a3a3a3b3a3a3a3a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3a3a00003a3a0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000003a0000003a; ++ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x38); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0x00000001; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000002; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0x00000001; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000002; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0x00000001; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000002; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0x00000001; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000002; ++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfc00ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfc00ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000100fe000100fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x00fe00fe00fe00fe; ++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long*)& __m256i_result[2]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long*)& __m256i_result[1]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long*)& __m256i_result[0]) = 0x1b1b1b1b1b1b1b1b; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00008d3200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x479f64b03373df61; ++ *((unsigned long*)& __m128i_result[1]) = 0x00008d3200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00040004; ++ *((int*)& __m128_op0[2]) = 0x00040004; ++ *((int*)& __m128_op0[1]) = 0x00040004; ++ *((int*)& __m128_op0[0]) = 0x00040004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fffe; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0x479f64b03373df61; ++ *((unsigned long*)& __m128i_result[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long*)& __m128i_result[0]) = 
0x479f64b03373df61; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00040004; ++ *((int*)& __m128_op0[2]) = 0x00040004; ++ *((int*)& __m128_op0[1]) = 0x00040004; ++ *((int*)& __m128_op0[0]) = 0x00040004; ++ *((unsigned long*)& __m128d_result[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x37c0001000000000; ++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x37c0001000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x37c0001000000001; ++ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x77c0401040004000; ++ *((unsigned long*)& __m128i_result[0]) = 0x77c0401040004000; ++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0100000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100000000000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffff0400; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 
0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0xffff0400; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000003a0000003a; ++ *((unsigned long*)& __m128i_result[1]) = 0x77c0404a4000403a; ++ *((unsigned long*)& __m128i_result[0]) = 0x77c03fd640003fc6; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long*)& __m128i_result[1]) = 0x75c0404a4200403a; ++ *((unsigned long*)& __m128i_result[0]) = 0x75c03fd642003fc6; ++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xb9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x77c0404a4000403a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x77c03fd640003fc6; ++ *((unsigned long*)& __m128i_result[1]) = 0x04c0044a0400043a; ++ *((unsigned long*)& __m128i_result[0]) = 0x04c004d6040004c6; ++ __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0003c853c843c844; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003c853c843c844; ++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long*)& __m128i_op0[0]) = 0x479f64b03373df61; ++ *((unsigned long*)& __m128i_op1[1]) = 0x04c0044a0400043a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x04c004d6040004c6; ++ *((unsigned long*)& __m128i_result[1]) = 0x1d20db00ec967bec; ++ *((unsigned long*)& __m128i_result[0]) = 0x00890087009b0099; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xff00ffff00000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op0[1]) = 0xff00ffff00000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x04c0044a0400043a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x04c004d6040004c6; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[1]) = 0x044a043a04d604c6; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00fe00fe; ++ *((int*)& __m256_op0[6]) = 0x00fe00fe; ++ *((int*)& __m256_op0[5]) = 0x00fe00fe; ++ *((int*)& __m256_op0[4]) = 0x00fe00fe; ++ *((int*)& __m256_op0[3]) = 0x00fe00fe; ++ *((int*)& __m256_op0[2]) = 0x00fe00fe; ++ *((int*)& __m256_op0[1]) = 0x00fe00fe; ++ *((int*)& __m256_op0[0]) = 0x00fe00fe; ++ *((int*)& __m256_op1[7]) = 0x00fe00fe; ++ *((int*)& __m256_op1[6]) = 
0x00fe00fe; ++ *((int*)& __m256_op1[5]) = 0x00fe00fe; ++ *((int*)& __m256_op1[4]) = 0x00fe00fe; ++ *((int*)& __m256_op1[3]) = 0x00fe00fe; ++ *((int*)& __m256_op1[2]) = 0x00fe00fe; ++ *((int*)& __m256_op1[1]) = 0x00fe00fe; ++ *((int*)& __m256_op1[0]) = 0x00fe00fe; ++ *((int*)& __m256_result[7]) = 0x3f800000; ++ *((int*)& __m256_result[6]) = 0x3f800000; ++ *((int*)& __m256_result[5]) = 0x3f800000; ++ *((int*)& __m256_result[4]) = 0x3f800000; ++ *((int*)& __m256_result[3]) = 0x3f800000; ++ *((int*)& __m256_result[2]) = 0x3f800000; ++ *((int*)& __m256_result[1]) = 0x3f800000; ++ *((int*)& __m256_result[0]) = 0x3f800000; ++ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long*)& __m128i_result[1]) = 0x00f0008100800080; ++ *((unsigned long*)& __m128i_result[0]) = 0x00f0008000800080; ++ __m128i_out = __lsx_vsrari_h(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c844; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c844; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000003a0000003a; ++ *((unsigned long*)& __m128d_op1[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000003a0000003a; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x37c0001000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x37c0001000000008; ++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h(__m128i_op0,3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000003a0000003a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000003a0000003a; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x007f0000007f0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x007f0000007f0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00f0008100800080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00f000807000009e; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000ec382e; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ec382d; ++ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00f0008100800080; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00f000807000009e; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007f0000007f0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007f0000007f0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff000000ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000691a6c843c8fc; ++ *((unsigned long*)& __m128i_result[0]) = 0x000691a6918691fc; ++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 
0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrne_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[1]) = 0xd6d7ded7ded7defe; ++ *((unsigned long*)& __m128i_result[0]) = 0xd6d7ded7ded7defe; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0xd6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000020000000200; ++ __m128i_out = __lsx_vfclass_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff000000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001c8520000c97d; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001c8520001c87d; ++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000003f8000004; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000003f8000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003c853c843c87e; ++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffff7; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffff7; ++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b(__m256i_op0,15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000007f8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000007f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; ++ *((unsigned long*)& __m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; ++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000007f8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000007f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vsat_hu(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000007f8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000007f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000f80007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000f8; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x4a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0006000000040000; ++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000f80007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000f80007; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000006c80031; ++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000006c80031; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x3c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffcfd000000fb00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fe00f8000700; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0xfdfef9ff0efff900; ++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfdfef9ff0efff900; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffcfd000000fb00; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fe00f8000700; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fb01; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000007000000; ++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fb01; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000007000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fb01; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000e0000; ++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128d_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfdfef9ff0efff900; ++ *((unsigned long*)& __m128d_result[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128d_result[0]) = 0x6363636363636363; ++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x6363636363636363; ++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000c000c000c000c; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x000000ff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x000000ff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00018d8e00018d8e; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff7; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x807fffff80800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_result[1]) = 0x8003000000020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4040ffffc0400004; ++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ 
ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8003000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4040ffffc0400004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8003000000020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x64); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff00007fff; ++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_result[2]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_result[1]) = 0x00007fff00007fff; ++ *((unsigned long*)& __m256d_result[0]) = 0x00007fff00007fff; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0086000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0082000000000007; ++ *((unsigned long*)& __m128d_result[1]) = 0x4160c00000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x4110000000000000; ++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0001; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0086000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0082000000000007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0086000000040000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0082000000000007; ++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffff0000; ++ *((int*)& __m256_op1[4]) = 0xffff0001; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffff0000; ++ *((int*)& __m256_op1[0]) = 0xffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000d0000000d; ++ __m128i_out = __lsx_vmini_w(__m128i_op0,13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000d0000000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8006000000040000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8002000000000007; ++ *((unsigned long*)& __m128i_result[1]) = 0x8006000000040000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8002000d00000014; ++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000006362ffff; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000600000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000636500006363; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x31b1777777777776; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x6eee282828282829; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000006362ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d0000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x6363635663636356; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000006362ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d0000000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000dffff000d; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long*)& __m128i_op1[1]) = 0x31b1777777777776; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6eee282828282829; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000dffff000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x6b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000d0000000d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000dffff000d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000070007; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000007ffff; ++ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800c00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0002; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0002; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0002; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0002; ++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& 
__m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000068; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x0000001f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000070007; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000007ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000068; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000038003; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000040033; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000068; ++ *((unsigned long*)& __m128d_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128d_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000068; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) 
= 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0200000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0200000000000000; ++ __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffff00; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffff00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffff00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffff00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fefe7f00; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fefe7f00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000038003; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000040033; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0020000000200000; ++ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000038003; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040033; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100080000; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000068; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000038003; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040033; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100080000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefff80000; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe0000fffe0002; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffe0000fffe0002; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0000fffe0002; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe0000fffe0002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000fffeffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffeffff; ++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100080000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 
0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x80000000; ++ *((int*)& __m128_result[2]) = 0x80000000; ++ *((int*)& __m128_result[1]) = 0x80000000; ++ *((int*)& __m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out 
= __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001e0000001e; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ 
++ int_op0 = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0020000000200000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0000001e; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffe0000fffe0012; ++ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffeffee; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe0000fffe0012; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000001ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000001ffff; ++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000001000000010; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000001000000010; ++ *((unsigned long*)& __m256d_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1010101010001000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x1010101000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xb); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0004000404040404; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004; ++ *((unsigned long*)& __m256i_result[1]) = 0x0004000400000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1010101010001000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1010101000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1010101010001000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x101010100000000e; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000003dffc2; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00000000003dffc2; 
++ *((unsigned long*)& __m128i_result[1]) = 0x00000f02e1f80f04;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000f02e1f80f04;
++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000;
++ __m128i_out = __lsx_vclo_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x1010101010001000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x101010100000000e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0889088908810881;
++ *((unsigned long*)& __m256i_result[2]) = 0x0081010000810100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0889088900810088;
++ *((unsigned long*)& __m256i_result[0]) = 0x0081010000810100;
++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x003fffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000100010001ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000100010001ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000100010001ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000100010001ffff;
++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000003dffc2;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000003dffc2;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0889088908810881;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0081010000810100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0889088900810088;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0081010000810100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0004448444844084;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000408080004080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0004448444804080;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000408080004080;
++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
[... remainder of this hunk: roughly 1,100 further machine-generated lines in the same pattern — operand/expected-result vectors followed by ASSERTEQ_64/ASSERTEQ_32 checks for additional LSX (__lsx_*) and LASX (__lasx_xv*) intrinsics, including xvhaddw_w_h, xvssrarni_d_q, xvpickev_b, vreplve_d, xvshuf_b, xvfcvt_s_d, vclz_w, xvfrecip_d, xvfnmsub_s, xvmulwev_q_du_d, xvaddwev_q_du, xvssrln_h_w, xvrotr_b, xvsigncov_w, xvfcmp_sult_s, xvsrani_b_h, xvshuf4i_d, vfrstpi_h, xvbitrev_w, vmsub_h, xvreplgr2vr_d, vdiv_d, xvabsd_wu, vfsqrt_d, vrotr_d, xvssrlni_b_h, xvfcvtl_d_s, vssrlni_wu_d, xvmulwod_q_du_d, xvfmsub_s, xvsrarn_w_d, xvsrai_b, xvfcvt_h_s, xvfmaxa_d, xvssrlrni_du_q, xvfrstpi_b, xvfcmp_sult_d, vextrins_w, and xvfmaxa_s ...]
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff59; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff59; ++ __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x08080808; ++ *((int*)& __m128_op1[2]) = 0x08080808; ++ *((int*)& __m128_op1[1]) = 0x08080808; ++ *((int*)& __m128_op1[0]) = 0x08080808; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff000200000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff000200000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x001f00e0ff800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x001f00e0ff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff80000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff000200000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff000200000000; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808280808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808280808; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffefffffffeff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000100fffffeff; ++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xb8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808081; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x80808080ffffffff; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffff80800001; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff80800001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80800001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80800001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff7fff7ef; ++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080ffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffbff8888080a; ++ *((unsigned long*)& __m128i_result[0]) = 0x080803ff807ff7f9; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffbff8888080a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x080803ff807ff7f9; ++ *((unsigned long*)& __m128i_result[1]) = 0x010105017878f8f6; ++ *((unsigned long*)& __m128i_result[0]) = 0xf8f8fd0180810907; ++ __m128i_out = 
__lsx_vneg_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1212121212121212; ++ *((unsigned long*)& __m256i_result[2]) = 0x1212121212121212; ++ *((unsigned long*)& __m256i_result[1]) = 0x1212121212121212; ++ *((unsigned long*)& __m256i_result[0]) = 0x1212121212121212; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff7fff7ef; ++ *((unsigned long*)& __m128i_op0[0]) = 0x80808080ffffffff; ++ *((int*)& __m128_result[3]) = 0xffffe000; ++ *((int*)& __m128_result[2]) = 0xffffe000; ++ *((int*)& __m128_result[1]) = 0xc6ffe000; ++ *((int*)& __m128_result[0]) = 0xc6fde000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc6ffe000c6fde000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808081; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_result[0]) = 0x467f6080467d607f; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x010105017878f8f6; ++ *((unsigned long*)& __m128i_op2[0]) = 0xf8f8fd0180810907; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000080800000808; ++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x467f6080467d607f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x467f6080467d607f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808081; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xe000e0006080b040; ++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x467f6080467d607f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffe000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6fde000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xe000e0006080b040; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffe000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c6fde000; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff00ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffffffff; ++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xef); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe364525335ede000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x00000fff00000e36; ++ __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x34); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6fde000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x39); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ 
*((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000040000000000; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000fff00000e36; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000fff0e36; ++ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00000fff00000e36; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000fef01000e27ca; ++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffff00ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00ffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00fe00fe00ff; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x23); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000e27ca; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001fde020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001c4f940000; ++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslli_w(__m128i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000100000001000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0ed5ced7e51023e5;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00001000e51023e5;
++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_b(__m256i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000e36400015253;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000035ed0001e000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000e36400015253;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000035ed0001e000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x1c6c80007fffffff;
++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1c6c80007fffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0038d800ff000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00fffe00fffffe00;
++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000f27ca;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000ffef0010000;
++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000f27ca;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000ffef0010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000ff0000;
++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000ffef0010000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff0000ff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000000000;
++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x0000ffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x0000ffff;
++ *((int*)& __m256_op1[7]) = 0x00ff00ff;
++ *((int*)& __m256_op1[6]) = 0x00ff00ff;
++ *((int*)& __m256_op1[5]) = 0x00ff00ff;
++ *((int*)& __m256_op1[4]) = 0x00ff00ff;
++ *((int*)& __m256_op1[3]) = 0x00ff00ff;
++ *((int*)& __m256_op1[2]) = 0x00ff00ff;
++ *((int*)& __m256_op1[1]) = 0x00ff00ff;
++ *((int*)& __m256_op1[0]) = 0x00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000001ffffffff;
++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x21);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x0000ff00;
++ *((int*)& __m128_op1[0]) = 0x00ff0000;
++ *((int*)& __m128_result[3]) = 0xffffffff;
++ *((int*)& __m128_result[2]) = 0xffffffff;
++ *((int*)& __m128_result[1]) = 0xffffffff;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000e36400005253;
++ *((unsigned long*)& __m128i_op2[0]) = 0x000035ed0000e000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000008000008080;
++ *((unsigned long*)& __m128i_result[0]) = 0x8080800000800080;
++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
++ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00000001ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00ff00ff;
++ *((int*)& __m256_op0[6]) = 0x00ff00ff;
++ *((int*)& __m256_op0[5]) = 0x00ff00ff;
++ *((int*)& __m256_op0[4]) = 0x00ff00ff;
++ *((int*)& __m256_op0[3]) = 0x00ff00ff;
++ *((int*)& __m256_op0[2]) = 0x00ff00ff;
++ *((int*)& __m256_op0[1]) = 0x00ff00ff;
++ *((int*)& __m256_op0[0]) = 0x00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op2[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00ff00ff;
++ *((int*)& __m256_op1[6]) = 0x00ff00ff;
++ *((int*)& __m256_op1[5]) = 0x00ff00ff;
++ *((int*)& __m256_op1[4]) = 0x00ff00ff;
++ *((int*)& __m256_op1[3]) = 0x00ff00ff;
++ *((int*)& __m256_op1[2]) = 0x00ff00ff;
++ *((int*)& __m256_op1[1]) = 0x00ff00ff;
++ *((int*)& __m256_op1[0]) = 0x00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff00000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[1]) = 0x001bffe4ebff9400;
++ *((unsigned long*)& __m128i_result[0]) = 0xff80000000000000;
++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslei_d(__m128i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000e2e3ffffd1d3;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000008000e2e3;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000;
++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
++ __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000008000e2e3;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000008000e2e3;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080806362;
++ *((unsigned long*)& __m128i_result[0]) = 0x807f808000000000;
++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0038d800ff000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00fffe00fffffe00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0038f000ff000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00fffe00fffffe00;
++ __m128i_out = __lsx_vsat_h(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslti_b(__m256i_op0,-3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0038d800ff000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00fffe00fffffe00;
++ *((unsigned long*)& __m128d_op2[1]) = 0x8000008000008080;
++ *((unsigned long*)& __m128d_op2[0]) = 0x8080800000800080;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000008000008080;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002;
++ __m256i_out = __lasx_xvfclass_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800;
++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ffffff00;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0038d800ff000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00fffe00fffffe00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0137ffc9d7fe2801;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f00ff017fffff01;
++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00e4880080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0080810080808100;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x41f0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x41f0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x41f0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x41f0000000000000;
++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
++ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000080806362;
++ *((unsigned long*)& __m128i_op1[0]) = 0x807f808000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80806362;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff;
++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80806362;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00008080;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00008080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_h(__m128i_op0,-4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000ff801c9e;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000810000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x40eff02383e383e4;
++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000;
++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x800000810000807f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x808080010080007f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x800000810000807f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x808080010080007f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000020000020;
++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x62);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff801c9e;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000810000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x001d001d20000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x001d001d20000020;
++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff801c9e;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000810000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x40eff02383e383e4;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000007fff;
++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ff801c9e;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000810000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x0000ffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000020000020;
++ __m128i_out = __lsx_vexth_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00ff00ff;
++ *((int*)& __m128_op0[0]) = 0x00ff00ff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffe0001;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffe0001;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffe0001;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffe0001;
++ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x0000ffff;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x0000ffff;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x0000ffff;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x0000ffff;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x0007ffff0007ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000700000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0007ffff0007ffff;
++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x2d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000200000002000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000700000007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0007ffff0007ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000700000007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0007ffff0007ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x00071f1f00071f1f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000700000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x00071f1f00071f1f;
++ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000020000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x2000002000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x2000002020000020;
++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000020006;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe000ffdf;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000200000002000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe000ffdf;
++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_result = 0x0000000000000000;
++ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x0);
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe000ffdf;
++ *((unsigned long*)& __m128i_result[1]) = 0x00001fff00001fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_b(__m256i_op0,6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe000ffdf;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff;
++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffe000ffdf;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe000ffdf;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000200000002001;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000001fff0021;
++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfrint_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001200100012001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100200001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100200001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00001fff00001fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x3a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)&
__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001200100012001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000000007ffff; ++ 
*((unsigned long*)& __m256d_op0[0]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long*)& __m256i_result[3]) = 0x0a0908070a090807; ++ *((unsigned long*)& __m256i_result[2]) = 0x0a0908070a090807; ++ *((unsigned long*)& __m256i_result[1]) = 0x0a0908070a090807; ++ *((unsigned long*)& __m256i_result[0]) = 0x0a0908070a090807; ++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0400400204004002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x32); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000800; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0400400204004002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000080000000800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_result[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_result[1]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_result[0]) = 0x8e8e8e8e8e8e8e8e; ++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x71); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0400400204004002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x6d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000700000007; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0007ffff0007ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000700000007; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0007ffff0007ffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007000008e700000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007000008e700000; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001200100012001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001fff00001fff; ++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8e8e8e8e8f0e8e8e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8e8e8e8e8f0e8e8e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7171717171010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7171717171010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x8e8e8e8e8f00ffff; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001fff00001fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000007ffc000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00001fff; ++ *((int*)& __m128_op0[2]) = 0x00001fff; ++ *((int*)& __m128_op0[1]) = 0x00000003; ++ *((int*)& __m128_op0[0]) = 0xfffffffc; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ 
*((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0xfffffffc; ++ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000e2e20000e2e2; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00011d1c00011d9c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000e2e20000e2e2; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00011d1c00011d9c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000e2e20000e2e2; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00011d1c00011d9c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000e2e20000e2e2; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00011d1c00011d9c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7171717171717171; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x01c601c6fe3afe3a; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x01c601c6fe3afe3a; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; 
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffe080f6efc100f7; ++ *((unsigned long*)& __m128i_op1[0]) = 0xefd32176ffe100f7; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000040000000200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000040000000000; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x71717171; ++ *((int*)& __m256_op1[6]) = 0x71010101; ++ *((int*)& __m256_op1[5]) = 0x8e8e8e8e; ++ *((int*)& __m256_op1[4]) = 0x8f00ffff; ++ *((int*)& __m256_op1[3]) = 0x71717171; ++ *((int*)& __m256_op1[2]) = 0x71010101; ++ *((int*)& __m256_op1[1]) = 0x8e8e8e8e; ++ *((int*)& __m256_op1[0]) = 0x8f00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7c007c0080008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7c007c0080008000; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x71717171; ++ *((int*)& __m256_op0[6]) = 0x71010101; ++ *((int*)& __m256_op0[5]) = 0x8e8e8e8e; ++ *((int*)& __m256_op0[4]) = 0x8f00ffff; ++ *((int*)& __m256_op0[3]) = 0x71717171; ++ *((int*)& __m256_op0[2]) = 0x71010101; ++ *((int*)& __m256_op0[1]) = 0x8e8e8e8e; ++ *((int*)& __m256_op0[0]) = 0x8f00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01c601c6fe3afe3a; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01c601c6fe3afe3a; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007000008e700000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007000008e700000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7171717171010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7171717171010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xe2e2e202ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xe2e2e202ffffffff; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000007ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001e0007ffff; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffe080f6efc100f7; ++ *((unsigned long*)& __m128i_op0[0]) = 0xefd32176ffe100f7; ++ int_result = 0x0000000000002176; ++ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x2); ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffc6ffc6; ++ *((int*)& __m256_op0[6]) = 0x003a003a; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffc6ffc6; ++ *((int*)& __m256_op0[2]) = 0x003a003a; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x71717171; ++ *((int*)& __m256_op1[6]) = 0x71010101; ++ *((int*)& __m256_op1[5]) = 0x8e8e8e8e; ++ *((int*)& __m256_op1[4]) = 0x8f00ffff; ++ *((int*)& __m256_op1[3]) = 0x71717171; ++ *((int*)& __m256_op1[2]) = 0x71010101; ++ *((int*)& __m256_op1[1]) = 0x8e8e8e8e; ++ *((int*)& __m256_op1[0]) = 0x8f00ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x2a29282726252423; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000005452505; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000004442403e4; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ff00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe2e2e202ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ff00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe2e2e202ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x26); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000008d00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000008d00000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00a8009800880078; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x07ffc000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffe080f6efc100f7; ++ *((unsigned long*)& __m128i_op0[0]) = 0xefd32176ffe100f7; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffe080f6efc100f7; ++ *((unsigned long*)& __m128i_op1[0]) = 0xefd32176ffe100f7; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x2c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff8000; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; 
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fff001fffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fff001fffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fffffff; ++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000465; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[0]) = 0x7575757575757575; ++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x75); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c007c0080008000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c007c0080008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7c00000880008000; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2a29282726252423; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b(__m128i_op0,-1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0007ffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffe37fe3001d001d; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffff8000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffe37fe3001d001d; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffe200000020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffe200000020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[3]) = 0x7575ffff75757595; ++ *((unsigned long*)& __m256i_result[2]) = 0x7575ffff7575f575; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x7575ffff75757595; ++ *((unsigned long*)& __m256i_result[0]) = 0x7575ffff7575f575; ++ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7575ffff75757595; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7575ffff7575f575; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7575ffff75757595; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7575ffff7575f575; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long*)& __m256i_result[2]) = 0x3abac5447fffca89; ++ *((unsigned long*)& __m256i_result[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long*)& __m256i_result[0]) = 0x3abac5447fffca89; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00a600e000a600e0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01500178010000f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7575ffff75757595; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7575ffff7575f575; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7575ffff75757595; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7575ffff7575f575; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op2[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op2[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op2[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op2[0]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x001d001d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x05452505; ++ *((int*)& __m128_op0[1]) = 0x00000004; ++ *((int*)& __m128_op0[0]) = 0x442403e4; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[0]) = 0x7575757575757575; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0x22); 
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000fff0; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000001d001d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000080008000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3e00000440004000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000080008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3e000004400f400f; ++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) 
= 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x05452505; ++ *((int*)& __m128_op1[1]) = 0x00000004; ++ *((int*)& __m128_op1[0]) = 0x442403e4; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x001d001d; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x001d001d; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000003; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0102; ++ *((unsigned long*)& __m256i_result[2]) = 0x007c000000810081; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0102; ++ *((unsigned long*)& __m256i_result[0]) = 0x007c000000810081; ++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fff0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000001d001d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000001d001d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000030003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000030003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x7fe37fe3; ++ *((int*)& __m256_op0[6]) = 0x001d001d; ++ *((int*)& __m256_op0[5]) = 0x7fff7fff; ++ *((int*)& __m256_op0[4]) = 0x7fff0000; ++ *((int*)& __m256_op0[3]) = 0x7fe37fe3; ++ *((int*)& __m256_op0[2]) = 0x001d001d; ++ *((int*)& __m256_op0[1]) = 0x7fff7fff; ++ *((int*)& __m256_op0[0]) = 0x7fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0102; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x007c000000810081; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007c000000810081; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x007c7fff00007fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00817fff00810000; ++ *((unsigned long*)& __m256i_result[1]) = 0x007c7fff00007fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00817fff00810000; ++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffe8ffffffe8; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffe8ffffffe8; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffe8ffffffe8; ++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010109; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffe0; ++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3abac5447fffca89; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3abac5447fffca89; ++ *((unsigned long*)& __m256i_op1[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3abac5447fffca89; ++ *((unsigned long*)& __m256i_op1[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3abac5447fffca89; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000755a0000d8f2; ++ *((unsigned long*)& __m256i_result[2]) = 0x000075740000fffe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000755a0000d8f2; ++ *((unsigned long*)& __m256i_result[0]) = 0x000075740000fffe; ++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007c000000810081; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007c000000810081; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005452505; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000004442403e4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000005452505; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000044525043c; ++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffe0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000005452505; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000044525043c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7c00000880008000; ++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0100000001000100; ++ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0100000001000100; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x12835580; ++ *((int*)& __m128_op0[0]) = 0xb880eb98; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xffffffff; ++ *((int*)& __m128_result[1]) = 0x55fcbad1; ++ *((int*)& __m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3abb3abbbabababa; ++ *((unsigned long*)& __m256i_result[2]) = 0x0080000000800080; ++ *((unsigned long*)& __m256i_result[1]) = 0x3abb3abbbabababa; ++ *((unsigned long*)& __m256i_result[0]) = 0x0080000000800080; ++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0100000001000100; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000040; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000040; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000180007fe8; ++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffe8ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffe8ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffe8ffffffe8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000005452505; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000004442403e4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03fc03fc03fc03fc; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000b4a00008808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080800000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x42800000; ++ *((int*)& __m128_result[0]) = 0x42800000; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 
0x000000017bfffff0; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7c00000880008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000001; ++ *((int*)& __m256_op0[6]) = 0x7bfffff0; ++ *((int*)& __m256_op0[5]) = 0x00000001; ++ *((int*)& __m256_op0[4]) = 0x80007fe8; ++ *((int*)& __m256_op0[3]) = 0x00000001; ++ *((int*)& __m256_op0[2]) = 0x7bfffff0; ++ *((int*)& __m256_op0[1]) = 0x00000001; ++ *((int*)& __m256_op0[0]) = 0x80007fe8; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0100000001000100; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0100000001000100; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff7bfffff1; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff80007fe9; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff7bfffff1; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff80007fe9; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000004000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000b4a00008808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080800000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000b4a00008808; ++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4280000042800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xbd7fffffbd800000; ++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000c0007; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000c0007; ++ *((unsigned long*)& __m256i_op1[3]) = 0x3abb3abbbabababa; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x3abb3abbbabababa; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800080; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000babababa; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000008c0087; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000babababa; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000008c0087; ++ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x00000b4a00008808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080800000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000001d001d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001d; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007c7fff00007fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00817fff00810000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007c7fff00007fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00817fff00810000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x7c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001b4a00007808; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long*)& __m128i_result[1]) = 0x00001b4a00007808; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001b4a00007808; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffe4b5ffff87f8; ++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[2]) = 
0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001d0000001d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00001d0000001d00; ++ __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000fff; ++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_result[2]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_result[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_result[0]) = 0x5252525252525252; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x52); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff01; ++ __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000; ++ int_op1 = 0x000000007ff00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00001b4a00007808; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffff01; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0x00000000ffffff01; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x807c7fffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x80817fff00810000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x807c7fffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x80817fff00810000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x80767f0101050101; ++ *((unsigned long*)& __m256i_result[2]) = 0x80817f01007f0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x80767f0101050101; ++ *((unsigned long*)& __m256i_result[0]) = 0x80817f01007f0000; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_op1[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5252525252525252; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff7bfffff1; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff80007fe9; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff7bfffff1; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff80007fe9; ++ *((unsigned long*)& __m256i_result[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long*)& __m256i_result[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x407b40ff40ff40f1; ++ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long*)& __m256i_op0[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x407b40ff40ff40f1; ++ *((unsigned long*)& __m256i_op1[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x407b40ff40ff40f1; ++ *((unsigned long*)& __m256i_result[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_result[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_result[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_result[0]) = 0xbf84bf00bf00bf0e; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00060000; ++ *((int*)& __m256_op0[6]) = 0x00040000; ++ *((int*)& __m256_op0[5]) = 0x00020000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00060000; ++ *((int*)& __m256_op0[2]) = 0x00040000; ++ *((int*)& __m256_op0[1]) = 0x00020000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256d_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256d_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256d_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00bf00bf00bf00bf; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00bf00bf00bf00bf; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00bf00bf00bf00bf; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00bf00bf00bf00bf; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_result[2]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_result[1]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_result[0]) = 0xdfc2df80df80df87; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0010001000100010; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op1[3]) = 
0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op1[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m128i_result[0]) = 0x9090909090909090; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x90); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff4; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000070007000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_result[3]) = 0xe070e000e070e000; ++ *((unsigned long*)& __m256i_result[2]) = 0xe070e000e070e000; ++ *((unsigned long*)& __m256i_result[1]) = 0xe070e000e070e000; ++ *((unsigned long*)& __m256i_result[0]) = 0xe070e000e070e000; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x74); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d(__m128i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f0040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f0040; ++ *((unsigned long*)& __m256i_result[3]) = 0x00003f003f003f00; ++ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00003f003f003f00; ++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000070007000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4040403fd03fd040; ++ *((unsigned long*)& __m256i_op1[2]) = 
0x4040403fd03fd040; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffd03fd040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4040403fd03fd040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001010000010100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000010100; ++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256d_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256d_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256d_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000070007000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff8fff9000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff8fff9000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff8fff9000; ++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000070007000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7000700070007000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0e0e0e0e0e0e0e0e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000e0e0e0e0e0e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f0040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f0040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x003f003f003f0040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x003f003f003f0040; ++ *((unsigned long*)& __m256i_result[3]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[1]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f00004040; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfefbff06fffa0004; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfefeff04fffd0004; ++ *((unsigned long*)& __m128i_result[1]) = 0x4008804080040110; ++ *((unsigned long*)& __m128i_result[0]) = 0x4040801080200110; ++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x41000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x41000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x41000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x41000000; ++ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff0000ffff0000f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffff0000; ++ *((int*)& __m128_op0[2]) = 0xffff0000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x40088040; ++ *((int*)& __m128_op1[2]) = 0x80040110; ++ *((int*)& __m128_op1[1]) = 0x40408010; ++ *((int*)& __m128_op1[0]) = 0x80200110; ++ 
*((int*)& __m128_result[3]) = 0xffff0000; ++ *((int*)& __m128_result[2]) = 0xffff0000; ++ *((int*)& __m128_result[1]) = 0x40408010; ++ *((int*)& __m128_result[0]) = 0x80200110; ++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0e0e0e0e0e0e0e0e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000e0e0e0e0e0e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff8fff9000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff8fff9000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff8fff9000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00010e0d00009e0e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00009000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000e0e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00009000; ++ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long*)& __m256i_result[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long*)& __m256i_result[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long*)& __m256i_result[0]) = 0xdfc2ff20df80ffa7; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f00004040; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_b(__m128i_op0,11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_op0[1]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_op1[2]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_op1[1]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_op1[0]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_result[3]) = 0x2080208020802080; ++ *((unsigned long*)& __m256i_result[2]) = 0x203e208020802079; ++ *((unsigned long*)& __m256i_result[1]) = 0x2080208020802080; ++ *((unsigned long*)& __m256i_result[0]) = 0x203e208020802079; ++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long*)& __m256i_op0[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long*)& __m256i_op0[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long*)& __m256i_op0[0]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x80208020c22080a7; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x80208020c22080a7; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x80208020c22080a7; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x80208020c22080a7; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long*)& __m256i_op1[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long*)& __m256i_op1[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long*)& __m256i_op1[0]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000840100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xbffebffec0febfff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000840100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xbffebffec0febfff; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffff0000; ++ *((int*)& __m128_op0[2]) = 0xffff0000; ++ *((int*)& __m128_op0[1]) = 0x40408010; ++ *((int*)& __m128_op0[0]) = 0x80200110; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff4; 
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff4; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000840100000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xbffebffec0fe0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000840100000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xbffebffec0fe0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x5fff5fff607f0000; ++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000033; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00004200; 
++ *((int*)& __m256_op0[6]) = 0x80000000; ++ *((int*)& __m256_op0[5]) = 0x5fff5fff; ++ *((int*)& __m256_op0[4]) = 0x607f0000; ++ *((int*)& __m256_op0[3]) = 0x00004200; ++ *((int*)& __m256_op0[2]) = 0x80000000; ++ *((int*)& __m256_op0[1]) = 0x5fff5fff; ++ *((int*)& __m256_op0[0]) = 0x607f0000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00004200; ++ *((int*)& __m256_result[6]) = 0x80000000; ++ *((int*)& __m256_result[5]) = 0x5fff5fff; ++ *((int*)& __m256_result[4]) = 0x607f0000; ++ *((int*)& __m256_result[3]) = 0x00004200; ++ *((int*)& __m256_result[2]) = 0x80000000; ++ *((int*)& __m256_result[1]) = 0x5fff5fff; ++ *((int*)& __m256_result[0]) = 0x607f0000; ++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffc0c0ffffbfc0; ++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003f3f0000400d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003f3f0000400d; ++ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00010e0d00009e0e; ++ *((unsigned long*)& __m256i_op0[2]) 
= 0x0000ffff00009000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000e0e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00009000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000033; ++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x71); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x3fc03fc000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f801fe000000000; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000001607f0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000001607f0000; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffbdff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xa000a0009f80ffcc; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffbdff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xa000a0009f80ffcc; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000033; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000033; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000003; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f1fd800000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000004; ++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000033; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000033; ++ *((int*)& __m256_op1[7]) = 0x00004200; ++ *((int*)& __m256_op1[6]) = 0x80000000; ++ *((int*)& __m256_op1[5]) = 0x5fff5fff; ++ *((int*)& __m256_op1[4]) = 0x607f0000; ++ *((int*)& __m256_op1[3]) = 0x00004200; ++ *((int*)& __m256_op1[2]) = 0x80000000; 
++ *((int*)& __m256_op1[1]) = 0x5fff5fff; ++ *((int*)& __m256_op1[0]) = 0x607f0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f0000400d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f0000400d; ++ *((unsigned long*)& __m256i_result[3]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x44); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000900000009; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000004; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc03fc040; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffe00000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffe00000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3fc03fc000000003; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f1fd800000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f1f00003f3f0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3f3f00007f1f0000; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f0000400d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f0000400d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3fc03fc000000003; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f1fd800000004; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xc0411fe800000000; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x7f801fdfffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000003; ++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x3f413f4100000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x1000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000420080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x5fff5fff607f0000; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x28); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe05f8102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000003fc00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001fe01fe00; ++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000003fc00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001fe01fe00; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000a; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000a; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x10000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x10000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) 
= 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0xffff0000; ++ *((int*)& __m128_op1[2]) = 0xffff0000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xffffffff; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000000a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3f413f4100000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f801fe000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0411fe800000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x601fbfbeffffffff; ++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbf3efff536d5169b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ebdfffffddf3f40; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3f5ec0a0feefa0b0; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_op1[2]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_op1[1]) = 0xdf80df80df80df80; ++ *((unsigned long*)& __m256i_op1[0]) = 0xdfc2df80df80df87; ++ *((unsigned long*)& __m256i_result[3]) = 0xff21ff21ff21ff21; ++ *((unsigned long*)& __m256i_result[2]) = 0xff21ff21ff21ff21; ++ *((unsigned long*)& __m256i_result[1]) = 0xff21ff21ff21ff21; ++ *((unsigned 
long*)& __m256i_result[0]) = 0xff21ff21ff21ff21; ++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffe00000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffe00000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffff00000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffff00000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x64e464e464e464e4; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffeffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000064e264e6; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfffe00029f9f6061; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x601fbfbeffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvmini_d(__m256i_op0,9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xff21ff21ff21ff21; ++ *((unsigned long*)& __m256d_op0[2]) = 0xff21ff21ff21ff21; ++ *((unsigned long*)& __m256d_op0[1]) = 0xff21ff21ff21ff21; ++ *((unsigned long*)& __m256d_op0[0]) = 0xff21ff21ff21ff21; ++ *((unsigned long*)& __m256d_op1[3]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256d_op1[2]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256d_op1[1]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256d_op1[0]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long*)& 
__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0100010001000100; ++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x01000100; ++ *((int*)& __m128_op0[0]) = 0x01000100; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0x64e464e4; ++ *((int*)& __m128_op1[0]) = 0x64e464e4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long*)& 
__m256i_result[3]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_result[2]) = 0xff21c241ff21c238; ++ *((unsigned long*)& __m256i_result[1]) = 0xff21c241ff21c241; ++ *((unsigned long*)& __m256i_result[0]) = 0xff21c241ff21c238; ++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100010001000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f5ec0a0feefa0b0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff02d060; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff02d060; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff02d060; ++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff02d060; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff02d060; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff02d06000000000; ++ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f5ec0a0feefa0b0; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffe00029fb060b1; ++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fff3fff3fff3fff; ++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned 
long*)& __m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m128d_op2[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128d_op2[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128d_result[0]) = 0xfff8000000000000; ++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0100010001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128d_op0[1]) = 0x00fe000100cf005f; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128d_result[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long*)& __m128d_result[0]) = 0x7fff7fff7fff7fff; ++ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ int_op0 = 0x000000007ff00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00fe000100cf005f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x5fff5e97e2ff5abf; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefffefffefffeff; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x26); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x8); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x965f5e9660e25a60; ++ *((unsigned long*)& __m128i_result[0]) = 0xff7f7fffff7f7fff; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x34); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000011; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000011; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x5e695e95e1cb5a01; ++ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000088; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000088; ++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000088; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000088; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000009; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000009; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000009; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5e695e95e1cb5a01; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x5f675e96; ++ *((int*)& __m128_op0[2]) = 0xe29a5a60; ++ *((int*)& __m128_op0[1]) = 0x7fff7fff; ++ *((int*)& __m128_op0[0]) = 0x7fff7fff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x5e695e95; ++ *((int*)& __m128_op1[0]) = 0xe1cb5a01; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000005e695e95; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5e695e96c396b402; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000005e94; ++ *((unsigned long*)& __m128i_result[0]) = 0x00005e96ffffb402; ++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000005e94; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00005e96ffffb402; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000bd; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001fc0000fffeff; ++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff000100ff00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff003000ff00a0; ++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000020006; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000020006; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000020006; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000c; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000c; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff00; ++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000000005e695e95; ++ *((unsigned long*)& __m128d_op1[0]) = 0x5e695e96c396b402; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2ea268972ea2966a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4026f4ffbc175bff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f017ffd; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff81ff7d; ++ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0001ffff0001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff0001; ++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000005e695e95; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5e695e96c396b402; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d(__m128i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff00000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000001; ++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& 
__m128_op0[3]) = 0xffff0001; ++ *((int*)& __m128_op0[2]) = 0xffff0001; ++ *((int*)& __m128_op0[1]) = 0xffff0001; ++ *((int*)& __m128_op0[0]) = 0xffff0001; ++ *((int*)& __m128_result[3]) = 0xffff0001; ++ *((int*)& __m128_result[2]) = 0xffff0001; ++ *((int*)& __m128_result[1]) = 0xffff0001; ++ *((int*)& __m128_result[0]) = 0xffff0001; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100010100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x03f0000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x03f0000000000000; ++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x34); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x5f675e96a8d359f5; ++ *((unsigned long*)& __m128d_op0[0]) = 0x46387f95d9a68001; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_result[3]) = 0xff81ff7dffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_result[1]) = 0xff81ff7dffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff81ff7d; ++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x28); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff81ff7dffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff81ff7dffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000007; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000007; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long*)& __m256i_result[2]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long*)& __m256i_result[1]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long*)& __m256i_result[0]) = 0xa4a4a4a4a4a4a4a4; ++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000100010100; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ 
*((int*)& __m128_op1[3]) = 0x00fe0001;
++ *((int*)& __m128_op1[2]) = 0x00cf005f;
++ *((int*)& __m128_op1[1]) = 0x7fff7fff;
++ *((int*)& __m128_op1[0]) = 0x7fff7f00;
++ *((int*)& __m128_op2[3]) = 0x5d7f5d00;
++ *((int*)& __m128_op2[2]) = 0x7f6a007f;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x5d7f5d00;
++ *((int*)& __m128_result[2]) = 0x7f6a007f;
++ *((int*)& __m128_result[1]) = 0x7fff7fff;
++ *((int*)& __m128_result[0]) = 0x7fff7f00;
++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000012;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x5d7f5d007f6a007f;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7fff7fff7fff7f00;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009;
++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000100ff00fe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff003000ff00a0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0008000f00080008;
++ *((unsigned long*)& __m128i_result[0]) = 0x0008000a00080008;
++ __m128i_out = __lsx_vclz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_result[3]) = 0x0218ff78fc38fc38;
++ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_result[1]) = 0x0218ff78fc38fc38;
++ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000048;
++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_h(__m128i_op0,-10);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op1[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009;
++ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x36);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x01fe01fd01fd01fd;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x5d7f5d007f6a007f;
++ *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7f00;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002ebf;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x31);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00002ebf;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0xffffffff;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404;
++ *((unsigned long*)& __m128i_result[0]) = 0xc404040404040404;
++ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0006ffff0004ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0002ffff0000ff00;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0006ffff0004ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0002ffff0000ff00;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000e;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000d;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000e;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5d7f5d807fea807f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x007f008000ea007f;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff8000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000043efffff8000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff8000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000043efffff8000;
++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000404;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000040400000404;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x10fbe1e2e0000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x10fbe1e2e0000002;
++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256i_result[2]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256i_result[0]) = 0x21f8c3c4c0000005;
++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x007f008000ea007f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xf000000000000000;
++ __m128i_out = __lsx_vsat_b(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256i_op0[2]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256i_op0[0]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff8000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000043efffff8000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff8000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000043efffff8000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xbfffa004fffd8000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xbfffa004fffd8000;
++ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256d_op1[3]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256d_op1[2]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256d_op1[0]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op1[2]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000009;
++ *((unsigned long*)& __m256i_op1[0]) = 0x43ef878780000009;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5d7f5d807fea807f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xbafebb00ffd500fe;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0218ff78fc38fc38;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0218ff78fc38fc38;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_result[3]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_result[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000048;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x43ef8787;
++ *((int*)& __m256_op0[4]) = 0x8000ffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x43ef8787;
++ *((int*)& __m256_op0[0]) = 0x8000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000001df00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000001df00000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xbafebb00ffd500fe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xbafebb00ffd500fe;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((int*)& __m256_result[7]) = 0xffffffff;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0xffffffff;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256i_op1[2]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000c0000005;
++ *((unsigned long*)& __m256i_op1[0]) = 0x21f8c3c4c0000005;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbfffa004fffd8000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbfffa004fffd8000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00003f0000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00002fffe8013fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00003f0000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00002fffe8013fff;
++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x0218ff78;
++ *((int*)& __m256_op1[6]) = 0xfc38fc38;
++ *((int*)& __m256_op1[5]) = 0xfc000000;
++ *((int*)& __m256_op1[4]) = 0x00000048;
++ *((int*)& __m256_op1[3]) = 0x0218ff78;
++ *((int*)& __m256_op1[2]) = 0xfc38fc38;
++ *((int*)& __m256_op1[1]) = 0xfc000000;
++ *((int*)& __m256_op1[0]) = 0x00000048;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0xfc38fc38;
++ *((int*)& __m256_result[5]) = 0xfc000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0xfc38fc38;
++ *((int*)& __m256_result[1]) = 0xfc000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef87878000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef87878000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff0000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffbfffa0ffffff80;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff0000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffbfffa0ffffff80;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbfffa004fffd8000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbfffa004fffd8000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ffff0000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffff0000ff;
++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00ff00ffff0000ff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00ff00ffff0000ff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_d(__m128i_op0,-15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff0000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffbfffa0ffffff80;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff0000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffbfffa0ffffff80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff02000000;
++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000feccfecc;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000feccfecc;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xbafebb00;
++ *((int*)& __m128_op1[2]) = 0xffd500fe;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000fefefe000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000fefefe000000;
++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000;
++ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x010c7fbc;
++ *((int*)& __m256_op0[6]) = 0x7e1c7e1c;
++ *((int*)& __m256_op0[5]) = 0xfe000000;
++ *((int*)& __m256_op0[4]) = 0x00000024;
++ *((int*)& __m256_op0[3]) = 0x010c7fbc;
++ *((int*)& __m256_op0[2]) = 0x7e1c7e1c;
++ *((int*)& __m256_op0[1]) = 0xfe000000;
++ *((int*)& __m256_op0[0]) = 0x00000024;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff02000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff;
++ __m256i_out = __lasx_xvslti_h(__m256i_op0,2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c;
++ *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000;
++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000001c9880;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000001c9880;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ long_op0 = 0x000000007ff00000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000007ff00000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000007ff00000;
++ __m128i_out = __lsx_vreplgr2vr_d(long_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0002001800ff0078;
++ *((unsigned long*)& __m256i_op1[2]) = 0x01f8007001f80070;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0002001800ff0078;
++ *((unsigned long*)& __m256i_op1[0]) = 0x01f8007001f80070;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0218ff78fc38fc38;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0218ff78fc38fc38;
++ *((unsigned long*)& __m256i_op2[0]) = 0xfc00000000000048;
++ *((unsigned long*)& __m256i_result[3]) = 0x00300b40fc001678;
++ *((unsigned long*)& __m256i_result[2]) = 0xfc00000000001f80;
++ *((unsigned long*)& __m256i_result[1]) = 0x00300b40fc001678;
++ *((unsigned long*)& __m256i_result[0]) = 0xfc00000000001f80;
++ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x4429146a7b4c88b2;
++ *((unsigned long*)& __m128d_op0[0]) = 0xe22b3595efa4aa0c;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff80000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000048;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000048;
++ long_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000048;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000001c9880;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000001c9880;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000100000001;
++ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000010100000101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000010100000101;
++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000008050501;
++ *((unsigned long*)& __m256i_result[2]) = 0x0100000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000008050501;
++ *((unsigned long*)& __m256i_result[0]) = 0x0100000100000001;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256d_op1[2]) = 0x8000000100000001;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256d_op1[0]) = 0x8000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffb;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffb;
++ __m128i_out = __lsx_vmini_d(__m128i_op0,-5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffb;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffb;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000100000000fc;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000100000000fc;
++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000100000000fc;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000100000000fc;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000;
++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000100000000fc;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000100000000fc;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000;
++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffe36780;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) =
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000010100000101; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000010000000000; ++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000000ffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffff0000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010100000101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ int_result = 0x0000000000000002; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x6f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x66); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0404050404040404; ++ *((unsigned long*)& __m128i_result[0]) = 0x0404050404040404; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0404050404040404; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0404050404040404; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000004040504; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000004040504; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200010002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200010002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000101; ++ *((int*)& __m128_op0[2]) = 0x00000101; ++ *((int*)& __m128_op0[1]) = 0x00000101; ++ *((int*)& __m128_op0[0]) = 0x00000101; ++ *((int*)& __m128_op1[3]) = 0x00000002; ++ *((int*)& __m128_op1[2]) = 0x00000002; ++ *((int*)& __m128_op1[1]) = 0x00000002; ++ *((int*)& __m128_op1[0]) = 0x00000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op2[2]) = 0x9090909090909090; 
++ *((unsigned long*)& __m256i_op2[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op2[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_result[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_result[0]) = 0x0100000100000001; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffdfe01; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffdfe0200000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4000000000000000; ++ __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000004040504; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004040504; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010100000101; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000008050501; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x04040504; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x04040504; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f91; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f91; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010100000101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x08050501; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x08050501; ++ *((int*)& __m256_op1[7]) = 0x90909090; ++ *((int*)& __m256_op1[6]) = 0x90909090; ++ *((int*)& __m256_op1[5]) = 0x90909090; ++ *((int*)& __m256_op1[4]) = 0x90909090; ++ *((int*)& __m256_op1[3]) = 0x90909090; ++ *((int*)& __m256_op1[2]) = 0x90909090; ++ *((int*)& __m256_op1[1]) = 0x90909090; ++ *((int*)& __m256_op1[0]) = 0x90909090; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000008050501; ++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000008050501; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000008050501; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x01000000; ++ *((int*)& __m128_op0[0]) = 0x01000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_result[3]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long*)& __m256i_result[2]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long*)& __m256i_result[1]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long*)& __m256i_result[0]) = 0x6f6f6f6f6f6f6f6f; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ffff00ff00ff00; ++ __m128i_out = __lsx_vldi(-1686); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ffff00ff00ff00; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00ffff00; ++ *((int*)& __m128_op0[2]) = 0xff00ff00; ++ *((int*)& __m128_op0[1]) = 0x00ffff00; ++ *((int*)& __m128_op0[0]) = 0xff00ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000008050501; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vneg_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x9090909090909090; ++ *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xc848c848c848c848; ++ *((unsigned long*)& __m256i_result[2]) = 0x8848c848c848c848; ++ *((unsigned long*)& __m256i_result[1]) = 0xc848c848c848c848; ++ *((unsigned long*)& __m256i_result[0]) = 0x8848c848c848c848; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000f91; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000f91; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 
0xff00ff00; ++ *((int*)& __m128_op0[2]) = 0xff00ff00; ++ *((int*)& __m128_op0[1]) = 0xff00ff00; ++ *((int*)& __m128_op0[0]) = 0xff00ff00; ++ *((int*)& __m128_result[3]) = 0x7fc00000; ++ *((int*)& __m128_result[2]) = 0x7fc00000; ++ *((int*)& __m128_result[1]) = 0x7fc00000; ++ *((int*)& __m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000022; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfe813f00fe813f00; ++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000033; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff800000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff800000000000; ++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000f91; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000f91; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f90; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f90; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ff000000ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000001; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((int*)& __m128_result[3]) = 0xffe00000; ++ *((int*)& __m128_result[2]) = 0xffe00000; ++ *((int*)& __m128_result[1]) = 0xffe00000; ++ *((int*)& __m128_result[0]) = 0xffe00000; ++ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfe813f00fe813f00; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff017fffff017f; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff017fffff017f; ++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff9f017f1fa0b199; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1197817fd839ea3e; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000033; ++ *((unsigned long*)& __m128i_result[1]) = 0xff011fb11181d8ea; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_result[3]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_result[2]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_result[1]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_result[0]) = 0x8800c800c800c801;
++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff70;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff70;
++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003e;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe000200fe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe000200fe;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000003e;
++ *((unsigned long*)& __m128i_result[0]) = 0xfefe02fefefe02fe;
++ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00fe00fe;
++ *((int*)& __m128_op0[2]) = 0x000200fe;
++ *((int*)& __m128_op0[1]) = 0x00fe00fe;
++ *((int*)& __m128_op0[0]) = 0x000200fe;
++ *((int*)& __m128_result[3]) = 0xc2fc0000;
++ *((int*)& __m128_result[2]) = 0xc3040000;
++ *((int*)& __m128_result[1]) = 0xc2fc0000;
++ *((int*)& __m128_result[0]) = 0xc3040000;
++ __m128_out = __lsx_vflogb_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x0000ff70;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x0000ff70;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000100;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000100;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff011fb11181d8ea;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80ff800000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe000200fe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe000200fe;
++ *((unsigned long*)& __m128i_result[1]) = 0x00fd02fe00002302;
++ *((unsigned long*)& __m128i_result[0]) = 0x007ffd0200000000;
++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffcc9a989a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00003fff00003fff;
++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ 
*((unsigned long*)& __m128i_op1[1]) = 0xc2fc0000c3040000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc2fc0000c3040000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff37b737b8;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff77b737b8;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff37b737b8;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff77b737b8;
++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_h_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f90;
++ __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xc2fc0000;
++ *((int*)& __m128_op1[2]) = 0xc3040000;
++ *((int*)& __m128_op1[1]) = 0xc2fc0000;
++ *((int*)& __m128_op1[0]) = 0xc3040000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc800c800c800c800;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8800c800c800c801;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00fe00fe;
++ *((int*)& __m128_op0[2]) = 0x000200fe;
++ *((int*)& __m128_op0[1]) = 0x00fe00fe;
++ *((int*)& __m128_op0[0]) = 0x000200fe;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffe00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffefffe00000000;
++ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000208000002080;
++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000;
++ *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000;
++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_result[1]) = 0x2080208020802080;
++ *((unsigned long*)& __m128i_result[0]) = 0x2080208020802080;
++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_op0[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_result[0]) = 0xa352bfac9269e0aa;
++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128d_op0[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000208000002080;
++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x000007c8;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x000007c8;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffd70b00006ea9;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffa352ffff9269;
++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080;
++ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffd70b00006ea9;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffa352ffff9269;
++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffd70b00006ea9;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffa352ffff9269;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffd70b00006ea9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffa352ffff9269;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff000100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff0001;
++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000007c8;
++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fe01fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fe01fe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe0000ff01;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe0000ff01;
++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000003fbfc04;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001fdfe02;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000003fbfc04;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fdfe02;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fd;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000001fe01fe;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000ff0100;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000001fe01fe;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000ff0100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xbba0c07b51230d5c;
++ *((unsigned long*)& __m128d_op0[0]) = 0xa15f3f9e8763c2b9;
++ *((unsigned long*)& __m128d_op1[1]) = 0xbba0c07b51230d5c;
++ *((unsigned long*)& __m128d_op1[0]) = 0xa15f3f9e8763c2b9;
++ *((int*)& __m128_result[3]) = 0x9d0603db;
++ *((int*)& __m128_result[2]) = 0x80000000;
++ *((int*)& __m128_result[1]) = 0x9d0603db;
++ *((int*)& __m128_result[0]) = 0x80000000;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_op0[0]) = 0xa352bfac9269e0aa;
++ *((int*)& __m128_result[3]) = 0xce23d33d;
++ *((int*)& __m128_result[2]) = 0x4edd53ea;
++ *((int*)& __m128_result[1]) = 0xceb95a81;
++ *((int*)& __m128_result[0]) = 0xcedb2c3f;
++ __m128_out = __lsx_vffint_s_w(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128i_result[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128i_result[0]) = 0xa352bfac9269e0aa;
++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000007c8;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001fe01fe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0100;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fe01fe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c8;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c8;
++ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fd;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000004000000fd;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000004000000fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004;
++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000003fbfc04;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001fdfe02;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000003fbfc04;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001fdfe02;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_d(__m256i_op0,13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xce23d33e43d9736c;
++ *((unsigned long*)& __m128i_op0[0]) = 0x63b2ac27aa076aeb;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x31dc2cc1bc268c93;
++ *((unsigned long*)& __m128i_result[0]) = 0x9c4d53d855f89514;
++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_d(__m256i_op0,4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xce23d33e43d9736c;
++ *((unsigned long*)& __m128i_op1[0]) = 0x63b2ac27aa076aeb;
++ *((unsigned long*)& __m128i_result[1]) = 0x63b2ac27aa076aeb;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xc8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x31dc2cc1bc268c93;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9c4d53d855f89514;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff;
++ __m128i_out = __lsx_vslei_h(__m128i_op0,13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000020006;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c8;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c8;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffff0000;
++ *((int*)& __m128_op0[0]) = 0x0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000;
++ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x3e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x63b2ac27aa076aeb;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000063b2ac27;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffaa076aeb;
++ __m128i_out = __lsx_vexth_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000;
++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000063b2ac27;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffaa076aeb;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9;
++ 
*((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515;
++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000063b2ac27;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffaa076aeb;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff63b3584e;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000fffdaa07d5d6;
++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c8;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c8;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000600;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftinth_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ac26;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000003000000d613;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c0000000;
++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000fd;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000fe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000062d4;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000062d4;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000006338;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xd70b30c96ea9f4e8;
++ *((unsigned long*)& __m128d_op0[0]) = 0xa352bfac9269e0aa;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000003000000d613;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c0000000;
++ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
++ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000003000000d613;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000003000000d612;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000bfffffff;
++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c9;
++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfff0fffffff00001;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfff0fffffff09515;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000;
++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ 
*((unsigned long*)& __m128i_result[1]) = 0x000100010000ffda;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000016;
++ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000003000000d612;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000bfffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000500000000;
++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0600000100000001;
++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000ff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000;
++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff0100ff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffeffff;
++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000500000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000060000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ 
*((unsigned long*)& __m128i_result[0]) = 0x0000fffe00006aea;
++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256d_op1[2]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256d_op1[1]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256d_op1[0]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffbfffefffc9510;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffbfffefffc9510;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706;
++ *((unsigned long*)& __m128i_op2[1]) = 0xfffbfffefffc9510;
++ *((unsigned long*)& __m128i_op2[0]) = 0xfffbfffefffc9510;
++ *((unsigned long*)& __m128i_result[1]) = 0x29c251319c3a5c90;
++ *((unsigned long*)& __m128i_result[0]) = 0x62fb9272df7da6b0;
++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefefefefefe;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_h(__m256i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff0100ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0607060700000807;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0707f8f803e8157e;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x31);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x000000f0;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x000000f0;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x000000f0;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x000000f0;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[2]) = 0x01010101010101c9;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[0]) = 0x01010101010101c9;
++ __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff88;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000f0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe98;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064;
++ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffe98; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe98; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x06070607; ++ *((int*)& __m128_op0[2]) = 0x00000807; ++ *((int*)& __m128_op0[1]) = 0x0707f8f8; ++ *((int*)& __m128_op0[0]) = 0x03e8157e; ++ *((int*)& __m128_result[3]) = 0x5c303f97; ++ *((int*)& __m128_result[2]) = 0x61ff9049; ++ *((int*)& __m128_result[1]) = 0x5bafa1dd; ++ *((int*)& __m128_result[0]) = 0x5d3e1e1d; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0xffff53d9; ++ *((int*)& __m128_op0[1]) = 0xffff0001; ++ *((int*)& __m128_op0[0]) = 0xffff9515; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9514; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long*)& __m128i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_result[0]) = 0xff000001ffff9515; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x67); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffe98; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffa9ed; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8000017fffca8b; ++ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9514; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ac26; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000001; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000000; ++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x2c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01010101010101c9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01010101010101c9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0008080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0008080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000003c; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x45); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000027; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x0000ac26; ++ *((int*)& __m128_op0[1]) = 0x00ff0000; ++ *((int*)& __m128_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0a09080709080706; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long*)& __m128i_result[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a09080709080706; ++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff0000ac26; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0c0b0a09; ++ *((int*)& __m128_op0[2]) = 0x0b0a0908; ++ *((int*)& __m128_op0[1]) = 0x0a090807; ++ *((int*)& __m128_op0[0]) = 0x09080706; ++ *((int*)& __m128_op1[3]) = 0x0c0b0a09; ++ *((int*)& __m128_op1[2]) = 0x0b0a0908; ++ *((int*)& __m128_op1[1]) = 0x0a090807; ++ *((int*)& __m128_op1[0]) = 0x09080706; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0c0b0a09; ++ *((int*)& __m128_op0[2]) = 0x0b0a0908; ++ *((int*)& __m128_op0[1]) = 0x0a090807; ++ *((int*)& __m128_op0[0]) = 0x09080706; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vfclass_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe00006aea; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffce; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000010002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff960015; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010002; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff960015; ++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x000000ff; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x000000ff; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000000007fffa9ed; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7f8000017fffca8b; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00010002; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xff960015; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffd60015; ++ __m128i_out = __lsx_vfrintrm_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffd60015; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x80808080806b000b; ++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff0000ac26; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_result[0]) = 0x007f800000000000; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000ffce; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x80808080806b000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op1[0]) = 0x007f800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000807f80808000; ++ *((unsigned long*)& __m128i_result[0]) = 0x80006b0000000b00; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000807f80808000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x80006b0000000b00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000807f00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x80006b0080808080; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x8000807f00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x80006b0080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff7fff; ++ __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000078100000064; ++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000002b0995850; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op1[0]) = 0x007f800000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op2[0]) = 0x007f800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff00011cf0c569; ++ *((unsigned long*)& __m128i_result[0]) = 0xc0000002b0995850; ++ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000064; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000781; ++ *((int*)& __m256_op0[0]) = 0x00000064; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x0000ffce; ++ *((int*)& __m128_op1[3]) = 0xffff0001; ++ *((int*)& __m128_op1[2]) = 0x1cf0c569; ++ *((int*)& __m128_op1[1]) = 0xc0000002; ++ *((int*)& __m128_op1[0]) = 0xb0995850; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000064; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000781; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000064; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op0[0]) = 0x007f800000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_result[0]) = 
0x81000080806b000b; ++ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000807f00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x80006b0080808080; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffe30f3a97; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffcfe72830; ++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op0[0]) = 0x81000080806b000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff9cf0d77b; ++ *((unsigned long*)& __m128i_result[0]) = 0xc1000082b0fb585b; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010;
++ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080808000;
++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080808000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080808000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x8b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffff00011cf0c569;
++ *((unsigned long*)& __m128d_op0[0]) = 0xc0000002b0995850;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x22);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff9cf0d77b;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc1000082b0fb585b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x1f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000080808000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080808000;
++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000032;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000003c000000032;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000004e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000032;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000003c000000032;
++ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[2]) = 0x001000100010000a;
++ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[0]) = 0x001000060010000a;
++ __m256i_out = __lasx_xvclz_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000080808000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_result[0]) = 0x0080006b0000000b;
++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000004e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffefffe;
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000004e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffbfffb;
++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xf4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
++ __m128i_out = __lsx_vpcnt_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001;
++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00800080;
++ *((int*)& __m128_op0[2]) = 0x00800080;
++ *((int*)& __m128_op0[1]) = 0x0080006b;
++ *((int*)& __m128_op0[0]) = 0x0000000b;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x80808080;
++ *((int*)& __m128_op1[0]) = 0x806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x2f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x80808080806b000b;
++ __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000001;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000001;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000001;
++ *((int*)& __m256_op1[7]) = 0x7ff00000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x7ff00000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x7ff00000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x7ff00000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslei_d(__m256i_op0,1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x7ff00000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x7ff00000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x7ff00000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x7ff00000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0080006b0000000b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000001ff1745745c;
++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x80808080806b000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000c0c0c000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0080006b0000000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b;
++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op2[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op2[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op2[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_op2[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_result[2]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_result[1]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_result[0]) = 0xa1a1a1a15e5e5e5e;
++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000001;
++ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x13121110;
++ *((int*)& __m128_op0[2]) = 0x1211100f;
++ *((int*)& __m128_op0[1]) = 0x11100f0e;
++ *((int*)& __m128_op0[0]) = 0x100f0e0d;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x131211101211100f;
++ *((unsigned long*)& __m128d_op0[0]) = 0x11100f0e100f0e0d;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xa1a1a1a1a1a15e5e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xa1a1a1a1a1a15e5e;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xc0c0c000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00800080;
++ *((int*)& __m128_op1[2]) = 0x00800080;
++ *((int*)& __m128_op1[1]) = 0x0080006b;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00800080;
++ *((int*)& __m128_result[2]) = 0xc0c0c000;
++ *((int*)& __m128_result[1]) = 0x0080006b;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040002;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000;
++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00fe01e000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00fe01e000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a1a1a15e5e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a1a1a15e5e;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000;
++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0080006b00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff80000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff80000;
++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000400000004000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00004000ffffffff;
++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0080006b00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x001b19b1c9c6da5a;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x001b19b1c9c6da5a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_result[0]) = 0x008003496dea0c61;
++ __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x131211101211100f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x11100f0e100f0e0d;
++ *((unsigned long*)& __m128i_result[1]) = 0x13101213120f1112;
++ *((unsigned long*)& __m128i_result[0]) = 0x110e1011100d0f10;
++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xcb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
++ *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
++ *((unsigned long*)& __m256i_result[3]) = 0xa1bfa1bfa1bfa1bf;
++ *((unsigned long*)& __m256i_result[2]) = 0xa1bfa1bf5e7c5e7c;
++ *((unsigned long*)& __m256i_result[1]) = 0xa1bfa1bfa1bfa1bf;
++ *((unsigned long*)& __m256i_result[0]) = 0xa1bfa1bf5e7c5e7c;
++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ unsigned_long_int_result = 0x0000000000000000;
++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x007d003e007d003e;
++ *((unsigned long*)& __m256i_result[2]) = 0x007d003effa80010;
++ *((unsigned long*)& __m256i_result[1]) = 0x007d003e007d003e;
++ *((unsigned long*)& __m256i_result[0]) = 0x007d003effa80010;
++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x008003496dea0c61;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00004000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xf7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x007d003e007d003e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x007d003effa80010;
++ *((unsigned long*)& __m256i_op1[1]) = 0x007d003e007d003e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x007d003effa80010;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_d(__m128i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0xffffffff;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0xffffffff;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0xffffffff;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0xffffffff;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0080008000800080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff457db03f;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff457db03f;
++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplgr2vr_h(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff457db03f;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff457db03f;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff457db03f;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000457db03e;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff457db03f;
++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1);
++
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000457d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000b03f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000457d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000b03f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x3b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x31); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0008b03e457db03e; ++ *((unsigned long*)& __m256i_result[2]) = 0x457db03e45a87310; ++ *((unsigned long*)& __m256i_result[1]) = 0x0008b03e457db03e; ++ *((unsigned long*)& __m256i_result[0]) = 0x457db03e45a87310; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = 
__lasx_xvslli_b(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffba8300004fc2; ++ 
__m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000457d607d; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff457d607f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000457d607d; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff457d607f; ++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457d607d; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457d607f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457d607d; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457d607f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffa2beb040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffa2beb040; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457d607d; ++ *((unsigned long*)& __m256i_op0[2]) = 
0xffffffff457d607f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457d607d; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457d607f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffa2beb040; ++ __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00020001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00020001; ++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x000f000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000f000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256d_result[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256d_result[0]) = 0x7fffffffa2beb040; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000f000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff1000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff1000000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000020002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffba8300004fc2; ++ *((unsigned long*)& 
__m256i_result[1]) = 0xffffba8300004fc2; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffba8300004fc2; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfff1000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfff1000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfrintrp_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ 
*((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x232221201f1e1d1c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1b1a191817161514; ++ *((unsigned long*)& __m256i_op1[1]) = 0x232221201f1e1d1c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1b1a191817161514; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256d_result[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256d_result[2]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256d_result[0]) = 0xc1d75053f0000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000100010000fffb; ++ *((unsigned long*)& __m128i_result[0]) = 0x000100010000fffb; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long*)& __m128i_result[0]) = 0x0303030303030304; ++ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128d_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x004100df00ff00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00c000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x004100df00ff00ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00c000000000; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000022be22be; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000022be22be; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fffa2bea2be; ++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x004100df00ff00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00c000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x004100df00ff00ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00c000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_result[2]) = 0xc1d75053f0000000; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_result[0]) = 0xc1d75053f0000000; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00010001; ++ *((int*)& __m128_op0[2]) = 0x00010001; ++ *((int*)& __m128_op0[1]) = 0x00010001; ++ *((int*)& __m128_op0[0]) = 0x00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000022be22be; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000022be22be; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fffa2bea2be; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe1ffe1229f229f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fe07fe0a29fa29f; ++ *((unsigned long*)& __m256i_result[1]) = 0xffe1ffe1229f229f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fe07fe0a29fa29f; ++ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000000; ++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000104000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000104000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xbe21000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000505300000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xbe21000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000505300000000; ++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000104000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000104000000000; ++ *((unsigned 
long*)& __m256d_op1[3]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000022beb03f; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffa2beb040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc1d75053f0000000; ++ *((int*)& __m256_result[7]) = 0xc03ae000; ++ *((int*)& __m256_result[6]) = 0x420a6000; ++ *((int*)& __m256_result[5]) = 0xc6000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0xc03ae000; ++ *((int*)& __m256_result[2]) = 0x420a6000; ++ *((int*)& __m256_result[1]) = 0xc6000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0xbe21000100000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000505300000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xbe21000100000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000505300000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x41dfffffffc00000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xc1d75053f0000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00005053000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00005053000000ff; ++ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000104000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000104000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000165a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000165a; ++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x40b2bf4d; ++ *((int*)& __m256_op0[6]) = 0x30313031; ++ *((int*)& __m256_op0[5]) = 0x50005000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x40b2bf4d; ++ *((int*)& __m256_op0[2]) = 0x30313031; ++ *((int*)& __m256_op0[1]) = 0x50005000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x22be22be; ++ *((int*)& __m256_op1[5]) = 0x7fff7fff; ++ *((int*)& __m256_op1[4]) = 0xa2bea2be; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x22be22be; ++ *((int*)& __m256_op1[1]) = 0x7fff7fff; ++ *((int*)& __m256_op1[0]) = 0xa2bea2be; ++ *((int*)& __m256_result[7]) = 0x40b2bf4d; ++ *((int*)& __m256_result[6]) = 0x30313031; ++ *((int*)& __m256_result[5]) = 0x7fff7fff; ++ *((int*)& __m256_result[4]) = 0xa2bea2be; ++ *((int*)& __m256_result[3]) = 0x40b2bf4d; ++ *((int*)& __m256_result[2]) = 0x30313031; ++ *((int*)& __m256_result[1]) = 0x7fff7fff; ++ *((int*)& __m256_result[0]) = 0xa2bea2be; ++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfffefffe; ++ *((int*)& __m128_op0[2]) = 0xfffeffff; ++ *((int*)& __m128_op0[1]) = 0xfffefffe; ++ *((int*)& __m128_op0[0]) = 0xfffeffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffefffefffeffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffefffefffeffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00005053000000ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00005053000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00010001; ++ *((int*)& __m128_op1[2]) = 0x00010001; ++ *((int*)& __m128_op1[1]) = 0x00010001; ++ *((int*)& __m128_op1[0]) = 0x00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x3f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffe0000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x40b2bf4d30313031; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long*)& __m256i_op0[1]) = 0x40b2bf4d30313031; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fffa2bea2be; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x40b240b330313031; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff5d425d42; ++ *((unsigned long*)& __m256i_result[1]) = 0x40b240b330313031; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff5d425d42; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x40b240b330313031; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff5d425d42; ++ *((unsigned long*)& __m256i_op1[1]) = 0x40b240b330313031; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff5d425d42; ++ *((unsigned long*)& __m256i_result[3]) = 0x000040b200002fd4; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007fff0000739c; ++ *((unsigned long*)& __m256i_result[1]) = 0x000040b200002fd4; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fff0000739c; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000400000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_result[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_result[0]) = 0xc600000000000000;
++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff;
++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000040b200002fd4;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00007fff0000739c;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000040b200002fd4;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00007fff0000739c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000739c;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff;
++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000013ec13e;
++ *((unsigned long*)& __m128d_op1[0]) = 0xc03fc03fc0ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000040b200002fd4;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00007fff0000739c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000040b200002fd4;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fff0000739c;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ffff0000;
++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ffff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000;
++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffe0000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe0000000;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ffff0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ffff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffc03fffffffc0;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffc00000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffc03fffffffc0;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffc00000000000;
++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000004;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xe0000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfrecip_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x013ec13e;
++ *((int*)& __m128_op0[1]) = 0xc03fc03f;
++ *((int*)& __m128_op0[0]) = 0xc0ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffdfffffff8;
++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000008000165a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000008000165a;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff00017fff005d;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fffe9a6;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff00017fff005d;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fffe9a6;
++ __m256i_out = __lasx_xvneg_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000011f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000011f;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0000ffa3;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000000000000165a;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0000ffa3;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000000000165a;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000192540;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000192540;
++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ long_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a;
++ *((unsigned long*)& __m256i_result[3]) = 0x1818ffff1818ffa3;
++ *((unsigned long*)& __m256i_result[2]) = 0x181818181818185a;
++ *((unsigned long*)& __m256i_result[1]) = 0x1818ffff1818ffa3;
++ *((unsigned long*)& __m256i_result[0]) = 0x181818181818185a;
++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffdfffffff8;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7ffffffc;
++ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffc03b1fc5e050;
++ *((unsigned long*)& __m256d_op0[2]) = 0x6a9e3fa2603a2000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffc03b1fc5e050;
++ *((unsigned long*)& __m256d_op0[0]) = 0x6a9e3fa2603a2000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636389038903;
++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636389038903;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000001ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000001ffff;
++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fe70000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fe70000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007fe70000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fe70000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00007f7f80007fa3;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f670000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00007f7f80007fa3;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f670000;
++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc03fffffffc0;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffc00000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffc03fffffffc0;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffc00000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_result[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_result[0]) = 0xc600000000000000;
++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fe70000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fe70000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003;
++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffff;
++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff;
++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000008000165a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000008000165a;
++ *((unsigned long*)& __m256i_result[3]) = 0x0009000900090009;
++ *((unsigned long*)& __m256i_result[2]) = 0x000900090009165a;
++ *((unsigned long*)& __m256i_result[1]) = 0x0009000900090009;
++ *((unsigned long*)& __m256i_result[0]) = 0x000900090009165a;
++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f80007fa3;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f670000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f80007fa3;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f670000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffe7fffffff;
++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7ffffffb;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe7fffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000001fd02;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffe1fffffff;
++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffc03b1fc5e050;
++ *((unsigned long*)& __m256i_op0[2]) = 0x6a9e3fa2603a2000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffc03b1fc5e050;
++ *((unsigned long*)& __m256i_op0[0]) = 0x6a9e3fa2603a2000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffc03fffffffc0;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffc00000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffc03fffffffc0;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffc00000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_result[2]) = 0x019d00a2003a0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_result[0]) = 0x019d00a2003a0000;
++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe1fffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffb;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000080008;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x000000ff;
++ *((int*)& __m128_op1[0]) = 0xfe01fd02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080008;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000040002;
++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_op0[2]) = 0x019d00a2003a0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_op0[0]) = 0x019d00a2003a0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_result[2]) = 0x019d00a20039fff9;
++ *((unsigned long*)& __m256i_result[1]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_result[0]) = 0x019d00a2003a0000;
++ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7ffffffb;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000040002;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080008;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0xc1f0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0xc1f0000000000000;
++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x000000ff;
++ *((int*)& __m128_op0[0]) = 0xfe01fd02;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x0001fe01;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff02ff80fede;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff02ff80fede;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fffe00800022;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffe00800022;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000001fe01;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001fe01;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f0f0f0f00000000;
++ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
++ __m128i_out = __lsx_vpcnt_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x6a9e3f9a;
++ *((int*)& __m256_op0[4]) = 0x603a2001;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x6a9e3f9a;
++ *((int*)& __m256_op0[0]) = 0x603a2001;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000900000009;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffff7fffffff7f;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffe00800022;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffe00800022;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00007fff00400011;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000008001ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00007fff00400011;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff;
++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000307;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00ff80ff00ff80ff;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000900000009;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0x01fe007a;
++ *((int*)& __m256_op0[6]) = 0x01c40110;
++ *((int*)& __m256_op0[5]) = 0x019d00a2;
++ *((int*)& __m256_op0[4]) = 0x0039fff9;
++ *((int*)& __m256_op0[3]) = 0x01fe007a;
++ *((int*)& __m256_op0[2]) = 0x01c40110;
++ *((int*)& __m256_op0[1]) = 0x019d00a2;
++ *((int*)& __m256_op0[0]) = 0x003a0000;
++ *((int*)& __m256_op1[7]) = 0x0000fffe;
++ *((int*)& __m256_op1[6]) = 0x00800022;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0x0000fffe;
++ *((int*)& __m256_op1[2]) = 0x00800022;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff7fffffff7f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000f0000000f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0f0f0f0f00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0f07697100000000;
++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff80ff00ff80ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseqi_b(__m128i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ffffff81fe;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffff00ffff7e01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000fe86;
++ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000f0000000f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f0f0f0f00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffff00;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffff00;
++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000fffe00800022;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000fffe00800022;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000;
++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ffffff81fe;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffff7e01;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x000000fffe01fd02;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fffe86f901;
++ __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000100;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000100;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000100;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000001000000ff;
++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x000000ff;
++ *((int*)& __m128_op0[0]) = 0xfe01fd02;
++ *((int*)& __m128_op1[3]) = 0x00000001;
++ *((int*)& __m128_op1[2]) = 0x00000100;
++ *((int*)& __m128_op1[1]) = 0x00000001;
++ *((int*)& __m128_op1[0]) = 0x00000100;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vneg_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000100;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001000000ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff80ff00ff80ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000;
++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000007ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x0003ffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000007ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_op0[2]) = 0x019d00a20039fff9;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_op0[0]) = 0x019d00a2003a0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x01fe007a01c40110;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x01fe007a01c40110;
++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000077fff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_du(__m256i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_result[2]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256i_result[1]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_result[0]) = 0x8d8d72728d8d8d8d;
++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x8d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256i_result[3]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_result[2]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256i_result[1]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_result[0]) = 0x8d8d72728d8d8d8d;
++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000;
++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x0003ffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vfclass_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0f07697100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000076971000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x7f800000;
++ *((int*)& __m128_result[2]) = 0x7f800000;
++ *((int*)& __m128_result[1]) = 0x7f800000;
++ *((int*)& __m128_result[0]) = 0x7f800000;
++ __m128_out = __lsx_vfrsqrt_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m128_op0[3]) = 0x7f800000;
++ *((int*)& __m128_op0[2]) = 0x7f800000;
++ *((int*)& __m128_op0[1]) = 0x7f800000;
++ *((int*)& __m128_op0[0]) = 0x7f800000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x21);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000040000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000040000000;
++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000040000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000040000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3fc000005fc00000;
++ *((unsigned long*)& __m128i_result[0]) = 0x3fc000005fc00000;
++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x7fc00000;
++ *((int*)& __m128_result[2]) = 0x7fc00000;
++ *((int*)& __m128_result[1]) = 0x7fc00000;
++ *((int*)& __m128_result[0]) = 0x7fc00000;
++ __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256d_op0[2]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256d_op0[1]) = 0x8d8d72728d8d7272;
++ *((unsigned long*)& __m256d_op0[0]) = 0x8d8d72728d8d8d8d;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned
long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x01fe007a; ++ *((int*)& __m256_op1[6]) = 0x01c40110; ++ *((int*)& __m256_op1[5]) = 0x019d00a2; ++ *((int*)& __m256_op1[4]) = 0x0039fff9; ++ *((int*)& __m256_op1[3]) = 0x01fe007a; ++ *((int*)& __m256_op1[2]) = 0x01c40110; ++ *((int*)& __m256_op1[1]) = 0x019d00a2; ++ *((int*)& __m256_op1[0]) = 0x003a0000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0xff800000; ++ *((int*)& __m256_result[6]) = 0xff800000; ++ *((int*)& __m256_result[5]) = 0xff800000; ++ *((int*)& __m256_result[4]) = 0xff800000; ++ *((int*)& __m256_result[3]) = 0xff800000; ++ *((int*)& __m256_result[2]) = 0xff800000; ++ *((int*)& __m256_result[1]) = 0xff800000; ++ *((int*)& __m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xc0008000c0008000; ++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800f800; ++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffff8000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffff8000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffff8000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffff8000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& 
__m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x80000000; ++ *((int*)& __m256_op1[4]) = 0x80000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x80000000; ++ *((int*)& __m256_op1[0]) = 0x80000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc0008000c0008000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x8001000180010000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x8001000180010000; ++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x80000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x80000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7fc00000; ++ *((int*)& __m256_result[6]) = 0x7fc00000; ++ *((int*)& __m256_result[5]) = 0x7fc00000; ++ *((int*)& __m256_result[4]) = 0x7fc00000; ++ *((int*)& __m256_result[3]) = 0x7fc00000; ++ *((int*)& __m256_result[2]) = 0x7fc00000; ++ *((int*)& __m256_result[1]) = 0x7fc00000; ++ *((int*)& __m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000001; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000001; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff008000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff008000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff008000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff008000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0xfffffff5; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000001ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001ff; ++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffffff5; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010000100000000; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010000100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff80000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff80000000; ++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long*)& __m128i_result[0]) = 0x8f8f8f8f8f8f8f8f; ++ __m128i_out = __lsx_vaddi_bu(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvabsd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op2[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op2[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001808281820102; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001808201018081; ++ __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001808281820102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001808201018081; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001008281820102; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001008201010081; ++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x80808080; ++ *((int*)& __m128_op1[2]) = 0x80808080; ++ *((int*)& __m128_op1[1]) = 0x80808080; ++ *((int*)& __m128_op1[0]) = 0x80808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0007000100040102; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0003000100010101; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0007000100040102; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0003000100010101; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000001; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000fe000000fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000fe000000fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000fe000000fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000fe000000fe; ++ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000300000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmin_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001c001c001c001c; ++ *((unsigned long*)& __m128i_result[0]) = 0x001c001c001c001c; ++ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long*)& __m256i_result[2]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long*)& __m256i_result[1]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long*)& __m256i_result[0]) = 0xffe5ffe5ffe5ffe5; ++ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffeb; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffeb; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800200000002; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800200000002; ++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffeb; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffeb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h(__m256i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffdffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffeffff; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffeb; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffeb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000015; ++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x5858585858585858; ++ *((unsigned long*)& __m256i_result[2]) = 0x5858585858585858; ++ *((unsigned long*)& __m256i_result[1]) = 0x5858585858585858; ++ *((unsigned long*)& __m256i_result[0]) = 0x5858585858585858; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xa7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1e1e1e0000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1e1e1e0000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1e1e1e0000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1e1e1e0000000000; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x2000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x2000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffbfbfbfc0; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbfbfbfc0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m128i_result[1]) = 0xffbfffbfff7fff80; ++ *((unsigned long*)& __m128i_result[0]) = 0xffbfffbfff7fff80; ++ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x54); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000002fffffffb; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000010000fffb; ++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000040804000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000040804000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000040a04000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000040a04000; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe6; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffe6; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe6; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffe6; ++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000040a04000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000040a04000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000040a04000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000040a04000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000002fffffffb; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000010000fffb; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000bffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x42); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ 
*((int*)& __m128_op1[2]) = 0xbffffffe; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x5); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xe7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000bffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000bffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000002e; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000004e; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) 
= 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0f000f000f000f00; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0f000f000f000f00; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000101000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000101000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000500000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xffff0000; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xf7f7f7f7; ++ *((int*)& __m256_op1[6]) = 0xf7f7f7f8; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0xf7f7f7f7; ++ *((int*)& __m256_op1[2]) = 0xf7f7f7f8; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x3a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff10000fff10000; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xfff10000; ++ *((int*)& __m256_op0[4]) = 0xfff10000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xfff10000; ++ *((int*)& __m256_op0[0]) = 0xfff10000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ 
*((int*)& __m256_result[5]) = 0xfff10000; ++ *((int*)& __m256_result[4]) = 0xfff10000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0xfff10000; ++ *((int*)& __m256_result[0]) = 0xfff10000; ++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256i_result[2]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256i_result[0]) = 0x001fe020001fe020; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x23); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xff800000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xff800000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xfff10000; ++ *((int*)& __m256_op0[4]) = 0xfff10000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xfff10000; ++ *((int*)& __m256_op0[0]) = 0xfff10000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xfff10000; ++ *((int*)& __m256_op1[4]) = 0xfff10000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0xfff10000; ++ *((int*)& __m256_op1[0]) = 0xfff10000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff88ff88; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xfff10000; ++ *((int*)& __m256_op0[4]) = 0xfff10000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xfff10000; ++ *((int*)& __m256_op0[0]) = 0xfff10000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xfff10000; ++ *((int*)& __m256_op1[4]) = 0xfff10000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ 
*((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0080000000800000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff1000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff1000000000000; ++ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfff10000fff10000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xfff10000; ++ *((int*)& __m256_op0[4]) = 0xfff10000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xfff10000; ++ *((int*)& __m256_op0[0]) = 0xfff10000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000001ffe2000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x001fe020001fe020; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff0020ff1f001f; ++ *((unsigned long*)& __m256i_result[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff0020ff1f001f; ++ *((unsigned long*)& __m256i_result[0]) = 0xffe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); 
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100f000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100f000ff;
++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x1b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fe200000fe1f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fe200000fe1f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000005;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x80000000;
++ *((int*)& __m128_result[2]) = 0x80000000;
++ *((int*)& __m128_result[1]) = 0x80000000;
++ *((int*)& __m128_result[0]) = 0x80000000;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8101010181010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x8101010181010101;
++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128d_op1[1]) = 0x8101010181010101;
++ *((unsigned long*)& __m128d_op1[0]) = 0x8101010181010101;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x80000000;
++ *((int*)& __m128_result[0]) = 0x80000000;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8101010181010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8101010181010101;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc0808000c0808000;
++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc0808000c0808000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xc080800000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc080800000000000;
++ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc0808000c0808000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000003020302;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x001ffffe00200000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x001ffffe00200000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_result[2]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_result[0]) = 0xffe1ffe0ffe1ffe0;
++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x18);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8101010181010101;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8101010181010101;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff0000ff;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fe200000fe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fe200000fe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x001ffffe00200000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x001ffffe00200000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fe200000fe1f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fe200000fe1f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xfffffe20;
++ *((int*)& __m256_op0[6]) = 0x001dfe1f;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0xfffffe20;
++ *((int*)& __m256_op0[2]) = 0x001dfe1f;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7efefefe82010201;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x7afafaf88a050a05;
++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xc080800000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc080800000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7efefefe82010201;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x418181017dfefdff;
++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x418181017dfefdff;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff81;
++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff81;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff7c;
++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000;
++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000003020302;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff81;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000c0c00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe;
++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x001ffffe00200000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x001ffffe00200000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff0020001d001f;
++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ long_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000a00000009;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fe1ffe0ffe1ffe0;
++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x3f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffc0ff80;
++ *((int*)& __m128_op1[2]) = 0xff800000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005;
++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0xffc00000ff800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff0000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00;
++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsat_b(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffc00000ff800000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((int*)& __m128_result[3]) = 0xff800000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0xffffffff;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000;
++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000;
++ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe20;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001dfffffe1f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000c0c00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclz_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005;
++ unsigned_int_result = 0x0000000000000000;
++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x3);
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xfffffe20;
++ *((int*)& __m256_op0[5]) = 0x0000001d;
++ *((int*)& __m256_op0[4]) = 0xfffffe1f;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffff0020001d001f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff0000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000005;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000;
++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000190;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x1b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffc0ff80ff800000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00;
++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00003fe0;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00003fe0;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00003fe0;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00003fe0;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001400000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001400000000;
++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256d_op0[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00003fe000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000;
++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fe1ffe0ffe1ffe0;
++ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
++ __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff1f001f;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffe1ffe0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff1f001f;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffe1ffe0;
++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffc020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffc020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x007fc0083fc7c007;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x007fc0083fc7c007;
++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x42);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000b0000000b; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_d(__m256i_op0,-1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x003f60041f636003; ++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007fc0083fc7c007; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007fc0083fc7c007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffc0003fffc0; ++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000010100000101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010100000101; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000020000; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000003f00001f63; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000003f00001f63; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101030101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101030101; ++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffe1; ++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffe1; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffe1; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffe1; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffe1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0101010101030101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010101030101; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fffa0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffa0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101000101010001; ++ __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x01010001; ++ *((int*)& __m128_op0[0]) = 0x01010001; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00020000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00020000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00020000; ++ *((int*)& __m128_result[1]) = 0x01010001; ++ *((int*)& __m128_result[0]) = 0x01010001; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = 
__lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffc0003fffc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007fc0083fc7c007; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007fc0083fc7c007; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f010700c70106; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f010700c70106; ++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000fffa0000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffa0000; ++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256d_op0[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256d_op0[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000008; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffc0003fffc0; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; ++ 
__m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x803f6004; ++ *((int*)& __m256_op2[4]) = 0x1f636003; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x803f6004; ++ *((int*)& __m256_op2[0]) = 0x1f636003; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x803f6004; ++ *((int*)& __m256_result[4]) = 0x1f636003; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x803f6004; ++ *((int*)& __m256_result[0]) = 0x1f636003; ++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff0000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff0000000000; ++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00002000; ++ *((int*)& __m128_op0[2]) = 0x00002000; ++ *((int*)& __m128_op0[1]) = 0x10000000; ++ *((int*)& __m128_op0[0]) = 0x10000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000002; ++ *((int*)& __m256_op0[4]) = 0x00000008; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000002; ++ *((int*)& __m256_op0[0]) = 0x00000008; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x64800000; ++ *((int*)& __m256_result[4]) = 0x64000000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x64800000; ++ *((int*)& 
__m256_result[0]) = 0x64000000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x71); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f010700c70106; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f010700c70106; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_result[2]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_result[1]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_result[0]) = 0x0106010601060106; ++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x803f6004; ++ *((int*)& __m256_op0[4]) = 0x1f636003; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x803f6004; ++ *((int*)& __m256_op0[0]) = 0x1f636003; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x007f0107; ++ *((int*)& __m256_op1[4]) = 0x00c70106; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x007f0107; ++ *((int*)& __m256_op1[0]) = 0x00c70106; ++ *((int*)& __m256_result[7]) = 0x7fc00000; ++ *((int*)& __m256_result[6]) = 0x7fc00000; ++ *((int*)& __m256_result[5]) = 0xbeff7cfd; ++ *((int*)& __m256_result[4]) = 0x5e123f94; ++ *((int*)& __m256_result[3]) = 0x7fc00000; ++ *((int*)& __m256_result[2]) = 0x7fc00000; ++ *((int*)& __m256_result[1]) = 0xbeff7cfd; ++ *((int*)& __m256_result[0]) = 0x5e123f94; ++ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0106010601060106; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001010600000106; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0001010600000106; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffe1; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1000000010000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0103000201030002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x3f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001010600000106; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001010600000106; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0103000201030002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d(__m128i_op0,7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0103000201030002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101000101010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000fe0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff00ffffff00ff; ++ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000003f003f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000003f003f; ++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000002; ++ *((int*)& __m256_op0[4]) = 0x00000008; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 
0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000002; ++ *((int*)& __m256_op0[0]) = 0x00000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f010700c70106; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f010700c70106; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000010211921; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000010211921; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h(__m256i_op0,0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; ++ 
__m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000200000008; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000200000008; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op2[1]) = 0x0000010100fe0101; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffff0200ffff01ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w(__m256i_op0,8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc0ffc1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x003f00000000003f; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffc0ffc1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x003f00000000003f; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001fffe0001ffc0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0001003e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001fffe0001ffc0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0001003e; ++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000002ffffffff; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007f000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007f000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 
0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000002; ++ *((int*)& __m256_op1[4]) = 0x00000008; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000002; ++ *((int*)& __m256_op1[0]) = 0x00000008; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000010100fe0101; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffff0200ffff01ff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0x0001010100fe0100; ++ *((unsigned long*)& __m128d_result[0]) = 0xffff0200ffff01ff; ++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000008; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000003fe0000141e; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffc01ffffebe2; ++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d(__m128i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0xffffffffffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000004; ++ *((int*)& __m128_op1[0]) = 0x55555555; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00011ffb0000bee1; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00011ffb0000bee1; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000055555555; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000002ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000017fffffff; ++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007ffff001000300; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff0001000300; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_result[0]) = 0xf0003000f0003000; ++ __m128i_out = __lsx_vslli_h(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf0003000f0003000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000017fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x003fffffff800000; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x555500adfffc5cab; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010100000100; ++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x010003f00000ff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x017f03000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x010003f00000ff00; ++ *((unsigned long*)& __m128i_op1[0]) = 0x017f03000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) 
= 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; ++ __m256i_out = __lasx_xvpcnt_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000020; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x42800000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x42000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x42800000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x42000000; ++ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000017; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000017; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x01010101; ++ *((int*)& __m128_op0[0]) = 0x00000100; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0xff800000; ++ *((int*)& __m128_result[1]) = 0xc2fa0000; ++ *((int*)& __m128_result[0]) = 0xc30d0000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000020; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000020; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffc0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffc0000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128i_result[1]) = 0xffc0000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000004; ++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003fffffff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000455555555; ++ __m128i_out = 
__lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x007f00ff007f00ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f00ff007f00ff; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xc9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000158; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001580000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0x0101ffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 
0xffffffff; ++ *((int*)& __m256_op1[0]) = 0x0101ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000455555555; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000001580000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffa800000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000157; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ac; ++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000157; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00067fff00047fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00027fff000080fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00067fff00047fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00027fff000080fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x067f047f027f0080; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x067f047f027f0080; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000015800000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& 
__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000158; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xffffffa8; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000010; ++ *((int*)& __m128_op0[2]) = 0x00100010; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x79); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseqi_b(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f7f7f80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f7f7f80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000100010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010058; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001001100110068; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001001100110068; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001001100110068; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001001100110067; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001001100110068; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ 
*((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0xffffffff;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0xffffffff;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363;
++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101;
++ __m128i_out = __lsx_vclz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_hu(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000fef0ff0;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000fef0ff0;
++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandi_b(__m128i_op0,0xbd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrz_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000040004;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000040004;
++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vffint_d_lu(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f7f7f80;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f7f7f80;
++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x82);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200;
++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000fef0ff0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000fef0ff0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x687a8373f249bc44;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7861145d9241a14a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101030100010001;
++ __m128i_out = __lsx_vclz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[1]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff1fff1fff1fff1;
++ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[3]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[1]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff1fff1fff1fff1;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000020006;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000600;
++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000e000e000e000e;
++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020000;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x39);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0101000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0101030100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080800000008000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0080818000008000;
++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000e000e;
++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x01010001;
++ *((int*)& __m128_op1[2]) = 0x00010001;
++ *((int*)& __m128_op1[1]) = 0x01010301;
++ *((int*)& __m128_op1[0]) = 0x00010001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vneg_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000040000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000040000;
++ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00040000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00040000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m128i_result[0]) = 0xf8f8f8f8f8f8f8f8;
++ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000e000e000e000e;
++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000e0000000e00;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000e000e;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0101000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0101030100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x2000200020002000;
++ *((unsigned long*)& __m128i_result[0]) = 0x2000200020002000;
++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_hu(__m128i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_result[2]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_result[0]) = 0x0007000700070007;
++ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00010001;
++ *((int*)& __m128_op0[2]) = 0x00010001;
++ *((int*)& __m128_op0[1]) = 0x00010001;
++ *((int*)& __m128_op0[0]) = 0x00010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrml_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_result[2]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_result[1]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_result[0]) = 0x000a800b000a800b;
++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x000e000e;
++ *((int*)& __m256_op1[4]) = 0x000e000e;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x000e000e;
++ *((int*)& __m256_op1[0]) = 0x000e000e;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0x98);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m128d_op1[0]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000;
++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0007000700070007;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0007000700070007;
++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m128i_op2[0]) = 0xf8f8f8f8f8f8f8f8;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000018803100188;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000018803100188;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144;
++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000;
++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000affff800b;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000affff800b;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000affff800b;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000affff800b;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000800;
++ __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000018803100188;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000018803100188;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000a0080000b00;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000a0080000b00;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000a0080000b00;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000a0080000b00;
++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000e;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000e;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000c;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_h(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000440800;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000440800;
++ __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000e0010000e;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000e0010000e;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x4e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvmskgez_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0010000e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0010000e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff;
++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++
*((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0707070707070707; ++ *((unsigned long*)& __m256i_result[2]) = 0x0707070707070707; ++ *((unsigned long*)& __m256i_result[1]) = 0x0707070707070707; ++ *((unsigned long*)& __m256i_result[0]) = 0x0707070707070707; ++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x27b9331b8e77ead9; ++ *((unsigned long*)& __m128i_op0[0]) = 0x58d6bf1867ace738; ++ *((unsigned long*)& __m128i_result[1]) = 0xe4cc6c9edfab6639; ++ *((unsigned long*)& __m128i_result[0]) = 0x5afc6163b39ce19e; ++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800f800; ++ *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800f800; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x5); ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000002080100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000002080100; ++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000001880310877e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000001880310877e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002080100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002080100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000008000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000a080100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000008000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000a080100; ++ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; ++ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffbfffffff8; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffbfffffff8; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010800; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x07ffffff07ffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x07ffffff07ffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x07ffffff07ffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x07ffffff07ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0x0ffffffe0ffffffe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0ffffffe0ffffffe; ++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000a0010400a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000a0010400a; ++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0ff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000007f007f007f; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xdd6156076967d8c9; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2e3ab5266375e71b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x6eb12b0634b46c67; ++ *((unsigned long*)& __m128i_result[0]) = 0x171d5a9531bb7390; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000b; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000002070145; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000002070145; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xfffffffc; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& 
__m128_op1[0]) = 0xfffffffc; ++ *((int*)& __m128_result[3]) = 0xffffffff; ++ *((int*)& __m128_result[2]) = 0xfffffffc; ++ *((int*)& __m128_result[1]) = 0xffffffff; ++ *((int*)& __m128_result[0]) = 0xfffffffc; ++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1ab6021f72496458; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7750af4954c29940; ++ *((unsigned long*)& __m128i_result[1]) = 0xe64afee18eb79ca8; ++ *((unsigned long*)& __m128i_result[0]) = 0x89b051b7ac3e67c0; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffdc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffbffffffd8; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffbfffffff8; ++ __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = 
__lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000008000b; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000008000b; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000b; ++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1ab6021f72496458; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7750af4954c29940; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1ab6021f72496458; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7750af4954c29940; ++ *((unsigned long*)& __m128i_result[1]) = 0x6ad8ffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x6ad8ffffffffffff; ++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000008000b; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000008000b; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000008000a; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000a; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000008000a; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000a; ++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffefef800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffefef800; ++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffefef800; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffefef800; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x0); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffefef800; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffefef800; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffefef800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffefef800; ++ __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x27); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000010000000; ++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffff80; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffff80; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000430207f944; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000001f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000001f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000200000001e; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000200000001e; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x38); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0000001f; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0000001f; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x0000001f; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x0000001f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m128i_result[0]) = 0xff01ff01ff01fc10; ++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000080; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000001f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000001f; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x403f000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x403f000000000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x45); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000080; ++ __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x7e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x1f9689fdb16cabbd; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x1f9689fdb16cabbd; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff0000; ++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xcd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0af57272788754ab; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000005e80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0af57272788754ab; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000005e80; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000f0f0f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f0000007f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000f0f0f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f0000007f; ++ __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x3a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 
0x8000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0008; ++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017ffeffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000017ffeffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x32); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff0100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff0100000001; ++ __m256i_out = __lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffff00018d8b; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffff0100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff0100000001; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x7); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff0000; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000001; ++ *((int*)& __m128_op0[2]) = 0x7ffeffff; ++ *((int*)& __m128_op0[1]) = 0x00000001; ++ *((int*)& __m128_op0[0]) = 0x7ffeffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; 
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x003f0000003f0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x003f0000003f0000; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffff0100000001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffff0100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000003f0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000003f0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x803e0000803e0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x803e0000803e0000; ++ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000bdfef907bc; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000bdfef907bc; ++ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x803e0000803e0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x803e0000803e0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x803bfffd803bfffd; ++ *((unsigned long*)& __m128i_result[0]) = 0x803bfffd803bfffd; ++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0010511c54440437; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0010511c54440437; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0008; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffff0008; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000000430207f944; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[2]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0100010001000100; ++ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000008080800; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000008080800; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010511c54440437; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010511c54440437; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000103fca1bd; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000103fca1bd; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000103fca1bd; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000103fca1bd; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010511c54440438; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010511c54440438; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1d8000001d800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1d8000001d800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1d8000001d800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1d8000001d800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0366000003660000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0366000003660000; ++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000bdfef907bc; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000bdfef907bc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0000fffe0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7777777777777777; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff7777ffff7777; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x77); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x000000bd; ++ *((int*)& __m256_op0[4]) = 0xfef907bc; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x000000bd; ++ *((int*)& __m256_op0[0]) = 0xfef907bc; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x62d2acee; ++ *((int*)& __m256_result[4]) = 0x7fc00000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x62d2acee; ++ *((int*)& __m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100004300000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100004300000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff0000bd00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff0000bd00000000; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x01000100; ++ *((int*)& __m256_op0[6]) = 0x01000100; ++ *((int*)& __m256_op0[5]) = 0x01000100; ++ *((int*)& __m256_op0[4]) = 0x01000100; ++ *((int*)& __m256_op0[3]) = 0x01000100; ++ *((int*)& __m256_op0[2]) = 0x01000100; ++ *((int*)& __m256_op0[1]) = 0x01000100; ++ *((int*)& __m256_op0[0]) = 0x01000100; ++ *((int*)& __m256_op1[7]) = 0x7f800000; ++ *((int*)& __m256_op1[6]) = 0x7f800000; ++ *((int*)& __m256_op1[5]) = 0x62d2acee; ++ *((int*)& __m256_op1[4]) = 0x7fc00000; ++ *((int*)& __m256_op1[3]) = 0x7f800000; ++ *((int*)& __m256_op1[2]) = 0x7f800000; ++ *((int*)& __m256_op1[1]) = 0x62d2acee; ++ *((int*)& __m256_op1[0]) = 0x7fc00000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000043; ++ *((int*)& __m256_op0[4]) = 0x0207f944; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000043; ++ *((int*)& __m256_op0[0]) = 0x0207f944; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x8c7fc73a; ++ *((int*)& __m128_op0[2]) = 0x137e54af; ++ *((int*)& __m128_op0[1]) = 0xbc84cf6f; ++ *((int*)& __m128_op0[0]) = 0x76208329; ++ *((int*)& __m128_result[3]) = 0x7fc00000; ++ *((int*)& __m128_result[2]) = 0x297f29fe; ++ *((int*)& __m128_result[1]) = 0x7fc00000; ++ *((int*)& __m128_result[0]) = 0x5acab5a5; ++ __m128_out = __lsx_vfsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00010001000100; ++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x7b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x22); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0100004300000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0100004300000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op2[2]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op2[0]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[2]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_result[0]) = 0x01ffff4300ffff00; ++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x80008000ec82ab51; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000800089e08000; ++ int_result = 0xffffffff89e08000; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010511c54440438; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010511c54440438; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000777777777777; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff7777ffff7777; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000003bbbbbbbbbb; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x45); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000086000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00040ff288000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000086000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00040ff288000000; ++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000777777777777; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffff7777ffff7777; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001b; ++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000100; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x3f800000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, 
__m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc0800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000008080600; ++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x3f800000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000086000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00040ff288000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000086000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00040ff288000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op1[2]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op1[1]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op1[0]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fc300000fc40; ++ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op0[2]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op0[1]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op0[0]) = 0x5555555555555555; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x4545454545454545; ++ *((unsigned long*)& __m256i_result[2]) = 0x4545454545454545; ++ *((unsigned long*)& __m256i_result[1]) = 0x4545454545454545; ++ *((unsigned long*)& __m256i_result[0]) = 0x4545454545454545; ++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x4d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00010001000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x5a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff000003c0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff000003c0; ++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000008080600; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff000003c0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff000003c0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7c030000ffc4; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7c030000ffc4; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7ffeffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7ffeffffffff; ++ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000001; ++ *((int*)& __m256_op0[5]) = 0x7fff7ffe; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ 
*((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000001; ++ *((int*)& __m256_op0[1]) = 0x7fff7ffe; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000002; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000002; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000002; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0xffffffff; ++ *((int*)& __m256_op2[4]) = 0xffffffff; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0xffffffff; ++ *((int*)& __m256_op2[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x80000000; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000000; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffeffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7ffeffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f007bfffffffb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f007bfffffffb; ++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01ffff4300fffeff; ++ *((unsigned 
long*)& __m256i_op1[2]) = 0xfe0000bcff000100; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01ffff4300fffeff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfe0000bcff000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x81ff00bd80ff0101; ++ *((unsigned long*)& __m256i_result[2]) = 0x01ff00bd00ff0101; ++ *((unsigned long*)& __m256i_result[1]) = 0x81ff00bd80ff0101; ++ *((unsigned long*)& __m256i_result[0]) = 0x01ff00bd00ff0101; ++ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; ++ int_op1 = 0xffffffff89e08000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001b0000001b; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001b0000001b; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0018; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0000001b; ++ *((int*)& __m128_op0[2]) = 0x0000001b; ++ *((int*)& __m128_op0[1]) = 0x0000001b; ++ *((int*)& __m128_op0[0]) = 0x0000001b; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x0000001b; ++ *((int*)& __m128_result[2]) = 0x0000001b; ++ *((int*)& __m128_result[1]) = 0x0000001b; ++ *((int*)& __m128_result[0]) = 0x0000001b; ++ __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff003f003f00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff0101fd00010100; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff003f003f00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff0101fd00010100; ++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff003f003f00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff0101fd00010100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff003f003f00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff0101fd00010100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff003f003f00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff0101fd00010100; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff003f003f00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff0101fd00010100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xff00ff00; ++ *((int*)& __m256_op0[6]) = 0x3f003f00; ++ *((int*)& __m256_op0[5]) = 0xff0101fd; ++ *((int*)& __m256_op0[4]) = 0x00010100; ++ *((int*)& __m256_op0[3]) = 0xff00ff00; ++ *((int*)& __m256_op0[2]) = 0x3f003f00; ++ *((int*)& __m256_op0[1]) = 0xff0101fd; ++ *((int*)& __m256_op0[0]) = 0x00010100; ++ *((int*)& __m256_op1[7]) = 0x01ffff43; ++ *((int*)& __m256_op1[6]) = 0x00fffeff; ++ *((int*)& __m256_op1[5]) = 0xfe0000bc; ++ *((int*)& __m256_op1[4]) = 0xff000100; ++ *((int*)& __m256_op1[3]) = 0x01ffff43; ++ *((int*)& __m256_op1[2]) = 0x00fffeff; ++ *((int*)& __m256_op1[1]) = 0xfe0000bc; ++ *((int*)& __m256_op1[0]) = 0xff000100; ++ *((unsigned long*)& __m256i_result[3]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fc00fc00; ++ *((unsigned long*)& __m256i_result[1]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fc00fc00; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x2c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01ffff4300ffff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100000000; ++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x2e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffc0800000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x6f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4545454545454545; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4545454545454545; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4545454545454545; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4545454545454545; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000001b; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000001b0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000001b0000; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op2[0]) = 0x0000fc300000fc40; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000001b0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000001b0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000001b001b; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xc0800000; ++ *((int*)& __m128_op1[3]) = 0x0000001b; ++ *((int*)& __m128_op1[2]) = 0x0000001b; ++ *((int*)& __m128_op1[1]) = 0x0000001b; ++ *((int*)& __m128_op1[0]) = 0x0000001b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x007f007bfffffffb; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x007f007bfffffffb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000010000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000010000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffc0800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc0800000; ++ __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000007fff0018; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000003fff800c; ++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000010000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) 
= 0x0000000010000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffeffff10000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffeffff10000000; ++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fc00fc00; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fc00fc00; ++ *((unsigned long*)& __m256i_result[3]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fc00fc00; ++ *((unsigned long*)& __m256i_result[1]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fc00fc00; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffeffff10000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffeffff10000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfc003802; ++ *((int*)& __m256_op0[6]) = 0xfc000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0xfc00fc00; ++ *((int*)& __m256_op0[3]) = 0xfc003802; ++ *((int*)& __m256_op0[2]) = 0xfc000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xfc00fc00; ++ *((int*)& __m256_result[7]) = 0x82ff902d; ++ *((int*)& __m256_result[6]) = 0x83000000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x82fe0bd9; ++ *((int*)& __m256_result[3]) = 0x82ff902d; ++ *((int*)& __m256_result[2]) = 0x83000000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x82fe0bd9; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0018; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmskltz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffeffff10000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffeffff10000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7ffffffffffffffe; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7ffffffffffffffe; ++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x03802fc000000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x03802fc000000000; ++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xd5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfc003802fc000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x7ffffffffffffffe; ++ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7ffffffffffffffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff00010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff00010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xd2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_result[3]) = 0xc008fa01c0090000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3f804000c008f404; ++ *((unsigned long*)& __m256i_result[1]) = 0xc008fa01c0090000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3f804000c008f404; ++ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x03802fc000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x03802fc000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc008fa01c0090000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3f804000c008f404; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc008fa01c0090000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3f804000c008f404; ++ *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xc0090000c0200060; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xc0090000c0200060; ++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc0090000c0200060; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc0090000c0200060; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f0060; ++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xc008fa01c0090000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3f804000c008f404; ++ *((unsigned long*)& __m256i_op0[1]) = 0xc008fa01c0090000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3f804000c008f404; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_result[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_result[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_result[0]) = 0x001fc0200060047a; ++ __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x03802fc000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x03802fc000000000; ++ *((int*)& __m256_result[7]) = 0x38600000; ++ *((int*)& __m256_result[6]) = 0x3df80000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x38600000; ++ *((int*)& __m256_result[2]) = 0x3df80000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000400028000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xd9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_result[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_result[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_result[0]) = 0x001fc0200060047a; ++ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f0060; ++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400028000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000020001c020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000022; ++ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_result[3]) = 0xfee1057c01e10581; ++ *((unsigned long*)& __m256i_result[2]) = 0x011ec1210161057b; ++ *((unsigned long*)& __m256i_result[1]) = 0xfee1057c01e10581; ++ *((unsigned long*)& __m256i_result[0]) = 0x011ec1210161057b; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002008360500088; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000400028000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_result[3]) = 0x047a047a047a047a; ++ *((unsigned long*)& __m256i_result[2]) = 0x047a047a047a047a; ++ *((unsigned long*)& __m256i_result[1]) = 0x047a047a047a047a; ++ *((unsigned long*)& __m256i_result[0]) = 0x047a047a047a047a; ++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002008360500088; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000c; ++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x386000003df80000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ca0000fff80000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ca0000fff80000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff3; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0xffffffffffffffff; ++ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x55); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00f7000000f70006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00f7000000f70006; ++ __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000fffe00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000fffe00000000; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xfffffff3; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000008; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000088; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000008; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000088; ++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ca0000fff80000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ca0000fff80000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x386000003df80000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x5fa0000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x5fa0000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x386000003df80000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ca0000fff80000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ca0000fff80000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x381800007af80000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x381800007af80000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002008300500088; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000088; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ 
*((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0f00204000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0f00204000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xf3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x5fa00000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x5fa00000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op1[2]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffe0047d00e00480; ++ *((unsigned long*)& __m256i_op1[0]) = 0x001fc0200060047a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xe07de0801f20607a; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00f3009500db00ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00f3009500db00ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003cc0; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x6a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x0c6a2400; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x0f002040; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x0c6a2400; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x0f002040; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x5fa0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x5fa0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0f00204000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0f00204000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x04a3000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x04a3000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0f00204000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0f00204000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x52525252; ++ *((int*)& __m128_op0[2]) = 0xadadadad; ++ *((int*)& __m128_op0[1]) = 0x52525252; ++ *((int*)& __m128_op0[0]) = 0xadadadad; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0xadadadad; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0xadadadad; ++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrintrne_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vslli_w(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x5fa0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x5fa0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003cc0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000081f20607a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000081f20607a; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_result[1]) = 0xfbfbfbfbadadadad; ++ *((unsigned long*)& __m128i_result[0]) = 0xfbfbfbfbadadadad; ++ __m128i_out = __lsx_vmini_b(__m128i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op1[0]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_result[1]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_result[0]) = 0x52525252adadadad; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x800000007fffffff; ++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00f7000000f70006; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00f7000000f70006; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op0[0]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op1[1]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x5fa00000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x5fa00000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000004; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00007f95; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000004; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00007f95; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long*)& __m128i_result[1]) = 0xadadadadadadadad; ++ *((unsigned long*)& __m128i_result[0]) = 0xadadadadadadadad; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0df9f8f; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0df9f8f; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffb; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffb; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op1[0]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op2[1]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00adadad00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00adadad00000000; ++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op0[0]) = 0x52525252adadadad; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5b5b5b5aa4a4a4a6; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x5b5b5b5aadadadad; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x002cffacffacffab; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000007f00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffa; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00018069; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffa; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00018069; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff01fffffffeff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff01fffffffaff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff01fffffffeff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff01fffffffaff; ++ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5b5b5b5aadadadad; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000052525253; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffa; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00018069; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0001fffa; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00018069; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000002000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000002000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x64); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xe07de080; ++ *((int*)& __m256_op0[4]) = 0x1f20607a; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; 
++ *((int*)& __m256_op0[1]) = 0xe07de080; ++ *((int*)& __m256_op0[0]) = 0x1f20607a; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xe07de080; ++ *((int*)& __m256_op1[4]) = 0x1f20607a; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xe07de080; ++ *((int*)& __m256_op1[0]) = 0x1f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000007f00ff00ff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3ff0000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x3fffffff3ffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x3fffffff3ffffffe; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x800000007fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x003f0000ffffffff; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffe4866c86; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe4866c86; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000002000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000002000000; ++ __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xe07de080; ++ *((int*)& __m256_op1[4]) = 0x1f20607a; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xe07de080; ++ *((int*)& __m256_op1[0]) = 0x1f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x207fffff22bd04fb; ++ *((unsigned long*)& __m128i_op0[0]) = 0x207fffff22bd04fb; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000002000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000002000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x207fffff22bd04fa; ++ *((unsigned long*)& __m128i_result[0]) = 0x207fffff22bd04fa; ++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe07de080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000001f20607a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe07de080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000001f20607a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long*)& __m256i_result[2]) = 0xe27fe2821d226278; ++ *((unsigned long*)& __m256i_result[1]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long*)& __m256i_result[0]) = 0xe27fe2821d226278; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x1f831f80e0e09f86; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 
0x1f831f80e0e09f86; ++ __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000003effff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000003effff; ++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff81010102; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe27fe2821d226278; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe27fe2821d226278; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x401fadf8fbfbfbfb; ++ *((unsigned long*)& __m128i_result[0]) = 0x1c1f2145fbfbfbfb; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 
0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff0000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff0000ffffffff; ++ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x38); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x01ff01ff01c0003e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x01ff01ff01c0003e; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01ff01ff01c0003e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01ff01ff01c0003e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x000100ff000100ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000100c00000003e; ++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xbbe5560400010001; ++ *((unsigned long*)& __m128i_result[0]) = 0xe7e5dabf00010001; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbbe5560400010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe7e5dabf00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000b000500010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x000b000c00010001; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff81010102; ++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00ff0000; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00ff0000; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00ff0000; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00ff0000; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fc0010181020103; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fc0ffff81020103; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbbe5560400010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe7e5dabf00010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbbe5560400010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe7e5dabf00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0xe7e5560400010001; ++ *((unsigned long*)& __m128i_result[0]) = 0xe7e5dabf00010001; ++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xf3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xdcec560380000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x08ec7f7f80000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_op2[1]) = 0x32d8f0a905b6c59b; ++ *((unsigned long*)& __m128i_op2[0]) = 0x322a52fc2ba83b96; ++ *((unsigned long*)& __m128i_result[1]) = 0xaa14efac3bb62636; ++ *((unsigned long*)& __m128i_result[0]) = 0xd6c22c8353a80d2c; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_result[1]) = 0x03ff0101fc010102; ++ *((unsigned long*)& __m128i_result[0]) = 0x03fffffffc010102; ++ __m128i_out = __lsx_vsat_b(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000fffffffe000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000102020204000; ++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xaa14efac3bb62636; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd6c22c8353a80d2c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000300000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ff0000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ff0000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x03ff0101fc010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03fffffffc010102; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7fff0101; ++ *((int*)& __m128_op0[2]) = 0x81010102; ++ *((int*)& __m128_op0[1]) = 0x7fffffff; ++ *((int*)& __m128_op0[0]) = 0x81010102; ++ *((int*)& __m128_op1[3]) = 0x00000fff; ++ *((int*)& __m128_op1[2]) = 0xffffe000; ++ *((int*)& __m128_op1[1]) = 0x00001020; ++ *((int*)& __m128_op1[0]) = 0x20204000; ++ *((int*)& __m128_result[3]) = 0x7fff0101; ++ *((int*)& __m128_result[2]) = 0xffffe000; ++ *((int*)& __m128_result[1]) = 0x7fffffff; ++ *((int*)& __m128_result[0]) = 0xa0204000; ++ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70007; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70007; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0xe7e5560400010001; ++ *((unsigned long*)& __m128d_op1[0]) = 0xe7e5dabf00010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00800000; ++ *((int*)& __m128_op0[0]) = 0x00800000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ 
*((int*)& __m128_op1[1]) = 0x00800000; ++ *((int*)& __m128_op1[0]) = 0x00800000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfeffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xfeffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000fff; ++ *((int*)& __m128_op1[2]) = 0xffffe000; ++ *((int*)& __m128_op1[1]) = 0x00001020; ++ *((int*)& __m128_op1[0]) = 0x20204000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffefffefffeffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffefffefffeffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000fff; ++ *((int*)& __m128_op1[2]) = 0xffffe000; ++ *((int*)& __m128_op1[1]) = 0x00001020; ++ *((int*)& 
__m128_op1[0]) = 0x20204000; ++ *((int*)& __m128_result[3]) = 0x80000fff; ++ *((int*)& __m128_result[2]) = 0xffffffff; ++ *((int*)& __m128_result[1]) = 0x80001020; ++ *((int*)& __m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000100010001fffe; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000100010001fffe; ++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000700000004e000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000000012020; ++ *((unsigned long*)& __m128i_result[1]) = 0x0038000000051fff; ++ *((unsigned long*)& __m128i_result[0]) = 0x003c000000022021; ++ __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff0000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff0000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000005500000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001005500020000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000005500000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001005500020000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000100010001fffe; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000005500000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000005400000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000005500000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000005400000002; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xfdfcfda8; ++ *((int*)& __m256_op0[5]) = 0x0000e282; ++ *((int*)& __m256_op0[4]) = 0x1d20ffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xfdfcfda8; ++ *((int*)& __m256_op0[1]) = 0x0000e282; ++ *((int*)& __m256_op0[0]) = 0x1d20ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000700000004e000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000000012020; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000e00a18f5; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000000002023dcdc; ++ *((unsigned long*)& __m128i_result[1]) = 0x000700000004e000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003000000012020; ++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001f7fc100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x001f7fff00000000; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0038000000051fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003c000000022021; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f370101ff04ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f3bffffa0226021; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000fffffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000102020204000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefff00000001fff; ++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000100010001fffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000100010001fffe; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& 
__m256_result[6]) = 0x80000000; ++ *((int*)& __m256_result[5]) = 0x80000000; ++ *((int*)& __m256_result[4]) = 0x80000000; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000000; ++ *((int*)& __m256_result[1]) = 0x80000000; ++ *((int*)& __m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000005500000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001005500020000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000005500000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001005500020000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7fff0101ffffe000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7fffffffa0204000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7f370101ff04ffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7f3bffffa0226021; ++ *((unsigned long*)& __m128d_result[1]) = 0x7fff0101ffffe000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7fffffffa0204000; ++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x003f000400000003; ++ *((unsigned long*)& __m128i_result[0]) = 0x003f000400000003; ++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0effeffefdffa1e0; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe6004c5f64284224; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f000400000003; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003f000400000003; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000400004; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000003f0004; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001f7fc100000404; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000002a000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffe1ffc100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000400000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefff00000001fff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffe1ffc100000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000400000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffe1ffc100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefff00000401fff; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffe1ffc100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000400000; ++ *((int*)& __m128_result[3]) = 0xfffc2000; ++ *((int*)& __m128_result[2]) = 0xfff82000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x003ef89df07f0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003ec0fc0fbfe001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3ff800ff2fe6c00d; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff40408ece0e0de; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000045340a6; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000028404044; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70006; ++ *((unsigned long*)& __m256d_result[3]) = 0x416ee00000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x416ee000c0000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x416ee00000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x416ee000c0000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fdfcfda8; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000e2821d20ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fdfcfda8; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000e2821d20ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff7f810100001000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001fffc0ffffe001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000002259662; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc4dbe60354005d25; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f01000000f8ff00; ++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000045340a6; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000028404044; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000fffffffe000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000102020204000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x045340a628404044; ++ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0xffffe000; ++ *((int*)& __m128_result[0]) = 0xffffe000; ++ __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_h(__m256i_op0,0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff007fff810001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b(__m256i_op0,0xcc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff007fff810001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff7f810100001000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001fffc0ffffe001; ++ *((unsigned long*)& __m128i_result[1]) = 0xff7f810100001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000400530050ffa6; ++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffe8081000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff7f810100001000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff007fff810001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000400530050ffa6; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffff811001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000a1ff4c; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x0002a000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x0002a000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000002a000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; 
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000060a3db; ++ *((unsigned long*)& __m128i_op0[0]) = 0xa70594c000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff9f5c25; ++ *((unsigned long*)& __m128i_result[0]) = 0x58fa6b4000000000; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000007ff000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000a1ff4c; ++ *((unsigned long*)& __m128i_result[1]) = 0x000300037ff000ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003000300a10003; ++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x045340a628404044; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffe8081000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x3c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffe8081000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffe8081000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xb110606000000000; ++ __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff9f5c25; ++ *((unsigned long*)& __m128i_op0[0]) = 0x58fa6b4000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ff9f5c25; ++ *((unsigned long*)& __m128i_op1[0]) = 0x58fa6b4000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 
0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000080800000808; ++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_result[2]) = 0xf3f3f3f3f3f3f3f3; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_result[0]) = 0xf3f3f3f3f3f3f3f3; ++ __m256i_out = __lasx_xvmini_b(__m256i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[6]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[5]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[4]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[3]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[2]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[1]) = 0xf3f3f3f3; ++ *((int*)& __m256_op0[0]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[7]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[6]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[5]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[4]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[3]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[2]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[1]) = 0xf3f3f3f3; ++ *((int*)& __m256_op1[0]) = 0xf3f3f3f3; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000080800000808; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xf3f3f3f3f3f3f4f3; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff800fff01; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff001ffe02; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0003000300a10003; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff800fff01; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); ++ *((unsigned long*)& __m128d_op0[1]) = 0x000300037ff000ff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0003000300a10003; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0003000300000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0003000300a10003; ++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0003000300000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffcfffd00000000; ++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfffdfffe80008000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x000000ff00000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0xffeffff4; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x000000007ff000ff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffe80008000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe2;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffe80007fe2;
++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf3f3f3f3f3f3f4f3;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf3f3f3f3f3f3f4f3;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000f3f3f4f3;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000f3f3f4f3;
++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000300037ff000ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000300a10003;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000300037ff000ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003;
++ *((unsigned long*)& __m128i_op2[1]) = 0x000000007ff000ff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x26);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfrint_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100;
++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x000000007ff000ff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x7ff000ff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrml_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vffint_s_w(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000004000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000004000000020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000040;
++ *((int*)& __m256_op0[6]) = 0x00000020;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000040;
++ *((int*)& __m256_op0[2]) = 0x00000020;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x17);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000004000000020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000004000000020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001;
++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xf8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslei_du(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x58);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff7fffffff7;
++ __m256i_out = __lasx_xvmini_w(__m256i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_b(__m256i_op0,0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x4000400040004000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff7fffffff7;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff700000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff7fffffff7;
++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_d(__m256i_op0,10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_h(__m128i_op0,-7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0808080700000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
++ __m128i_out = __lsx_vclz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0808080700000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x3fff3fff3fff3fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808;
++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffefffe;
++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffe0004fffe0004;
++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0042003e0042002f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffc0001fffc;
++ *((unsigned long*)& __m128i_result[1]) = 0x0042003e0042002f;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001fffc0001fffc;
++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfffe0004fffe0004; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x4b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000007070707; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xc1bdceee242070db; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3f433212dce09025; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffbeffc2ffbeffd1; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0042003e0042002f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffc0001fffc; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0707070707070707; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0707070707070707; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000001fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_result[2]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_result[1]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_result[0]) = 0x0018001800180018; ++ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_result[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0xc2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x000000001fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000001ffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000001ffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& 
__m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; 
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x2c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0018001800180018; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3000300030003000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3000300030003000; ++ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_result[3]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long*)& __m256i_result[2]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long*)& __m256i_result[1]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long*)& __m256i_result[0]) = 0x1e9e1e9e1e9e1e9e; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000001; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0002000000020000; ++ __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000001d0000001d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001d0000001d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001d0000001d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001d; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d(__m256i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000555500005555; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000555500005555; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000555500005555; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000555500005555; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) 
= 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x5a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256d_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256d_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256d_op1[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x01000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00ffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000555500005555; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000555500005555; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000555500005555; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000555500005555; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_result[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe01fe01fe; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256d_result[3]) = 0x437fe01fe01fe020; ++ *((unsigned long*)& __m256d_result[2]) = 0x437fe01fe01fe020; ++ *((unsigned long*)& __m256d_result[1]) = 0x437fe01fe01fe020; ++ *((unsigned long*)& __m256d_result[0]) = 0x437fe01fe01fe020; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b(__m128i_op0,0x8); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x45); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xbf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x037fe01f001fe020; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x037fe01f001fe020; ++ *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000100; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x437fe01fe01fe020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x437fe01fe01fe020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x037fe01f001fe020; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x037fe01f001fe020; ++ *((unsigned long*)& __m256i_result[3]) = 0x437f201f201f2020; ++ *((unsigned long*)& __m256i_result[2]) = 0x037f201f001f2020; ++ *((unsigned long*)& __m256i_result[1]) = 0x437f201f201f2020; ++ *((unsigned long*)& __m256i_result[0]) = 0x037f201f001f2020; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x437f201f201f2020; ++ *((unsigned long*)& __m256i_op1[2]) = 0x037f201f001f2020; ++ *((unsigned long*)& __m256i_op1[1]) = 0x437f201f201f2020; ++ *((unsigned long*)& __m256i_op1[0]) = 0x037f201f001f2020; ++ *((unsigned long*)& __m256i_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x21bb481000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x01bf481000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x21bb481000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x01bf481000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000086fe0000403e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000403e00004040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000086fe0000403e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000403e00004040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000086fe0000403e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000403e00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000086fe0000403e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000403e00004040; ++ *((unsigned long*)& __m256i_result[3]) = 
0x00001bfa000000f9; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000f900004040; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001bfa000000f9; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000f900004040; ++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000086fe0000403e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000403e00004040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000086fe0000403e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000403e00004040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000437f0000201f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000201f00002020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000437f0000201f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000201f00002020; ++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00001bfa000000f9; ++ *((unsigned long*)& __m256d_op0[2]) = 0x000000f900004040; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00001bfa000000f9; ++ *((unsigned long*)& __m256d_op0[0]) = 0x000000f900004040; ++ *((unsigned long*)& __m256d_result[3]) = 0x60183329ceb52cf0; ++ *((unsigned long*)& __m256d_result[2]) = 0x6040392cdaf9b3ff; ++ *((unsigned long*)& __m256d_result[1]) = 0x60183329ceb52cf0; ++ *((unsigned long*)& __m256d_result[0]) = 0x6040392cdaf9b3ff; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x21bb481000ff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x01bf481000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x21bb481000ff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x01bf481000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xb1b3b1b1b1b7b1b1; ++ *((unsigned long*)& __m256i_result[2]) = 0xb1b7b1b1b1b1b1b1; ++ *((unsigned long*)& __m256i_result[1]) = 0xb1b3b1b1b1b7b1b1; ++ *((unsigned long*)& __m256i_result[0]) = 0xb1b7b1b1b1b1b1b1; ++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xb7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x5d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x43800000; ++ *((int*)& __m128_result[0]) = 0x43800000; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000008e4bfc4eff0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000001ffee10000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000008e4bfc4eff0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000001ffee10000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d000000000d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0000060d0d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d000000000d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0000060d0d; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x1); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000800000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvreplve0_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256d_op0[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h(__m128i_op0,14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256d_op0[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256d_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_result[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x43800000; ++ *((int*)& __m128_op0[0]) = 0x43800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffedffedffedffed; ++ *((unsigned long*)& __m128i_result[0]) = 0xffedffedffedffed; ++ __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_result[1]) = 0xffefffefffefffef; ++ *((unsigned long*)& __m128i_result[0]) = 0xffefffefffefffef; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256d_op1[2]) = 
0x6040190d00000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256d_op1[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x7e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffdfffcfffdfffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffdfffcfffdfffc; ++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_result[3]) = 0x080808000828082f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080008280820; ++ *((unsigned long*)& __m256i_result[1]) = 0x080808000828082f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080008280820; ++ __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000400100013; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000400100014; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000400100013; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x080808000828082f; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0808080008280820; ++ *((unsigned long*)& __m256i_op0[1]) = 0x080808000828082f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0808080008280820; ++ *((unsigned long*)& __m256i_op1[3]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op1[2]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op1[0]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00828082f0808080; ++ *((unsigned long*)& __m256i_result[2]) = 0xf18181818132feea; ++ *((unsigned long*)& __m256i_result[1]) = 0x00828082f0808080; ++ *((unsigned long*)& __m256i_result[0]) = 0xf18181818132feea; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x24); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000006040190d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000006040190d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000860601934; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000860601934; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000800200027; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_result[3]) = 0x006018000000001a; ++ *((unsigned long*)& __m256i_result[2]) = 0x0060401900000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x006018000000001a; ++ *((unsigned long*)& __m256i_result[0]) = 0x0060401900000000; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000860601934; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000860601934; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000800200028; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffcfffdfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffcfffdfffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x13); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a00000000; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0a0a000000000a0a; ++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op1[2]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op1[0]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_result[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffefffefffefffef; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffefffefffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsle_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op2[3]) = 0x2020080800000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000004044f4f; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0ef11ae55a5a6767; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_result[2]) = 0x6040190d20227a78; ++ *((unsigned long*)& __m256i_result[1]) = 0x132feeabd2d33b38; ++ *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000400100013; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000400100014; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000400100013; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000400000004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000020200000202; ++ *((unsigned long*)& __m256i_result[2]) = 0x4100004141410000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000020200000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4100004141410000; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000860601934; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000860601934; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800200028; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000006040190d; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000006040190d; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000006040190c; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff9fbfe6f3; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000006040190c; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff9fbfe6f3; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000008; ++ *((int*)& __m256_op0[6]) = 0x60601934; ++ *((int*)& __m256_op0[5]) = 0x00000008; ++ *((int*)& __m256_op0[4]) = 0x00200028; ++ *((int*)& __m256_op0[3]) = 0x00000008; ++ *((int*)& __m256_op0[2]) = 0x60601934; ++ *((int*)& __m256_op0[1]) = 0x00000008; ++ *((int*)& __m256_op0[0]) = 0x00200028; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffefffefffefffef; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffefffefffefffef; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a; ++ 
*((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0004001000100004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0004000400100010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0004001000100004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0004000400100010; ++ __m256i_out = __lasx_xvclz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x006018000000001a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0060401900000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x006018000000001a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0060401900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000006170; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000006170; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000006170; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000006170; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000030b8; ++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d(__m256i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffeffff; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000008000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000008000; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000040000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000040000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000040000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000040000000000; ++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffbfffcffeffff0; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffbfffcffeffff0; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0400100004001000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0400100004001000; ++ __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000f0000000f; ++ __m128i_out = __lsx_vclz_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d20227a78; ++ *((unsigned long*)& __m256i_op0[1]) = 0x132feeabd2d33b38; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000c0300000019a; ++ *((unsigned long*)& __m256i_result[2]) = 0x0c08032100004044; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000265ffa5a6767; ++ *((unsigned long*)& __m256i_result[0]) = 0x0c08032100000000; ++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_result[2]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_result[0]) = 0x132feea900000000; ++ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0x0); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x4393a0a5bc606060; ++ *((unsigned long*)& __m256d_result[2]) = 0x43b32feea9000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x4393a0a5bc606060; ++ *((unsigned long*)& __m256d_result[0]) = 0x43b32feea9000000; ++ __m256d_out = __lasx_xvffint_d_l(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000008000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000008000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000008000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000008000; ++ __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 
0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x04e8296f08181818; ++ *((unsigned long*)& __m256d_op1[2]) = 0x032feea900000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x04e8296f08181818; ++ *((unsigned long*)& __m256d_op1[0]) = 0x032feea900000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x296e000018170000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x296e000018170000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x04e8296f; ++ *((int*)& __m256_op0[6]) = 0x18181818; ++ *((int*)& __m256_op0[5]) = 0x132feea9; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x04e8296f; ++ *((int*)& __m256_op0[2]) = 0x18181818; ++ *((int*)& __m256_op0[1]) = 0x132feea9; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x5cbe15f2; ++ *((int*)& __m256_result[6]) = 0x53261036; ++ *((int*)& __m256_result[5]) = 0x559a674d; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x5cbe15f2; ++ *((int*)& __m256_result[2]) = 0x53261036; ++ *((int*)& __m256_result[1]) = 0x559a674d; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000080; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrph_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6040190d20227a78; ++ *((unsigned long*)& __m256i_op0[1]) = 0x132feeabd2d33b38; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x9fe7fffffffff32e; ++ *((unsigned long*)& __m256i_result[2]) = 0x6040190ddfdd8587; ++ *((unsigned long*)& __m256i_result[1]) = 0xecd011542d2cc4c7; ++ *((unsigned long*)& __m256i_result[0]) = 0x6040190dffffffff; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000030b8; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0002000200020002; ++ *((unsigned long*)& __m256i_result[2]) = 0x00020002000230ba; ++ *((unsigned long*)& __m256i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m256i_result[0]) = 0x00020002000230ba; ++ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9fe7fffffffff32e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x6040190ddfdd8587; ++ *((unsigned long*)& __m256i_op1[1]) = 0xecd011542d2cc4c7; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6040190dffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvdiv_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000080; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x35); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f08181818; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x032feea900000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f08181818; ++ *((unsigned long*)& __m256i_op0[0]) = 0x032feea900000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000001; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000001; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000001; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m256_op0[7]) = 0x4393a0a5; ++ *((int*)& __m256_op0[6]) = 0xbc606060; ++ *((int*)& __m256_op0[5]) = 0x43b32fee; ++ *((int*)& __m256_op0[4]) = 0xa9000000; ++ *((int*)& __m256_op0[3]) = 0x4393a0a5; ++ *((int*)& __m256_op0[2]) = 0xbc606060; ++ *((int*)& __m256_op0[1]) = 0x43b32fee; ++ *((int*)& __m256_op0[0]) = 0xa9000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000001; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff0000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256d_op1[2]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256d_op1[0]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4393a0a5bc606060; ++ *((unsigned long*)& __m256i_op0[2]) = 0x43b32feea9000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4393a0a5bc606060; ++ *((unsigned long*)& __m256i_op0[0]) = 0x43b32feea9000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256i_op1[2]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256i_op1[0]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x04e8296f3c611818; ++ *((unsigned long*)& __m256i_result[2]) = 0x032eafee29010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x04e8296f3c611818; ++ *((unsigned long*)& __m256i_result[0]) = 0x032eafee29010000; ++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256d_op0[2]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256d_op0[0]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x4084800000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4084800000000000; ++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d(__m256i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256d_op0[2]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long*)& __m256d_op0[0]) = 0x408480007fff0000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f3c611818; ++ *((unsigned long*)& __m256i_op0[2]) = 0x032eafee29010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f3c611818; ++ *((unsigned long*)& __m256i_op0[0]) = 0x032eafee29010000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000ffffff; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000ffffff; ++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00b213181dff0607; ++ *((unsigned long*)& __m256i_result[2]) = 0x00e9a80114ff0001; ++ *((unsigned long*)& __m256i_result[1]) = 0x00b213181dff0607; ++ *((unsigned long*)& __m256i_result[0]) = 0x00e9a80114ff0001; ++ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00b213171dff0606; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00e9a80014ff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00b213171dff0606; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00e9a80014ff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00b213171dff0606; ++ *((unsigned long*)& __m256i_result[2]) = 0x00e9a80014ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00b213171dff0606; ++ *((unsigned long*)& __m256i_result[0]) = 0x00e9a80014ff0000; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffff0001ffff0001; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffff0001ffff0001; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffff0001ffff0001; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffff0001ffff0001; ++ *((unsigned 
++ *((unsigned long*)& __m256i_result[3]) = 0xffff0001ffff0001;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff0001ffff0001;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff0001ffff0001;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff0001ffff0001;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3eab77367fff4848;
++ *((unsigned long*)& __m256i_op0[2]) = 0x408480007fff0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3eab77367fff4848;
++ *((unsigned long*)& __m256i_op0[0]) = 0x408480007fff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003;
++ *((unsigned long*)& __m256i_result[2]) = 0x0003000300030000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003;
++ *((unsigned long*)& __m256i_result[0]) = 0x0003000300030000;
++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00010001;
++ *((int*)& __m128_op0[2]) = 0x00010001;
++ *((int*)& __m128_op0[1]) = 0x00010001;
++ *((int*)& __m128_op0[0]) = 0x00010001;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000080;
++ __m128i_out = __lsx_vexth_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vffint_s_wu(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff;
++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00010001;
++ *((int*)& __m128_op0[2]) = 0x00010001;
++ *((int*)& __m128_op0[1]) = 0x00010001;
++ *((int*)& __m128_op0[0]) = 0x00010001;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0020010101610000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0061200000610000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0020010101610000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0061200000610000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000101000000ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00011fff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000101000000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00011fff0000ffff;
++ __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000ffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000ffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000000e;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e;
++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3eab77367fff4848;
++ *((unsigned long*)& __m256i_op0[2]) = 0x408480007fff0000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3eab77367fff4848;
++ *((unsigned long*)& __m256i_op0[0]) = 0x408480007fff0000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000008;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000700000008;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00ff00ff;
++ *((int*)& __m256_op0[6]) = 0x00ff00ff;
++ *((int*)& __m256_op0[5]) = 0x00ff00ff;
++ *((int*)& __m256_op0[4]) = 0x00ff00ff;
++ *((int*)& __m256_op0[3]) = 0x00ff00ff;
++ *((int*)& __m256_op0[2]) = 0x00ff00ff;
++ *((int*)& __m256_op0[1]) = 0x00ff00ff;
++ *((int*)& __m256_op0[0]) = 0x00ff00ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvftintrp_w_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000ffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000ffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000ffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000ffffff;
++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vneg_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[3]) = 0x7f7fff7f7f7fff7f;
++ *((unsigned long*)& __m256i_result[2]) = 0x7f7fff7f7f7fff7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x7f7fff7f7f7fff7f;
++ *((unsigned long*)& __m256i_result[0]) = 0x7f7fff7f7f7fff7f;
++ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ int_op0 = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vreplgr2vr_b(int_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[3]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_result[2]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_result[1]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_result[0]) = 0x000408080c111414;
++ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00200101;
++ *((int*)& __m256_op0[6]) = 0x01610000;
++ *((int*)& __m256_op0[5]) = 0x00612000;
++ *((int*)& __m256_op0[4]) = 0x00610000;
++ *((int*)& __m256_op0[3]) = 0x00200101;
++ *((int*)& __m256_op0[2]) = 0x01610000;
++ *((int*)& __m256_op0[1]) = 0x00612000;
++ *((int*)& __m256_op0[0]) = 0x00610000;
++ *((unsigned long*)& __m256i_result[3]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_result[2]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_result[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_result[0]) = 0x3f8000003f800000;
++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x000408080c111414;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000408080c111414;
++ *((unsigned long*)& __m256d_op0[1]) = 0x000408080c111414;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x24);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x3e8000003e800000;
++ *((unsigned long*)& __m256i_result[2]) = 0x3e8000003e800000;
++ *((unsigned long*)& __m256i_result[1]) = 0x3e8000003e800000;
++ *((unsigned long*)& __m256i_result[0]) = 0x3e8000003e800000;
++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00b2fe28e4420609;
++ *((unsigned long*)& __m256i_op0[2]) = 0x028da7fe15020000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00b2fe28e4420609;
++ *((unsigned long*)& __m256i_op0[0]) = 0x028da7fe15020000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000598;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000598;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x6d);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000598;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000598;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000002cc0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000002cc0000;
++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x31);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xb6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xf039b8c0;
++ *((int*)& __m128_op0[2]) = 0xc61e81ef;
++ *((int*)& __m128_op0[1]) = 0x6db7da53;
++ *((int*)& __m128_op0[0]) = 0xfbd2e34b;
++ *((unsigned long*)& __m128i_result[1]) = 0x80000000ffffd860;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000;
++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x02020102;
++ *((int*)& __m256_op0[6]) = 0x02020102;
++ *((int*)& __m256_op0[5]) = 0x02020102;
++ *((int*)& __m256_op0[4]) = 0x02020102;
++ *((int*)& __m256_op0[3]) = 0x02020102;
++ *((int*)& __m256_op0[2]) = 0x02020102;
++ *((int*)& __m256_op0[1]) = 0x02020102;
++ *((int*)& __m256_op0[0]) = 0x02020102;
++ *((int*)& __m256_op1[7]) = 0x3e800000;
++ *((int*)& __m256_op1[6]) = 0x3e800000;
++ *((int*)& __m256_op1[5]) = 0x3e800000;
++ *((int*)& __m256_op1[4]) = 0x3e800000;
++ *((int*)& __m256_op1[3]) = 0x3e800000;
++ *((int*)& __m256_op1[2]) = 0x3e800000;
++ *((int*)& __m256_op1[1]) = 0x3e800000;
++ *((int*)& __m256_op1[0]) = 0x3e800000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000598;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000598;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x34);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x000001c000000134;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x000001c000000134;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x000001c000000134;
++ *((unsigned long*)& __m256d_op1[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x000001c000000134;
++ *((unsigned long*)& __m256d_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000038000000268;
++ *((unsigned long*)& __m256d_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000038000000268;
++ *((unsigned long*)& __m256d_result[0]) = 0x7fff7fff7fff7fff;
++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m128_op0[3]) = 0x80000000;
++ *((int*)& __m128_op0[2]) = 0xffffd860;
++ *((int*)& __m128_op0[1]) = 0x7fffffff;
++ *((int*)& __m128_op0[0]) = 0x80000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102;
++ __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[3]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0002000200010002;
++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x80000000ffffd860;
++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff80000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000;
++ __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff;
++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000038000000268;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000038000000268;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010101;
++ __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x02020102;
++ *((int*)& __m256_op0[6]) = 0x02020102;
++ *((int*)& __m256_op0[5]) = 0x02020102;
++ *((int*)& __m256_op0[4]) = 0x02020102;
++ *((int*)& __m256_op0[3]) = 0x02020102;
++ *((int*)& __m256_op0[2]) = 0x02020102;
++ *((int*)& __m256_op0[1]) = 0x02020102;
++ *((int*)& __m256_op0[0]) = 0x02020102;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe400000707;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000af100001455;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe400000707;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000af100001455;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010101;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010101;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010;
++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000010;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000010;
++ *((unsigned long*)& __m256d_op1[3]) = 0x000408080c111414;
++ *((unsigned long*)& __m256d_op1[2]) = 0x000408080c111414;
++ *((unsigned long*)& __m256d_op1[1]) = 0x000408080c111414;
++ *((unsigned long*)& __m256d_op1[0]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000038000000268;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000038000000268;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000408080c111414;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff;
++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fe363637fe36363;
++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x63);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000038000000268;
++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000038000000268;
++ *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000001200000011a;
++ *((unsigned long*)& __m256i_result[2]) = 0x2040204020402040;
++ *((unsigned long*)& __m256i_result[1]) = 0x000001200000011a;
++ *((unsigned long*)& __m256i_result[0]) = 0x2040204020402040;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001;
++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000009e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000009e;
++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0101ffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff;
++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_result[2]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_result[1]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_result[0]) = 0xff81001dff9d003b;
++ __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
++ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010002;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0002000200010002;
++ *((unsigned long*)& __m256i_result[3]) = 0x7f1d7f7f7f1d7f3b;
++ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[1]) = 0x7f1d7f7f7f1d7f3b;
++ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102;
++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x7f1d7f7f;
++ *((int*)& __m256_op0[6]) = 0x7f1d7f3b;
++ *((int*)& __m256_op0[5]) = 0x02020102;
++ *((int*)& __m256_op0[4]) = 0x02020102;
++ *((int*)& __m256_op0[3]) = 0x7f1d7f7f;
++ *((int*)& __m256_op0[2]) = 0x7f1d7f3b;
++ *((int*)& __m256_op0[1]) = 0x02020102;
++ *((int*)& __m256_op0[0]) = 0x02020102;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000010;
++ *((int*)& __m256_op0[6]) = 0x00000010;
++ *((int*)& __m256_op0[5]) = 0x00000010;
++ *((int*)& __m256_op0[4]) = 0x00000010;
++ *((int*)& __m256_op0[3]) = 0x00000010;
++ *((int*)& __m256_op0[2]) = 0x00000010;
++ *((int*)& __m256_op0[1]) = 0x00000010;
++ *((int*)& __m256_op0[0]) = 0x00000010;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xffffff00;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000001;
++ *((int*)& __m128_op1[2]) = 0x00000001;
++ *((int*)& __m128_op1[1]) = 0x00000001;
++ *((int*)& __m128_op1[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001ffff0101ffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff0001ffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0103fefd0303fefd;
++ *((unsigned long*)& __m128i_result[0]) = 0x0103fefd0103fefd;
++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffefff00001000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffefff00001000;
++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff00;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000103030102ffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000010102ffff;
++ __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102;
++ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102;
++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fe36364661af18f;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fe36364661af18f;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363;
++ *((unsigned long*)& __m256i_result[3]) = 0x40f23232330df9c8;
++ *((unsigned long*)& __m256i_result[2]) = 0x40f2323240f23232;
++ *((unsigned long*)& __m256i_result[1]) = 0x40f23232330df9c8;
++ *((unsigned long*)& __m256i_result[0]) = 0x40f2323240f23232;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0101010100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0101010100000000;
++ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014;
++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000101010015;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffed00010001;
++ __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014;
++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008;
++ __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000014;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000014;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x02020102;
++ *((int*)& __m256_op1[6]) = 0x02020102;
++ *((int*)& __m256_op1[5]) = 0x02020102;
++ *((int*)& __m256_op1[4]) = 0x02020102;
++ *((int*)& __m256_op1[3]) = 0x02020102;
++ *((int*)& __m256_op1[2]) = 0x02020102;
++ *((int*)& __m256_op1[1]) = 0x02020102;
++ *((int*)& __m256_op1[0]) = 0x02020102;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000201220001011c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000201220001011c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000201220001011c;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000201220001011c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
*((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000014; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000001400000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x1f81e3779b97f4a8; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256d_op0[2]) = 0x7fe363637fe36364; ++ *((unsigned long*)& __m256d_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256d_op0[0]) = 0x7fe363637fe36364; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fe363637fe36364; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fe363637fe36364; ++ *((unsigned long*)& __m256i_result[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00001ff8d8d90000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00001ff8d8d90000; ++ __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1f81e3779b97f4a8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff02000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1f81e3779b97f4a8; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fe363637fe36363; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fe363637fe36363; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00e30064001a008f; ++ *((unsigned long*)& __m256i_result[2]) = 0x00e3006300e30063; ++ *((unsigned long*)& __m256i_result[1]) = 0x00e30064001a008f; ++ *((unsigned long*)& __m256i_result[0]) = 0x00e3006300e30063; ++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffff02000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x1f81e3779b97f4a8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff02000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000014; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000014; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0xc3110000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xc3110000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xffffffff02000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000008; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000008; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000008; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000008; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000008; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000008; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000008; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000008; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000001; ++ *((int*)& __m256_op2[4]) = 0x00000001; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000001; ++ *((int*)& __m256_op2[0]) = 0x00000001; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x80000001; ++ *((int*)& __m256_result[4]) = 0x80000001; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x80000001; ++ *((int*)& __m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x23); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101000101010001; ++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101000101010001; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fe36364661af18f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101000101010001; ++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0200000202000002; ++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000400010004; ++ 
*((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000400010004; ++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0200000202000002; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0200000202000002; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0101000101010001; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00001ff8d8d90000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00001ff8d8d90000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x00001ff800000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xd8d8c00000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001ff800000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xd8d8c00000000000; ++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0101000101010001; ++ __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long*)& __m256d_result[3]) = 0x4380100810101008; ++ *((unsigned long*)& __m256d_result[2]) = 0x4380100810101008; ++ *((unsigned long*)& __m256d_result[1]) = 0x4380100810101008; ++ *((unsigned long*)& __m256d_result[0]) = 0x4380100810101008; ++ __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00001ff800000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xd8d8c00000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00001ff800000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xd8d8c00000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00001ff8; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xd8d8c000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00001ff8; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xd8d8c000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x02020102; ++ *((int*)& __m256_op1[6]) = 0x02020102; ++ *((int*)& __m256_op1[5]) = 0x02020102; ++ *((int*)& __m256_op1[4]) = 0x02020102; ++ *((int*)& __m256_op1[3]) = 0x02020102; ++ *((int*)& __m256_op1[2]) = 0x02020102; ++ *((int*)& __m256_op1[1]) = 0x02020102; ++ *((int*)& __m256_op1[0]) = 0x02020102; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0007fff800000000; ++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001400000014; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000101010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000000010000; ++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0014001400140000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000554; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0014001400140000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001400000014; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001400000000; ++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00001ff8d8d90000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00001ff8d8d90000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00001ef8d8d8c000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00001ef8d8d80000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00001ef8d8d8c000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00001ef8d8d80000; ++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfff2fff2fff2fff2; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff2fff2fff2fff2; ++ *((unsigned long*)& __m256i_result[1]) = 0xfff2fff2fff2fff2; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff2fff2fff2fff2; ++ __m256i_out = __lasx_xvmini_h(__m256i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001400000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000053a4f452; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001400000014; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000001400000000; ++ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001400000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff9000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc000400000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0007001400000014; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004001000000000; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000400010004; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000400010004; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000400010004; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000400010004; ++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000053a4f452; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000053a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000400010004; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000400010004; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000e0001000e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000e0001000e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000e0001000e; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x0001000e0001000e; ++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000053a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0007001400000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000000053a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000700140000053a; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004001000000000; ++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000e0001000e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000e0001000e; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000e0001000e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000e0001000e; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000053a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff9000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc000400000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffc000400000000; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000014; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000014; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xfffc0004; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = 
__lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x0000000e; ++ *((int*)& __m256_op1[6]) = 0x0000000e; ++ *((int*)& __m256_op1[5]) = 0x0000000e; ++ *((int*)& __m256_op1[4]) = 0x0000000e; ++ *((int*)& __m256_op1[3]) = 0x0000000e; ++ *((int*)& __m256_op1[2]) = 0x0000000e; ++ *((int*)& __m256_op1[1]) = 0x0000000e; ++ *((int*)& __m256_op1[0]) = 0x0000000e; ++ *((int*)& __m256_result[7]) = 0x0000000e; ++ *((int*)& __m256_result[6]) = 0x0000000e; ++ *((int*)& __m256_result[5]) = 0x0000000e; ++ *((int*)& __m256_result[4]) = 0x0000000e; ++ *((int*)& __m256_result[3]) = 0x0000000e; ++ *((int*)& __m256_result[2]) = 0x0000000e; ++ *((int*)& __m256_result[1]) = 0x0000000e; ++ *((int*)& __m256_result[0]) = 0x0000000e; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001010101; ++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long*)& __m128i_op2[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0080000700000014; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffbffda; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000001; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffc000400000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00003fff00010000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_w(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00003fff00010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00123fff00120012; ++ *((unsigned long*)& __m128i_result[0]) = 0x0012001200120012; ++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00123fff00120012; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0012001200120012; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long*)& __m128i_result[1]) = 0x00123fff00120012; ++ *((unsigned long*)& __m128i_result[0]) = 0x001200120017004c; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0007000700070007; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_result[2]) = 0xf2f2f2f2f2f2f2f2; ++ *((unsigned long*)& __m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long*)& __m256i_result[0]) = 0xf2f2f2f2f2f2f2f2; ++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0xaa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00123fff00120012; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0012001200120012; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00003fff00010000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1200091212121212; ++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff03ffffff07; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff03ffffff07; ++ __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1200091212121212; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000f0001000f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000f0001000d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000f0001000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000f0001000d; ++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000008000000080; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000f0001000f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000f0001000d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000f0001000f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000f0001000d; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000010000000f; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000010000000f; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000010000000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000010000000d; ++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x55); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000008000000080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x51); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m128d_op1[0]) = 
0x0000008000000080; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x26); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x80000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x80000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000e000e000e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000e000e000e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000e000e000e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000e000e000e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000e0000000d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000dfffffff1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000cfffffff3; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000dfffffff1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000cfffffff3; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000dfffffff1; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000cfffffff3; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000dfffffff1; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000cfffffff3; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_result[1]) = 
0xffffffff0000000f; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000000d; ++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h(__m256i_op0,2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000dfffffff1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000cfffffff3; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000dfffffff1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000cfffffff3; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000800000000000; ++ *((unsigned long*)& 
__m128i_op2[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00003f3f00003f3f; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x56); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000000d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000000d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_d(__m128i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000000d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000000d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff00ffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff00ffffffffff; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000;
++ __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00008000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00008000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintl_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0400000004000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000400;
++ *((unsigned long*)& __m256i_result[1]) = 0x0400000004000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000400;
++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000008000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000000d;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000000d;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000000d;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000000d;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001;
++ __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000;
++ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ long_int_result = 0x0000000000000000;
++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000000d;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000000d;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fffe0000000c;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffe0000000c;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000;
++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000003ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000003ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_b(__m256i_op0,15);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001;
++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0xffffffff;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0xffffffff;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000;
++ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff8000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff8000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff8000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000;
++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_h(__m128i_op0,11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,-7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_h(__m256i_op0,-1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f900000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f900000002;
++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpcnt_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[3]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff8fffffff8ffff;
++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080;
++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0xfff8fffffff8ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff00;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00f9f9f900000002;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00f9f9f900000002;
++ *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000f9f9f9f9;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000faf3f3f2;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000f9f9f9f9;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000faf3f3f2;
++ __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00f9f9f900000002;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00f9f9f900000002;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x007cfcfd80000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x007cfcfd80000001;
++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000faf3f3f2;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000faf3f3f2;
++ *((unsigned long*)& __m256i_result[3]) = 0xffff0607ffff0383;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff0607ffffc0c1;
++ *((unsigned long*)& __m256i_result[1]) = 0xffff0607ffff0383;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff0607ffffc0c1;
++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000001000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001;
++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x007cfcfd80000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x007cfcfd80000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000001000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0607ffff0607;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000f9f9f9f9;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000faf3f3f2;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000f9f9f9f9;
++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000faf3f3f2;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffdbbbcf;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffb8579f;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffdbbbcf;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffb8579f;
++ __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffcfffcfffcfffc;
++ __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ unsigned_int_result = 0x0000000000000000;
++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5);
++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffdbbbcf;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffb8579f;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffdbbbcf;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffb8579f;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffcfffc;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffcfffc;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffcfffc;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffcfffc;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000003fff;
++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffdbbbcf;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffb8579f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffdbbbcf;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffb8579f;
++ *((unsigned long*)& __m256i_op2[3]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op2[2]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op2[1]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op2[0]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_op2[0]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0008000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0008000000000000;
++ __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff;
++ __m128i_out = __lsx_vmsknz_b(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001555;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000015554001c003;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000001555;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000015554001c003;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000304;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000030401010202;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000304;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000030401010202;
++ __m256i_out = __lasx_xvpcnt_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffff;
++ *((int*)& __m256_op0[6]) = 0x00030005;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0xffffffff;
++ *((int*)& __m256_op0[2]) = 0x00030005;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080;
++ __m128i_out = __lsx_vmini_w(__m128i_op0,8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffc001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000c000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffc001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000c000;
++ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x6d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffcfffcfffcfffc;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fffcfffcfffc;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffcfffcfffc;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x0000ffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x0000ffff;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x0000ffff;
++ __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003fff;
++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3e25c8317394dae6; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcda585aebbb2836a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xcda585aebbb2836a; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcda585aebbb2836a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long*)& __m128i_result[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long*)& __m128i_result[0]) = 0x5779108fdedda7e4; ++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3e25c8317394dae6; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0xfefeff00fefeff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefeff00fefeff00; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff000300030000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffc000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff000300030000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffc000; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffc4cdfd16; ++ __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000faf3f3f2; ++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000faf3f3f2; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& 
__m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xffdbbbcf; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0xffb8579f; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xffdbbbcf; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xffb8579f; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0xfff8579f; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0xfff8579f; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskgez_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128d_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xd78cfd70b5f65d77; ++ *((unsigned long*)& __m128i_result[0]) = 0x5779108fdedda7e5; ++ __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x5b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000003fff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003fff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffb8579f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffb8579f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00bb; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0057; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00bb; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0057; ++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vclo_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfefeff00fefeff00; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefeff00fefeff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x00007e7e00007e7e; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007e7e00007e7e; ++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff8579f; ++ __m256i_out = 
__lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xd78cfd70b5f65d77; ++ *((unsigned long*)& __m128d_op1[0]) = 0x5779108fdedda7e5; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080800008; ++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x80808080; ++ *((int*)& __m128_op0[2]) = 0x80808080; ++ *((int*)& __m128_op0[1]) = 0x80808080; ++ *((int*)& __m128_op0[0]) = 0x80800008; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x80000000; ++ *((int*)& __m128_result[2]) = 0x80000000; ++ *((int*)& __m128_result[1]) = 0x80000000; ++ *((int*)& __m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000b3a6000067da; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00004e420000c26a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x7a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000faf3f3f2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000faf3f3f2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00bb; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0057; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff00bb; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0057; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fffa003e; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fffb009c; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fffa003e; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fffb009c; ++ __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff8579f; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0007a861; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0007a861; ++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000b3a6000067da; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00004e420000c26a; ++ *((unsigned long*)& __m128i_op1[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5779108fdedda7e4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000b3a6000067da; ++ *((unsigned long*)& __m128i_result[0]) = 0x5779108f0000c26a; ++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_w(__m128i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0000b3a6; ++ *((int*)& __m128_op0[2]) = 0x000067da; ++ *((int*)& __m128_op0[1]) = 0x00004e42; ++ *((int*)& __m128_op0[0]) = 0x0000c26a; ++ *((unsigned long*)& __m128d_result[1]) = 0x379674c000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x3789f68000000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff0007a861; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0007a861; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long*)& __m128i_result[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x379674c000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3789f68000000000; ++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8080808080800008; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x975ca6046e2e4889; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8080ffffffff8080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00008080ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xff80ffffffffff80; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff80ffffffff; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0x0007a861; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0x0007a861; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; 
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x379674c000000000; ++ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfefeff00fefeff00; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfefeff00fefeff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00c0000000800000; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff7ffffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffffe; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; 
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff0007a861; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0007a861; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x379674c000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x379674c000000000; ++ __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0007a861; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0007a861; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x975ca6046e2e4889; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1748c4f9ed1a5870; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x6a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ff960001005b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffa500010003; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0020000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x2b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000003; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000003; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000003; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000003; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b(__m256i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000020; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffee00ba; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffee00ba; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffee; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffee; ++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002001000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000008000020000; ++ __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0800010001ff8000; ++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffee00ba; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffee00ba; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00fffff500ba; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00fffff500ba; ++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w(__m256i_op0,15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0x0007a861; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0x0007a861; ++ *((int*)& __m256_op1[7]) = 0x80008000; ++ *((int*)& __m256_op1[6]) = 0x80008000; ++ *((int*)& __m256_op1[5]) = 0x80008000; ++ *((int*)& __m256_op1[4]) = 0xfff98000; ++ *((int*)& __m256_op1[3]) = 0x80008000; ++ *((int*)& __m256_op1[2]) = 0x80008000; ++ *((int*)& __m256_op1[1]) = 0x80008000; ++ *((int*)& __m256_op1[0]) = 0xfff98000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffee00ba; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffee00ba; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xefefefefefee00aa; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xefefefefefee00aa; ++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0800010001ff8000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x1748c4f9ed1a5870;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x1748c4f9ed1a5870;
++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x1748c4f9ed1a5870;
++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_b(__m128i_op0,12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80008000fff98000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80008000fff98000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ int_op0 = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff;
++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000;
++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x21);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000800080008000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000800080008000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x000000ff;
++ *((int*)& __m256_op1[6]) = 0x000000ff;
++ *((int*)& __m256_op1[5]) = 0x000000ff;
++ *((int*)& __m256_op1[4]) = 0x000000ff;
++ *((int*)& __m256_op1[3]) = 0x000000ff;
++ *((int*)& __m256_op1[2]) = 0x000000ff;
++ *((int*)& __m256_op1[1]) = 0x000000ff;
++ *((int*)& __m256_op1[0]) = 0x000000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978;
++ *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xd4bade5e2e902836;
++ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0017004800c400f9;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ed001a00580070;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m128i_result[1]) = 0x800b7fe38062007b;
++ *((unsigned long*)& __m128i_result[0]) = 0x0076800d802c0037;
++ __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffa003e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffb009c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffa003e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffb009c;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x1f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffee;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffee;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000;
++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0xffffffff;
++ *((int*)& __m256_op1[6]) = 0xffffffff;
++ *((int*)& __m256_op1[5]) = 0xffff0000;
++ *((int*)& __m256_op1[4]) = 0xffff0000;
++ *((int*)& __m256_op1[3]) = 0xffffffff;
++ *((int*)& __m256_op1[2]) = 0xffffffff;
++ *((int*)& __m256_op1[1]) = 0xffff0000;
++ *((int*)& __m256_op1[0]) = 0xffff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x6a5d5b056f2f4978;
++ *((unsigned long*)& __m128i_op0[0]) = 0x17483c07141b5971;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xd4bade5e2e902836;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x345002920f3017d6;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
++ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff;
++ __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x345002920f3017d6;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
++ __m256i_out = __lasx_xvreplve0_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x40fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_result[2]) = 0x40fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_result[1]) = 0x40fe00fe00fe00fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x40fe00fe00fe00fe;
++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff;
++ __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x2e9028362e902836;
++ *((unsigned long*)& __m128i_op1[0]) = 0x2e9028362e902836;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x345002920f3017d6;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffff7fffffff7;
++ __m128i_out = __lsx_vmini_w(__m128i_op0,-9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000002;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000002;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x34500292;
++ *((int*)& __m128_op1[0]) = 0x0f3017d6;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffff7ffffffffe;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffff7ffffffffe;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000;
++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op1[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256d_op1[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00ff00ff;
++ *((int*)& __m256_op0[6]) = 0x00ff00ff;
++ *((int*)& __m256_op0[5]) = 0x00ff00ff;
++ *((int*)& __m256_op0[4]) = 0x00ff00ff;
++ *((int*)& __m256_op0[3]) = 0x00ff00ff;
++ *((int*)& __m256_op0[2]) = 0x00ff00ff;
++ *((int*)& __m256_op0[1]) = 0x00ff00ff;
++ *((int*)& __m256_op0[0]) = 0x00ff00ff;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffff7fffffff7;
++ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcdcfcfcfcdc;
++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff;
++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x80fe80ff80fe00ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff80ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x80fe80ff80fe00ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff80ff;
++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x67eb85afb2ebb000;
++ __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5252adadadadadad;
++ *((unsigned long*)& __m128i_op1[0]) = 0xadad52525252adad;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000adad0000adad;
++ *((unsigned long*)& __m128i_result[0]) = 0x000052520000adad;
++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000adad0000adad;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000052520000adad;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff;
++ __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0xca);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xff7cffd6ffc700b0;
++ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000080ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000080ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x08000000000000f8;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x08000000000000f8;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ long_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff;
++ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff7cffd6ffc700b0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x008300290038ff50;
++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x08000000000000f8;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x08000000000000f8;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0200000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x2000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0200000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x2000000000000000;
++ __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x1e);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x7fff8000;
++ *((int*)& __m256_op1[6]) = 0x7fff0000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00008000;
++ *((int*)& __m256_op1[3]) = 0x7fff8000;
++ *((int*)& __m256_op1[2]) = 0x7fff0000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00008000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00830029;
++ *((int*)& __m128_op0[0]) = 0x0038ff50;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000002;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000;
++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000000010;
++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010;
++ __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0xffffffff;
++ *((int*)& __m128_op1[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff800000ff;
++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0200000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x2000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0200000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x2000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000200000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffff7fffffff7fff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffff7fffffff7fff;
++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_result[1]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000080c000c080;
++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffe00000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffe00000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfefee00000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfefee00000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000014155445;
++ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d9f5d800;
++ *((unsigned long*)& __m128i_result[0]) = 0xe4c23ffb002a3a22;
++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfefee00000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfefee00000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0xfefee00000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xfefee00000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffc0007ffe0002;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000400000018002;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffc0007ffe0002;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000400000018002;
++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000007fff0000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000007fff0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000;
++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100;
++ *((unsigned long*)& __m256i_result[2]) = 0x8100810081008100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100;
++ *((unsigned long*)& __m256i_result[0]) = 0x8100810081008100;
++ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff800000ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x800080ff800080ff;
++ __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op1[1]) = 0x33f5c2d7d9f5d800;
++ *((unsigned long*)& __m128i_op1[0]) = 0xe4c23ffb002a3a22;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff;
++ __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000000010;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
++ __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000400000003fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000400000003fff;
++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_result[2]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_result[0]) = 0x800080ff800080ff;
++ __m256i_out = __lasx_xvreplve0_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcdcfcfcfcdc;
++ __m128i_out = __lsx_vmini_d(__m128i_op0,3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85af0000b000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000;
++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080ff0080;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080ff0080;
++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x4000c08000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000080c000c080;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000200000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000200000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000;
++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x31);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x67eb85af0000b000;
++ *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvclo_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc07f8000c07f8000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc07f8000c07f8000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff01fe0;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff01fe0;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x2a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000007fff0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000007fff0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000;
++ *((unsigned long*)& __m256i_result[3]) = 0xff01fffe00000001;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_result[1]) = 0xff01fffe00000001;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001;
++ __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080ff0080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080ff0080;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00ff000000000080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00ff000000000080;
++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00ff000000000080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00ff000000000080;
++ *((unsigned long*)& __m256d_result[3]) = 0x416fe00000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x4060000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x416fe00000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x4060000000000000;
++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0x67157b5100005000;
++ *((unsigned long*)& __m128i_result[0]) = 0x387c7e0a133f2000;
++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080ff0080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080ff0080;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000400080ffc080;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000400080ffc080;
++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0x680485c8b304b019;
++ *((unsigned long*)& __m128i_result[0]) = 0xc89d7f0fed582019;
++ __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff01fffe00000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff01fffe00000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x800080ff800080ff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128d_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0010001000000010;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x1000000010001000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000;
++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000003ddc5dac;
++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x3ddc5dac;
++ *((int*)& __m128_op1[3]) = 0xffffffff;
++ *((int*)& __m128_op1[2]) = 0xffffffff;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0xffffffff;
++ *((int*)& __m128_result[2]) = 0xffffffff;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000001030103;
++ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffc606ec5;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000014155445;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x76);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000200000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000004000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000200000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000004000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x3fffbfff80000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00004000007f8000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x3fffbfff80000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00004000007f8000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x67157b5100005000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x387c7e0a133f2000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x8000800080010000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x8000800080010000;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[2]) = 0x8000800080010000;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m256i_result[0]) = 0x8000800080010000;
++ __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xfc606ec5;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x14155445;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x01030103;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x3fffbfff80000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00004000007f8000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x3fffbfff80000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00004000007f8000;
++ *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1])
= 0x680485c8b304b019; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc89d7f0fed582019; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_op2[1]) = 0x67157b5100005000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x387c7e0a133f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0x680485c8b304b019; ++ *((unsigned long*)& __m128i_result[0]) = 0xc89d7f0ff90da019; ++ __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000a95afc60a5c5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000b6e414157f84; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000204264602444; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000266404046604; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x66); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x8000800080008000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x8000800080008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_op1[1]) = 0x67157b5100005000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x67157b5100005000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x387c7e0a133f2000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_result[1]) = 0x67157b5100005000; ++ *((unsigned long*)& __m128i_result[0]) = 0x387c7e0a511b7dac; ++ __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x680485c8b304b019; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc89d7f0ff90da019; ++ *((unsigned long*)& __m128i_op1[1]) = 0x680485c8b304b019; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc89d7f0ff90da019; ++ *((unsigned long*)& __m128i_result[1]) = 0x00680486ffffffda; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff913bfffffffd; ++ __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f010000000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f010000000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f010100000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f010100000101; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007f010000000100; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007f010000000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f010100000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f010100000101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0008000000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0008000000000010; ++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x04000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x04000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x04000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x04000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00680486ffffffda; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff913bfffffffd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00680486ffffffda; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff913bfffffffd; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_result[1]) = 0x00680486ffffffda; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff913bb9951901; ++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0020006000200060; ++ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000400080ffc080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000400080ffc080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff80ff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff80ff; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x67eb85af; ++ *((int*)& __m128_op0[2]) = 0xb2ebb000; ++ *((int*)& __m128_op0[1]) = 0xc8847ef6; ++ *((int*)& __m128_op0[0]) = 0xed3f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00680486; ++ *((int*)& __m128_op0[2]) = 0xffffffda; ++ *((int*)& __m128_op0[1]) = 0xffff913b; ++ *((int*)& __m128_op0[0]) = 0xb9951901; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x01030103; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00200060; ++ *((int*)& __m128_op2[0]) = 0x00200060; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0xffffffda; ++ *((int*)& __m128_result[1]) = 0xffff913b; ++ *((int*)& __m128_result[0]) = 0x001fed4d; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00080000; ++ *((int*)& __m256_op0[4]) = 0x00000010; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00080000; ++ *((int*)& __m256_op0[0]) = 0x00000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x7f010000; ++ *((int*)& __m256_op0[5]) = 0x00010000; ++ *((int*)& __m256_op0[4]) = 0x00007f7f; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x7f010000; ++ *((int*)& __m256_op0[1]) = 0x00010000; ++ *((int*)& __m256_op0[0]) = 0x00007f7f; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00680486ffffffda; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff913bb9951901; ++ *((unsigned long*)& __m128i_op1[1]) = 0x67157b5100005000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long*)& __m128i_result[0]) = 0x0c0f000a070f0204; ++ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x478b478b38031779; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6b769e690fa1e119; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001030103; ++ *((unsigned long*)& __m128i_result[1]) = 0x0047004700380017; ++ *((unsigned long*)& __m128i_result[0]) = 0x006bff9e0010ffe2; ++ __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x478b478b38031779; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6b769e690fa1e119; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; ++ 
__m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long*)& __m128i_result[1]) = 0x67ebb2ebc884ed3f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ddc; ++ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000103; ++ __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000200000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000200000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x39); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; ++ long_int_result = 0x000000003ddc5dac; ++ long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vmini_d(__m128i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6b75948a91407a42; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0b5471b633e54fde; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000004870ba0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000004870ba0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3f80000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3f80000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x4efffe00; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x47000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x4efffe00; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x47000000; ++ __m256_out = __lasx_xvffint_s_w(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000017fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000017fff; ++ __m256i_out = 
__lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xfffffffc; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xfffffffc; ++ *((int*)& __m128_op1[3]) = 0x00000001; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000103; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000017fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000017fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x04870ba0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x478b478b38031779; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6b769e690fa1e119; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe98c2a0; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x80ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x80ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff8000fffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00017fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff8000fffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00017fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f00fe; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000fe0000007f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f00fe; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000fe0000007f; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000103; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ 
*((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x3a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000103; ++ *((unsigned long*)& __m128d_result[1]) = 0x8000000100000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000103; ++ __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long*)& __m128i_op2[1]) = 0x8000000100000000; ++ *((unsigned long*)& __m128i_op2[0]) = 
0x8000000000000103; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000010300000103; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010300000000; ++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000047000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000047000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_result[0]) = 0xe4423f7b769f8ffe; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000010000ff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000010000ff00; ++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe4423f7b769f8ffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fe96fe95; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6afc01000001ff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fe96fe95; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6afc01000001ff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000ff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000ff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x7e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ff010000ff01; ++ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ff010000ff01; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000047000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000047000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000956a00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000956a00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[3]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xb500000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xb500000000000000; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x29); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000956a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000956a; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xb500000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xb500000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x007fffffffff9569; ++ *((unsigned long*)& __m256i_result[2]) = 0xb50000004efffe00; ++ *((unsigned long*)& __m256i_result[1]) = 0x007fffffffff9569; ++ *((unsigned long*)& __m256i_result[0]) = 0xb50000004efffe00; ++ __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000956a; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000956a; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000000000956a; ++ *((unsigned long*)& __m256i_op2[2]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000000000956a; ++ *((unsigned long*)& __m256i_op2[0]) = 0x000000004efffe00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000057348fe3; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000057348fe3; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ long_int_result = 0x000000000000ffff; ++ long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); ++ *((int*)& __m256_op0[7]) = 0x0000ff01; ++ *((int*)& __m256_op0[6]) = 0x00ff0000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0000ff01; ++ *((int*)& __m256_op0[3]) = 0x0000ff01; ++ *((int*)& __m256_op0[2]) = 0x00ff0000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0000ff01; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x67eb85b0b2ebb001; ++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x67eb85b0; ++ *((int*)& __m128_op0[2]) = 0xb2ebb001; ++ *((int*)& __m128_op0[1]) = 0xc8847ef6; ++ *((int*)& __m128_op0[0]) = 
0xed3f2000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; ++ unsigned_int_result = 0x0000000000100010; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x38); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x000000000000ff01; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ff01; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& 
__m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xa87745dbd93e4ea1; ++ *((unsigned long*)& __m128i_op1[0]) = 0xaa49601e26d39860; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001f0000001f; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000101000001010; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000101000001010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000020; ++ *((int*)& __m128_op1[2]) = 0x00000020; ++ *((int*)& __m128_op1[1]) = 0x0000001f; ++ *((int*)& __m128_op1[0]) = 0x0000001f; ++ *((int*)& __m128_result[3]) = 0x00000020; ++ *((int*)& __m128_result[2]) = 0x00000020; ++ *((int*)& __m128_result[1]) = 0x0000001f; ++ *((int*)& __m128_result[0]) = 0x0000001f; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x33eac9fdca42f660; ++ *((unsigned long*)& __m128i_op0[0]) = 0xaa472d26fe867091; ++ *((unsigned long*)& __m128i_op1[1]) = 
[Hunk body elided; the original diff line breaks were lost in extraction. This stretch is a long run of machine-generated test cases from the deleted gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c. Every case has the same shape: store 64-bit (or 32-bit) hex patterns into the halves of __m128i/__m128/__m128d or __m256i/__m256/__m256d operands through pointer casts, invoke exactly one LSX (__lsx_v*) or LASX (__lasx_xv*) intrinsic, and compare the produced vector against a precomputed bit pattern with ASSERTEQ_64 or ASSERTEQ_32 keyed to __LINE__. The intrinsics exercised here cover integer arithmetic (add/sub/mul, widening even/odd and saturating variants), min/max, shifts and rotates, bit set/clear/reverse, population count and leading-zero count, pack/pick/shuffle/extend, integer and floating-point comparisons, float add/max/div/flogb, and float-to-integer conversions.]
0x0020002000200014; ++ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x7ff80000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x7ff80000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x7ff80000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x7ff80000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4e3e133738bb47d2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x9c7c266e71768fa4; ++ __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000700000004fdff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000300000000fdff; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff7fffefffa01ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffbfffefffe01ff; ++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff6ff4ffff8db8; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffbaf4ffffb805; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff4ffb800ff0080; ++ __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000005; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000005; ++ *((int*)& __m128_op1[3]) = 0xfffefffe; ++ *((int*)& __m128_op1[2]) = 0xfffefffe; ++ *((int*)& __m128_op1[1]) = 0xfffefffe; ++ *((int*)& __m128_op1[0]) = 0xfffefffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 
0x00000040; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000040; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfff7fffe; ++ *((int*)& __m128_op0[2]) = 0xfffa01ff; ++ *((int*)& __m128_op0[1]) = 0xfffbfffe; ++ *((int*)& __m128_op0[0]) = 0xfffe01ff; ++ *((int*)& __m128_result[3]) = 0xfff7fffe; ++ *((int*)& __m128_result[2]) = 0xfffa01ff; ++ *((int*)& __m128_result[1]) = 0xfffbfffe; ++ *((int*)& __m128_result[0]) = 0xfffe01ff; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000700000004fdff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000300000000fdff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0006fff20003fff8; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002fffa00000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff7fffefffa01ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffbfffefffe01ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_result[1]) = 0x0305030203020502; ++ *((unsigned long*)& __m128i_result[0]) = 0x0301030203020502; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4e3e13368c17f6e6; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefe01010101; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefe01010101; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefe01010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefe01010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefe01010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefe01010101; ++ __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcfcfcfcfcfd; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000005; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0305030203020502; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0301030203020502; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000003050302; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000003010302; ++ __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x03050302; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x03010302; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xfefefefe; ++ *((int*)& __m256_op0[4]) = 0x01010101; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xfefefefe; ++ *((int*)& __m256_op0[0]) = 0x01010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefe3f800000; ++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x40404040; ++ *((int*)& __m256_op1[6]) = 0x40404040; ++ *((int*)& __m256_op1[5]) = 0x40404040; ++ *((int*)& __m256_op1[4]) = 0x40404040; ++ *((int*)& __m256_op1[3]) = 0x40404040; ++ *((int*)& __m256_op1[2]) = 0x40404040; ++ *((int*)& __m256_op1[1]) = 0x40404040; ++ *((int*)& __m256_op1[0]) = 0x40404040; ++ *((int*)& __m256_result[7]) = 0x40404040; ++ *((int*)& __m256_result[6]) = 0x40404040; ++ *((int*)& __m256_result[5]) = 0x40404040; ++ *((int*)& __m256_result[4]) = 0x40404040; ++ *((int*)& __m256_result[3]) = 0x40404040; ++ *((int*)& __m256_result[2]) = 0x40404040; ++ *((int*)& __m256_result[1]) = 0x40404040; ++ *((int*)& __m256_result[0]) = 0x40404040; ++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000071768fa4; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0404000004040000; ++ __m256i_out = __lasx_xvslli_w(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000040; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000040; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x40404040; ++ *((int*)& __m256_op2[6]) = 0x40404040; ++ *((int*)& __m256_op2[5]) = 0x40404040; ++ *((int*)& __m256_op2[4]) = 0x40404040; ++ *((int*)& __m256_op2[3]) = 0x40404040; ++ *((int*)& __m256_op2[2]) = 0x40404040; ++ 
*((int*)& __m256_op2[1]) = 0x40404040; ++ *((int*)& __m256_op2[0]) = 0x40404040; ++ *((int*)& __m256_result[7]) = 0xc0404040; ++ *((int*)& __m256_result[6]) = 0xc0404040; ++ *((int*)& __m256_result[5]) = 0xc0404040; ++ *((int*)& __m256_result[4]) = 0xc0404040; ++ *((int*)& __m256_result[3]) = 0xc0404040; ++ *((int*)& __m256_result[2]) = 0xc0404040; ++ *((int*)& __m256_result[1]) = 0xc0404040; ++ *((int*)& __m256_result[0]) = 0xc0404040; ++ __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x3); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000fe0000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000fe0000000; ++ __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefeffe0e0e0; ++ *((unsigned long*)& __m256i_result[1]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefeffe0e0e0; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0xe0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x3a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000404; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000404; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128d_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0404000004040000; ++ *((unsigned long*)& __m256i_result[3]) = 0x4000400040004000; ++ *((unsigned long*)& __m256i_result[2]) = 0x4000400040004000; ++ *((unsigned long*)& __m256i_result[1]) = 0x4000400040004000; ++ *((unsigned long*)& __m256i_result[0]) = 0x4000400040004000; ++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000040004000; ++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000404; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000404; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000020202000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000020202000; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h(__m128i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000404; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000404; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[2]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[0]) = 0x0404040404040404; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfefefefeffe0e0e0; ++ *((unsigned long*)& __m256d_op0[1]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfefefefeffe0e0e0; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000040004000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000040004000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0xfcfcfcfcfcfcfcfd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffcfcfcfc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffcfc6080; ++ __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x9c7c266e3faa293c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b(__m128i_op0,0xe); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e3faa293c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00009c7c; ++ *((int*)& __m128_op0[0]) = 0x00007176; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0xf3040705; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0xf3040705; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0xf3040705; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0404040404040404; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003f800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003f800000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7c7c000000007176; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x3e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x40404040; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x40404040; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xfefefefe; ++ *((int*)& __m256_op1[4]) = 0x3f800000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0xfefefefe; ++ *((int*)& __m256_op1[0]) = 0x3f800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b(__m128i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000f3040705; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 
0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7c7c000000007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000001f1f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x32); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fc03fc0; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000003fc03fc0; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7c7c9c0000007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7c7c9c0000007176; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f1f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff000000001f1f; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000404; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000404; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7c7c9c0000007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00ff000000001f1f; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7c7c9c0000007176; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); 
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfe01fe01fc01fc01; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfe01fe01fc01fc01; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000003fc03bbc; ++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long*)& __m256i_result[3]) = 0xfe01fe017e81fd02; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000003fc001fe; ++ *((unsigned long*)& __m256i_result[1]) = 0xfe01fe017e81fd02; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000003fc001fe; ++ __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfc01fc0101fe01dd; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfc01fc0101fe01dd; ++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc5c53492f25acbf2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f3040705; ++ *((unsigned long*)& __m128i_result[1]) = 
0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_result[0]) = 0xc5c534920000c4ed; ++ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x41cfe01dde000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x41cfe01dde000000; ++ __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xff000000001f1f00; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fc01fc01; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fc01fc01; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfc01000000003fc0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfc01000000003fc0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc5c53492f25acbf2; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff000000001f1f00; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_result[0]) = 0xc5c53492f25acbf2; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x30); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfe01fe01; ++ *((int*)& __m256_op0[6]) = 0x7e81fd02; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x3fc001fe; ++ *((int*)& __m256_op0[3]) = 0xfe01fe01; ++ *((int*)& __m256_op0[2]) = 0x7e81fd02; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x3fc001fe; ++ *((int*)& __m256_op1[7]) = 0xfe01fe01; ++ *((int*)& __m256_op1[6]) = 0x7e81fd02; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x3fc001fe; ++ *((int*)& __m256_op1[3]) = 0xfe01fe01; ++ *((int*)& __m256_op1[2]) = 0x7e81fd02; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x3fc001fe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xfe01fe017e81fd02; ++ *((unsigned long*)& __m256d_op0[2]) = 0x000000003fc001fe; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfe01fe017e81fd02; ++ *((unsigned long*)& __m256d_op0[0]) = 0x000000003fc001fe; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x41cfe01dde000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x41cfe01dde000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x41cfe01dde000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x41cfe01dde000000; ++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x41cfe01dde000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x41cfe01dde000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000013fc03bbc; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000013fc03bbc; ++ __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000001010100; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000405; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000001010100; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000405; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000001010100; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000405; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000001010100; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000405; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x01010100; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000405; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x01010100; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000405; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x01010100; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000405; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x01010100; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000405; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x01010100; ++ *((int*)& __m256_result[5]) = 0x80000000; ++ *((int*)& __m256_result[4]) = 0x00000405; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x01010100; ++ *((int*)& __m256_result[1]) = 0x80000000; ++ *((int*)& __m256_result[0]) = 0x00000405; ++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3f00c0003f00c000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3f00c0003f00c000; ++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_result[0]) = 0xc5c534920000c4ed; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffc01fc01; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03bbc; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffe00fe00; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000001fe01dde; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffe00fe00; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000001fe01dde; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,-2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000001010100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000405; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000001010100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000405; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffe00000ffe00000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffe00000ffe00000; ++ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xc2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffe00000ffe00000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffe00000ffe00000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0x8000000001010100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000405; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000001010100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000405; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xf6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000800080; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000800080; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000202; ++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xedededededededed; ++ *((unsigned long*)& __m128i_result[0]) = 0xedededededededed; ++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00800080; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000202; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00800080; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000202; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00800080; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000202; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00800080; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000202; ++ __m256_out = 
__lasx_xvfsub_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000009c007c00; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000071007600; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000010000000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = 
__lsx_vbitrevi_w(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000010000000100; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000010000000100; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x1fa0000000080000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000009c007c00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000071007600; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000009000900; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000009000900; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000009000900; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000009000900; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xd454545454545454; ++ *((unsigned long*)& __m128i_result[0]) = 0xd454545454545454; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x54); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000100; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1f60010000080100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1f60010000080100; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe010000fd02; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe010000fd02; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfe01fe010000fd02; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfe01fe010000fd02; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03fc0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f807f80; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f807f80; ++ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d(__m256i_op0,15); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000800080; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000800080; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1fa0000000080000; ++ __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007fffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007fffff; ++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000009000900; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000009000900; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000009000900; ++ *((unsigned long*)& 
__m128d_op2[0]) = 0x0000000009000900; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000009000900; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000009000900; ++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffc000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffeff000c057c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffc000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffeff000c057c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000f0f0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000f0f0; ++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000800080; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000800080; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000202; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000202; ++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000202; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0606060606060606; ++ *((unsigned long*)& __m128i_result[0]) = 0x0606060606060606; ++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000f0f0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000f0f0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000f0f0; ++ __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000f0f0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000f0f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007878; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007878; ++ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ 
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1f60010000080100;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1f60010000080100;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x1f60010000080100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x1f60010000080100;
++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007878;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007878;
++ *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[2]) = 0x0010001000107878;
++ *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010;
++ *((unsigned long*)& __m256i_result[0]) = 0x0010001000107878;
++ __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x80000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0xff88ff88;
++ *((int*)& __m256_op0[3]) = 0x80000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0xff88ff88;
++ *((int*)& __m256_op1[7]) = 0xfe01fe01;
++ *((int*)& __m256_op1[6]) = 0x0000fd02;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x3fc03fc0;
++ *((int*)& __m256_op1[3]) = 0xfe01fe01;
++ *((int*)& __m256_op1[2]) = 0x0000fd02;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x3fc03fc0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslli_h(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000;
++ __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_result[3]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_result[2]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_result[1]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_result[0]) = 0x1fa0000000080000;
++ __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x7ff8000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x1fa0000000080000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x000000003ddc5dac;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0010001000100010;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0010001000107878;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0010001000100010;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0010001000107878;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001;
++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00800080;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000202;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00800080;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000202;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0xff88ff88;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0xff88ff88;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x00000000;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x80000000;
++ *((int*)& __m256_result[6]) = 0x80000000;
++ *((int*)& __m256_result[5]) = 0x80000000;
++ *((int*)& __m256_result[4]) = 0xffc8ff88;
++ *((int*)& __m256_result[3]) = 0x80000000;
++ *((int*)& __m256_result[2]) = 0x80000000;
++ *((int*)& __m256_result[1]) = 0x80000000;
++ *((int*)& __m256_result[0]) = 0xffc8ff88;
++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x56a09e662ab46b31;
++ *((unsigned long*)& __m128d_op1[0]) = 0xb4b8122ef4054bb3;
++ *((unsigned long*)& __m128d_result[1]) = 0xd6a09e662ab46b31;
++ *((unsigned long*)& __m128d_result[0]) = 0x34b8122ef4054bb3;
++ __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31;
++ *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3;
++ *((unsigned long*)& __m128i_result[1]) = 0xd6e09e262af46b71;
++ *((unsigned long*)& __m128i_result[0]) = 0x34f8126ef4454bf3;
++ __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x80000000ffc8ff88;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x80000000ffc8ff88;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001ff91ff100000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001ff91ff100000;
++ __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x56a09e662ab46b31;
++ *((unsigned long*)& __m128i_op1[0]) = 0xb4b8122ef4054bb3;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x4b47edd10bfab44d;
++ __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001ff91ff100000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001ff91ff100000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000800080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000202;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000800080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000202;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffff7fff80;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001ff91ff0ffdfe;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffff7fff80;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001ff91ff0ffdfe;
++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007f7f;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007f7f;
++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78;
++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000a0008;
++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrne_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31;
++ *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xeb504f33155a3598;
++ *((unsigned long*)& __m128i_result[0]) = 0x1a5c0917fa02a5d9;
++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrph_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78;
++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00;
++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff5fff7;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff5fff7;
++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78;
++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ unsigned_long_int_result = 0x0000000000000000;
++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c78;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff;
++ __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c79;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c79;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007f8000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007f8000;
++ __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f8000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f8000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000029;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000029;
++ __m256i_out = __lasx_xvclz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000029;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000029;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000029;
++ __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000080000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020;
++ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000007f8000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f8000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x7b);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c00000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__,
__m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_w(__m128i_op0,11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c9c9c9c9c;
++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000a0008;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c00000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_result[1]) = 0x4e4e4e4e00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010;
++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000020;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009;
++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff00;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x477f0000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x477f0000;
++ __m256_out = __lasx_xvffint_s_w(__m256i_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb;
++ __m256i_out =
__lasx_xvsubi_bu(__m256i_op0,0x14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88;
++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000f788f788;
++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000f788f788;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvseqi_d(__m256i_op0,14);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0xbff0000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xbff0000000000000;
++ __m128d_out = __lsx_vffint_d_l(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op0[2]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op0[1])
= 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op0[0]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00;
++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrneh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6c6c6;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c6c6c6;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op0[2]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op0[1]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op0[0]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb;
++ *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb;
++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ long_op0 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3131313131313131;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x3131313131313131;
++ __m128i_out = __lsx_vextl_q_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000f788f788;
++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f788f788;
++ *((unsigned long*)& __m256i_result[3]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00ffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31;
++ *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3;
++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9b509be72f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x3513f2e3a1774d2c;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000501ffff0005;
++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0xc6c6c6c6;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0xc6c6c6c6;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0xc6c6c6c6;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0xc6c6c6c6;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
__m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9ca19d509ae734; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd1b09480f2123460; ++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001fffeff98; ++ *((unsigned long*)& __m128i_result[0]) = 0x0014ffe4ff76ffc4; ++ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long*)& __m128i_result[1]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0313100003131000; ++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000600000006; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000f788f788; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000f788f788; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000f788f788; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000f788f788; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f00000000; ++ __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffeff98; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000501ffff0005; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000600000001; ++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffeff98; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3131313131313131; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff7fcc; ++ *((unsigned long*)& __m128i_result[0]) = 0x18a3188b9854187b; ++ __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000600000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000c6c7; ++ *((unsigned long*)& __m128i_result[0]) = 0x8d8d8d8d8d8cc6c6; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffeff98; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x3131313131313131; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000c6c7; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8d8d8d8d8d8cc6c6; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x3c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x31313131; ++ *((int*)& __m128_op0[0]) = 0x31313131; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x31313131; ++ *((int*)& __m128_op1[0]) = 0x31313131; ++ *((int*)& __m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000008; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x80000000; ++ *((int*)& __m128_result[2]) = 0x80000008; ++ *((int*)& __m128_result[1]) = 0xa2f54a1e; ++ *((int*)& __m128_result[0]) = 0xa2f54a1e; ++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0xa2f54a1ea2f54a1e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x00004a1e00004a1e; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000013; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000013; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00004a1e00004a1e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_d(__m128i_op0,14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsrli_b(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x80000000; ++ *((int*)& __m128_op0[2]) = 0x80000008; ++ *((int*)& __m128_op0[1]) = 0xa2f54a1e; ++ *((int*)& __m128_op0[0]) = 0xa2f54a1e; ++ *((int*)& __m128_op1[3]) = 0x80000000; ++ *((int*)& __m128_op1[2]) = 0x80000008; ++ *((int*)& __m128_op1[1]) = 0xa2f54a1e; ++ *((int*)& __m128_op1[0]) = 0xa2f54a1e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000013; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000013; ++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200000; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x6a); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000200008; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200000; ++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffed; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffed; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffed; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffed; ++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200008; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffff00ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffff00ffff; ++ __m128i_out = __lsx_vslei_b(__m128i_op0,11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 
0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvfclass_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff88ff88; ++ __m256i_out = 
__lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffffe; ++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffff0078ffff0078; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffff0078ffff0078; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0xffffffffffffffff; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x3); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8d78336c83652b86; ++ *((unsigned long*)& __m128i_op1[0]) = 0x39c51f389c0d6112; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffff0001ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff9b0082; ++ *((unsigned long*)& __m128i_result[0]) = 0x003a0037fff2fff8; ++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000201fe01fc; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000201fe01fc; ++ __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long*)& __m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0xffffffff9c0d6112; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); ++ *((unsigned long*)& __m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long*)& __m128i_op0[0]) = 0x39c51f389c0d6112; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000001ce28f9c0; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000004e06b0890; ++ __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000001ce28f9c0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000004e06b0890; ++ *((unsigned long*)& __m128i_result[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefefeeffef7fefe; ++ __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff7300000ca00430; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001a00000; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101010240010202; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe00; ++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7feff; ++ *((unsigned long*)& __m128i_result[1]) = 
0xfcfcfcffbdfcfffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcedfcf5fcfd; ++ __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000f0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x000000ffff88ff88; ++ *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x000000ffff88ff88; ++ *((unsigned long*)& __m256i_result[3]) = 0xff88ff88ff880000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff88ff88ff880000; ++ *((unsigned long*)& __m256i_result[1]) = 0xff88ff88ff880000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff88ff88ff880000; ++ __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256d_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x000000ffff88ff88; ++ *((unsigned long*)& __m256d_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000000ffff88ff88; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7fefe; ++ int_op1 = 0xffffffff9c0d6112; ++ *((unsigned long*)& __m128i_result[1]) = 0xbffefdfebffefdfe; ++ *((unsigned long*)& __m128i_result[0]) = 0xbffefdfebffefdfe; ++ __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ffff88ff88; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ffff88ff88; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ffff88ff88; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ffff88ff88; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7fefe; ++ *((unsigned long*)& __m128i_result[1]) = 0xfef7fefebffefdfe; ++ *((unsigned long*)& __m128i_result[0]) = 0xfefefefdfefefeef; ++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x2d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fefc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001fefc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001fefc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0006000100040001; ++ *((unsigned long*)& __m128i_result[0]) = 0x00010002ffff0105; ++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffc0; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff0ffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffc0; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0ffc0; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000040; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff88ff88ff880000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffc0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff0ffc0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffc0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff0ffc0;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff78ffc0;
++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0001fffe0001fefc;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0007000000050000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0003000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000ffff88ff88;
++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ffff88ff88;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000800000000000;
++ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x2f);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0006000100040001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00010002ffff0105;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vslti_w(__m128i_op0,15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x28);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff88ff88ff880000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000800000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000800000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000800000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000800000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0100010001000101;
++ *((unsigned long*)& __m128i_result[0]) = 0x0100010001000101;
++ __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100040;
++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256d_op1[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256d_result[3]) = 0x00000000ff890000;
++ *((unsigned long*)& __m256d_result[2]) = 0x00000000ff790000;
++ *((unsigned long*)& __m256d_result[1]) = 0x00000000ff890000;
++ *((unsigned long*)& __m256d_result[0]) = 0x00000000ff790000;
++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_d(__m128i_op0,0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xa);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
++ __m256i_out = __lasx_xvclz_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100040;
++ unsigned_int_result = 0x0000000000000040;
++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x6);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff890000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff790000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff890000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff790000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ff790000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ff790000;
++ __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000bffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x6d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0080000100200001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0008000200020002;
++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000060002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000060002;
++ *((unsigned long*)& __m128i_op1[1]) = 0xe4c8b96e2560afe9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc001a1867fffa207;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000c0010000a186;
++ *((unsigned long*)& __m128i_result[0]) = 0x00067fff0002a207;
++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000020ff790020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000020ff790020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000200000002;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000200000002;
++ *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff;
++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffcfffffffc;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffcfffffffc;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffcfffffffc;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffcfffffffc;
++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xe4c8b96e2560afe9;
++ *((unsigned long*)& __m128i_op0[0]) = 0xc001a1867fffa207;
++ *((unsigned long*)& __m128i_op1[1]) = 0xe4c8b96e2560afe9;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc001a1867fffa207;
++ *((unsigned long*)& __m128i_result[1]) = 0xe2560afe9c001a18;
++ *((unsigned long*)& __m128i_result[0]) = 0xe2560afe9c001a18;
++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x24);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xe2560afe9c001a18;
++ *((unsigned long*)& __m128i_op0[0]) = 0xe2560afe9c001a18;
++ *((unsigned long*)& __m128i_result[1]) = 0x89582bf870006860;
++ *((unsigned long*)& __m128i_result[0]) = 0x89582bf870006860;
++ __m128i_out = __lsx_vslli_w(__m128i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000020ff790020;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000020ff790020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020;
++ __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xa5);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200;
++ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x89582bf870006860;
++ *((unsigned long*)& __m128i_op1[0]) = 0x89582bf870006860;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x94);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000087;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000087;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0xff800000;
++ *((int*)& __m256_result[6]) = 0xff800000;
++ *((int*)& __m256_result[5]) = 0xc30e0000;
++ *((int*)& __m256_result[4]) = 0xff800000;
++ *((int*)& __m256_result[3]) = 0xff800000;
++ *((int*)& __m256_result[2]) = 0xff800000;
++ *((int*)& __m256_result[1]) = 0xc30e0000;
++ *((int*)& __m256_result[0]) = 0xff800000;
++ __m256_out = __lasx_xvflogb_s(__m256_op0);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op2[0]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040;
++ __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c0010000a186;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00067fff0002a207;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0002;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_result[0]) = 0x05fafe0101fe000e;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100080;
++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff8900000040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff8900000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000c0010000a186;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00067fff0002a207;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e;
++ unsigned_int_result = 0x000000000000857a;
++ unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x4);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_result[3]) = 0x001a001a001a009a;
++ *((unsigned long*)& __m256i_result[2]) = 0x001a001a002a009a;
++ *((unsigned long*)& __m256i_result[1]) = 0x001a001a001a009a;
++ *((unsigned long*)& __m256i_result[0]) = 0x001a001a002a009a;
++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x001a001a001a009a;
++ *((unsigned long*)& __m256i_op0[2]) = 0x001a001a002a009a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x001a001a001a009a;
++ *((unsigned long*)& __m256i_op0[0]) = 0x001a001a002a009a;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000001a000000da;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001a000000da;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000001a000000da;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001a000000da;
++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xe2560afe9c001a18;
++ *((unsigned long*)& __m128i_op0[0]) = 0xe2560afe9c001a18;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000d82;
++ *((unsigned long*)& __m128i_result[0]) = 0x046a09ec009c0000;
++ __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc30e0000ff800000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc30e0000ff800000;
++ *((unsigned long*)& __m256i_result[3]) = 0xff800000ff800000;
++ *((unsigned long*)& __m256i_result[2]) = 0xc3030000ff800000;
++ *((unsigned long*)& __m256i_result[1]) = 0xff800000ff800000;
++ *((unsigned long*)& __m256i_result[0]) = 0xc3030000ff800000;
++ __m256i_out = __lasx_xvmini_b(__m256i_op0,3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000d82;
++ *((unsigned long*)& __m128i_op0[0]) = 0x046a09ec009c0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x046a09ec009c0000;
++ __m128i_out = __lsx_vextl_qu_du(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffff8900000040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff8900000040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvslei_h(__m256i_op0,-16);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000600007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000008ffffa209;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016;
++ __m128i_out = __lsx_vpcnt_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000016;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000011;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000600007fff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00000008ffffa209;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x046a09ec009c0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_d(__m128i_op0,-14);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000020;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000002000000000;
++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_result = 0x0000000000000000;
++ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x1);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000600007fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000008ffffa209;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000600007fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000008ffffa209;
++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040;
++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x046a09ec;
++ *((int*)& __m128_op0[0]) = 0x009c0000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff7a86;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffe01fff2;
++ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100080;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000000010006d;
++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040;
++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0x00000000;
++ *((int*)& __m128_op2[2]) = 0x00000000;
++ *((int*)& __m128_op2[1]) = 0x00000000;
++ *((int*)& __m128_op2[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x80000000;
++ *((int*)& __m128_result[2]) = 0x80000000;
++ *((int*)& __m128_result[1]) = 0x80000000;
++ *((int*)& __m128_result[0]) = 0x80000000;
++ __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x80000000;
++ *((int*)& __m128_op0[2]) = 0x80000000;
++ *((int*)& __m128_op0[1]) = 0x80000000;
++ *((int*)& __m128_op0[0]) = 0x80000000;
++ *((int*)& __m128_op1[3]) = 0x000000ff;
++ *((int*)& __m128_op1[2]) = 0x0000857a;
++ *((int*)& __m128_op1[1]) = 0x05fafe01;
++ *((int*)& __m128_op1[0]) = 0x01fe000e;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x000000000000006d;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000000000010006d;
++ *((unsigned long*)& __m256d_op0[1]) = 0x000000000000006d;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000000000010006d;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000040;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000040;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000080040;
++ *((unsigned long*)& __m256d_result[3]) = 0x00000000000000ad;
++ *((unsigned long*)& __m256d_result[2]) = 0x00000000001800ad;
++ *((unsigned long*)& __m256d_result[1]) = 0x00000000000000ad;
++ *((unsigned long*)& __m256d_result[0]) = 0x00000000001800ad;
++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000006;
++ *((int*)& __m128_op1[2]) = 0x00007fff;
++ *((int*)& __m128_op1[1]) = 0x00000008;
++ *((int*)& __m128_op1[0]) = 0xffffa209;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x0000006d;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x0010006d;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x0000006d;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x0010006d;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00080040;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00080040;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00080040;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00080040;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00080040;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x0010006d;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00080040;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x0010006d;
++ __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2b);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000080;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000080;
++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff88ffc0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff78ffc0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001ff1;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000001ff1;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x53);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff800000ff800000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc3030000ff800000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff800000ff800000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xc3030000ff800000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_result[0]) = 0x05fafe0101fe000e;
++ __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_result[3]) = 0x010101010101016c;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101410128;
++ *((unsigned long*)& __m256i_result[1]) = 0x010101010101016c;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101410128;
++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmskltz_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_op2[3]) = 0xcd636363;
++ *((int*)& __m128_op2[2]) = 0xcd636363;
++ *((int*)& __m128_op2[1]) = 0xcd636363;
++ *((int*)& __m128_op2[0]) = 0xcd636363;
++ *((int*)& __m128_result[3]) = 0xcd636363;
++ *((int*)& __m128_result[2]) = 0xcd636363;
++ *((int*)& __m128_result[1]) = 0xcd636363;
++ *((int*)& __m128_result[0]) = 0xcd636363;
++ __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0xcd636363;
++ *((int*)& __m128_op1[2]) = 0xcd636363;
++ *((int*)& __m128_op1[1]) = 0xcd636363;
++ *((int*)& __m128_op1[0]) = 0xcd636363;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ long_op1 = 0x0000000000000040;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040;
++ __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040;
++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000008002d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000008002d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000000;
++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000;
++ __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000010000080040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000080040;
++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000007f00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x2e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363;
++ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363;
++ *((unsigned long*)& __m128i_result[1]) = 0x00cd006300cd0063;
++ *((unsigned long*)& __m128i_result[0]) = 0x00cd006300cd0063;
++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000080040;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000080040;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000010000080040;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000080040;
++ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000010006d;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& 
__m256i_result[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000800400010006d; ++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000cd630000cd63; ++ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffcd63ffffcd63; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffd765ffffd765; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffcd63ffffcd63; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffd765ffffd765; ++ *((unsigned long*)& __m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; ++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000100080; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000100080; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000010000080040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000010000080040; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff8ffc0; ++ 
*((unsigned long*)& __m256i_result[0]) = 0x0000ff00fff8ffc0; ++ __m256i_out = __lasx_xvneg_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x2d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long*)& __m128i_op2[0]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long*)& __m128i_result[1]) = 0x00081f1f1f1f1f1f; ++ *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; ++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_result[1]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128i_result[0]) = 0xf359f359f359f359; ++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x93); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00010000; ++ *((int*)& __m128_op0[2]) = 0x00010000; ++ *((int*)& __m128_op0[1]) = 0x0000cd63; ++ *((int*)& __m128_op0[0]) = 0x0000cd63; ++ *((int*)& __m128_op1[3]) = 0xffffcd63; ++ *((int*)& __m128_op1[2]) = 0xffffcd63; ++ *((int*)& __m128_op1[1]) = 0xffffd765; ++ *((int*)& __m128_op1[0]) = 0xffffd765; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000048; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000048; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000800000010; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ff40; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff0100090040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ff40; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff0100090040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000cd630000cd63; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffff00000000ffff; ++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xffff00000000ffff; ++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000800000010; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000002000000; ++ __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00081f1f; ++ 
*((int*)& __m128_op0[2]) = 0x1f1f1f1f; ++ *((int*)& __m128_op0[1]) = 0x1f1f1f1f; ++ *((int*)& __m128_op0[0]) = 0x1f1f1f1f; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000cd630000cd63; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000329d0000329d; ++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff80000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000fff80000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff80000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000fff80000; ++ __m256i_out = 
__lasx_xvpackod_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fff8ffc0; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ff00fff8ffc0; ++ __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x82); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x1c083b1f3b1f3b1f; ++ *((unsigned long*)& __m128d_op0[0]) = 0xf244b948a323ab42; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000fff8ff40; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff0100090040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000fff8ff40; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ff0100090040; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffefff80; ++ __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b(__m128i_op0,-12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,-8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00010000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00010000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x02000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x02000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000002000000; ++ __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x43); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff800000ff800000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xc3030000ff800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xc3030000ff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[2]) = 0x00003cfc0000006f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[0]) = 0x00003cfc0000006f; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000800400010006d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0008001c0010001c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0008001c0010001c; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000010; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000010; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long*)& __m128i_result[1]) = 0x86dd8341b164f12b; ++ *((unsigned long*)& __m128i_result[0]) = 0x9611c3985b3159f5; ++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0200000002000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0200000002000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000002000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff01fb0408; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff01fb0408; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_op2[2]) = 0x00003cfc0000006f; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_op2[0]) = 0x00003cfc0000006f; ++ *((unsigned long*)& __m256i_result[3]) = 0x02007f8002000400; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000c5dc02005f64; ++ *((unsigned long*)& __m256i_result[1]) = 0x02007f8002000400; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000c5dc02005f64; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ff40; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ff0100090040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ff40; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ff0100090040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff02; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff02; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x86dd8341b164f12b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9611c3985b3159f5; ++ *((unsigned long*)& __m128i_result[1]) = 0x86dd8341b164f12b; ++ *((unsigned long*)& __m128i_result[0]) = 0x9611c3985b3159f5; ++ __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long*)& __m128i_op1[1]) = 0x86dd8341b164f12b; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9611c3985b3159f5; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000035697d4e; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x000000013ecaadf2; ++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128d_op0[0]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0xfff8ff40; ++ *((int*)& __m256_op0[5]) = 0x0000ff01; ++ *((int*)& __m256_op0[4]) = 0x00090040; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0xfff8ff40; ++ *((int*)& __m256_op0[1]) = 0x0000ff01; ++ *((int*)& __m256_op0[0]) = 0x00090040; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001700000017; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001700000017; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001700000017; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001700000017; ++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x9d9d9d9d9d9d9d8d; ++ *((unsigned long*)& __m256i_result[2]) = 0x9d9d9d9d9d9d9d9d; ++ *((unsigned long*)& __m256i_result[1]) = 0x9d9d9d9d9d9d9d8d; ++ *((unsigned long*)& __m256i_result[0]) = 0x9d9d9d9d9d9d9d9d; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x62); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf359f359f359f359; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff359f358; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffff359f358; ++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x86dd8341b164f12b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9611c3985b3159f5; 
++ *((unsigned long*)& __m128i_result[1]) = 0x0021b761002c593c; ++ *((unsigned long*)& __m128i_result[0]) = 0x002584710016cc56; ++ __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x86dd8341; ++ *((int*)& __m128_op1[2]) = 0xb164f12b; ++ *((int*)& __m128_op1[1]) = 0x9611c398; ++ *((int*)& __m128_op1[0]) = 0x5b3159f5; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x86dd8341b164f12b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9611c3985b3159f5; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff86dd83ff9611c3; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x28); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffefff7f00100080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffefff7f00100080; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff01fb0408; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff01fb0408; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0021b761002c593c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x002584710016cc56; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0200000002000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x02000000fdffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0200000002000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x02000000fdffffff; ++ __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long*)& __m128i_result[1]) = 0xd3259a2984048c23; ++ *((unsigned long*)& __m128i_result[0]) = 0xf9796558e39953fd; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000d3259a; ++ __m128i_out = __lsx_vbsrl_v(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0200000002000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x02000000fdffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0200000002000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x02000000fdffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000004ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000004ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff86dd83ff9611c3; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000035697d4e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000013ecaadf2; ++ *((unsigned long*)& __m128i_result[1]) = 0xe280e67f00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f80; ++ __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff01fb0408; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffff01fb0408; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0xf2b180c9fc1fefdc; ++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000002ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000002ff; ++ __m256i_out = __lasx_xvmsknz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ef; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ef; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000155b200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000b70000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000035697d4e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000013ecaadf2; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x000000000155b200; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000b70000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = 
__lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000000000b7;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffefff80;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000000000b7;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffefff80;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002;
++ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
[roughly a hundred further auto-generated vector test blocks, all following the same pattern as above: hex word constants are stored into the __m128*/__m256* operand and expected-result arrays, a single LSX (__lsx_v*) or LASX (__lasx_xv*) intrinsic is invoked, and the output is verified with ASSERTEQ_32/ASSERTEQ_64(__LINE__, expected, actual). Intrinsics exercised in this stretch: __lasx_xvfmax_s, __lasx_xvftintrz_wu_s, __lsx_vfcmp_cle_d, __lsx_vftintrz_wu_s, __lasx_xvsubwod_q_du, __lsx_vabsd_b, __lsx_vmsub_h, __lasx_xvfcvtl_d_s, __lsx_vmin_bu, __lasx_xvrotr_b, __lsx_vdiv_b, __lsx_vssub_h, __lasx_xvperm_w, __lasx_xvnori_b, __lasx_xvsle_b, __lsx_vslt_bu, __lsx_vssrarni_w_d, __lasx_xvilvh_d, __lsx_vmini_du, __lasx_xvaddwod_d_wu, __lasx_xvsat_hu, __lasx_xvfcmp_seq_s, __lasx_xvmuh_w, __lasx_xvmaxi_b, __lasx_xvsrari_w, __lasx_xvssran_hu_w, __lsx_vsrarni_b_h, __lasx_xvmin_hu, __lasx_xvsle_du, __lsx_vsrli_d, __lasx_xvmulwod_q_du, __lasx_xvsigncov_d, __lasx_xvsubwod_w_h, __lsx_vilvl_b, __lsx_vrotr_b, __lsx_vssrani_wu_d, __lasx_xvilvh_w, __lasx_xvmulwod_w_h, __lsx_vexth_h_b, __lsx_vsat_wu, __lasx_xvsrlni_h_w, __lasx_xvmskgez_b, __lasx_xvmulwev_d_wu_w, __lasx_xvffint_d_lu, __lasx_xvexth_wu_hu, __lasx_xvfmina_d, __lsx_vsubwev_h_b, __lasx_xvpackev_b, __lasx_xvfmadd_s, __lasx_xvfcmp_saf_s, __lasx_vext2xv_wu_bu, __lsx_vmuh_du, __lasx_xvbitseti_h, __lsx_vssrlni_hu_w, __lasx_xvldi, __lsx_vrotr_d, __lasx_xvfmsub_d, __lasx_xvssrarn_bu_h, __lasx_xvmskltz_h, __lsx_vadd_b, __lasx_xvsrli_h, __lasx_xvseq_b, __lsx_vmskltz_d, __lasx_xvsubwev_q_du, __lasx_xvsadd_wu, __lasx_xvssrani_w_d, __lsx_vsat_hu, __lsx_vfdiv_d, __lasx_xvsadd_d, __lasx_xvmuh_hu, __lasx_xvfadd_s, __lasx_xvsllwil_du_wu, __lasx_xvfcmp_sult_d, __lsx_vpackod_d, __lasx_xvrotr_w, __lasx_xvftintrm_w_s, __lsx_vftintrne_w_s, __lasx_xvfcmp_sun_d, __lasx_xvmulwod_q_d, __lsx_vextrins_d, __lasx_xvavg_du, __lasx_xvmaddwev_d_w, __lsx_vsadd_wu, __lsx_vfcmp_sun_s, __lasx_xvsadd_du, __lsx_vmuh_bu, __lsx_vadda_h, __lsx_vextrins_h, __lasx_xvssrlni_w_d, __lsx_vsrarn_b_h, __lasx_xvftintrzl_l_s, __lasx_xvfcmp_cueq_s, __lsx_vftintrpl_l_s, __lasx_xvfcmp_cle_s, __lsx_vabsd_wu, __lsx_vssrarni_bu_h, __lsx_vsubwev_d_wu, __lasx_xvsrli_w, __lsx_vclz_b, __lasx_xvmaddwod_w_hu, __lasx_xvbitset_d, __lasx_xvsat_b, __lasx_xvadd_d and __lsx_vseq_b.]
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000042ab41; ++ *((unsigned long*)& __m128i_result[0]) = 0xb1b1b1b1b16f0670; ++ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_q(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0b4c600000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000042ab41; ++ *((unsigned long*)& __m128i_op0[0]) = 0xb1b1b1b1b16f0670; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000042ab41; ++ *((unsigned long*)& __m128i_result[0]) = 0xb1b1b1b1b16f0670; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000042ab41; ++ *((unsigned long*)& __m128i_op0[0]) = 0xb1b1b1b1b16f0670; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000044470000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0b4c600000000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; 
++ *((unsigned long*)& __m128i_result[0]) = 0x0004280808080808; ++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0xa4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x084d12ce; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x24170000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000024170000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000044470000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vmini_b(__m128i_op0,5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0004280808080808; ++ *((unsigned long*)& __m128i_result[1]) = 0x0010203030201000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000808080800; ++ __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ff0000ffff; ++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000000ff0000ffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000404040; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff80000000; ++ __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000001fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x0000ffff; ++ *((int*)& __m256_op0[6]) = 0x0000ffff; ++ *((int*)& __m256_op0[5]) = 0x0000ffff; ++ *((int*)& __m256_op0[4]) = 0x0000ffff; ++ *((int*)& __m256_op0[3]) = 0x0000ffff; ++ *((int*)& __m256_op0[2]) = 0x0000ffff; ++ *((int*)& __m256_op0[1]) = 0x0000ffff; ++ *((int*)& __m256_op0[0]) = 0x0000ffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000ffff0000ffff; ++ __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000404040; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x08080807f7f7f7f8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x08080805f5f5f5f8; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffff00; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000001ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x000000000001ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x08080807f5f5f5f8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_result[1]) = 0x04040403fafafafc; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff80; ++ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7efefefe80ffffff; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvexth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x08080807f5f5f5f8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0202f5f80000ff00; ++ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 
0x00000000; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0xffff0000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffe0000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x04040403fafafafc; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x007efffefffefffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff80fffffffffffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x007efffefffefffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff80fffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0202f5f80000ff00; ++ *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& 
__m128_op1[2]) = 0xffffffff; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffbfff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x3f7f7f7f407fffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x3f7f7f7f407fffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fdfdfe; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x27); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffe0000000; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fdfdfe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7ffe0001fffe0001; ++ *((unsigned long*)& __m256i_result[2]) = 0x7ffe0001fffeffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000fdfdfe; ++ __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0x34); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x36); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8001; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffff8000; 
++ *((unsigned long*)& __m256d_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7efefefe80ffffff; ++ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w(__m256i_op0,4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4079808280057efe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007ffcfcfd020202; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x004000800080007e; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000fc00fd0002; ++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100c00000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x26); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ffe0001fffe0001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ffe0001fffeffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fdfdfe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrm_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000017f00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f03030000; ++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000017f00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f03030000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000017f00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007f7f03030000; ++ __m256i_out = __lasx_xvsat_du(__m256i_op0,0x37); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvreplgr2vr_d(long_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0xff800000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000ffffff0000; ++ __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00020006; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00020006; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00020006; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00020006; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x37b0003000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x37b0003000000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffe045fffffeff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffff7d; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000017f00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f03030000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h(__m128i_op0,3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_result[0]) = 0x5252525252525252; ++ __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[2]) = 0x4079808280057efe; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x007ffcfcfd020202; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned 
long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0fffffff0fffffff; ++ __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d(__m256i_op0,9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff7fff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x90007fff90008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0ffffffe90008000; ++ __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffff8000; ++ *((int*)& __m256_op0[5]) = 0x7efefefe; ++ *((int*)& __m256_op0[4]) = 0x80ffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x7efefefe; ++ *((int*)& __m256_op0[0]) = 0x80ffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x07ffffff07ffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x07ffffff08000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x07ffffff08000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x207f207f207f2000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000207f2000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x207f207f207f2000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000207f2000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[3]) = 0xdf80df80df80dfff; ++ *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffdf80dfff; ++ *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80dfff; ++ *((unsigned long*)& __m256i_op1[2]) = 
0x8080808080808080; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffdf80dfff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b(__m256i_op0,11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000290; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000290; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff003fffc0; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000003fffc0; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff0fffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x90007fff90008000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0ffffffe90008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x87ffffff87ffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xc880bfffc880c080; ++ *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_result[0]) = 0x87ffffffc880c080; ++ __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000290; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000290; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xdf80df80df80dfff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffdf80dfff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffc00fffffc00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffc00fffffc00; ++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 
0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000101; ++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0xff800000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000010100000101; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00ff007f007f00; ++ __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x2e34594c3b000000; ++ __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000010100000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000101; ++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00ff007f007f00; ++ __m256i_out = __lasx_xvmini_d(__m256i_op0,-5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffc00fffffc00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffc00fffffc00; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[2]) = 0xc03fc03fc03fc03f; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_result[0]) = 0xc03fc03fc03fc03f; ++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x3a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc03fc03fc03fc03f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc03fc03fc03fc03f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000002d; ++ *((unsigned long*)& __m256i_result[2]) = 0xc02dc02dc02dc02d; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000002d; ++ *((unsigned long*)& __m256i_result[0]) = 0xc02dc02dc02dc02d; ++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xed); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2e34594c3b000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x002e0059003b0000; ++ __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000010100000101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000010100000101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskgez_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256d_op0[2]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256d_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256d_op0[0]) = 
0xff00ff007f007f00; ++ *((unsigned long*)& __m256d_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256d_op1[2]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256d_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m256d_op1[0]) = 0xff00ff007f007f00; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001a001a001a001a; ++ *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a001a; ++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x000000ff; ++ *((int*)& __m256_op0[4]) = 0x000000ff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x000000ff; ++ *((int*)& __m256_op0[0]) = 0x000000ff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000101; ++ *((int*)& __m256_op1[4]) = 0x00000101; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000101; ++ *((int*)& __m256_op1[0]) = 0x00000101; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7ff80000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x7ff80000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x2e34594c; ++ *((int*)& __m128_op0[0]) = 0x3b000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x800000ff000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x800000ff000000ff; ++ __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001a001a001a001a; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001a001a001a001a; ++ *((unsigned long*)& __m128i_result[1]) = 
0x001a001a001a000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a000b; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long*)& __m128i_result[0]) = 0x171d423524e9e9e9; ++ __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x002e0059003b0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000005c000000b2; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000007600000000; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x017001a002c80260; ++ *((unsigned long*)& __m128i_result[0]) = 0x01d8000000000000; ++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x000000ff; ++ *((int*)& __m256_op0[4]) = 0x000000ff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x000000ff; ++ *((int*)& __m256_op0[0]) = 0x000000ff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000010000000100; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvfclass_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x017001a002c80260; ++ *((unsigned long*)& __m128i_op0[0]) = 0x01d8000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x2e34594c3b000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00feff0100feff01; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00feff0100feff01; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000005c000000b2; ++ *((unsigned long*)& __m128i_op1[0]) 
= 0x0000007600000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff;
++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x000000002e34594c;
++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);

[111 further machine-generated test blocks of the same shape are elided from this flattened excerpt of gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c. Each block stores the 128-bit (__m128i/__m128/__m128d) or 256-bit (__m256i/__m256/__m256d) operand words, stores the expected result words, invokes one LSX or LASX builtin, and compares the vectors with ASSERTEQ_64 or ASSERTEQ_32(__LINE__, result, out). In order, the elided blocks exercise: __lasx_xvmulwod_d_wu, __lsx_vflogb_s, __lsx_vbitseti_d, __lasx_xvsubwod_q_d, __lsx_vmin_w, __lasx_xvsra_h, __lsx_vhaddw_qu_du, __lsx_vmuh_h, __lsx_vfrstpi_h, __lsx_vffinth_d_w, __lasx_xvfnmadd_d, __lasx_xvsadd_d, __lasx_xvsle_d, __lasx_xvsran_b_h, __lasx_xvssub_hu, __lasx_xvflogb_d, __lasx_xvfcvtl_d_s, __lsx_vaddwod_w_hu, __lsx_vshuf4i_w, __lsx_vfmadd_s, __lsx_vslt_b, __lasx_xvslti_wu, __lasx_xvmaddwod_w_hu, __lsx_vaddi_wu, __lsx_vfmina_d, __lsx_vfcmp_clt_s, __lsx_vfcmp_caf_s, __lsx_vhaddw_wu_hu, __lsx_vhsubw_qu_du, __lasx_xvpcnt_h, __lasx_xvfcmp_cueq_s, __lsx_vreplve_w, __lasx_xvmuh_w, __lsx_vexth_d_w, __lasx_xvffint_s_w, __lasx_vext2xv_w_h, __lsx_vseqi_b, __lsx_vpackev_b, __lsx_vshuf_b, __lasx_xvslei_h, __lsx_vneg_d, __lsx_vbitrev_h, __lsx_vmskltz_b, __lsx_vseqi_h, __lsx_vfrecip_d, __lasx_xvmaddwev_d_wu, __lasx_xvsra_b, __lasx_xvpickve_w, __lasx_xvslei_d, __lasx_xvfnmsub_d, __lsx_vmod_w, __lsx_vsubwod_h_b, __lsx_vsigncov_b, __lasx_xvssrarni_hu_w, __lasx_xvmulwod_q_d, __lsx_vaddwod_w_h, __lasx_xvmskltz_h, __lsx_vssrlni_h_w, __lsx_vsllwil_hu_bu, __lasx_xvsubi_du, __lsx_vbitrev_b, __lasx_xvslli_b, __lsx_vmax_h, __lsx_vslli_b, __lasx_xvmulwod_q_du_d, __lasx_xvfrstp_b, __lsx_vssrani_h_w, __lasx_xvneg_h, __lasx_xvmaddwev_q_d, __lsx_vmaddwev_h_bu_b, __lasx_xvaddi_hu, __lasx_xvsat_wu, __lasx_xvfcmp_cune_s, __lasx_xvaddi_du, __lasx_xvfcmp_sun_d, __lsx_vextrins_d, __lsx_vssrlrni_b_h, __lasx_xvhsubw_h_b, __lasx_xvfrstpi_b, __lasx_vext2xv_w_h, __lasx_xvbitseti_d, __lasx_xvsrlni_d_q, __lsx_vsubwev_h_b, __lasx_vext2xv_wu_hu, __lsx_vpackev_w, __lasx_xvneg_w, __lsx_vclo_w, __lsx_vmod_d, __lsx_vfcmp_sueq_s, __lsx_vaddwod_q_d, __lasx_xvsrarn_h_w, __lasx_xvpickve2gr_du, __lsx_vrotr_b, __lasx_xvand_v, __lasx_xvilvl_d, __lasx_xvbitrev_h, __lasx_xvsrlrn_b_h, __lasx_xvfcvt_s_d, __lasx_xvssrarn_h_w, __lsx_vsat_w, __lsx_vreplve_h, __lasx_xvfrsqrt_d, __lsx_vmul_w, __lasx_xvssub_w, __lasx_xvsat_d, __lasx_xvftintl_l_s, __lsx_vhsubw_w_h, __lsx_vfsqrt_s, __lsx_vmadd_b, __lasx_xvftintrne_w_s, __lsx_vbitrevi_w. The excerpt breaks off at the start of a further __m128i block.]
= 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff972700000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffba5f00000000; ++ __m128i_out = __lsx_vslli_d(__m128i_op0,0x20); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long*)& __m128i_result[1]) = 0x00060fbf02040fbf; ++ *((unsigned long*)& __m128i_result[0]) = 0x00020fbf02000fbf; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000002c21ffeff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc0000000c0000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000002c21ffeff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc0000000c0000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x498100814843ffe1; ++ *((unsigned long*)& __m256i_result[2]) = 0x4981008168410001; ++ *((unsigned long*)& __m256i_result[1]) = 0x498100814843ffe1; ++ *((unsigned long*)& __m256i_result[0]) = 0x4981008168410001; ++ __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& 
__m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x49810081; ++ *((int*)& __m256_op1[6]) = 0x4843ffe1; ++ *((int*)& __m256_op1[5]) = 0x49810081; ++ *((int*)& __m256_op1[4]) = 0x68410001; ++ *((int*)& __m256_op1[3]) = 0x49810081; ++ *((int*)& __m256_op1[2]) = 0x4843ffe1; ++ *((int*)& __m256_op1[1]) = 0x49810081; ++ *((int*)& __m256_op1[0]) = 0x68410001; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x80000000; ++ *((int*)& __m256_result[6]) = 0x80000000; ++ *((int*)& __m256_result[5]) = 0x80000000; ++ *((int*)& __m256_result[4]) = 0x80000000; ++ *((int*)& __m256_result[3]) = 0x80000000; ++ *((int*)& __m256_result[2]) = 0x80000000; ++ *((int*)& __m256_result[1]) = 0x80000000; ++ *((int*)& __m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long*)& __m256i_result[3]) = 0x9ffffd8020010001; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff9fffffff9; ++ *((unsigned long*)& __m256i_result[1]) = 0x9ffffd8020010001; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff9fffffff9; ++ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x9ffffd8020010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffff9fffffff9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x9ffffd8020010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffff9fffffff9; ++ *((unsigned long*)& __m256i_op1[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000018ffff2b13; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x00000018ffff2b13; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffac5cffffac5c; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffac5cffffac5c; ++ __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000555889; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000002580f01; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00060fbf02040fbf; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00020fbf02000fbf; ++ *((unsigned long*)& __m128i_result[1]) = 0x00060fbf02596848; ++ *((unsigned long*)& __m128i_result[0]) = 0x00020fbf04581ec0; ++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x010169d9010169d9; ++ *((unsigned long*)& __m128i_result[0]) = 0x01010287010146a1; ++ __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x498100814843ffe1; ++ *((unsigned long*)& __m256i_op0[2]) = 0x4981008168410001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x498100814843ffe1; ++ *((unsigned long*)& __m256i_op0[0]) = 0x4981008168410001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff896099cbdbfff1; ++ *((unsigned long*)& __m256i_result[2]) = 0xc987ffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xff896099cbdbfff1; ++ *((unsigned long*)& __m256i_result[0]) = 0xc987ffffffffffff; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf02596848; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf04581ec0; ++ *((unsigned long*)& __m128i_op1[1]) = 0x010169d9010169d9; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01010287010146a1; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200000001; ++ __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrli_h(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffac5cffffac5c; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffac5cffffac5c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x010169d9010169d9; ++ *((unsigned long*)& __m128i_op1[0]) = 0x01010287010146a1; ++ *((unsigned long*)& __m128i_result[1]) = 0xff01ff01ac025c87; ++ *((unsigned long*)& __m128i_result[0]) = 0xff01ff01ac465ca1; ++ __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff896099cbdbfff1; ++ *((unsigned long*)& __m256i_op0[2]) = 0xc987ffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff896099cbdbfff1; ++ *((unsigned long*)& __m256i_op0[0]) = 0xc987ffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00769f673424000f; ++ *((unsigned long*)& __m256i_result[2]) = 0x3678000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x00769f673424000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x3678000100000001; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffd27db010d20fbf; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0xffa4fb6021a41f7e; ++ __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256d_result[0]) = 0xfffffffffffffff8; ++ __m256d_out = __lasx_xvfrint_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x9ffffd8020010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff9fffffff9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x9ffffd8020010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff9fffffff9; ++ *((unsigned long*)& __m256i_result[3]) = 0x00009fff00002001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00009fff00002001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00060fbf; ++ *((int*)& __m128_op0[2]) = 0x02040fbf; ++ *((int*)& __m128_op0[1]) = 0x00020fbf; ++ *((int*)& __m128_op0[0]) = 0x02000fbf; ++ *((int*)& __m128_op1[3]) = 0x63636363; ++ *((int*)& __m128_op1[2]) = 0x63636363; ++ *((int*)& __m128_op1[1]) = 0xffd27db0; ++ *((int*)& __m128_op1[0]) = 0x10d20fbf; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00009fff; ++ *((int*)& __m256_op0[6]) = 0x00002001; ++ *((int*)& __m256_op0[5]) = 0x0000ffff; ++ *((int*)& __m256_op0[4]) = 0x0000ffff; ++ *((int*)& __m256_op0[3]) = 0x00009fff; ++ *((int*)& __m256_op0[2]) = 0x00002001; ++ *((int*)& __m256_op0[1]) = 0x0000ffff; ++ *((int*)& __m256_op0[0]) = 0x0000ffff; ++ *((int*)& __m256_op1[7]) = 0xfffeb683; ++ *((int*)& __m256_op1[6]) = 0x9ffffd80; ++ *((int*)& __m256_op1[5]) = 0xfffe97c0; ++ *((int*)& __m256_op1[4]) = 0x20010001; ++ *((int*)& 
__m256_op1[3]) = 0xfffeb683; ++ *((int*)& __m256_op1[2]) = 0x9ffffd80; ++ *((int*)& __m256_op1[1]) = 0xfffe97c0; ++ *((int*)& __m256_op1[0]) = 0x20010001; ++ *((int*)& __m256_op2[7]) = 0x00009fff; ++ *((int*)& __m256_op2[6]) = 0x00002001; ++ *((int*)& __m256_op2[5]) = 0x0000ffff; ++ *((int*)& __m256_op2[4]) = 0x0000ffff; ++ *((int*)& __m256_op2[3]) = 0x00009fff; ++ *((int*)& __m256_op2[2]) = 0x00002001; ++ *((int*)& __m256_op2[1]) = 0x0000ffff; ++ *((int*)& __m256_op2[0]) = 0x0000ffff; ++ *((int*)& __m256_result[7]) = 0xfffeb683; ++ *((int*)& __m256_result[6]) = 0x80002001; ++ *((int*)& __m256_result[5]) = 0xfffe97c0; ++ *((int*)& __m256_result[4]) = 0x8000ffff; ++ *((int*)& __m256_result[3]) = 0xfffeb683; ++ *((int*)& __m256_result[2]) = 0x80002001; ++ *((int*)& __m256_result[1]) = 0xfffe97c0; ++ *((int*)& __m256_result[0]) = 0x8000ffff; ++ __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb68380002001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c08000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb68380002001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c08000ffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000007fff5b41c0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000007fff5b41d0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000007fff5b41c0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000007fff5b41d0; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x59); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00009fff; ++ *((int*)& __m256_op0[6]) = 0x00002001; ++ *((int*)& __m256_op0[5]) = 0x0000ffff; ++ *((int*)& __m256_op0[4]) = 0x0000ffff; ++ *((int*)& __m256_op0[3]) = 0x00009fff; ++ *((int*)& __m256_op0[2]) = 0x00002001; ++ *((int*)& __m256_op0[1]) = 0x0000ffff; ++ *((int*)& __m256_op0[0]) = 0x0000ffff; ++ *((int*)& __m256_op1[7]) = 0xfffeb683; ++ *((int*)& __m256_op1[6]) = 0x9ffffd80; ++ *((int*)& __m256_op1[5]) = 0xfffe97c0; ++ *((int*)& __m256_op1[4]) = 0x20010001; ++ *((int*)& __m256_op1[3]) = 0xfffeb683; ++ *((int*)& __m256_op1[2]) = 0x9ffffd80; ++ *((int*)& __m256_op1[1]) = 0xfffe97c0; ++ *((int*)& __m256_op1[0]) = 0x20010001; ++ *((int*)& __m256_result[7]) = 0x00009fff; ++ *((int*)& __m256_result[6]) = 0x9ffffd80; ++ *((int*)& __m256_result[5]) = 0x0000ffff; ++ *((int*)& __m256_result[4]) = 0x20010001; ++ *((int*)& __m256_result[3]) = 0x00009fff; ++ *((int*)& __m256_result[2]) = 0x9ffffd80; ++ *((int*)& __m256_result[1]) = 0x0000ffff; ++ *((int*)& __m256_result[0]) = 0x20010001; ++ __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff01ff01ac025c87; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff01ff0100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xac465ca100000000; ++ __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000004; ++ *((unsigned 
long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00009fff00002001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00009fff00002001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x6363636163636363; ++ __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00009fff9ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff20010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00009fff9ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff20010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00002080df5b41cf; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00002080df5b41cf; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000009fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff40a6; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000009fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff40a6; ++ __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff01ff01ac025c87; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636163636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfffeb683; ++ *((int*)& __m256_op0[6]) = 0x9ffffd80; ++ *((int*)& __m256_op0[5]) = 0xfffe97c0; ++ *((int*)& __m256_op0[4]) = 0x20010001; ++ *((int*)& __m256_op0[3]) = 0xfffeb683; ++ *((int*)& __m256_op0[2]) = 0x9ffffd80; ++ *((int*)& __m256_op0[1]) = 0xfffe97c0; ++ *((int*)& __m256_op0[0]) = 0x20010001; ++ *((int*)& __m256_op1[7]) = 0x00009fff; ++ *((int*)& __m256_op1[6]) = 0x9ffffd80; ++ *((int*)& __m256_op1[5]) = 0x0000ffff; ++ *((int*)& __m256_op1[4]) = 0x20010001; ++ *((int*)& __m256_op1[3]) = 0x00009fff; ++ *((int*)& __m256_op1[2]) = 0x9ffffd80; ++ *((int*)& __m256_op1[1]) = 0x0000ffff; ++ *((int*)& __m256_op1[0]) = 0x20010001; ++ *((int*)& __m256_op2[7]) = 0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00002080; ++ *((int*)& __m256_op2[4]) = 0xdf5b41cf; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00002080; ++ *((int*)& __m256_op2[0]) = 0xdf5b41cf; ++ *((int*)& __m256_result[7]) = 0xfffeb683; ++ *((int*)& __m256_result[6]) = 0x007ffd80; ++ *((int*)& __m256_result[5]) = 0xfffe97c0; ++ *((int*)& __m256_result[4]) = 0xdf5b41cf; ++ *((int*)& __m256_result[3]) = 0xfffeb683; ++ *((int*)& __m256_result[2]) = 0x007ffd80; ++ *((int*)& __m256_result[1]) = 0xfffe97c0; ++ *((int*)& __m256_result[0]) = 0xdf5b41cf; ++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfffeb683; ++ *((int*)& __m256_op0[6]) = 0x9ffffd80; ++ *((int*)& __m256_op0[5]) = 0xfffe97c0; ++ *((int*)& __m256_op0[4]) = 0x20010001; ++ *((int*)& __m256_op0[3]) = 0xfffeb683; ++ *((int*)& __m256_op0[2]) = 0x9ffffd80; ++ *((int*)& __m256_op0[1]) = 0xfffe97c0; ++ *((int*)& __m256_op0[0]) = 0x20010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000019ffdf403; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000011ffd97c3; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000019ffdf403; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000011ffd97c3; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffeb8649d0d6250; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffeb8649d0d6250; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op2[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op2[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op2[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op2[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x7f800000; ++ *((int*)& __m256_result[6]) = 0x7f800000; ++ *((int*)& __m256_result[5]) = 0x7f800000; ++ *((int*)& __m256_result[4]) = 0x7f800000; ++ *((int*)& __m256_result[3]) = 0x7f800000; ++ *((int*)& __m256_result[2]) = 0x7f800000; ++ *((int*)& __m256_result[1]) = 0x7f800000; ++ *((int*)& __m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200000001; ++ __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0xff01ff01ac025c87; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long*)& __m128i_result[1]) = 0x64616462b76106dc; ++ *((unsigned long*)& __m128i_result[0]) = 0x64616462b71d06c2; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
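++ /* Illustrative note (editorial, hedged): the __lsx_vadda_w check just
++    above appears to exercise a per-32-bit-lane sum of absolute values,
++    res[i] = |a[i]| + |b[i]| with modulo wrap-around.  A minimal scalar
++    model of one lane, using the operand words of that test:
++
++      int32_t  a = 0x63636363;                   /* word from __m128i_op0 */
++      int32_t  b = (int32_t) 0xac025c87;         /* word from __m128i_op1,
++                                                    negative: |b| = 0x53fda379 */
++      uint32_t r = (uint32_t) (a < 0 ? -a : a)
++                 + (uint32_t) (b < 0 ? -b : b);  /* 0x63636363 + 0x53fda379 */
++      /* r == 0xb76106dc, the low word of __m128i_result[1]; likewise
++         0x63636363 + |0xff01ff01| == 0x64616462, its high word.  This is
++         an assumed semantic sketch, not part of the generated file.  */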
*((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000019ffdf403; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000011ffd97c3; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000019ffdf403; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000011ffd97c3; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000019ffdf403; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000019ffdf403; ++ __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x64616462b76106dc; ++ *((unsigned long*)& __m128i_op1[0]) = 0x64616462b71d06c2; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000001; ++ *((int*)& __m256_op1[6]) = 0x9ffdf403; ++ *((int*)& __m256_op1[5]) = 0x00000001; ++ *((int*)& __m256_op1[4]) = 0x1ffd97c3; ++ *((int*)& __m256_op1[3]) = 0x00000001; ++ *((int*)& __m256_op1[2]) = 0x9ffdf403; ++ *((int*)& __m256_op1[1]) = 0x00000001; ++ *((int*)& __m256_op1[0]) = 0x1ffd97c3; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000200a000020020; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000200a000020020; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00020000ffff0001; ++ __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = 
__lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000001; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00010001; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00060fbf; ++ *((int*)& __m128_op1[2]) = 0x02040fbf; ++ *((int*)& __m128_op1[1]) = 0x00020fbf; ++ *((int*)& __m128_op1[0]) = 0x02000fbf; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000400000001; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h(__m128i_op0,7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00020000; ++ *((int*)& __m128_op0[0]) = 0xffff0001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256d_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256d_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256d_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out 
= __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb683007ffd80; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c0df5b41cf; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb683007ffd80; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c0df5b41cf; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffeb664007ffd61; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffeb664007ffd61; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffe97a1df5b41b0; ++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00020000;
++ *((int*)& __m128_op0[0]) = 0xffff0001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb664007ffd61;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97a1df5b41b0;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb664007ffd61;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97a1df5b41b0;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff007ffd61;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff007ffd61;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001;
++ __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x62);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000fffe00009fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000fffe00002001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000fffe00009fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000fffe00002001;
++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0002000400000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0003000500000001;
++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x63636363;
++ *((int*)& __m128_op0[2]) = 0x63636363;
++ *((int*)& __m128_op0[1]) = 0x63636363;
++ *((int*)& __m128_op0[0]) = 0x63636363;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vftint_wu_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0xfffeb664;
++ *((int*)& __m256_op0[6]) = 0x007ffd61;
++ *((int*)& __m256_op0[5]) = 0xfffe97a1;
++ *((int*)& __m256_op0[4]) = 0xdf5b41b0;
++ *((int*)& __m256_op0[3]) = 0xfffeb664;
++ *((int*)& __m256_op0[2]) = 0x007ffd61;
++ *((int*)& __m256_op0[1]) = 0xfffe97a1;
++ *((int*)& __m256_op0[0]) = 0xdf5b41b0;
++ *((int*)& __m256_op1[7]) = 0xfffeb683;
++ *((int*)& __m256_op1[6]) = 0x9ffffd80;
++ *((int*)& __m256_op1[5]) = 0xfffe97c0;
++ *((int*)& __m256_op1[4]) = 0x20010001;
++ *((int*)& __m256_op1[3]) = 0xfffeb683;
++ *((int*)& __m256_op1[2]) = 0x9ffffd80;
++ *((int*)& __m256_op1[1]) = 0xfffe97c0;
++ *((int*)& __m256_op1[0]) = 0x20010001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000001faf19b60;
++ *((unsigned long*)& __m256i_op1[2]) = 0x6c2905ae7c14c561;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000001faf19b60;
++ *((unsigned long*)& __m256i_op1[0]) = 0x6c2905ae7c14c561;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x94d7fb5200000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x94d7fb5200000000;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_w_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00020004;
++ *((int*)& __m128_op0[0]) = 0x00000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrne_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000;
++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000e3ab0001352b;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000e3ab0001352b;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff00007fff0000;
++ __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fff00007fff0000;
++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff8;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8;
++ __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x94d7fb5200000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x94d7fb5200000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_op2[2]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000038ea4d4a;
++ *((unsigned long*)& __m256i_op2[0]) = 0x7fff00007fff0000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x94d7fb5200000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x94d7fb5200000000;
++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vslti_h(__m128i_op0,12);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0xffffffff;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001388928513889;
++ *((unsigned long*)& __m128i_op0[0]) = 0x006938094a013889;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001388928513889;
++ *((unsigned long*)& __m128i_op1[0]) = 0x006938094a013889;
++ *((unsigned long*)& __m128i_result[1]) = 0x0002711250a27112;
++ *((unsigned long*)& __m128i_result[0]) = 0x00d2701294027112;
++ __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffeb683007ffd80;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c0df5b41cf;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffeb683007ffd80;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c0df5b41cf;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001497c98ea4fca;
++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001497c98ea4fca;
++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff;
++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0001497c98ea4fca;
++ *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0001497c98ea4fca;
++ *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000006715b036;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000006715b036;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0xffffffff;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0xffffffff;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0xffffffff;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0xffffffff;
++ *((int*)& __m256_op1[7]) = 0xfffeb664;
++ *((int*)& __m256_op1[6]) = 0x007ffd61;
++ *((int*)& __m256_op1[5]) = 0xfffe97a1;
++ *((int*)& __m256_op1[4]) = 0xdf5b41b0;
++ *((int*)& __m256_op1[3]) = 0xfffeb664;
++ *((int*)& __m256_op1[2]) = 0x007ffd61;
++ *((int*)& __m256_op1[1]) = 0xfffe97a1;
++ *((int*)& __m256_op1[0]) = 0xdf5b41b0;
++ *((int*)& __m256_op2[7]) = 0x00000000;
++ *((int*)& __m256_op2[6]) = 0x00000000;
++ *((int*)& __m256_op2[5]) = 0x00000000;
++ *((int*)& __m256_op2[4]) = 0x00000000;
++ *((int*)& __m256_op2[3]) = 0x00000000;
++ *((int*)& __m256_op2[2]) = 0x00000000;
++ *((int*)& __m256_op2[1]) = 0x94d7fb52;
++ *((int*)& __m256_op2[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0xfffeb664;
++ *((int*)& __m256_result[6]) = 0xffffffff;
++ *((int*)& __m256_result[5]) = 0xfffe97a1;
++ *((int*)& __m256_result[4]) = 0xffffffff;
++ *((int*)& __m256_result[3]) = 0xfffeb664;
++ *((int*)& __m256_result[2]) = 0xffffffff;
++ *((int*)& __m256_result[1]) = 0xfffe97a1;
++ *((int*)& __m256_result[0]) = 0xffffffff;
++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000003fffffffd;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffd;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000003fffffffe;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000003fffffffd;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0002711250a27112;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00d2701294027112;
++ *((unsigned long*)& __m128i_result[1]) = 0xffff7112ffff7112;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff7012ffff7112;
++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0002711250a27112;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00d2701294027112;
++ *((unsigned long*)& __m128i_result[1]) = 0x080a791a58aa791a;
++ *((unsigned long*)& __m128i_result[0]) = 0x08da781a9c0a791a;
++ __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x94d7fb5200000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00020000;
++ *((int*)& __m128_op0[0]) = 0xffff0001;
++ *((int*)& __m128_op1[3]) = 0x63636363;
++ *((int*)& __m128_op1[2]) = 0x63636363;
++ *((int*)& __m128_op1[1]) = 0x63636363;
++ *((int*)& __m128_op1[0]) = 0x63636363;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010;
++ __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00020000ffff0001;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00020000ffff0001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000003030000;
++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00020000ffff0001;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000001;
++ __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffeb664007ffd61;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffe97a1df5b41b0;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffeb664007ffd61;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffe97a1df5b41b0;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffeb664007ffd61;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffe97a1df5b41b0;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffeb664007ffd61;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffe97a1df5b41b0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc1f03e1042208410;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x00f0001000000010;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0xf000f000f000f000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xf000f010f000f010;
++ *((unsigned long*)& __m256i_op1[1]) = 0xf000f000f000f000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xf000f010f000f010;
++ *((unsigned long*)& __m256i_result[3]) = 0x00f0000000f00010;
++ *((unsigned long*)& __m256i_result[2]) = 0xfff0ff00fff0ff10;
++ *((unsigned long*)& __m256i_result[1]) = 0x00f0000000f00010;
++ *((unsigned long*)& __m256i_result[0]) = 0xfff0ff00fff0ff10;
++ __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00f0001000000010;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_result[0]) = 0x00f0001000000010;
++ __m128i_out = __lsx_vsrai_h(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0002711350a27112;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00d5701794027113;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff61010380;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff61010380;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000006;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000006;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000006;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000006;
++ __m256i_out = __lasx_xvmini_du(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ef00ff010f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff010f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128i_op1[0]) = 0xc1f03e1042208410;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001000110;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000431f851f;
++ __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_result[2]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_result[0]) = 0x000020a4ffffbe4f;
++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00027113;
++ *((int*)& __m128_op0[2]) = 0x50a27112;
++ *((int*)& __m128_op0[1]) = 0x00d57017;
++ *((int*)& __m128_op0[0]) = 0x94027113;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0002711350a27112;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00d5701794027113;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfff0ff000000000f;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000f00f000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfff0ff000000000f;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000f00f000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00f8000000000008;
++ *((unsigned long*)& __m256i_result[2]) = 0x000800f800000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00f8000000000008;
++ *((unsigned long*)& __m256i_result[0]) = 0x000800f800000000;
++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000110;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000431f851f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001011010;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000043431f1f;
++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xf0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xf2f444429d96dbe1;
++ *((unsigned long*)& __m128d_op0[0]) = 0xddd76c75f2f44442;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m128d_op1[0]) = 0xc1f03e1042208410;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_result[3]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_result[2]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_result[1]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_result[0]) = 0x07fed3c8f7ad28d0;
++ __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x400000003fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000;
++ __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x1e);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x400000003fffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4000000040000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000007fffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x400000003fffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000;
++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x400000003fffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0x4000000040000000;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op2[2]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_op2[1]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op2[0]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffdf5b000041b0;
++ __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00f0000000f00010;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfff0ff00fff0ff10;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00f0000000f00010;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfff0ff00fff0ff10;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsat_w(__m256i_op0,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffff97a1;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffdf5b000041b0;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00f8000000000008;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000800f800000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00f8000000000008;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000800f800000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xe3f7fff7fffcbd08;
++ *((unsigned long*)& __m256i_result[2]) = 0x0dbfa28000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0xe3f7fff7fffcbd08;
++ *((unsigned long*)& __m256i_result[0]) = 0x0dbfa28000000000;
++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00f0000000f00010;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0ff00fff0ff10;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00f0000000f00010;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0ff00fff0ff10;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0087ff87f807ff87;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0087ff87f807ff87;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x004001be00dc008e;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ffff0100010001;
++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808;
++ __m256i_out = __lasx_xvclz_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x004001be00dc008e;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000a400ff004f;
++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0087ff87f807ff87;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0087ff87f807ff87;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0010001000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0010001000000000;
++ __m128i_out = __lsx_vclz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00ff00ef;
++ *((int*)& __m128_op0[2]) = 0x00ff010f;
++ *((int*)& __m128_op0[1]) = 0x00ff00ff;
++ *((int*)& __m128_op0[0]) = 0x00ff010f;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfrint_s(__m128_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ unsigned_long_int_result = 0x0000000000000000;
++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e10;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000400080003fff;
++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_op0[1]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_result[3]) = 0x01c03f8034c03200;
++ *((unsigned long*)& __m256i_result[2]) = 0x3dc02b400a003400;
++ *((unsigned long*)& __m256i_result[1]) = 0x01c03f8034c03200;
++ *((unsigned long*)& __m256i_result[0]) = 0x3dc02b400a003400;
++ __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x23);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x01c03f8034c03200;
++ *((unsigned long*)& __m256i_op0[2]) = 0x3dc02b400a003400;
++ *((unsigned long*)& __m256i_op0[1]) = 0x01c03f8034c03200;
++ *((unsigned long*)& __m256i_op0[0]) = 0x3dc02b400a003400;
++ *((unsigned long*)& __m256i_op1[3]) = 0x01c03f8034c03200;
++ *((unsigned long*)& __m256i_op1[2]) = 0x3dc02b400a003400;
++ *((unsigned long*)& __m256i_op1[1]) = 0x01c03f8034c03200;
++ *((unsigned long*)& __m256i_op1[0]) = 0x3dc02b400a003400;
++ *((unsigned long*)& __m256i_op2[3]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op2[2]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_op2[1]) = 0x07fee332883f86b0;
++ *((unsigned long*)& __m256i_op2[0]) = 0x07fed3c8f7ad28d0;
++ *((unsigned long*)& __m256i_result[3]) = 0x01ce3c0050d32d40;
++ *((unsigned long*)& __m256i_result[2]) = 0x3fadafc013acf600;
++ *((unsigned long*)& __m256i_result[1]) = 0x01ce3c0050d32d40;
++ *((unsigned long*)& __m256i_result[0]) = 0x3fadafc013acf600;
++ __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e10;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000400080003fff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000bc2000007e04;
++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000;
++ __m128i_out = __lsx_vneg_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmskltz_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e04;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000400080003fff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000bc2000007e04;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffbfff7fffc000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff43dfffff81fb;
++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2;
++ *((unsigned long*)& __m256i_result[3]) = 0x97a297a297a297a2;
++ *((unsigned long*)& __m256i_result[2]) = 0x97a297a297a297a2;
++ *((unsigned long*)& __m256i_result[1]) = 0x97a297a297a297a2;
++ *((unsigned long*)& __m256i_result[0]) = 0x97a297a297a297a2;
++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000234545b;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4d1;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000002345454;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c0dec4ca;
++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffbfff7fffc000;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffff43dfffff81fb;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000010000005e;
++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000002345454;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4ca;
++ *((unsigned long*)& __m128i_result[1]) = 0x000030ebffffffdc;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000203ffffff25;
++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff97a2;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff97a2;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000002345454;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4ca;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000060006;
++ __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000a400ff004f;
++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff;
++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x000000010000685e;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256d_op0[1]) = 0x000000010000685e;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0087ff87f807ff87;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0087ff87f807ff87;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xb3b3b3b3b3b3b3b3;
++ *((unsigned long*)& __m256i_result[2]) = 0xb3b3b3b3b3b3b3b3;
++ *((unsigned long*)& __m256i_result[1]) = 0xb3b3b3b3b3b3b3b3;
++ *((unsigned long*)& __m256i_result[0]) = 0xb3b3b3b3b3b3b3b3;
++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0x4c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4d1;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff3f213b2f;
++ __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_op2[2]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_op2[1]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_op2[0]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000203000010d0;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc00300000220;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x27);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_result[1]) = 0x30eb022002101b20;
++ *((unsigned long*)& __m128i_result[0]) = 0x020310edc003023d;
++ __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x1d);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d;
++ *((unsigned long*)& __m128i_op1[1]) = 0x30eb020302101b03;
++ *((unsigned long*)& __m128i_op1[0]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_result[1]) = 0x30eb022002101b20;
++ *((unsigned long*)& __m128i_result[0]) = 0x020310edc003023d;
++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4d1;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000040223c2e;
++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x000000010000685e;
++ *((unsigned long*)& __m256d_op1[2]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256d_op1[1]) = 0x000000010000685e;
++ *((unsigned long*)& __m256d_op1[0]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4d1;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000010000685e;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000020a4ffffbe4f;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000003ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000003ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001ffffffffffff;
++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000003ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000003ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001ffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000010000005e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlrni_w_d(__m256i_op0,__m256i_op1,0x3c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[1]) = 0x000b000b000b000b;
++ *((unsigned long*)& __m128i_result[0]) = 0x000b000b000b000b;
++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,11);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03;
++ *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_op1[1]) = 0x30eb020302101b03;
++ *((unsigned long*)& __m128i_op1[0]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_result[1]) = 0x020310d0c0030220;
++ *((unsigned long*)& __m128i_result[0]) = 0x020310d0c0030220;
++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000b000b000b000b;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000b000b000b000b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x000b000b000b000b;
++ *((unsigned long*)& __m128i_result[0]) = 0x000b000b000b000b;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned
long*)& __m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x30eb022002101b20; ++ *((unsigned long*)& __m128i_op1[0]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0xffff97a2; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0xffff97a2; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010000; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010000; ++ __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffc3; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000010000685e; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000010000685e; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000008; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000040000001b; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000008; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000040000001b; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x01010000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x01010000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x101b0330eb022002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x030220020310edc0; ++ *((unsigned long*)& __m128i_result[1]) = 0x0080800080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080008000; ++ __m128i_out = __lsx_vslli_b(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& 
__m256d_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff97a2; ++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvmod_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000008; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000040000001b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000008; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000040000001b; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x021b7d24; ++ *((int*)& __m128_op0[2]) = 0x49678a35; ++ *((int*)& __m128_op0[1]) = 0x030298a6; ++ *((int*)& __m128_op0[0]) = 0x21030a49; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000002; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ 
*((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0020004000400040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0020004000400040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0020004000400040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0020004000400040; ++ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xa); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x328e1080889415a0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3960b1a401811060; ++ *((unsigned long*)& __m128i_op1[1]) = 0x328e1080889415a0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3960b1a401811060; ++ *((unsigned long*)& __m128i_op2[1]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x32f3c7a38f9f4b8b; ++ *((unsigned long*)& __m128i_result[0]) = 0x2c9e5069f5d57780; ++ __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000027; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000080c43b700; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x56); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefe7f; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefe7f; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x3f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x30eb022002101b20; ++ *((unsigned long*)& __m128i_op1[0]) = 0x020310edc003023d; ++ *((unsigned long*)& __m128i_result[1]) = 0x022002101b200203; ++ *((unsigned long*)& __m128i_result[0]) = 0x022002101b200203; ++ __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x30); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x022002101b200203; ++ *((unsigned long*)& __m128i_op0[0]) = 0x022002101b200203; ++ *((unsigned long*)& __m128i_op1[1]) = 0x022002101b200203; ++ *((unsigned long*)& __m128i_op1[0]) = 0x022002101b200203; ++ *((unsigned long*)& __m128i_op2[1]) = 0x000000080c43b700; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x036caeeca7592703; ++ *((unsigned long*)& __m128i_result[0]) = 0x022002101b200203; ++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long*)& __m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0xffffffffffff8a35; ++ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x4); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x00000000abba7980; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ccf98000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000001010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000001010000; ++ __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00c0c000c0000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0000000c000c000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000027; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000027; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000027; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00c0c000c0000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc0000000c000c000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00c0c000c0000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0000000c000c000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001e001e001e001e; ++ *((unsigned long*)& __m128i_result[0]) = 0x001e001e001e001e; ++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrp_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00010001; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00010001; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00010001; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x021b7d24c9678a35; ++ *((unsigned long*)& __m128i_result[0]) = 0x030298a6a1030a49; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplve0_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001e001e001e001e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001e001e001e001e; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x021b7d24c9678a35; ++ *((unsigned long*)& __m128i_op1[0]) = 0x030298a6a1030a49; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvneg_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
[This run of machine-generated cases continues for roughly 120 further tests
(about 1,400 diff lines), every one with the exact shape shown above: store
each 64-bit operand word through a punned pointer, invoke a single LSX/LASX
intrinsic, and compare against the expected words recorded by the generator
via ASSERTEQ_64/ASSERTEQ_32 (scalar extractions are read back through
unsigned_int_out or long_int_out instead). In order, the elided cases
exercise: __lasx_xvsubi_du, __lsx_vshuf_d, __lasx_xvslti_wu,
__lsx_vextl_qu_du, __lsx_vslt_w, __lsx_vnor_v, __lsx_vshuf_w,
__lsx_vbitrevi_b, __lsx_vpackod_h, __lsx_vffint_s_l, __lasx_xvmax_w,
__lasx_xvfcmp_saf_s, __lsx_vaddwev_h_bu_b, __lsx_vadda_w, __lasx_xvpickve_w,
__lsx_vmaddwev_d_wu_w, __lsx_vfsqrt_d, __lsx_vaddi_wu, __lsx_vssrln_h_w,
__lasx_xvsub_d, __lsx_vmod_hu, __lsx_vsran_h_w, __lsx_vfcmp_sle_s,
__lsx_vmin_hu, __lsx_vhsubw_h_b, __lsx_vmulwev_w_hu, __lsx_vmsub_d,
__lsx_vfrint_d, __lasx_xvpackod_w, __lasx_xvexth_q_d, __lsx_vadd_q,
__lsx_vpickve2gr_hu, __lasx_xvmini_du, __lsx_vldi, __lsx_vsub_b,
__lsx_vmsknz_b, __lasx_xvsrlrni_w_d, __lsx_vmadd_h, __lsx_vshuf4i_b,
__lsx_vfrintrne_s, __lasx_xvftintrz_wu_s, __lasx_xvsle_w, __lsx_vsubwev_w_hu,
__lasx_xvmin_h, __lsx_vssrarni_d_q, __lasx_xvssrln_b_h, __lsx_vsadd_d,
__lasx_xvftintrp_w_d, __lasx_xvfcmp_clt_d, __lsx_vsrlri_d, __lsx_vsubwod_d_w,
__lsx_vfsqrt_d, __lsx_vaddwod_h_b, __lsx_vaddwod_d_wu_w, __lasx_xvbitseti_b,
__lasx_xvpickve2gr_d, __lsx_vor_v, __lasx_xvfcmp_caf_s, __lsx_vsle_wu,
__lasx_xvfclass_d, __lsx_vmaddwod_w_hu_h, __lsx_vmini_bu, __lsx_vfnmadd_d,
__lsx_vsran_w_d, __lsx_vmin_h, __lasx_xvssrarn_b_h, __lasx_vext2xv_hu_bu,
__lsx_vavg_wu, __lasx_xvpickve2gr_wu, __lsx_vmul_b, __lsx_vfrstpi_b,
__lasx_xvmskltz_w, __lasx_xvmaddwev_d_wu_w, __lsx_vsrari_d, __lsx_vbitseti_b,
__lsx_vmulwod_q_du_d, __lsx_vsran_h_w, __lsx_vssrani_wu_d,
__lsx_vaddwod_d_wu_w, __lasx_xvsubwod_h_bu, __lsx_vssrarni_d_q,
__lsx_vssrarni_d_q, __lsx_vfcmp_sor_d, __lsx_vssrln_w_d, __lasx_xvmulwev_d_wu,
__lsx_vmaddwev_w_hu, __lasx_xvssrani_b_h, __lsx_vseqi_h, __lsx_vfcmp_saf_s,
__lasx_xvldi, __lsx_vsubwev_h_bu, __lasx_xvpickod_h, __lsx_vshuf_b,
__lasx_xvsigncov_b, __lasx_xvmadd_h, __lasx_xvmulwev_w_hu, __lasx_xvslti_h,
__lasx_xvbitrev_h, __lsx_vfrintrne_d, __lasx_xvsrlr_h, __lsx_vreplgr2vr_b,
__lasx_xvssrlrn_w_d, __lasx_xvfcmp_cne_s, __lasx_xvexth_w_h,
__lasx_vext2xv_wu_bu, __lsx_vseqi_d, __lsx_vfrecip_d, __lasx_xvssrani_wu_d,
__lasx_xvand_v, __lasx_xvmulwev_w_hu_h, __lsx_vmulwev_d_wu_w,
__lasx_xvsubwod_h_b, __lsx_vavg_hu, __lasx_xvmini_h, __lsx_vffint_d_l,
__lasx_xvfcmp_sult_d, __lsx_vfmax_d, __lsx_vmini_b, __lasx_xvsubwev_d_w.]
++
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1f3f06d4fcba4e98; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2e1135681fa8d951; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000007d07fffffff; ++ __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1b00e300e4; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1b00e300e4; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1b00e300e4; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1b00e30100; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x002000000020ffff; ++ __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff1cff1c; ++ __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffff1cff1c; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xdc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op2[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op2[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x084d1a0907151a3d; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x084d1a0907151a3d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000007d07fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000868600008785; ++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0674c8868a74fc80; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; ++ int_result = 0x00000000090b0906; ++ int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000008686; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00008e5680008685; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007fff7fff8000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0674c8868a74fc80; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfdce8003090b0906; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0674c8868a74fc80; ++ *((unsigned long*)& __m128i_op2[0]) = 0xfdce8003090b0906; ++ *((unsigned long*)& __m128i_result[1]) = 0x0029aeaca57d74e6; ++ *((unsigned long*)& __m128i_result[0]) = 0xdbe332365392c686; ++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000b000b000b000b; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000b000b000b000b; ++ __m256i_out = __lasx_xvpcnt_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 
0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x002000000020ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000056f64adb9464; ++ *((unsigned long*)& __m128i_op1[0]) = 0x29ca096f235819c2; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000004399d32; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xff1cff1c; ++ *((int*)& __m256_op0[6]) = 0xff1cff1c; ++ *((int*)& __m256_op0[5]) = 0xff1cff1c; ++ *((int*)& __m256_op0[4]) = 0xff1cff1c; ++ *((int*)& __m256_op0[3]) = 0xff1cff1c; ++ *((int*)& __m256_op0[2]) = 0xff1cff1c; ++ *((int*)& __m256_op0[1]) = 0xff1cff1c; ++ *((int*)& __m256_op0[0]) = 0xff1cff1c; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0674c886fcba4e98; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long*)& __m128i_result[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long*)& __m128i_result[0]) = 0xffc0ffc0003f003f; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xd3220000d3f20000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8bff0000a7b80000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0909000009090000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0909000009090000; ++ __m128i_out = __lsx_vmini_bu(__m128i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x000b000b000b000b; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000b000b000b000b; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0674c886fcba4e98; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfdce8003090b0906; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff001a00000000; ++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0029aeaca57d74e6; ++ *((unsigned long*)& __m128i_op0[0]) = 0xdbe332365392c686; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000056f64adb9464; ++ *((unsigned long*)& __m128i_op1[0]) = 0x29ca096f235819c2; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x002a05a2f059094a; ++ *((unsigned long*)& __m128i_result[0]) = 0x05ad3ba576eae048; ++ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0674c886fcba4e98; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long*)& __m128i_op2[1]) = 0x002a05a2f059094a; ++ *((unsigned long*)& __m128i_op2[0]) = 0x05ad3ba576eae048; ++ *((unsigned long*)& __m128i_result[1]) = 0xd4a6cc27d02397ce; ++ *((unsigned long*)& __m128i_result[0]) = 0x24b85f887e903abe; ++ __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; ++ __m256i_out = __lasx_xvori_b(__m256i_op0,0x6b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op0[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op0[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op0[0]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op1[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op1[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op1[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_op1[0]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff001a00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff0000000000ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0017001700176d6d; ++ *((unsigned long*)& __m256i_result[2]) = 0x0017001700176d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0017001700176d6d; ++ *((unsigned long*)& __m256i_result[0]) = 0x0017001700176d6d; ++ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0909000009090000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0909000009090000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0909000009090000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0909000009090000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x002a05a2f059094a; ++ *((unsigned long*)& __m128i_op2[0]) = 0x05ad3ba576eae048; ++ *((unsigned long*)& __m128i_result[1]) = 0x0909e0480909e048; ++ *((unsigned long*)& __m128i_result[0]) = 0x0909e0480909e048; ++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007770ffff941d; ++ __m128i_out = __lsx_vaddi_du(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000004000000040; ++ 
*((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff9411; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000100000001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x37b951002d81a921; ++ __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000ffff000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000077529b522400; ++ __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000b81c8382; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007efe7f7f8000; ++ __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007efe7f7f8000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000b81c8382; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000077af9450; ++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x003fffc0; ++ *((int*)& __m128_op0[2]) = 0xffc0003f; ++ *((int*)& __m128_op0[1]) = 0xffc0ffc0; ++ *((int*)& __m128_op0[0]) = 0x003f003f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_result[0]) = 0x000047404f4f040d; ++ __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x4f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000047404f4f040d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000214f; ++ *((unsigned long*)& __m128i_result[0]) = 0xc31b63d846ebc810; ++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff941d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007770ffff941d; ++ __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x98); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000100000001000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x37b951002d81a921; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000047404f4f040d; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000082000000826; ++ *((unsigned long*)& __m128i_result[0]) = 0x1b5c4c203e685617; ++ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000100000001000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x37b951002d81a921; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x3e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000214f; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc31b63d846ebc810; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00ff0000800000ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff941d; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000010a7; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000046ebaa2c; ++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000b81c8382; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1865e65a1; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0xf1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3ff0000000000000; ++ __m128i_out = __lsx_vfrintrp_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000007f7f7f; ++ __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f7f; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x007f007f00007f7f; ++ __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x58); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000010a7; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000046ebaa2c; ++ *((unsigned long*)& __m128i_op1[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000800000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000800000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff941d; ++ *((unsigned long*)& __m128i_op2[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_op2[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_result[0]) = 0x78508ad4ec2ffcde; ++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffdfdc0d; ++ __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_op0[0]) = 0x78508ad4ec2ffcde; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_result[0]) = 0x78508ad4ae70fd87; ++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128i_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a753500950fa306; ++ __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_op0[0]) = 0x00000000000d6d6d; ++ int_op1 = 0x00000000090b0906; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000090b0906; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128d_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00007fff7fff8000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x007fffff; ++ *((int*)& __m128_op0[1]) = 0x007fffff; ++ *((int*)& __m128_op0[0]) = 0xff800000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ 
*((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x007f7f7f; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x380fdfdfc0000000; ++ __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128i_result[1]) = 0xe0001fffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x66); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff8000; ++ __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff8000; ++ __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x380fdfdfc0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffc7f100004000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f7f; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffc7f100004000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000c7f14000; ++ __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x0a752a55; ++ *((int*)& __m128_op0[1]) = 0x0a753500; ++ *((int*)& __m128_op0[0]) = 0x950fa306; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& 
__m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x380fdfdf; ++ *((int*)& __m128_op1[0]) = 0xc0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128d_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128d_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff14eb54ab; ++ *((unsigned long*)& __m128i_result[0]) = 0x14ea6a002a406a00; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff14eb54ab; ++ *((unsigned long*)& __m128i_op0[0]) = 0x14ea6a002a406a00; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff80008a7555aa; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a7535006af05cf9; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128i_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff80008a7555aa; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a7535006af05cf9; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff758aaa56; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffa9fb0d07; ++ __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0a753500950fa306; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff14eb54ab; ++ *((unsigned long*)& __m128i_op1[0]) = 0x14ea6a002a406a00; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x00007fff7fff8000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a753500950fa306; ++ __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x68); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvfclass_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x0a752a55; ++ *((int*)& 
__m128_op0[1]) = 0x0a753500; ++ *((int*)& __m128_op0[0]) = 0x950fa306; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x0a752a55; ++ *((int*)& __m128_op1[1]) = 0x0a753500; ++ *((int*)& __m128_op1[0]) = 0x950fa306; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0a753500950fa306; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0a753500950fa306; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a753500a9fa0d06; ++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a753500a9fa0d06; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xf589caff5605f2fa; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000090b0906; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000040; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100002000; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000090b0906; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000005060503; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000073737; ++ __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000090b0906; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 
0x00000000000d6d6d; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000005060503; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000073737; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000050007; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000039; ++ __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000050007; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000002; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000039; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000040; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100002000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0a753500a9fa0d06; ++ *((unsigned long*)& __m128i_result[1]) = 0x0d060d060d060d06; ++ *((unsigned long*)& __m128i_result[0]) = 0x0d060d060d060d06; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf589caff5605f2fa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000eb00ab; ++ *((unsigned long*)& __m128i_result[0]) = 0x017400ff004500fa; ++ __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x0a752a55; ++ *((int*)& __m128_op0[1]) = 0x0a753500; ++ *((int*)& __m128_op0[0]) = 0xa9fa0d06; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,-7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000040; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000100002000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007fff7fff8000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xce); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000014; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000014; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000014; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf589caff5605f2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000055; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000054; ++ __m256i_out = __lasx_xvmskltz_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0d060d060d060d06; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0d060d060d060d06; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0d060d060d060d06; ++ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0d060d060d060d06; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0d060d060d060d06; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ff8; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a7480007fff8000; ++ __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7200000072000000; ++ __m256i_out = __lasx_xvldi(-3214); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; 
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x721e001e721e001e; ++ *((unsigned long*)& __m256i_result[2]) = 0x721e001e721e001e; ++ *((unsigned long*)& __m256i_result[1]) = 0x721e001e721e001e; ++ *((unsigned long*)& __m256i_result[0]) = 0x721e001e721e001e; ++ __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000055; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000054; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000007000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffff0000; ++ __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ff8; ++ __m128i_out = __lsx_vsat_w(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00003ff8; ++ *((int*)& __m128_result[3]) = 0xff800000; ++ *((int*)& __m128_result[2]) = 0xff800000; ++ *((int*)& __m128_result[1]) = 0xff800000; ++ *((int*)& __m128_result[0]) = 0xc3080000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0000033a; ++ *((int*)& __m128_op0[2]) = 0x0bde0853; ++ *((int*)& __m128_op0[1]) = 0x0a960e6b; ++ *((int*)& __m128_op0[0]) = 0x0a4f0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_result[2]) = 0x2020000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_result[0]) = 0x2020000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0000; ++ *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x2020000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x2020000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256d_result[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7fffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x467fe000; ++ __m128_out = __lsx_vffint_s_w(__m128i_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xffffff1dffffff1d; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff1dffffff1d; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffff1dffffff1d; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff1dffffff1d; ++ __m256i_out = __lasx_xvldi(2845); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001f00000020; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001f00000020; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xff00ffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 
0x00ffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xff00ffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x000fffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x000fffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000467fe000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000467fef81; ++ __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000467fef81; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x000000ffffff1dff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffff1dffffff1dff; ++ *((unsigned long*)& __m256i_op2[1]) = 0x000000ffffff1dff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffff1dffffff1dff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff0001; ++ __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000467fef81; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000013; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000ffffff1dff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff1dffffff1dff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000ffffff1dff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff1dffffff1dff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff1dffffff1dff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff1dffffff1dff; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x2aaaaa85aaaaaa85; ++ *((unsigned long*)& __m256i_op1[2]) = 0x2aaa48f4aaaa48f4; ++ *((unsigned long*)& __m256i_op1[1]) = 0x2aaaaa85aaaaaa85; ++ *((unsigned long*)& __m256i_op1[0]) = 0x2aaa48f4aaaa48f4; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128d_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128d_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00ff008000ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x00ff008000ff0000; ++ __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00011; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00011; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00011; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00011; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff8001; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff8001; ++ __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x6e); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x41dffc0000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x41dffc0000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe519ab7e71e33848; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned 
long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffab7e71e33848; ++ __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xbc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff8001ffff8001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff8001ffff8001; ++ *((unsigned long*)& __m256i_result[3]) = 0x7fff800000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffc0017fffc001; ++ *((unsigned long*)& __m256i_result[1]) = 0x7fff800000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffc0017fffc001; ++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff9dff9dff9dff9d; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffab7e71e33848; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x3b5eae24ab7e3848; ++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xff9dff9dff9dff9d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffceffceffcf1fcb; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000090b0906; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h(__m128i_op0,0x3); ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned 
long*)& __m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c6c60000c6c6; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000c6c58000c6b2; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000c6c40000c6c6; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000c6c78000c6b2; ++ __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x21); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000000000000; ++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x30); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_h(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x0000ffff; ++ *((int*)& __m128_op0[1]) = 0x3b5eae24; ++ *((int*)& __m128_op0[0]) = 0xab7e3848; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00003f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x800fffffffffffff; ++ __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long*)& __m128d_op1[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffab7e71e33848; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffab7e71e33848; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffab5f71e33829; ++ __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000075dbe982; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000071e48cca; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0ebb7d300e3c9199; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrm_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xfbff0000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0xfbff0000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x800fffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0020; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff0001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff0001; ++ __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xce9135c49ffff570; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x23); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000800000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000800000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000800000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000800000; ++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfbff0000ffff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfbff0000ffff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfbff0000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xfbff0000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f80ffffff808000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f80ffffff808000; ++ __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x11); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f80ffffff808000; ++ 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f80ffffff808000; ++ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f7fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f7fff; ++ __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffe0ffe000000000; ++ __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7f80ffffff808000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7f80ffffff808000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffe0ffe000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fa0001fff808000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffe0ffe000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fa0001fff808000; ++ __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x60600000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x6060000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x60600000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x6060000000000000; ++ __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x60); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffe0ffe000000001; ++ *((unsigned long*)& 
__m256i_op0[2]) = 0x7fa0001fff808000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffe0ffe000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fa0001fff808000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x007f0000ffffff80; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x007f0000ffffff80; ++ __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xce9035c49ffff570; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[0]) = 0xce9035c49ffff574; ++ __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xce9035c49ffff570; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op1[0]) = 0xce9035c49ffff574; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000454ffff9573; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000004; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000004; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000004; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000004; ++ *((int*)& 
__m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000004; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000004; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000454ffff9573; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000454ffff9573; ++ __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xa4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffe6ffe6e6800001; ++ *((unsigned long*)& __m256d_op1[2]) = 0x19660019ff806680; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffe6ffe6e6800001; ++ *((unsigned long*)& __m256d_op1[0]) = 0x19660019ff806680; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00ff0000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00ff0000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00ff0000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00ff0000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,-14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffff00; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ff8000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffff00; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ff8000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000016; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vaddi_du(__m128i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x007f0000ff807f81; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x007f0000ff807f81; ++ *((unsigned long*)& __m256i_result[3]) = 0x5d5d5d5d5d22a2a2; ++ *((unsigned long*)& __m256i_result[2]) = 0xa2dda2a25d22dd23; ++ *((unsigned long*)& __m256i_result[1]) = 0x5d5d5d5d5d22a2a2; ++ *((unsigned long*)& __m256i_result[0]) = 0xa2dda2a25d22dd23; ++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x007f0000ff807f81; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x007f0000ff807f81; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffff8000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 
0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffff8000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffff8000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffff8000; ++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00c00040; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000008000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00c00040; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000008000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000001000000048; ++ *((unsigned long*)& __m128d_result[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128d_result[0]) = 0xc090380000000000; ++ __m128d_out = __lsx_vflogb_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000048; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffeffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned 
long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc090380000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000200000000d; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vclo_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001d001d001d001d; ++ *((unsigned long*)& __m128i_result[0]) = 0x001d001d001d001d; ++ __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000200000000d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_w(int_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc090380000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc090380000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffbfffc; ++ *((unsigned long*)& __m128i_result[0]) = 0xc090380000000000; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[0]) = 0x1c1c1c1c1c1c1c1c; ++ __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x1c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001d001d001d001d; ++ *((unsigned long*)& __m128i_op0[0]) = 0x001d001d001d0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001d001d001d001d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001d001d001d0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00fffbfffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff01ff1100000048; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vmaxi_h(__m128i_op0,4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_result[2]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_result[1]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_result[0]) = 0x7070545438381c1c; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7070545438381c1c; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffff8000; ++ __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff00007fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff00007fff; ++ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfe03fe01fe01fe01; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfe3bfa3ffe3bfb21; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001d001d001d001d; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001d001d001d0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x001d001d001d001d; ++ *((unsigned long*)& __m128i_result[0]) = 0x001d001d001d0000; ++ __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001200000012; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d(__m128i_op0,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000001; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000001; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000001; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_result[3]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_result[2]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_result[1]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_result[0]) = 0x1a1a1a2c1a1a1a2c; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b(__m256i_op0,2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128i_result[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128i_result[0]) = 0xfe03fe3ffe01fa21; ++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128i_result[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128i_result[0]) = 0xfe03fe3ffe01fa21; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128i_op2[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128d_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128d_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff8000; ++ __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000001200000012; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000001ffff8000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 
0x00000001ffff8000; ++ __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x4); ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x60); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x10); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xfe3bfb01; ++ *((int*)& __m128_op0[2]) = 0xfe3bfe01; ++ *((int*)& __m128_op0[1]) = 0xfe03fe3f; ++ *((int*)& __m128_op0[0]) = 0xfe01fa21; ++ *((int*)& __m128_op1[3]) = 0xfe3bfb01; ++ *((int*)& __m128_op1[2]) = 0xfe3bfe01; ++ *((int*)& __m128_op1[1]) = 0xfe03fe3f; ++ *((int*)& __m128_op1[0]) = 0xfe01fa21; ++ *((int*)& 
__m128_op2[3]) = 0x00000000; ++ *((int*)& __m128_op2[2]) = 0x00000000; ++ *((int*)& __m128_op2[1]) = 0x00000000; ++ *((int*)& __m128_op2[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffffff00; ++ __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fe03fe01; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fe01fe01; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000007020701; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000007010701; ++ __m128i_out = __lsx_vpcnt_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000100; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000100; ++ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000007020701; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000007010701; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8000008680f1ff; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0280000000000000; ++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000100; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0002000200000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0002000200000000; ++ __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000008680f1ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xff80ffffff80ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xff80ffff8680f1ff; ++ __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000008680f1ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0280000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ffffff00000000; ++ __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x1c1c1c1c; ++ *((int*)& __m256_op0[6]) = 0x1c1c1c1c; ++ *((int*)& __m256_op0[5]) = 0xfffffffe; ++ 
*((int*)& __m256_op0[4]) = 0xffffff00; ++ *((int*)& __m256_op0[3]) = 0x1c1c1c1c; ++ *((int*)& __m256_op0[2]) = 0x1c1c1c1c; ++ *((int*)& __m256_op0[1]) = 0xfffffffe; ++ *((int*)& __m256_op0[0]) = 0xffffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffffff00; ++ __m256i_out = __lasx_xvfrintrp_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffc0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffc0; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffeffffff00; ++ *((unsigned long*)& __m256i_result[3]) = 0x3838383838383838; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffdfffffe00; ++ *((unsigned long*)& __m256i_result[1]) = 0x3838383838383838; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffdfffffe00; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a0000000a000000; ++ __m128i_out = __lsx_vldi(-3318); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffff000000; ++ __m256i_out = __lasx_xvslli_d(__m256i_op0,0x18); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0xffffff80ffff7e02; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0280000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffff81ffff7f03; ++ *((unsigned long*)& __m128i_result[0]) = 0x04ffff8101ff81ff; ++ __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,-13); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff81ffff7f03; ++ *((unsigned long*)& __m128i_op0[0]) = 0x04ffff8101ff81ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0a0000001e000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a000000f6000000; ++ __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0002000200000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0002000200000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000020002000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000020002000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x3838383838383838; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffdfffffe00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x3838383838383838; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffdfffffe00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xff80ff807e017f01; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f3b7f3f7f3b7f21; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000001e000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a000000f6000000; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0980ff8174017f01; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xff80ff80; ++ *((int*)& __m128_op0[2]) = 0x7e017f01; ++ *((int*)& __m128_op0[1]) = 0x7f3b7f3f; ++ *((int*)& __m128_op0[0]) = 0x7f3b7f21; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrz_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000020002000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000020002000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffc0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffc0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff0fc00; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff0fc00; ++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fc00; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fc00; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000f880f87e; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000f880f87e; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256d_op2[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffff000000; ++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffff80ffff7e02; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf931fd04f832fe02; ++ *((unsigned long*)& __m128i_result[1]) = 0x80007fc000003f00; ++ *((unsigned long*)& __m128i_result[0]) = 0x7d187e427c993f80; ++ __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0a000000; ++ *((int*)& __m128_op0[2]) = 0x0a000000; ++ *((int*)& __m128_op0[1]) = 0x0a000000; ++ *((int*)& __m128_op0[0]) = 0x0a000000; ++ *((int*)& __m128_result[3]) = 0x75000000; ++ *((int*)& __m128_result[2]) = 0x75000000; ++ *((int*)& __m128_result[1]) = 0x75000000; ++ *((int*)& __m128_result[0]) = 0x75000000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000f880f87e; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000f880f87e; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000017f7f7f7f; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000017f7f7f7f; ++ __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff8000; ++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000004000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000004000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0280000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7500000075000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7500000075000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3bc000003a800000; ++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000017f7f7f7f; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000017f7f7f7f; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000017fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000017fff; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x80007fc000003f00; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7d187e427c993f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7500000075000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7500000075000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007d1800007c99; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fff000000017fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fff000000017fff; ++ __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000017fff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000017fff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007d1800007c99; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000001e000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a621b3ebe5e1c02; ++ *((unsigned long*)& __m128i_result[1]) = 0x04ffc0000f000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x05314c2bdf2f4c4e; ++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000017f7f7f7f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000017f7f7f7f; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010080; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7500000075007500; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff800000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007d1800007c99; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000007500; ++ *((unsigned long*)& __m128i_result[0]) = 0x00007e1600007d98; ++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000fe00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe00fe; ++ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000007500; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007e1600007d98; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000fe00fe; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000fe75fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe7efe00fe7dfe; ++ __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010080; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op1[1]) = 
0x7f7f00007f7f7500; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3b42017f3a7f7f01; ++ *((unsigned long*)& __m128i_result[1]) = 0x04faf60009f5f092; ++ *((unsigned long*)& __m128i_result[0]) = 0x04fafa9200000000; ++ __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x04faf60009f5f092; ++ *((unsigned long*)& __m128i_op1[0]) = 0x04fafa9200000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfc06066e00000000; ++ __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x04faf60009f5f092; ++ *((unsigned long*)& __m128i_op0[0]) = 0x04fafa9200000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000004fa000009f5; ++ *((unsigned long*)& __m128i_result[0]) = 0x000004f3fffffff9; ++ __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4480000044800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x45c0000044800000; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000007500; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00007e1600007d98; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000090900000998; ++ __m128i_out = __lsx_vmini_b(__m128i_op0,9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff800000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffc0000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffc0000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffbfffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000800000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffbfffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000800000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0102020202010202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0102020202010202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xa9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000fe00fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000007500; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00007e1600007d98; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe7fffffff; ++ __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4480000044800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x45c0000044800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x4481000144810001; ++ *((unsigned long*)& __m128i_result[0]) = 0x45c04000c4808000; ++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3fffffff3fffc000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3fffffff3fffc000; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x04faf60009f5f092; ++ *((unsigned long*)& __m128i_op0[0]) = 0x04fafa9200000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x04faf600fff5f092; ++ *((unsigned long*)& __m128i_result[0]) = 0x04fafa9200000000; ++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000100010; ++ __m256i_out = __lasx_xvclo_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000090900000998; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x00007a8000000480; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000485000004cc; ++ __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100010; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000010000f; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000010000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000090900000998; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff00ffffff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h(__m128i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x3a8100013a810001; ++ *((unsigned long*)& __m128i_result[0]) = 0x7bc04000ba808000; ++ __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00007a8000000480; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000485000004cc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000090a00000998; ++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff3a81ffff89fd; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffb3c3ffff51ba; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0802080408060803; ++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000090a00000998; ++ *((unsigned long*)& __m128i_result[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000ef0000000003b; ++ __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff00ffffff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000090900000998; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000900ffff98; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff760386bdae46; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc1fc7941bc7e00ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0802080408060803; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff000086bd; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ca000000c481; ++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x1d4000001d400000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1e5f007f5d400000; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xffffffff; ++ *((int*)& __m256_op0[6]) = 0xffffffff; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0xffffffff; ++ *((int*)& __m256_op0[2]) = 0xffffffff; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00100010; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00100010; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00100010; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00100010; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff000086bd; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ca000000c481; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff00ffffff00ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00ff000900ffff98; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long*)& 
__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0xf); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000000900; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000090900000998; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x20); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000003fffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000003fffff; ++ __m256i_out = __lasx_xvsat_h(__m256i_op0,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff760386bdae46; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc1fc7941bc7e00ff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff7603; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xc3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffff7603; ++ *((unsigned long*)& __m128d_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x45000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x44000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x3cb504f3; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x3d3504f3; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4500000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4400000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff010100000001; ++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffff80; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffff80; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000ef0000000003b; ++ __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0802080408060803; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x00001fffe0001fff; ++ __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; ++ *((int*)& __m128_result[3]) = 0x577fff00; ++ *((int*)& __m128_result[2]) = 0x577fff00; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x596f0000; ++ __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffff0101; ++ *((int*)& __m256_op0[4]) = 0x00000001; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffff0101; ++ *((int*)& __m256_op0[0]) = 0x00000001; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0xffff0101; ++ *((int*)& __m256_result[4]) = 
0x00000001; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0xffff0101; ++ *((int*)& __m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x3fffffff3fffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x3fffffff3fffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000810001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000810001; ++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000440efffff000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000440efffff000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffff0101; ++ *((int*)& __m256_op0[4]) = 0x00000001; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffff0101; ++ *((int*)& __m256_op0[0]) = 0x00000001; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 
0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000440efffff000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0x440ef000440ef000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x4400000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000440efffff000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000003b; ++ __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x39); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffff0101; ++ *((int*)& __m256_op1[4]) = 0x00000001; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffff0101; ++ *((int*)& __m256_op1[0]) = 0x00000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x440ef000440ef000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4400000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0f8d33000f8d3300; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0003b80000000000; ++ __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff2356fe165486; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w(__m128i_op0,5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseqi_h(__m128i_op0,0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0f8d33000f8d3300; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003b80000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0f8d33000f8d32fd; ++ *((unsigned long*)& __m128i_result[0]) = 0x0003b7fffffffffd; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000007fff9; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff2356fe165486; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000235600005486; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000b31600006544; ++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vneg_h(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff2356fe165486; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff2356fe165486; ++ __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffff010100000001; ++ *((unsigned long*)& 
__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffff010100000001; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000235600005486; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000b31600006544; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff2356fe165486; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000003b0000ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff2356fe165486; ++ __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x70); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x50); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff0fffffff0; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff0fffffff0; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff0fffffff0; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff0fffffff0; ++ __m256i_out = __lasx_xvmini_w(__m256i_op0,-16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefdfdfdfd; ++ *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefdfdfdfd; ++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrari_d(__m256i_op0,0x26); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_w(__m256i_op0,-1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00003a7fc58074ff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000eeff1100e; ++ __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff2356fe165486; ++ *((unsigned long*)& __m128i_op0[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000007; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000007; ++ __m128i_out = __lsx_vmini_du(__m128i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3a8000003a800000; ++ __m128i_out = __lsx_vexth_q_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfffffff0; ++ *((int*)& __m256_op0[6]) = 0xfffffff0; ++ *((int*)& __m256_op0[5]) = 0xfffffff0; ++ *((int*)& __m256_op0[4]) = 0xfffffff0; ++ *((int*)& __m256_op0[3]) = 0xfffffff0; ++ *((int*)& __m256_op0[2]) = 0xfffffff0; ++ *((int*)& __m256_op0[1]) = 0xfffffff0; ++ *((int*)& __m256_op0[0]) = 0xfffffff0; ++ *((unsigned long*)& __m256d_result[3]) = 0xfffffffe00000000; ++ *((unsigned long*)& __m256d_result[2]) = 0xfffffffe00000000; ++ *((unsigned long*)& __m256d_result[1]) = 0xfffffffe00000000; ++ *((unsigned long*)& __m256d_result[0]) = 0xfffffffe00000000; ++ __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefdfdfdfd; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefdfdfdfd; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0101010202020203; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101010201010102; ++ *((unsigned long*)& __m256i_result[1]) = 0x0101010202020203; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010201010102; ++ __m256i_out = __lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x3a800000; ++ *((int*)& __m128_op0[2]) = 0x3a800000; ++ *((int*)& __m128_op0[1]) = 0x000ef000; ++ *((int*)& __m128_op0[0]) = 0x0000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = 
__lsx_vftintrp_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0xffffffff; ++ *((int*)& __m256_op0[4]) = 0xffffffff; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0xffffffff; ++ *((int*)& __m256_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000feff2356; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fd165486; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000007; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000246d9755; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000002427c2ee; ++ __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000056; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff86; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000eefff; ++ *((unsigned long*)& __m128i_result[0]) = 0xf8e1a03affffe3e2; ++ __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000eefff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000efffefff; ++ *((unsigned long*)& __m128i_result[0]) = 0xa03aa03ae3e2e3e2; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x3a8000003a800000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00ffffff00ffffff;
++ __m128i_out = __lsx_vslei_b(__m128i_op0,15);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000;
++ unsigned_int_result = 0x00000000000000ff;
++ unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xc);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000056000056;
++ *((unsigned long*)& __m128i_op0[0]) = 0x3a8000003a800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffefff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xa03aa03ae3e2e3e2;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x75);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffff0fffffff0;
++ __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010202020203;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010201010102;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010202020203;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010201010102;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffff0fffffff0;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0101010202020203;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0101010201010102;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0101010202020203;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0101010201010102;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000003e0000003f;
++ __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1a);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010202020203;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010201010102;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010202020203;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010201010102;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0xffffffff;
++ *((int*)& __m128_op0[2]) = 0xffffffff;
++ *((int*)& __m128_op0[1]) = 0x00ffffff;
++ *((int*)& __m128_op0[0]) = 0x00ffffff;
++ *((int*)& __m128_op1[3]) = 0x0000feff;
++ *((int*)& __m128_op1[2]) = 0x23560000;
++ *((int*)& __m128_op1[1]) = 0x0000fd16;
++ *((int*)& __m128_op1[0]) = 0x54860000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2;
++ *((unsigned long*)& __m128i_op1[1]) = 0x3a80613fda5dcb4a;
++ *((unsigned long*)& __m128i_op1[0]) = 0x93f0b81a914c003b;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000051649b6;
++ *((unsigned long*)& __m128i_result[0]) = 0xd2f005e44bb43416;
++ __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000051649b6;
++ *((unsigned long*)& __m128i_op0[0]) = 0xd2f005e44bb43416;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000003e0000003f;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000051649b6;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000003e0000003f;
++ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000760151;
++ *((unsigned long*)& __m128i_op0[0]) = 0x003e0021009a009a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000003e2427c2ee;
++ *((unsigned long*)& __m128i_result[1]) = 0x00001e5410082727;
++ *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00107f7f;
++ __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000051649b6;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000003e0000003f;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x41945926d8000000;
++ __m128d_out = __lsx_vffinth_d_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41945926d8000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00001e5410082727;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00107f7f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001001001000080;
++ *((unsigned long*)& __m128i_result[0]) = 0x4195d926d8018000;
++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000f0f0f0f0f0f0;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000f0f0f0f0f0f0;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01f010;
++ *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01f010;
++ *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01f010;
++ *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01f010;
++ *((unsigned long*)& __m256i_result[3]) = 0x000078780000f0f1;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000078780000f0f1;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x3a80613fda5dcb4a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x93f0b81a914c003b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fd1654860000;
++ *((unsigned long*)& __m128i_result[1]) = 0x1e242e4d68dc0000;
++ *((unsigned long*)& __m128i_result[0]) = 0x2ff8fddb7ae20000;
++ __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee;
++ *((unsigned long*)& __m128i_result[1]) = 0xf8e10000a03a0000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffff2427e3e2c2ee;
++ __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363abdf16;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e08016161198;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee;
++ *((unsigned long*)& __m128i_result[1]) = 0x636363633f3e47c1;
++ *((unsigned long*)& __m128i_result[0]) = 0x41f8e080f1ef4eaa;
++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363abdf16;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198;
++ *((unsigned long*)& __m128i_result[1]) = 0x9c9d9b9bbfaa20e9;
++ *((unsigned long*)& __m128i_result[0]) = 0xbe081c963e6fee68;
++ __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0fffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrli_d(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0fffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaxi_b(__m128i_op0,-16);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0001001001000080;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4195d926d8018000;
++ *((int*)& __m128_result[3]) = 0x33800000;
++ *((int*)& __m128_result[2]) = 0x35800000;
++ *((int*)& __m128_result[1]) = 0x37800000;
++ *((int*)& __m128_result[0]) = 0x37000000;
++ __m128_out = __lsx_vfcvth_s_h(__m128i_op0);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff;
++ __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0fffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41957fff7fff7fff;
++ *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff;
++ *((unsigned long*)& __m128i_result[0]) = 0xbf6b810181018101;
++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363abdf16;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000;
++ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x63636363;
++ *((int*)& __m128_op0[2]) = 0x63abdf16;
++ *((int*)& __m128_op0[1]) = 0x41f8e080;
++ *((int*)& __m128_op0[0]) = 0x16161198;
++ *((unsigned long*)& __m128i_result[1]) = 0x6363636363abdf16;
++ *((unsigned long*)& __m128i_result[0]) = 0x420000003f800000;
++ __m128i_out = __lsx_vfrintrp_s(__m128_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000080801030000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000080103040000;
++ __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x6c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000001e001e001e0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000001e001e001e0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff;
++ __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_du(__m128i_op0,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000001e001e001e0;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000001e001e001e0;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9c9d9b9bbfaa20e9;
++ *((unsigned long*)& __m128i_op0[0]) = 0xbe081c963e6fee68;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000feff23560000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fd1654860000;
++ *((unsigned long*)& __m128i_result[1]) = 0x6363636463abdf17;
++ *((unsigned long*)& __m128i_result[0]) = 0x41f8e08016161198;
++ __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x7fff7fff;
++ *((int*)& __m128_op0[2]) = 0x7fff7fff;
++ *((int*)& __m128_op0[1]) = 0xbf6b8101;
++ *((int*)& __m128_op0[0]) = 0x81018101;
++ *((int*)& __m128_op1[3]) = 0xe3636363;
++ *((int*)& __m128_op1[2]) = 0x63abdf16;
++ *((int*)& __m128_op1[1]) = 0x41f8e080;
++ *((int*)& __m128_op1[0]) = 0x16161198;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m256_op0[7]) = 0x01010101;
++ *((int*)& __m256_op0[6]) = 0x01010101;
++ *((int*)& __m256_op0[5]) = 0x01010101;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x01010101;
++ *((int*)& __m256_op0[2]) = 0x01010101;
++ *((int*)& __m256_op0[1]) = 0x01010101;
++ *((int*)& __m256_op0[0]) = 0x00000001;
++ *((int*)& __m256_op1[7]) = 0x000001e0;
++ *((int*)& __m256_op1[6]) = 0x01e001e0;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x000001e0;
++ *((int*)& __m256_op1[2]) = 0x01e001e0;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636463abdf17;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e08016161198;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x6363636463abdf17;
++ *((unsigned long*)& __m128i_result[0]) = 0x41f8e08016161198;
++ __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x01010101;
++ *((int*)& __m256_op0[6]) = 0x01010101;
++ *((int*)& __m256_op0[5]) = 0x01010101;
++ *((int*)& __m256_op0[4]) = 0x00000001;
++ *((int*)& __m256_op0[3]) = 0x01010101;
++ *((int*)& __m256_op0[2]) = 0x01010101;
++ *((int*)& __m256_op0[1]) = 0x01010101;
++ *((int*)& __m256_op0[0]) = 0x00000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfrintrne_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x5847b72626ce61ef;
++ *((unsigned long*)& __m128d_op0[0]) = 0x110053f401e7cced;
++ *((unsigned long*)& __m128i_result[1]) = 0x5847b72626ce61ef;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfrintrz_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010100000001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010100000001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000010100000101;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000807bf0a1f80;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000800ecedee68;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5847b72626ce61ef;
++ *((unsigned long*)& __m128i_op1[0]) = 0x110053f401e7cced;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x5847bf2de5d8816f;
++ __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000010100000101;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000010100000101;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000010100000101;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0x000000000000001e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000001e00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmini_d(__m256i_op0,12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x636363633f3e47c1;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e080f1ef4eaa;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000807bf0a1f80;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000800ecedee68;
++ *((unsigned long*)& __m128i_result[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_result[0]) = 0x41f8e880ffffffff;
++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000010100000101;
++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x5847b72626ce61ef;
++ *((unsigned long*)& __m128i_op0[0]) = 0x110053f401e7cced;
++ *((unsigned long*)& __m128i_op1[1]) = 0x5847b72626ce61ef;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0005847b00011005;
++ *((unsigned long*)& __m128i_result[0]) = 0x0005847b00000000;
++ __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x2c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0005847b00011005;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0005847b00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000807bf0a1f80;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000800ecedee68;
++ *((unsigned long*)& __m128i_result[1]) = 0x0005840100000005;
++ *((unsigned long*)& __m128i_result[0]) = 0x0005847b00000000;
++ __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_result[0]) = 0x41f8e880ffffffff;
++ __m128i_out = __lsx_vmaxi_d(__m128i_op0,-2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000027;
++ __m128i_out = __lsx_vmskltz_h(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x57f160c4a1750eda;
++ *((unsigned long*)& __m128i_result[1]) = 0x000002bf8b062000;
++ *((unsigned long*)& __m128i_result[0]) = 0xfffffd0ba876d000;
++ __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000807bf0a1f80;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000800ecedee68;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0005840100000005;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0005847b00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001f0a20001cedf;
++ *((unsigned long*)& __m128i_result[0]) = 0x0058000000580000;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_h_b(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0101010101010110;
++ *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[1]) = 0x0101010101010110;
++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101;
++ __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000002bf8b062000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffd0ba876d000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e880ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ff110db0;
++ *((unsigned long*)& __m128i_result[0]) = 0x41f7be08ffff578a;
++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000002bf8b062000;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffd0ba876d000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xe363636363abdf16;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0005840100000005;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0005847b00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0004e8f09e99b528;
++ *((unsigned long*)& __m128i_result[0]) = 0xcf1225129ad22b6e;
++ __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xe363636363abdf16;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000cecd00004657;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000c90000011197;
++ __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x63636b6a;
++ *((int*)& __m128_op0[2]) = 0xfe486741;
++ *((int*)& __m128_op0[1]) = 0x41f8e880;
++ *((int*)& __m128_op0[0]) = 0xffffffff;
++ *((int*)& __m128_op1[3]) = 0xe3636363;
++ *((int*)& __m128_op1[2]) = 0x63abdf16;
++ *((int*)& __m128_op1[1]) = 0x41f8e080;
++ *((int*)& __m128_op1[0]) = 0x16161198;
++ *((int*)& __m128_op2[3]) = 0x00c27580;
++ *((int*)& __m128_op2[2]) = 0x00bccf42;
++ *((int*)& __m128_op2[1]) = 0x00a975be;
++ *((int*)& __m128_op2[0]) = 0x00accf03;
++ *((int*)& __m128_result[3]) = 0xff800000;
++ *((int*)& __m128_result[2]) = 0xff800000;
++ *((int*)& __m128_result[1]) = 0x4471fb84;
++ *((int*)& __m128_result[0]) = 0xffffffff;
++ __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslti_d(__m128i_op0,-13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010110;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010110;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaxi_b(__m256i_op0,0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0005840100000005;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0005847b00000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x636363633f3e47c1;
++ *((unsigned long*)& __m128i_op1[0]) = 0x41f8e080f1ef4eaa;
++ *((unsigned long*)& __m128i_result[1]) = 0xa000308000008002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0500847b00000000;
++ __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xa000308000008002;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0500847b00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vslti_w(__m128i_op0,7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op2[3]) = 0x0101010101010110;
++ *((unsigned long*)& __m256i_op2[2]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_op2[1]) = 0x0101010101010110;
++ *((unsigned long*)& __m256i_op2[0]) = 0x0101010101010101;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001e00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000f00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x63636363;
++ *((int*)& __m128_op0[2]) = 0x3f3e47c1;
++ *((int*)& __m128_op0[1]) = 0x41f8e080;
++ *((int*)& __m128_op0[0]) = 0xf1ef4eaa;
++ *((int*)& __m128_op1[3]) = 0x0000cecd;
++ *((int*)& __m128_op1[2]) = 0x00004657;
++ *((int*)& __m128_op1[1]) = 0x0000c900;
++ *((int*)& __m128_op1[0]) = 0x00011197;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0x00000000ffffffff;
++ __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000001e00000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0002000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x4);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x0000000c;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x0000000c;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xa000308000008002;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0500847b00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff;
++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128i_result[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_result[0]) = 0x00a975be00accf03;
++ __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffbfffffffb;
++ __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffa7;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128i_op2[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_op2[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000930400008a10;
++ *((unsigned long*)& __m128i_result[0]) = 0x00006f9100007337;
++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000930400008a10;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00006f9100007337;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128i_result[1]) = 0x00250023001c001d;
++ *((unsigned long*)& __m128i_result[0]) = 0x309d2f342a5d2b34;
++ __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xa);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00c2758000bccf42;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00a975be00accf03;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00250023001c001d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x309d2f342a5d2b34;
++ *((unsigned long*)& __m128i_result[1]) = 0x00060eb000000006;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000075c00000cf0;
++ __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x0000cecd;
++ *((int*)& __m128_op1[2]) = 0x00004657;
++ *((int*)& __m128_op1[1]) = 0x0000c900;
++ *((int*)& __m128_op1[0]) = 0x00011197;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffefffffffeff;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffbfffffffb;
++ __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00060eb000000006;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000075c00000cf0;
++ *((unsigned long*)& __m128i_result[1]) = 0xfffaf1500000fffa;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000f8a40000f310;
++ __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_q_d(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000cecd00004657;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000c90000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00019d9a00008cae;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
++ __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00250023001c001d;
++ *((unsigned long*)& __m128i_op0[0]) = 0x309d2f342a5d2b34;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vslei_du(__m128i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000f00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000700000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsat_wu(__m128i_op0,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000f00000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000f00000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ unsigned_long_int_result = 0x0000000000000000;
++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000;
++ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xfffaf1500000fffa;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a40000f310;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000003e2;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x26);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xe4e4e4e4e4e4e4e4;
++ *((unsigned long*)& __m256i_result[2]) = 0xe4e4e4e4e4e4e4e4;
++ *((unsigned long*)& __m256i_result[1]) = 0xe4e4e4e4e4e4e4e4;
++ *((unsigned long*)& __m256i_result[0]) = 0xe4e4e4e4e4e4e4e4;
++ __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x1c);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000cecd00004657;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000c90000011197;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000200000800000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100800000;
++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000002000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000002000000000;
++ __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x25);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000006;
++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
++ *((unsigned
long*)& __m256i_op0[1]) = 0x0000000000000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0008000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0008000000000000; ++ __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w(__m256i_op0,9); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfffffffb; ++ *((int*)& __m256_op0[6]) = 0xfffffffb; ++ *((int*)& __m256_op0[5]) = 0xfffffffb; ++ *((int*)& __m256_op0[4]) = 0xfffffffb; ++ *((int*)& __m256_op0[3]) = 0xfffffffb; ++ *((int*)& __m256_op0[2]) = 0xfffffffb; ++ *((int*)& __m256_op0[1]) = 0xfffffffb; ++ *((int*)& __m256_op0[0]) = 0xfffffffb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvfclass_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000003e2; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000003ffe2; ++ __m128i_out = __lsx_vexth_h_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfffffffb; ++ *((int*)& __m256_op0[6]) = 0xfffffffb; ++ *((int*)& __m256_op0[5]) = 0xfffffffb; ++ *((int*)& __m256_op0[4]) = 0xfffffffb; ++ *((int*)& __m256_op0[3]) = 0xfffffffb; ++ *((int*)& __m256_op0[2]) = 0xfffffffb; ++ *((int*)& __m256_op0[1]) = 0xfffffffb; ++ *((int*)& 
__m256_op0[0]) = 0xfffffffb; ++ *((int*)& __m256_op1[7]) = 0x0000ffff; ++ *((int*)& __m256_op1[6]) = 0x0001000e; ++ *((int*)& __m256_op1[5]) = 0x0000ffff; ++ *((int*)& __m256_op1[4]) = 0x0000ffff; ++ *((int*)& __m256_op1[3]) = 0x0000ffff; ++ *((int*)& __m256_op1[2]) = 0x0000ffff; ++ *((int*)& __m256_op1[1]) = 0x0000ffff; ++ *((int*)& __m256_op1[0]) = 0x0000ffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xc7); ++ ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((int*)& __m256_op2[7]) = 0x001fffff; ++ *((int*)& __m256_op2[6]) = 0xffffffff; ++ *((int*)& __m256_op2[5]) = 0xffffffff; ++ *((int*)& __m256_op2[4]) = 0xffffffff; ++ *((int*)& __m256_op2[3]) = 0x001fffff; ++ *((int*)& __m256_op2[2]) = 0xffffffff; ++ *((int*)& __m256_op2[1]) = 0xffffffff; ++ *((int*)& __m256_op2[0]) = 0xffffffff; ++ *((int*)& __m256_result[7]) = 0x001fffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0x001fffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000003e2; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00050eb00000fffa; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000f8a50000f310; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000001; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0xffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000f1384; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000004ff; ++ *((unsigned long*)& __m128i_result[0]) = 
0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000001000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000004ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x003fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x0667ae56; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000020; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffd; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000700020004; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000700020004; ++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x7); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fffe0002; ++ __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000667ae56; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000000004ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000667ae56; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0020000000000001; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0020000000000001; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsrai_h(__m128i_op0,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0); ++ 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000700020004; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000700020004; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000003; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000070002000a; ++ __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000060002000a; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000060002000a; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0040000000000003; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0040000000000003; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long*)& __m256i_result[3]) = 0xffbffffffffffffe; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m256i_result[1]) = 0xffbffffffffffffe; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffa; ++ __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fffe0001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0001fffe; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; ++ __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff00ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff00ff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffd; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000070002000a; ++ __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 
0xffbffffffffffffe; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffbffffffffffffe; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffff00ff; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((int*)& __m128_op1[3]) = 0xffffffff; ++ *((int*)& __m128_op1[2]) = 0xffff0000; ++ *((int*)& __m128_op1[1]) = 0xffffffff; ++ *((int*)& __m128_op1[0]) = 0x0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000700000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x60); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff00ff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000000a; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000a; ++ __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x7f800000; ++ *((int*)& __m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001fffe00014b41; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001ffde; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0002ffffb4bf; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0002ffff0022; ++ __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvpackod_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d(__m256i_op0,0x32); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000700000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000700000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000700000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000007; ++ *((unsigned long*)& __m256d_op0[2]) = 0x000000020000000b; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000007; ++ *((unsigned long*)& __m256d_op0[0]) = 0x000000020000000a; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x000000000000000a; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x000000000000000a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000032; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000032; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffce; ++ __m256i_out = __lasx_xvneg_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001fffe00014b41; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001ffde; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000100020002; ++ __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f8100017f810001; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8100017f810001; ++ __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7f8100017f810001; ++ *((unsigned long*)& __m128d_op0[0]) = 0x7f8100017f810001; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000700000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000700000000; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0080000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0080000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0080000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0080000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000a; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000a; ++ *((unsigned long*)& __m256i_result[3]) = 0x0040000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_result[1]) = 0x0040000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f8000017f800001; ++ *((unsigned long*)& __m128i_result[0]) = 0x7f8000017f800001; ++ __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 
0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x80000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x80000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x80000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x80000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0040000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0040000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000a000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000a000; ++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00020001; ++ *((int*)& __m128_op0[0]) = 0x00020002; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x7f800000; ++ *((int*)& __m128_result[1]) = 0x607fffc0; ++ *((int*)& __m128_result[0]) = 0x607fff80; ++ __m128_out = __lsx_vfrsqrt_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000017f800001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f8000017f800001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000007f800001; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000007f800001; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000100020002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000100020002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000100020002; ++ __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d(__m128i_op0,0x21); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f7f00007f7f0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f80807f7f8080; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffe0000fffe; ++ *((unsigned long*)& __m128i_op2[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0x00000000; ++ *((int*)& __m256_op1[6]) = 0x00000000; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000000; ++ *((int*)& __m256_op1[2]) = 0x00000000; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((int*)& __m128_result[3]) = 0x7f800000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_result[0]) = 0x6363636463636363; ++ __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256d_result[3]) = 0x606a20bd700e59a3; ++ *((unsigned long*)& __m256d_result[2]) = 0x6066a09e66c5f1bb; ++ *((unsigned long*)& __m256d_result[1]) = 0x606a20bd700e59a3; ++ *((unsigned long*)& __m256d_result[0]) = 0x6066a09e66c5f1bb; ++ __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffce; ++ *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x6b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000e2e36363; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000063636363; ++ __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x317fce80317fce80; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000500020002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000700020033; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000500020002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000700020033; ++ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000500020002; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000700020033; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000500020002; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000700020033; ++ *((unsigned long*)& __m256i_result[3]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1400080008000000; ++ __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x26); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vmsknz_b(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xa2e3a36363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0xa2e3a36463636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000a2e300006363; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000a2e300006363; ++ __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x1400080008000000; ++ __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xf0000000f0000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h(__m128i_op0,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000fffe0000fffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f80000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000400000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000007f80; ++ __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7f80000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7f80000000000007; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000700000007; ++ __m128i_out = __lsx_vmaxi_w(__m128i_op0,7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x317fce80317fce80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long*)& __m256i_result[3]) = 0x0807f7f80807f7f8; ++ *((unsigned long*)& __m256i_result[2]) = 0x0807f7f80807f7f8; ++ *((unsigned long*)& __m256i_result[1]) = 0x0807f7f80807f7f8; ++ *((unsigned long*)& __m256i_result[0]) = 0x0807f7f80807f7f8; ++ __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x1400080008000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x1400080008000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x1400080008000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x1400080008000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000501ffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000701ffffce; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000501ffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000701ffffce; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000080000000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vaddi_du(__m128i_op0,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xa2e3a36363636363; ++ *((unsigned long*)& __m128i_op0[0]) = 0xa2e3a36463636363; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7f80000000000007; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000700000007; ++ 
*((unsigned long*)& __m128i_result[1]) = 0x000000000e32c50e; ++ *((unsigned long*)& __m128i_result[0]) = 0xf2b2ce330e32c50e; ++ __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1400080008000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x7f800000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 
0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000008; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe8440000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe8440000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0xffffffffe8440000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0xffffffffe8440000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffe8440000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffe8440000; ++ __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ 
++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000014; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000014; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000014; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000014; ++ __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; ++ 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvfclass_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffe0001fffe0001; ++ __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0xffffffff; ++ *((int*)& __m128_op0[0]) = 0xffffffff; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xf5fffc00; ++ *((int*)& __m256_op0[6]) = 0xfc000000; ++ *((int*)& __m256_op0[5]) = 0xf5fffc00; ++ *((int*)& __m256_op0[4]) = 0xfc000000; ++ *((int*)& __m256_op0[3]) = 0xf5fffc00; ++ *((int*)& __m256_op0[2]) = 0xfc000000; ++ *((int*)& __m256_op0[1]) = 0xf5fffc00; ++ *((int*)& __m256_op0[0]) = 0xfc000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xf5fffc00fc000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xf5fffc00fc000000; ++ 
*((unsigned long*)& __m256i_result[1]) = 0xf5fffc00fc000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xf5fffc00fc000000; ++ __m256i_out = __lasx_xvfrintrz_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256d_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x4f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe00; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe00; ++ __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x4f4f4f4f; ++ *((int*)& __m128_op0[2]) = 0x4f4f4f4f; ++ *((int*)& __m128_op0[1]) = 0x4f4f4f4f; ++ *((int*)& __m128_op0[0]) = 0x4f4f4f4f; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000cf4f4f00; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000cf4f4f00; ++ __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000cf4f4f00; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000cf4f4f00; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf5fffc00fc000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf5fffc00fc000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf5fffc00fc000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf5fffc00fc000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu(__m128i_op0,0x1b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0x00000000;
++ *((int*)& __m256_op0[6]) = 0x00000000;
++ *((int*)& __m256_op0[5]) = 0x00000000;
++ *((int*)& __m256_op0[4]) = 0x00000000;
++ *((int*)& __m256_op0[3]) = 0x00000000;
++ *((int*)& __m256_op0[2]) = 0x00000000;
++ *((int*)& __m256_op0[1]) = 0x00000000;
++ *((int*)& __m256_op0[0]) = 0x00000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffff80ffffff80ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000018080807f;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000001ffff80fe;
++ __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_vext2xv_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ int_op1 = 0xffffffffffff8a35;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ unsigned_long_int_result = 0x0000000000000000;
++ unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffed;
++ __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x13);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vneg_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0xf);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffce;
++ __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op1[3]) = 0xf5fffc00fc000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xf5fffc00fc000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xf5fffc00fc000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xf5fffc00fc000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++ __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op2[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op2[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op2[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op2[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000020000010201;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000020000010201;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000020000010201;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000020000010201;
++ __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffed;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffed;
++ __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m128_op0[3]) = 0x00000000;
++ *((int*)& __m128_op0[2]) = 0x00000000;
++ *((int*)& __m128_op0[1]) = 0x00000000;
++ *((int*)& __m128_op0[0]) = 0x00000000;
++ *((int*)& __m128_op1[3]) = 0x00000000;
++ *((int*)& __m128_op1[2]) = 0x00000000;
++ *((int*)& __m128_op1[1]) = 0x00000000;
++ *((int*)& __m128_op1[0]) = 0x00000000;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0x00000000;
++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffe7ffffffe7;
++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x19);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f00004f4f0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f00004f4f0000;
++ unsigned_int_result = 0x000000004f4f0000;
++ unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x0);
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++ __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x9c83e21a22001818;
++ *((unsigned long*)& __m128i_op1[0]) = 0xdd3b8b02563b2d7b;
++ *((unsigned long*)& __m128i_op2[1]) = 0x000000009c83e21a;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000022001818;
++ *((unsigned long*)& __m128i_result[1]) = 0xf2c97aaa7d8fa270;
++ *((unsigned long*)& __m128i_result[0]) = 0x0b73e427f7cfcb88;
++ __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f0000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f007f7f7f00;
++ __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x9c83e21a22001818;
++ *((unsigned long*)& __m128d_op0[0]) = 0xdd3b8b02563b2d7b;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x7f7f7f007f7f7f00;
++ *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op2[0]) = 0x7f7f7f007f7f7f00;
++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000;
++ __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000001c;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001de;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000000001c;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001de;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000060000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000060000000;
++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x44);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xf5fffc00fc000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xf5fffc00fc000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0001001900010019;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0a02041904010019;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0001001900010019;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0a02041904010019;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007b007e;
++ __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffed;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffeffed;
++ __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x10);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffe700000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffe7007b007e;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffe700000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffe7007b007e;
++ __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256d_result[3]) = 0xc039000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0xc039000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0xc039000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0xc039000000000000;
++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000009c83e21a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000022001818;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000;
++ __m128i_out = __lsx_vslti_hu(__m128i_op0,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000400000004000;
++ __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffeffed;
++ *((unsigned long*)& __m256i_op2[3]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op2[2]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op2[1]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op2[0]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_result[2]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_result[1]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_result[0]) = 0xbf3ffffffffeffed;
++ __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_result[2]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_result[1]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_result[0]) = 0xc03b000200020002;
++ __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7f7f7f007f7f7f00;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf2c97aaa7d8fa270;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0b73e427f7cfcb88;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffb1fb1000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf2c97aaa7d8fa270;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0b73e427f7cfcb88;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x3f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x9c83e21a22001818;
++ *((unsigned long*)& __m128i_op0[0]) = 0xdd3b8b02563b2d7b;
++ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_result[0]) = 0x00012c8a0000a58a;
++ __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000007b00f9007e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000007b00f9007e;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000007b00f9007e;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000007b00f9007e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x000000f601f200fc;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x000000f601f200fc;
++ __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7;
++ *((unsigned long*)& __m256i_op1[3]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_op1[2]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_op1[1]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_op1[0]) = 0xbf3ffffffffeffed;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000009c83e21a;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000022001818;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000e21a00001818;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000;
++ __m256d_out = __lasx_xvffinth_d_w(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000400000004000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x0000400000004000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100;
++ __m256i_out = __lasx_xvfclass_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x000000009c83e21a;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000022001818;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftint_lu_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf2c97aaa7d8fa270;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0b73e427f7cfcb88;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_result[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128i_result[0]) = 0x27b1b106b8145f50;
++ __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128i_op1[0]) = 0x27b1b106b8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0x0a545374471b7070;
++ *((unsigned long*)& __m128i_result[0]) = 0x274f4f0648145f50;
++ __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0a545374471b7070;
++ *((unsigned long*)& __m128i_op0[0]) = 0x274f4f0648145f50;
++ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[1]) = 0xa8a736e19e9e28bf;
++ *((unsigned long*)& __m128i_result[0]) = 0x9e9f9e9f9e9f9e9f;
++ __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x7);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000400000004000;
++ __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xe);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++ __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_result[0]) = 0x00012c8a0000a58a;
++ __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_op1[0]) = 0xc03b000200020002;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000001ec020;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000001ec020;
++ __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f;
++ *((unsigned long*)& __m128i_result[1]) = 0x09e009e009e009e0;
++ *((unsigned long*)& __m128i_result[0]) = 0x09e009e009e009e0;
++ __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xa8a74bff9e9e0070;
++ *((unsigned long*)& __m128i_op0[0]) = 0x9e9e72ff9e9ff9ff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff;
++ __m128i_out = __lsx_vsat_du(__m128i_op0,0x2f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op1[3]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0xc039000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128i_op0[0]) = 0x27b1b106b8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000120000000d;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e;
++ __m128i_out = __lsx_vpcnt_w(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff;
++ __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000120000000d;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e;
++ unsigned_long_int_result = 0x0000000e0000000e;
++ unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0);
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op2[1]) = 0x000000120000000d;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000e0000000e;
++ *((unsigned long*)& __m128i_result[1]) = 0x0001000cfffffff2;
++ *((unsigned long*)& __m128i_result[0]) = 0x0001000dfffffff1;
++ __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((int*)& __m128_op0[3]) = 0x0001000c;
++ *((int*)& __m128_op0[2]) = 0xfffffff2;
++ *((int*)& __m128_op0[1]) = 0x0001000d;
++ *((int*)& __m128_op0[0]) = 0xfffffff1;
++ *((int*)& __m128_op1[3]) = 0xffff8a17;
++ *((int*)& __m128_op1[2]) = 0xffffc758;
++ *((int*)& __m128_op1[1]) = 0xffff69bb;
++ *((int*)& __m128_op1[0]) = 0xffffad3b;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000120000000d;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000e0000000e;
++ *((unsigned long*)& __m128i_op2[1]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_op2[0]) = 0x0000ffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x000000000011ffee;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000dfff2;
++ __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000120000000d;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001;
++ __m128i_out = __lsx_vsat_bu(__m128i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000007b007e;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000007b007e;
++ __m256i_out = __lasx_xvmaxi_d(__m256i_op0,2);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffce;
++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x000000000011ffee;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000000dfff2;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128i_op1[0]) = 0x27b1b106b8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffb81a6f70;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000047eba0b0;
++ __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffb81a6f70;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000d48eaa1a2;
++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffb81ae0bf;
++ *((unsigned long*)& __m128i_result[0]) = 0x00012c9748eaffff;
++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000000001de2dc20;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000000001de2dc20;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128d_op1[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128d_op1[0]) = 0x27b1b106b8145f50;
++ *((unsigned long*)& __m128d_result[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128d_result[0]) = 0x27b1b106b8145f50;
++ __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m256_op0[7]) = 0xffffffe7;
++ *((int*)& __m256_op0[6]) = 0xffffffe7;
++ *((int*)& __m256_op0[5]) = 0xffffffe7;
++ *((int*)& __m256_op0[4]) = 0xffffffe7;
++ *((int*)& __m256_op0[3]) = 0xffffffe7;
++ *((int*)& __m256_op0[2]) = 0xffffffe7;
++ *((int*)& __m256_op0[1]) = 0xffffffe7;
++ *((int*)& __m256_op0[0]) = 0xffffffe7;
++ *((int*)& __m256_op1[7]) = 0x00000000;
++ *((int*)& __m256_op1[6]) = 0x00000000;
++ *((int*)& __m256_op1[5]) = 0x00000000;
++ *((int*)& __m256_op1[4]) = 0x00000000;
++ *((int*)& __m256_op1[3]) = 0x00000000;
++ *((int*)& __m256_op1[2]) = 0x00000000;
++ *((int*)& __m256_op1[1]) = 0x00000000;
++ *((int*)& __m256_op1[0]) = 0x00000000;
++ *((int*)& __m256_result[7]) = 0x00000000;
++ *((int*)& __m256_result[6]) = 0x00000000;
++ *((int*)& __m256_result[5]) = 0x00000000;
++ *((int*)& __m256_result[4]) = 0x00000000;
++ *((int*)& __m256_result[3]) = 0x00000000;
++ *((int*)& __m256_result[2]) = 0x00000000;
++ *((int*)& __m256_result[1]) = 0x00000000;
++ *((int*)& __m256_result[0]) = 0x00000000;
++ __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000500000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000700000032;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000500000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000700000032;
++ __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000040e7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000040e7;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000200000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000200000000000;
++ __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x21);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000011ffee;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000dfff2;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff;
++ __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf654ad7447e59090;
++ *((unsigned long*)& __m128i_op0[0]) = 0x27b1b106b8145f50;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x3f);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000005;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff;
++ __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000;
++ __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000e7;
++ *((unsigned long*)& __m256i_result[3]) = 0x00000000000001fe;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001ce;
++ *((unsigned long*)& __m256i_result[1]) = 0x00000000000001fe;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001ce;
++ __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000000001fe;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001ce;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000000001fe;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001ce;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005;
++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x00000000000001fd;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fd;
++ __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fff00003fff;
++ __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x12);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8145f50;
++ __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000005;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000005;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000400100004001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000400100004001;
++ __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000019001c;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000019001c;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001fe;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001fe;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0xb9);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128d_op0[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vftintrz_l_d(__m128d_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff;
++ __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8145f50;
++ __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvfrecip_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000047e59090;
++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffb8145f50;
++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256i_result[2]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256i_result[1]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256i_result[0]) = 0xd3d3d3d3d3d3d3d3;
++ __m256i_out = __lasx_xvxori_b(__m256i_op0,0xd3);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008;
++ __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x8);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256d_op0[2]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256d_op0[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256d_op0[0]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_op2[3]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256d_op2[2]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256d_op2[1]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256d_op2[0]) = 0xd3d3d3d3d3d3d3d3;
++ *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff;
++ __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2);
++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe;
++ __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x17c64aaef639f093;
++ *((unsigned long*)& __m128d_op0[0]) = 0xdb8f439722ec502d;
++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128d_result[1]) = 0x17c64aaef639f093;
++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000;
++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out);
++
++ *((int*)& __m128_op0[3]) = 0xf6548a17;
++ *((int*)& __m128_op0[2]) = 0x47e59090;
++ *((int*)& __m128_op0[1]) = 0x27b169bb;
++ *((int*)& __m128_op0[0]) = 0xb8145f50;
++ *((int*)& __m128_op1[3]) = 0x004eff62;
++ *((int*)& __m128_op1[2]) = 0x00d2ff76;
++ *((int*)& __m128_op1[1]) = 0xff700028;
++ *((int*)& __m128_op1[0]) = 0x00be00a0;
++ *((int*)& __m128_result[3]) = 0xb7032c34;
++ *((int*)& __m128_result[2]) = 0x093d35ab;
++ *((int*)& __m128_result[1]) = 0xe7a6533b;
++ *((int*)& __m128_result[0]) = 0x800001b8;
++ __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xb7032c34093d35ab;
++ *((unsigned long*)& __m128i_op0[0]) = 0xe7a6533b800001b8;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009;
++ __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100003ffe;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100003fcd;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100003ffe;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100003fcd;
++ __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000900000009;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000900000009;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000090;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000090;
++ __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m256i_op1[3]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_op1[2]) = 0x0000400100004001;
++ *((unsigned long*)& __m256i_op1[1]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_op1[0]) = 0x0000400100004001;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000400100004001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000400100004001;
++ __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((int*)& __m256_op0[7]) = 0xfffefffe;
++ *((int*)& __m256_op0[6]) = 0xfffefffe;
++ *((int*)& __m256_op0[5]) = 0xfffefffe;
++ *((int*)& __m256_op0[4]) = 0xfffefffe;
++ *((int*)& __m256_op0[3]) = 0xfffefffe;
++ *((int*)& __m256_op0[2]) = 0xfffefffe;
++ *((int*)& __m256_op0[1]) = 0xfffefffe;
++ *((int*)& __m256_op0[0]) = 0xfffefffe;
++ *((int*)& __m256_op1[7]) = 0x000023a3;
++ *((int*)& __m256_op1[6]) = 0x00003fff;
++ *((int*)& __m256_op1[5]) = 0x000023a3;
++ *((int*)& __m256_op1[4]) = 0x00003fef;
++ *((int*)& __m256_op1[3]) = 0x000023a3;
++ *((int*)& __m256_op1[2]) = 0x00003fff;
++ *((int*)& __m256_op1[1]) = 0x000023a3;
++ *((int*)& __m256_op1[0]) = 0x00003fef;
++ *((int*)& __m256_result[7]) = 0xfffefffe;
++ *((int*)& __m256_result[6]) = 0xfffefffe;
++ *((int*)& __m256_result[5]) = 0xfffefffe;
++ *((int*)& __m256_result[4]) = 0xfffefffe;
++ *((int*)& __m256_result[3]) = 0xfffefffe;
++ *((int*)& __m256_result[2]) = 0xfffefffe;
++ *((int*)& __m256_result[1]) = 0xfffefffe;
++ *((int*)& __m256_result[0]) = 0xfffefffe;
++ __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1);
++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x004eff6200d2ff76;
++ *((unsigned long*)& __m128i_op1[0]) = 0xff70002800be00a0;
++ *((unsigned long*)& __m128i_result[1]) = 0x004eff6200d2ff76;
++ *((unsigned long*)& __m128i_result[0]) = 0xff70002800be00a0;
++ __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fff00003fff;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0xfffebffffffebfff;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0xfffebffffffebfff;
++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000090;
++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000090;
++ *((unsigned long*)& __m128d_op1[1]) = 0x004eff6200d2ff76;
++ *((unsigned long*)& __m128d_op1[0]) = 0xff70002800be00a0;
++ *((int*)& __m128_result[3]) = 0x00000000;
++ *((int*)& __m128_result[2]) = 0x00000000;
++ *((int*)& __m128_result[1]) = 0x00000000;
++ *((int*)& __m128_result[0]) = 0xff800000;
++ __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1);
++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000400100004001;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000003ffb;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000400100004001;
++ *((unsigned long*)& __m256i_result[3]) = 0x00003fef00003fea;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003ff000003ff0;
++ *((unsigned long*)& __m256i_result[1]) = 0x00003fef00003fea;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003ff000003ff0;
++ __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x11);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x17c64aaef639f093;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op2[1]) = 0xf6548a1747e59090;
++ *((unsigned long*)& __m128i_op2[0]) = 0x27b169bbb8145f50;
++ *((unsigned long*)& __m128i_result[1]) = 0x10f881a20ffd02b0;
++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000;
++ __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vclo_d(__m128i_op0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff;
++ *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce;
++ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea;
++ *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0;
++ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea;
++ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0;
++ *((unsigned long*)& __m256i_result[3]) = 0x00003fea00013feb;
++ *((unsigned long*)& __m256i_result[2]) = 0x00003fe900014022;
++ *((unsigned long*)& __m256i_result[1]) = 0x00003fea00013feb;
++ *((unsigned long*)& __m256i_result[0]) = 0x00003fe900014022;
++ __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x000000005858585a;
++ *((unsigned long*)& __m256i_op0[2]) = 0x000000005858585a;
++ *((unsigned long*)& __m256i_op0[1]) = 0x000000005858585a;
++ *((unsigned long*)& __m256i_op0[0]) = 0x000000005858585a;
++ *((unsigned long*)& __m256i_op1[3]) = 0x000023a300003fff;
++ *((unsigned long*)& __m256i_op1[2]) = 0x000023a300003fef;
++ *((unsigned long*)& __m256i_op1[1]) = 0x000023a300003fff;
++ *((unsigned long*)& __m256i_op1[0]) = 0x000023a300003fef;
++ *((unsigned long*)& __m256i_result[3]) = 0x000011d1ac2c4c2d;
++ *((unsigned long*)& __m256i_result[2]) = 0x000011d1ac2c4c25;
++ *((unsigned long*)& __m256i_result[1]) = 0x000011d1ac2c4c2d;
++ *((unsigned long*)& __m256i_result[0]) = 0x000011d1ac2c4c25;
++ __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f;
++ *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000e29e;
++ *((unsigned long*)& __m128i_result[0]) = 0x000259140000ffff;
++ __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000008e8c000;
++ *((unsigned long*)& __m256d_op0[2]) = 0x000000000fffc000;
++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000008e8c000;
++ *((unsigned long*)& __m256d_op0[0]) = 0x000000000fffc000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0);
++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000;
++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
++ *((unsigned long*)& __m256i_result[0]) =
0x0000000100000001; ++ __m256i_out = __lasx_xvpcnt_w(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256d_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256d_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256d_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003feec0108022; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003fe9c015802c; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003feec0108022; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003fe9c015802c; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007f124010c022; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007f174015c02c; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007f124010c022; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007f174015c02c; ++ __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x08e8c000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x0fffc000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x08e8c000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x0fffc000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00003fea; ++ *((int*)& __m256_op0[6]) = 0x00013feb; ++ *((int*)& __m256_op0[5]) = 0x00003fe9; ++ *((int*)& __m256_op0[4]) = 0x00014022; ++ *((int*)& __m256_op0[3]) = 0x00003fea; ++ *((int*)& __m256_op0[2]) = 0x00013feb; ++ *((int*)& __m256_op0[1]) = 0x00003fe9; ++ *((int*)& __m256_op0[0]) = 0x00014022; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 
0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((int*)& __m256_op1[7]) = 0xffffffff; ++ *((int*)& __m256_op1[6]) = 0xffffffff; ++ *((int*)& __m256_op1[5]) = 0xffffffff; ++ *((int*)& __m256_op1[4]) = 0xffffffff; ++ *((int*)& __m256_op1[3]) = 0xffffffff; ++ *((int*)& __m256_op1[2]) = 0xffffffff; ++ *((int*)& __m256_op1[1]) = 0xffffffff; ++ *((int*)& __m256_op1[0]) = 0xffffffff; ++ *((int*)& __m256_op2[7]) = 
0x00000000; ++ *((int*)& __m256_op2[6]) = 0x00000000; ++ *((int*)& __m256_op2[5]) = 0x00000000; ++ *((int*)& __m256_op2[4]) = 0x00000000; ++ *((int*)& __m256_op2[3]) = 0x00000000; ++ *((int*)& __m256_op2[2]) = 0x00000000; ++ *((int*)& __m256_op2[1]) = 0x00000000; ++ *((int*)& __m256_op2[0]) = 0x00000000; ++ *((int*)& __m256_result[7]) = 0xffffffff; ++ *((int*)& __m256_result[6]) = 0xffffffff; ++ *((int*)& __m256_result[5]) = 0xffffffff; ++ *((int*)& __m256_result[4]) = 0xffffffff; ++ *((int*)& __m256_result[3]) = 0xffffffff; ++ *((int*)& __m256_result[2]) = 0xffffffff; ++ *((int*)& __m256_result[1]) = 0xffffffff; ++ *((int*)& __m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x02b010f881a281a2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vmini_hu(__m128i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w(__m256i_op0,11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0002000200020002; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x02b010f881a281a2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x02b010f881a281a2; ++ *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8140001; ++ __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8140001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000010f8000081a2; ++ *((unsigned long*)& __m128i_result[0]) = 0x000069bb00000001; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000010f8000081a2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000069bb00000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001000010f8; ++ __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long*)& __m128d_result[0]) = 0x00000000ff800000; ++ __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_result[1]) = 0xf1f181a2f1f1f1b0; ++ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1f180f1f1; ++ __m128i_out = __lsx_vmini_b(__m128i_op0,-15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc8027; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc7ff1; ++ 
*((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc8027; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc7ff1; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000100000014; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000014; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000100000014; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000014; ++ __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x14); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long*)& __m256i_result[1]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long*)& __m256i_result[0]) = 0x1d1d1d1e1d1d1d1e; ++ __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1d); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f8000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x10f8000100000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000001000010f8; ++ __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001000010f8; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0f0f0f0f00000f00; ++ __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xf1f181a2; ++ *((int*)& __m128_op0[2]) = 0xf1f1f1b0; ++ *((int*)& __m128_op0[1]) = 0xf1f1f1f1; ++ *((int*)& __m128_op0[0]) = 0xf180f1f1; ++ *((int*)& __m128_result[3]) = 0x7fc00000; ++ *((int*)& __m128_result[2]) = 0x7fc00000; ++ *((int*)& 
__m128_result[1]) = 0x7fc00000; ++ *((int*)& __m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vflogb_s(__m128_op0); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_result[3]) = 0x00007fde00007fd4; ++ *((unsigned long*)& __m256i_result[2]) = 0x00007fe000007fe0; ++ *((unsigned long*)& __m256i_result[1]) = 0x00007fde00007fd4; ++ *((unsigned long*)& __m256i_result[0]) = 0x00007fe000007fe0; ++ __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00007fde00007fd4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00007fe000007fe0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00007fde00007fd4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fe000007fe0; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff7edfffff7edf; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff7edfffff7edf; ++ __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h(__m128i_op0,15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff7edfffff7edf; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff7edfffff7edf; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00007edd; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00007ed3; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00007edf; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00007edf; ++ __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f8000000000000; ++ *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1e); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0108015e01030150; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000017f0000; ++ __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x10f8000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128d_result[1]) = 0x10f8000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x00000000ff800000; ++ __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x50); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00007fde00007fd4; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00007fe000007fe0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00007fde00007fd4; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00007fe000007fe0; ++ *((unsigned long*)& __m256i_result[3]) = 0x000081220000812c; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000812000008120; ++ *((unsigned long*)& __m256i_result[1]) = 0x000081220000812c; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000812000008120; ++ __m256i_out = __lasx_xvneg_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff7edfffff7edf; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff7edfffff7edf; ++ *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; ++ 
*((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff3eedffff3ee3; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff3eedffff3ee3; ++ __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f8000100000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000001000010f8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x087c000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x000000000000087c; ++ __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00003fea00013fec; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe50001c013; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00003fea00013fec; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe50001c013; ++ *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000ff0000ff00; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000ff0000ff00; ++ __m256i_out = __lasx_xvsat_b(__m256i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0ff780a10efc01af; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe7f0000; ++ __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x0ff780a1; ++ *((int*)& __m128_op0[2]) = 0x0efc01af; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0xfe7f0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000fe7f0000; ++ __m128i_out = __lsx_vfrintrne_s(__m128_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x087c000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000087c; ++ *((unsigned long*)& __m128i_result[1]) = 0xf784000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffff784; ++ __m128i_out = __lsx_vneg_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu(__m128i_op0,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x087c000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000087c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f8000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffff784; ++ __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000081220000812c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000812000008120; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000081220000812c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000812000008120; ++ *((unsigned long*)& __m256i_result[3]) = 0xe9e968c9e9e968c1; ++ *((unsigned long*)& __m256i_result[2]) = 0xe9e968c9e9e968c9; ++ *((unsigned long*)& __m256i_result[1]) = 0xe9e968c9e9e968c1; ++ *((unsigned long*)& __m256i_result[0]) = 0xe9e968c9e9e968c9; ++ __m256i_out = __lasx_xvnori_b(__m256i_op0,0x16); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000081220000812c; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000812000008120; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000081220000812c; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000812000008120; ++ *((unsigned long*)& __m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x6); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffefffff784; ++ *((unsigned long*)& __m128i_op1[1]) = 0x10f8000100000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long*)& __m128i_result[1]) = 0x0177fff0fffffff0; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff8bc; ++ __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256d_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256d_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256d_op1[3]) = 0x00003fea00013fec; ++ *((unsigned long*)& __m256d_op1[2]) = 0x00003fe50001c013; ++ *((unsigned long*)& __m256d_op1[1]) = 0x00003fea00013fec; ++ *((unsigned long*)& __m256d_op1[0]) = 0x00003fe50001c013; ++ *((unsigned long*)& 
__m256i_result[3]) = 0x0000000180000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000180000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xe013fcf2e015fc38; ++ *((unsigned long*)& __m256i_op0[2]) = 0xe013fd00dff78420; ++ *((unsigned long*)& __m256i_op0[1]) = 0xe013fcf2e015fc38; ++ *((unsigned long*)& __m256i_op0[0]) = 0xe013fd00dff78420; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00003fea0014734d; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00003fe900140d85; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00003fea0014734d; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00003fe900140d85; ++ *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op1[2]) = 0x000000ff0000ff00; ++ *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long*)& __m256i_op1[0]) = 0x000000ff0000ff00; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffefffff784; ++ *((unsigned long*)& __m128i_result[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff008ff820; ++ __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000050005; ++ *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0xffffffff; ++ *((int*)& __m128_op0[2]) = 0xffffffff; ++ *((int*)& __m128_op0[1]) = 0xfffffffe; ++ *((int*)& __m128_op0[0]) = 0xfffff784; ++ *((int*)& __m128_op1[3]) = 0x0177fff0; ++ *((int*)& __m128_op1[2]) = 0xfffffff0; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) 
= 0x011ff8bc; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0xfd02fd02; ++ *((int*)& __m256_op0[6]) = 0xfd02fd02; ++ *((int*)& __m256_op0[5]) = 0xfd02fd02; ++ *((int*)& __m256_op0[4]) = 0xfd02fd02; ++ *((int*)& __m256_op0[3]) = 0xfd02fd02; ++ *((int*)& __m256_op0[2]) = 0xfd02fd02; ++ *((int*)& __m256_op0[1]) = 0xfd02fd02; ++ *((int*)& __m256_op0[0]) = 0xfd02fd02; ++ *((int*)& __m256_result[7]) = 0x81fa28e4; ++ *((int*)& __m256_result[6]) = 0x81fa28e4; ++ *((int*)& __m256_result[5]) = 0x81fa28e4; ++ *((int*)& __m256_result[4]) = 0x81fa28e4; ++ *((int*)& __m256_result[3]) = 0x81fa28e4; ++ *((int*)& __m256_result[2]) = 0x81fa28e4; ++ *((int*)& __m256_result[1]) = 0x81fa28e4; ++ *((int*)& __m256_result[0]) = 0x81fa28e4; ++ __m256_out = __lasx_xvfrecip_s(__m256_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffff008ff820; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff040; ++ __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000050005; ++ *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long*)& __m256i_op1[3]) = 0xf007fe76f008fe19; ++ *((unsigned long*)& __m256i_op1[2]) = 0xf08aff01f07cc291; ++ *((unsigned long*)& __m256i_op1[1]) = 0xf007fe76f008fe19; ++ *((unsigned long*)& __m256i_op1[0]) = 0xf08aff01f07cc291; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000001400; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000003c01ff9; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000003c01ff9; ++ __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x66); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long*)& __m128d_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000011f0000f040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001400; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000003c01ff9; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000003c01ff9; ++ *((unsigned long*)& __m256i_op1[3]) = 0xfffffffff08a7de0; ++ *((unsigned long*)& __m256i_op1[2]) = 0xfffffffff07c4170; ++ *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff08a7de0; ++ *((unsigned long*)& __m256i_op1[0]) = 0xfffffffff07c4170; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffff08a7de0; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffff07c4170; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffff08a7de0; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffff07c4170; ++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000001400; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000003c01ff9; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000003c01ff9; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffec00; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffc3fe007; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffc3fe007; ++ __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& 
__m128i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff040; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe0000ff18; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long*)& __m128i_op2[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_op2[0]) = 0xffffffff008ff820; ++ *((unsigned long*)& __m128i_result[1]) = 0xffe8008fffe7008f; ++ *((unsigned long*)& __m128i_result[0]) = 0x00010001f1153780; ++ __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m128d_op0[0]) = 0x00000000011ff040; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h(__m256i_op0,-11); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001000100010012; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe1ffc0; ++ __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff009ff83f; ++ 
[Remainder of this hunk: extraction-flattened, machine-generated vector test cases from the deleted LoongArch insn-correctness testsuite file; the original one-statement-per-line diff structure is not recoverable from this copy. Every case in the stretch has the same shape: the 64-bit (or 32-bit) lanes of one to three operand vectors are seeded with constants, the expected lanes are stored into a *_result vector, a single __lsx_* (128-bit LSX) or __lasx_* (256-bit LASX) builtin is invoked, and the output is checked with ASSERTEQ_64/ASSERTEQ_32(__LINE__, ...). On the order of 180 such cases fall in this stretch, exercising builtins including __lsx_vorn_v, __lsx_vhaddw_q_d, __lsx_vsigncov_d, __lsx_vilvl_b, __lsx_vreplve_d, __lsx_vfmadd_d, __lsx_vssrarni_bu_h, __lasx_xvftintrz_w_s, __lasx_xvsadd_hu, __lasx_xvmulwev_w_hu, __lasx_xvhaddw_qu_du, __lasx_xvfnmadd_s, __lasx_xvshuf_w, __lasx_xvssrani_du_q, __lasx_xvsrlr_b, and dozens more of the same kind.]
0x0000000000000000; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x00000000; ++ *((int*)& __m256_result[4]) = 0x00000000; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x00000000; ++ *((int*)& __m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu(__m256i_op0); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffff10000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0002ffff00020002; ++ *((unsigned long*)& __m256i_op0[2]) = 0x04f504f104f504f5; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0002ffff00020002; ++ *((unsigned long*)& __m256i_op0[0]) = 0x04f504f104f504f5; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x000200ff00020002; ++ *((unsigned long*)& __m256i_result[2]) = 0x00f500f100f500f5; ++ *((unsigned long*)& __m256i_result[1]) = 0x000200ff00020002; ++ *((unsigned long*)& __m256i_result[0]) = 0x00f500f100f500f5; ++ __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0004000400040004; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0004000500040005; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0004000400040004; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0004000500040005; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrintrne_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x8a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256i_result[2]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffd0003; ++ __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_result[3]) = 0x0001fffe0005fff9; ++ *((unsigned long*)& __m256i_result[2]) = 0x04f004f204f204f0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0001fffe0005fff9; ++ *((unsigned long*)& __m256i_result[0]) = 0x04f004f204f204f0; ++ __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0005fff9; ++ *((unsigned long*)& __m256i_op0[2]) = 0x04f004f204f204f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0005fff9; ++ *((unsigned long*)& __m256i_op0[0]) = 0x04f004f204f204f0; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000900000009; ++ __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x17); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0005fff9; ++ *((unsigned 
long*)& __m256i_op0[2]) = 0x04f004f204f204f0; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0005fff9; ++ *((unsigned long*)& __m256i_op0[0]) = 0x04f004f204f204f0; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000002780; ++ __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffd880; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffd880; ++ __m256i_out = __lasx_xvneg_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0001000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x03fc03fc03fc03fc; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffd880; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffd880; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x2); ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op0[2]) = 0x03acfc5303260e80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op0[0]) = 0x03acfc5303260e80; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_result[3]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_result[2]) = 0x03acfc5303260e81; ++ *((unsigned long*)& __m256i_result[1]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_result[0]) = 0x03acfc5303260e81; ++ __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000010100020103; ++ *((unsigned long*)& __m256i_result[2]) = 0x040f040f040b236d; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000010100020103; ++ *((unsigned long*)& __m256i_result[0]) = 0x040f040f040b236d; ++ __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op0[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op0[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[3]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op1[2]) = 0x03acfc5303260e80; ++ *((unsigned long*)& __m256i_op1[1]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op1[0]) = 0x03acfc5303260e80; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long*)& __m256i_result[2]) = 0x000f9bb562f56c80; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long*)& __m256i_result[0]) = 0x000f9bb562f56c80; ++ __m256i_out = 
__lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op0[2]) = 0x03acfc5303260e81; ++ *((unsigned long*)& __m256i_op0[1]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op0[0]) = 0x03acfc5303260e81; ++ *((unsigned long*)& __m256i_op1[3]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op1[2]) = 0x03acfc5303260e81; ++ *((unsigned long*)& __m256i_op1[1]) = 0x03af03af03af03af; ++ *((unsigned long*)& __m256i_op1[0]) = 0x03acfc5303260e81; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x1b); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubi_du(__m128i_op0,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x1716151417161514; ++ *((unsigned long*)& __m256d_op0[2]) = 0x1716151417161514; ++ *((unsigned long*)& __m256d_op0[1]) = 0x1716151417161514; ++ *((unsigned long*)& __m256d_op0[0]) = 0x1716151417161514; ++ *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[0]) = 0x0000000000002780; ++ *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op2[0]) = 0x0000000000002780; ++ *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[2]) = 0x8000000000002780; ++ *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256d_result[0]) = 0x8000000000002780; ++ __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); ++ ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long*)& __m128i_result[0]) = 
0x8a8a8a8a8a8a8a8a; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x8a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x8a8a8a8a; ++ *((int*)& __m128_op1[2]) = 0x8a8a8a8a; ++ *((int*)& __m128_op1[1]) = 0x8a8a8a8a; ++ *((int*)& __m128_op1[0]) = 0x8a8a8a8a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h(__m128i_op0,-10); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x00000000; ++ *((int*)& __m256_op0[6]) = 0x00000000; ++ *((int*)& __m256_op0[5]) = 0x00000000; ++ *((int*)& __m256_op0[4]) = 0x00000000; ++ *((int*)& __m256_op0[3]) = 0x00000000; ++ *((int*)& __m256_op0[2]) = 0x00000000; ++ *((int*)& __m256_op0[1]) = 0x00000000; ++ *((int*)& __m256_op0[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op1[3]) = 0x1716151417161514; ++ *((unsigned long*)& __m256i_op1[2]) = 0x1716151417161514; ++ *((unsigned long*)& __m256i_op1[1]) = 0x1716151417161514; ++ *((unsigned long*)& __m256i_op1[0]) = 0x1716151417161514; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m256i_result[0]) = 0x0fff0fff0fff0fff; ++ __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[2]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op2[0]) = 0x0000000000002780; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fc4; ++ *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fc4; ++ __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op2[2]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op2[0]) = 0x04f104f104f504ed; ++ *((unsigned long*)& __m256i_result[3]) = 0x0018761ed60b5d7f; ++ *((unsigned long*)& __m256i_result[2]) = 0xabdcdc9938afafe9; ++ *((unsigned long*)& __m256i_result[1]) = 0x0018761ed60b5d7f; ++ *((unsigned long*)& __m256i_result[0]) = 0xabdcdc9938afafe9; ++ __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int*)& __m128_op0[3]) = 0x00000000; ++ *((int*)& __m128_op0[2]) = 0x00000000; ++ *((int*)& __m128_op0[1]) = 0x00000000; ++ *((int*)& __m128_op0[0]) = 0x00000000; ++ *((int*)& __m128_op1[3]) = 0x00000000; ++ *((int*)& __m128_op1[2]) = 0x00000000; ++ *((int*)& __m128_op1[1]) = 0x00000000; ++ *((int*)& __m128_op1[0]) = 0x00000000; ++ *((int*)& __m128_result[3]) = 0x00000000; ++ *((int*)& __m128_result[2]) = 0x00000000; ++ *((int*)& __m128_result[1]) = 0x00000000; ++ *((int*)& __m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); ++ ASSERTEQ_32(__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256d_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256d_op1[2]) = 0x3fff3fff3fff3fc4; ++ *((unsigned long*)& __m256d_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long*)& __m256d_op1[0]) = 0x3fff3fff3fff3fc4; ++ *((int*)& __m256_result[7]) = 0x00000000; ++ *((int*)& __m256_result[6]) = 0x00000000; ++ *((int*)& __m256_result[5]) = 0x3ff9fffa; ++ *((int*)& __m256_result[4]) = 0x3ff9fffa; ++ *((int*)& __m256_result[3]) = 0x00000000; ++ *((int*)& __m256_result[2]) = 0x00000000; ++ *((int*)& __m256_result[1]) = 0x3ff9fffa; ++ *((int*)& __m256_result[0]) = 0x3ff9fffa; ++ __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_32(__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000009; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[2]) = 0x3ff9fffa3ff9fffa; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[0]) = 0x3ff9fffa3ff9fffa; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000007ff3; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000007ff3; ++ __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0x2f); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((int*)& __m256_op0[7]) = 0x003f0200; ++ *((int*)& __m256_op0[6]) = 0x01400200; ++ *((int*)& __m256_op0[5]) = 0x003f00ff; ++ *((int*)& __m256_op0[4]) = 0x003f00c4; ++ *((int*)& __m256_op0[3]) = 0x003f0200; ++ *((int*)& __m256_op0[2]) = 0x01400200; ++ *((int*)& __m256_op0[1]) = 0x003f00ff; ++ *((int*)& __m256_op0[0]) = 0x003f00c4; ++ *((int*)& __m256_op1[7]) = 0x00000101; ++ *((int*)& __m256_op1[6]) = 0x01010101; ++ *((int*)& __m256_op1[5]) = 0x00000000; ++ *((int*)& __m256_op1[4]) = 0x00000000; ++ *((int*)& __m256_op1[3]) = 0x00000101; ++ *((int*)& __m256_op1[2]) = 0x01010101; ++ *((int*)& __m256_op1[1]) = 0x00000000; ++ *((int*)& __m256_op1[0]) = 0x00000000; ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xb); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d(__m128i_op0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffdbff980038ffaf; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffafffe80004fff1; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffdbff980038ffaf; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffafffe80004fff1; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256i_op1[2]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_result[1]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0002fffd; ++ __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000020202020202; ++ *((unsigned long*)& __m256i_result[2]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000020202020202; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101000000010000; ++ __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x761ed60b5d7f0000; ++ *((unsigned long*)& __m256i_op0[2]) = 0xdc9938afafe904f1; ++ *((unsigned long*)& __m256i_op0[1]) = 0x761ed60b5d7f0000; ++ *((unsigned long*)& __m256i_op0[0]) = 0xdc9938afafe904f1; ++ *((unsigned long*)& __m256i_result[3]) = 0x03b0feb002eb0000; ++ *((unsigned long*)& __m256i_result[2]) = 0xfee401c5fd7f0027; ++ *((unsigned long*)& __m256i_result[1]) = 0x03b0feb002eb0000; ++ *((unsigned long*)& __m256i_result[0]) = 0xfee401c5fd7f0027; ++ __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x5); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffdbff980038ffaf; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffafffe80004fff1; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffdbff980038ffaf; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffafffe80004fff1; ++ *((unsigned long*)& __m256i_op1[3]) = 0x0000020202020202; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000020202020202; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000e3fec0004fff1; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000e3fec0004fff1; ++ __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); ++ 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffd0004; ++ __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xcb); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff01ff68; ++ *((unsigned long*)& __m256i_op0[2]) = 0x00000070ff017de6; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff01ff68; ++ *((unsigned long*)& __m256i_op0[0]) = 0x00000070ff017de6; ++ *((unsigned long*)& __m256i_op1[3]) = 0x761ed60b5d7f0000; ++ *((unsigned long*)& __m256i_op1[2]) = 0xdc9938afafe904f1; ++ *((unsigned long*)& __m256i_op1[1]) = 0x761ed60b5d7f0000; ++ *((unsigned long*)& __m256i_op1[0]) = 0xdc9938afafe904f1; ++ *((unsigned long*)& __m256i_result[3]) = 0x00000000007f0000; ++ *((unsigned long*)& __m256i_result[2]) = 0x00004c9000e9d886; ++ *((unsigned long*)& __m256i_result[1]) = 0x00000000007f0000; ++ *((unsigned long*)& __m256i_result[0]) = 0x00004c9000e9d886; ++ __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_op1[3]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long*)& __m256i_op1[0]) = 0xffff0000fffd0004; ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff0; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000000f; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x6c); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101000000010000; ++ *((unsigned 
long*)& __m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256d_op0[2]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0002fffc; ++ *((unsigned long*)& __m256d_op0[0]) = 0xffff0000fffd0003; ++ *((unsigned long*)& __m256d_op1[3]) = 0x003f020001400200; ++ *((unsigned long*)& __m256d_op1[2]) = 0x003f00ff003f00c4; ++ *((unsigned long*)& __m256d_op1[1]) = 0x003f020001400200; ++ *((unsigned long*)& __m256d_op1[0]) = 0x003f00ff003f00c4; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000260a378; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000d02317; ++ *((unsigned long*)& __m256i_op0[1]) = 0x000000000260a378; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000d02317; ++ *((unsigned long*)& __m256i_op1[3]) = 0x003f020001400200; ++ *((unsigned long*)& __m256i_op1[2]) = 0x003f00ff003f00c4; ++ *((unsigned long*)& __m256i_op1[1]) = 0x003f020001400200; ++ *((unsigned long*)& __m256i_op1[0]) = 0x003f00ff003f00c4; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x00a300a300a300a3; ++ *((unsigned long*)& __m128i_result[0]) = 0x00a300a300a300a3; ++ __m128i_out = __lsx_vrepli_h(0xa3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvldi(-4080); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ ++ *((unsigned long*)& 
__m128i_result[1]) = 0xfffffffffffffe15; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe15; ++ __m128i_out = __lsx_vrepli_d(-491); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xfebcfebcfebcfebc; ++ *((unsigned long*)& __m256i_result[2]) = 0xfebcfebcfebcfebc; ++ *((unsigned long*)& __m256i_result[1]) = 0xfebcfebcfebcfebc; ++ *((unsigned long*)& __m256i_result[0]) = 0xfebcfebcfebcfebc; ++ __m256i_out = __lasx_xvrepli_h(-324); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0xecececececececec; ++ *((unsigned long*)& __m128i_result[0]) = 0xecececececececec; ++ __m128i_out = __lsx_vrepli_b(-20); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ffff00ff00ff00; ++ __m128i_out = __lsx_vldi(-1686); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x3fd1000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x3fd1000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x3fd1000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x3fd1000000000000; ++ __m256i_out = __lasx_xvldi(-943); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x004d004d004d004d; ++ *((unsigned long*)& __m128i_result[0]) = 0x004d004d004d004d; ++ __m128i_out = __lsx_vrepli_h(0x4d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvrepli_h(-228); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x7200000072000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x7200000072000000; ++ __m256i_out = __lasx_xvldi(-3214); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xffffff1dffffff1d; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffff1dffffff1d; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffff1dffffff1d; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffff1dffffff1d; ++ __m256i_out = __lasx_xvrepli_w(-227); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0a0000000a000000; ++ __m128i_out = __lsx_vldi(-3318); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fff8; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xbz_v(__m256i_op0); ++ ASSERTEQ_int(__LINE__, int_result, int_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_bnz_b(__m128i_op0); ++ ASSERTEQ_int(__LINE__, int_result, int_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ 
[Roughly 1,100 generated test lines, flattened by extraction, elided here: the branch-condition battery for __lsx_bz_{v,b,h,w,d} and __lsx_bnz_{v,b,h,w,d} and their 256-bit __lasx_xbz_* / __lasx_xbnz_* counterparts. Every case stores an operand vector, the expected int_result and the int_out of the builtin, then checks them with ASSERTEQ_int(__LINE__, ...). The expectations all follow one rule: bnz_v/xbnz_v is 1 iff any bit of the vector is set, bz_v/xbz_v is 1 iff the whole vector is zero, bz_<fmt>/xbz_<fmt> is 1 iff at least one element of that width is zero, and bnz_<fmt>/xbnz_<fmt> is 1 iff every element is nonzero. Representative cases: __lsx_bz_b on {0xffffffffffffffff, 0xffffffff00000000} -> 1 (the low half contains zero bytes), __lsx_bz_b on {0x687a8373f249bc44, 0x7861145d9241a14a} -> 0 (no zero byte), __lsx_bnz_v on {0x0000000000000000, 0x0000000000000006} -> 1. The stretch ends as the __lasx_xvextl_q_d cases begin, with all-zero operands extending to all-zero results.]
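For the elided branch-condition battery, a minimal stand-alone C model of the rule stated above may be clearer than the raw vectors. bz_b, bnz_v and bz_v are local helper names for this sketch, not the real intrinsics; the asserted vectors and outcomes are quoted from the elided cases.

/* Model of the bz/bnz expectations: "some element zero" versus
   "any bit set" versus "all bits clear", on a 128-bit {hi, lo}. */
#include <assert.h>
#include <stdint.h>

static int bz_b(uint64_t hi, uint64_t lo)   /* 1 iff some byte is zero */
{
    for (int i = 0; i < 8; i++)
        if (((hi >> (8 * i)) & 0xff) == 0 || ((lo >> (8 * i)) & 0xff) == 0)
            return 1;
    return 0;
}

static int bnz_v(uint64_t hi, uint64_t lo) { return (hi | lo) != 0; }
static int bz_v(uint64_t hi, uint64_t lo)  { return (hi | lo) == 0; }

int main(void)
{
    assert(bz_b(0xffffffffffffffffULL, 0xffffffff00000000ULL) == 1);
    assert(bz_b(0x687a8373f249bc44ULL, 0x7861145d9241a14aULL) == 0);
    assert(bnz_v(0x0ULL, 0x6ULL) == 1);
    assert(bz_v(0x0ULL, 0x0ULL) == 1);
    return 0;
}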
__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x000201220001011c; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x000201220001011c; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x000201220001011c; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x000201220001011c; ++ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 
0x0101010101010101; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000083f95466; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0101010100005400; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; ++ 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vld((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_result[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_result[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvld((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0; ++ *((unsigned long*)& __m128i_result[0]) = 0x0; ++ __lsx_vst(__m128i_op0, (unsigned long *)&__m128i_result, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_op0, __m128i_result); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0; ++ __lasx_xvst(__m256i_op0, (unsigned long *)&__m256i_result, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_op0, __m256i_result); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vldx((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_result[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_result[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvldx((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0; ++ *((unsigned long*)& __m128i_result[0]) = 0x0; ++ __lsx_vstx(__m128i_op0, (unsigned long *)&__m128i_result, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_op0, __m128i_result); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0; ++ *((unsigned long*)& __m256i_result[0]) = 0x0; ++ 
__lasx_xvstx(__m256i_op0, (unsigned long *)&__m256i_result, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_op0, __m256i_result); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0xc3c3c3c3c3c3c3c3; ++ *((unsigned long*)& __m128i_result[0]) = 0xc3c3c3c3c3c3c3c3; ++ __m128i_out = __lsx_vldrepl_b((unsigned long *)&__m128i_op0, 0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out = __lasx_xvldrepl_b((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0xc31ac31ac31ac31a; ++ *((unsigned long*)& __m128i_result[0]) = 0xc31ac31ac31ac31a; ++ __m128i_out = __lsx_vldrepl_h((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long*)& __m256i_result[2]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long*)& __m256i_result[1]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long*)& __m256i_result[0]) = 0xfeebfeebfeebfeeb; ++ __m256i_out = __lasx_xvldrepl_h((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x47a5c31a47a5c31a; ++ *((unsigned long*)& __m128i_result[0]) = 0x47a5c31a47a5c31a; ++ __m128i_out = __lsx_vldrepl_w((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0xad72feebad72feeb; ++ *((unsigned long*)& __m256i_result[2]) = 0xad72feebad72feeb; ++ *((unsigned long*)& __m256i_result[1]) = 0xad72feebad72feeb; ++ *((unsigned long*)& __m256i_result[0]) = 0xad72feebad72feeb; ++ __m256i_out = __lasx_xvldrepl_w((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vldrepl_d((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& 
__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[2]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[1]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvldrepl_d((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x000001a8000001a8; ++ *((unsigned long*)& __m128i_result[0]) = 0x000001a8000001a8; ++ __m128i_out = __lsx_vrepli_w(424); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0x0000011300000113; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000011300000113; ++ __m128i_out = __lsx_vrepli_w(275); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffee2fffffee2; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffee2fffffee2; ++ __m128i_out = __lsx_vrepli_w(-286); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0; ++ *((unsigned long*)& __m128i_result[0]) = 0x05; ++ *((unsigned long*)& __m128i_out[1]) = 0x0; ++ *((unsigned long*)& __m128i_out[0]) = 0x0; ++ __lsx_vstelm_b(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0; ++ *((unsigned long*)& __m128i_result[0]) = 0x5c05; ++ *((unsigned long*)& __m128i_out[1]) = 0x0; ++ *((unsigned long*)& __m128i_out[0]) = 0x0; ++ __lsx_vstelm_h(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0; ++ *((unsigned long*)& __m128i_result[0]) = 0xc9d85c05; ++ *((unsigned long*)& __m128i_out[1]) = 0x0; ++ *((unsigned long*)& __m128i_out[0]) = 0x0; ++ __lsx_vstelm_w(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long*)& __m128i_result[1]) = 0x0; ++ *((unsigned long*)& __m128i_result[0]) = 0x1dcc4255c9d85c05; ++ *((unsigned long*)& __m128i_out[1]) = 0x0; ++ *((unsigned long*)& __m128i_out[0]) = 0x0; ++ __lsx_vstelm_d(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0; ++ *((unsigned long*)& __m256i_result[0]) = 0x8d; ++ *((unsigned long*)& 
__m256i_out[3]) = 0x0; ++ *((unsigned long*)& __m256i_out[2]) = 0x0; ++ *((unsigned long*)& __m256i_out[1]) = 0x0; ++ *((unsigned long*)& __m256i_out[0]) = 0x0; ++ __lasx_xvstelm_b(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0xe); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0; ++ *((unsigned long*)& __m256i_result[0]) = 0x9100; ++ *((unsigned long*)& __m256i_out[3]) = 0x0; ++ *((unsigned long*)& __m256i_out[2]) = 0x0; ++ *((unsigned long*)& __m256i_out[1]) = 0x0; ++ *((unsigned long*)& __m256i_out[0]) = 0x0; ++ __lasx_xvstelm_h(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x8); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0; ++ *((unsigned long*)& __m256i_result[0]) = 0xe9179100; ++ *((unsigned long*)& __m256i_out[3]) = 0x0; ++ *((unsigned long*)& __m256i_out[2]) = 0x0; ++ *((unsigned long*)& __m256i_out[1]) = 0x0; ++ *((unsigned long*)& __m256i_out[0]) = 0x0; ++ __lasx_xvstelm_w(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x4); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long*)& __m256i_result[3]) = 0x0; ++ *((unsigned long*)& __m256i_result[2]) = 0x0; ++ *((unsigned long*)& __m256i_result[1]) = 0x0; ++ *((unsigned long*)& __m256i_result[0]) = 0x58569d7be9179100; ++ *((unsigned long*)& __m256i_out[3]) = 0x0; ++ *((unsigned long*)& __m256i_out[2]) = 0x0; ++ *((unsigned long*)& __m256i_out[1]) = 0x0; ++ *((unsigned long*)& __m256i_out[0]) = 0x0; ++ __lasx_xvstelm_d(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x2); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long*)& __m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; ++ __m256i_out = __lasx_xvrepli_b(-149); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe69; ++ *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe69; ++ *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe69; ++ *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe69; ++ __m256i_out = __lasx_xvrepli_d(-407); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff76; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff76; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff76; ++ *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffff76; ++ __m256i_out = __lasx_xvrepli_d(-138); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffa1; ++ *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffa1; ++ *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffa1; ++ *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffa1; ++ __m256i_out = __lasx_xvrepli_d(-95); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x0000000000000019; ++ *((unsigned long*)& __m256i_result[2]) = 0x0000000000000019; ++ *((unsigned long*)& __m256i_result[1]) = 0x0000000000000019; ++ *((unsigned long*)& __m256i_result[0]) = 0x0000000000000019; ++ __m256i_out = __lasx_xvrepli_d(25); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x000000000000001e; ++ *((unsigned long*)& __m256i_result[2]) = 0x000000000000001e; ++ *((unsigned long*)& __m256i_result[1]) = 0x000000000000001e; ++ *((unsigned long*)& __m256i_result[0]) = 0x000000000000001e; ++ __m256i_out = __lasx_xvrepli_d(30); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m256i_result[3]) = 0x8d8d8d8d8d8d8d8d; ++ *((unsigned long*)& __m256i_result[2]) = 0x8d8d8d8d8d8d8d8d; ++ *((unsigned long*)& __m256i_result[1]) = 0x8d8d8d8d8d8d8d8d; ++ *((unsigned long*)& __m256i_result[0]) = 0x8d8d8d8d8d8d8d8d; ++ __m256i_out = __lasx_xvrepli_b(-371); ++ ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff8969ffffd7e2; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000d688ffffbd95; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xf12dfafc1ad1f7b3; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x34); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000200000002000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000200000002000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x2f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000c0002000c0002; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000400c600700153; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000c0002000c0002; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000400c600700153; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000010000007f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0800000400000800; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000001515151500; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001515151500; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001515000015150; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fdfd0404; ++ *((unsigned long*)& __m128i_op1[1]) = 0x3fffffff3fffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3fffffff3fffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000000000fc08; ++ *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fc08; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000800080008000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffba420000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x000007e044000400; ++ *((unsigned long*)& __m128i_result[0]) = 0xfdd2100000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000081e003f3f3f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3f3f3f0e00000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000081e003f3f3f; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f3f3f0e00000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000103c007e7e8; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000103c007e7e8; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x43); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0202022302023212; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0202ff3f02022212; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000002100003010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ff3f00002010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x79); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff7fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xe2bb5ff00e20aceb; ++ *((unsigned long*)& __m128i_op1[0]) = 0xe2bb5ff00e20aceb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0100010000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00e3000e00e3000e; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xf58df7841423142a; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f7477f8ff4e2152; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3d3e0505101e4008; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2bd5d429e34a1efb; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfc0203fccbedbba7; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc9f66947f077afd0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x89fed7c07fdf5d00; ++ *((unsigned long*)& __m128i_result[1]) = 0x14f1a50ffe65f6de; ++ *((unsigned long*)& __m128i_result[0]) = 0xa3f83bd8e03fefaf; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6ed694e00e0355db; ++ *((unsigned long*)& 
__m128i_op1[0]) = 0x0000010600000106; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xe00e035606000001; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xe739e7ade77ae725; ++ *((unsigned long*)& __m128i_op0[0]) = 0xbb9013bd049bc9ec; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x56aca41400000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7ade77ae3bd049bd; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000041400000; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1010101010101010; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1010101010101010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x8081808180818081; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000006ff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0037f80000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x15); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x9); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x69); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0020202020202020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0080808080c04040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0101010001808080; ++ *((unsigned long*)& __m128i_result[1]) = 
0x0000202000008081; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001010100010101; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x28); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x00fff00000001000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x28); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x6b); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000000adf0000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001e00; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xb); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0040000000400040; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000020002020; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808102; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000001010102; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x001000100010000b; ++ *((unsigned long*)& __m128i_op0[0]) = 0x03fc03fc03fc03fc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x04000400ff01ff01; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xa); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x1010101010101010; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xc); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000fff800000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000001ed68; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1ff6a09e667f3bd8; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000007b5a; ++ *((unsigned long*)& __m128i_result[0]) = 0x999fcef600000000; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffe5c8000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x91f80badc162a0c4; ++ *((unsigned long*)& __m128i_op1[0]) = 0x99d1ffff0101ff01; ++ *((unsigned long*)& __m128i_result[1]) = 0x00ff400000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x905d0b06cf0008f8; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x5); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3802f4fd025800f7; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc8ff0bffff00ffae; ++ *((unsigned long*)& __m128i_op1[0]) = 0x91ff40fffff8ff50; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000200000000700; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000192000001240; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x33); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0ffd0ffd; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff0ffc0001; ++ *((unsigned long*)& __m128i_op1[1]) = 0xbb7743ca4c78461f; ++ *((unsigned long*)& __m128i_op1[0]) = 0xd9743eb5fb4deb3a; ++ *((unsigned long*)& __m128i_result[1]) = 0x003fffffffc3ff44; ++ *((unsigned long*)& __m128i_result[0]) = 0x002eddd0f2931e12; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x4a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xbb7743ca4c78461f; ++ *((unsigned long*)& __m128i_op0[0]) = 0xd9743eb5fb4deb3a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x22445e1ad9c3e4f0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x1b43e8a30a570a63; ++ *((unsigned long*)& __m128i_result[1]) = 0x743ca4c843eb5fb5; ++ *((unsigned long*)& __m128i_result[0]) = 0x45e1ad9c3e8a30a5; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x14); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x1204900f62f72565; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x4901725600000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x4); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x6a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000400000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x12); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000300000003; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x32); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3f3f3f7fbf3fffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x47); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000040804080; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000020100000000; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xe); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x0); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffe8ffff28fc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00007fff0000803e; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000006ffff81e1; ++ *((unsigned long*)& __m128i_result[1]) = 0x0ffffffe8ffff290; ++ *((unsigned long*)& __m128i_result[0]) = 0x000007fff0000804; ++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x44); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x000418200000008e; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000002100047; ++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636362; ++ *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636362; ++ *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636362; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636362; ++ *((unsigned long*)& __m128i_result[1]) = 0x0032003200320032; ++ *((unsigned long*)& __m128i_result[0]) = 0x0032003200320032; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x19); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff01010102; ++ *((unsigned long*)& __m128i_op0[0]) = 0x7ffdf87f0b0c7f7f; ++ *((unsigned long*)& __m128i_op1[1]) = 0xf6b3eb63f6b3f6b3; ++ *((unsigned long*)& __m128i_op1[0]) = 0x363953e42b56432e; ++ *((unsigned long*)& __m128i_result[1]) = 0x010000010080000b; ++ *((unsigned long*)& __m128i_result[0]) = 0x00f700f70036002b; ++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x18); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xed67d6c7ed67ed67; ++ *((unsigned long*)& __m128i_op1[0]) = 0x6c72a7c856ac865c; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000700000003; ++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff40ff83;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x1010101010101010;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xc);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000003030103;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000003030103;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000006060;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000006060;
++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000002408beb26c8;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x000000000000706e;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000028c27;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000070;
++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x80000b0b80000b0b;
++ *((unsigned long*)& __m128i_op0[0]) = 0x8000101080001010;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffefefffffeff0;
++ *((unsigned long*)& __m128i_result[1]) = 0x0061006100020002;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00fe;
++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x3);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000078087f08;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000078087f08;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000e0fc0000e0fc;
++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x6);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x17);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff0bff76;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x75);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x33);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x000000ff00ff0000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00ffffff;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8282828282828282;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000828282828282;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0008000800000008;
++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00f7000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000005150;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000005150;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000000f7000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x24);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x41afddcb1c000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xd09e1bd99a2c6eb1;
++ *((unsigned long*)& __m128i_op1[0]) = 0xe82f7c27bb0778af;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000040002;
++ *((unsigned long*)& __m128i_result[0]) = 0x000d000a000f000c;
++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x1c);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff8000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffdff0;
++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0144329880000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x007fffc0007ffff0;
++ *((unsigned long*)& __m128i_result[0]) = 0x004000004c400000;
++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x17);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000001e0000001e;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op0[0]) = 0xfffafff0fff9ff01;
++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000002;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002;
++ *((unsigned long*)& __m128i_result[1]) = 0x00000000d800cff8;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++ __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x5);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001;
++ *((unsigned long*)& __m128i_op1[1]) = 0x00000002000007d7;
++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000300000ff1;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001;
++ *((unsigned long*)& __m128i_result[0]) = 0x000007d700000ff1;
++ __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x0);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffff00ffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000ff8;
++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000;
++ __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x74);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000f08;
++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002;
++ *((unsigned long*)& __m128i_result[0]) = 0x2020202020202020;
++ __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xb);
++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c b/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c
+new file mode 100644
+index 000000000..1f563ec81
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c
+@@ -0,0 +1,1509 @@
++/* Test builtins for LOONGARCH LASX ASE instructions */
++/* { dg-do compile } */
++/* { dg-options "-mlasx" } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_b:.*xvsll\\.b.*lasx_xvsll_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_h:.*xvsll\\.h.*lasx_xvsll_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_w:.*xvsll\\.w.*lasx_xvsll_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_d:.*xvsll\\.d.*lasx_xvsll_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_b:.*xvslli\\.b.*lasx_xvslli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_h:.*xvslli\\.h.*lasx_xvslli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_w:.*xvslli\\.w.*lasx_xvslli_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_d:.*xvslli\\.d.*lasx_xvslli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_b:.*xvsra\\.b.*lasx_xvsra_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_h:.*xvsra\\.h.*lasx_xvsra_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_w:.*xvsra\\.w.*lasx_xvsra_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_d:.*xvsra\\.d.*lasx_xvsra_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_b:.*xvsrai\\.b.*lasx_xvsrai_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_h:.*xvsrai\\.h.*lasx_xvsrai_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_w:.*xvsrai\\.w.*lasx_xvsrai_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_d:.*xvsrai\\.d.*lasx_xvsrai_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_b:.*xvsrar\\.b.*lasx_xvsrar_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_h:.*xvsrar\\.h.*lasx_xvsrar_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_w:.*xvsrar\\.w.*lasx_xvsrar_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_d:.*xvsrar\\.d.*lasx_xvsrar_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_b:.*xvsrari\\.b.*lasx_xvsrari_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_h:.*xvsrari\\.h.*lasx_xvsrari_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_w:.*xvsrari\\.w.*lasx_xvsrari_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_d:.*xvsrari\\.d.*lasx_xvsrari_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_b:.*xvsrl\\.b.*lasx_xvsrl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_h:.*xvsrl\\.h.*lasx_xvsrl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_w:.*xvsrl\\.w.*lasx_xvsrl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_d:.*xvsrl\\.d.*lasx_xvsrl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_b:.*xvsrli\\.b.*lasx_xvsrli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_h:.*xvsrli\\.h.*lasx_xvsrli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_w:.*xvsrli\\.w.*lasx_xvsrli_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_d:.*xvsrli\\.d.*lasx_xvsrli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_b:.*xvsrlr\\.b.*lasx_xvsrlr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_h:.*xvsrlr\\.h.*lasx_xvsrlr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_w:.*xvsrlr\\.w.*lasx_xvsrlr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_d:.*xvsrlr\\.d.*lasx_xvsrlr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_b:.*xvsrlri\\.b.*lasx_xvsrlri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_h:.*xvsrlri\\.h.*lasx_xvsrlri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_w:.*xvsrlri\\.w.*lasx_xvsrlri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_d:.*xvsrlri\\.d.*lasx_xvsrlri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_b:.*xvbitclr\\.b.*lasx_xvbitclr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_h:.*xvbitclr\\.h.*lasx_xvbitclr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_w:.*xvbitclr\\.w.*lasx_xvbitclr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_d:.*xvbitclr\\.d.*lasx_xvbitclr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_b:.*xvbitclri\\.b.*lasx_xvbitclri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_h:.*xvbitclri\\.h.*lasx_xvbitclri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_w:.*xvbitclri\\.w.*lasx_xvbitclri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_d:.*xvbitclri\\.d.*lasx_xvbitclri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_b:.*xvbitset\\.b.*lasx_xvbitset_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_h:.*xvbitset\\.h.*lasx_xvbitset_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_w:.*xvbitset\\.w.*lasx_xvbitset_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_d:.*xvbitset\\.d.*lasx_xvbitset_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_b:.*xvbitseti\\.b.*lasx_xvbitseti_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_h:.*xvbitseti\\.h.*lasx_xvbitseti_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_w:.*xvbitseti\\.w.*lasx_xvbitseti_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_d:.*xvbitseti\\.d.*lasx_xvbitseti_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_b:.*xvbitrev\\.b.*lasx_xvbitrev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_h:.*xvbitrev\\.h.*lasx_xvbitrev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_w:.*xvbitrev\\.w.*lasx_xvbitrev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_d:.*xvbitrev\\.d.*lasx_xvbitrev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_b:.*xvbitrevi\\.b.*lasx_xvbitrevi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_h:.*xvbitrevi\\.h.*lasx_xvbitrevi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_w:.*xvbitrevi\\.w.*lasx_xvbitrevi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_d:.*xvbitrevi\\.d.*lasx_xvbitrevi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_b:.*xvadd\\.b.*lasx_xvadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_h:.*xvadd\\.h.*lasx_xvadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_w:.*xvadd\\.w.*lasx_xvadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_d:.*xvadd\\.d.*lasx_xvadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_bu:.*xvaddi\\.bu.*lasx_xvaddi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_hu:.*xvaddi\\.hu.*lasx_xvaddi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_wu:.*xvaddi\\.wu.*lasx_xvaddi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_du:.*xvaddi\\.du.*lasx_xvaddi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_b:.*xvsub\\.b.*lasx_xvsub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_h:.*xvsub\\.h.*lasx_xvsub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_w:.*xvsub\\.w.*lasx_xvsub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_d:.*xvsub\\.d.*lasx_xvsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_bu:.*xvsubi\\.bu.*lasx_xvsubi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_hu:.*xvsubi\\.hu.*lasx_xvsubi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_wu:.*xvsubi\\.wu.*lasx_xvsubi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_du:.*xvsubi\\.du.*lasx_xvsubi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_b:.*xvmax\\.b.*lasx_xvmax_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_h:.*xvmax\\.h.*lasx_xvmax_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_w:.*xvmax\\.w.*lasx_xvmax_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_d:.*xvmax\\.d.*lasx_xvmax_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_b:.*xvmaxi\\.b.*lasx_xvmaxi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_h:.*xvmaxi\\.h.*lasx_xvmaxi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_w:.*xvmaxi\\.w.*lasx_xvmaxi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_d:.*xvmaxi\\.d.*lasx_xvmaxi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_bu:.*xvmax\\.bu.*lasx_xvmax_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_hu:.*xvmax\\.hu.*lasx_xvmax_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_wu:.*xvmax\\.wu.*lasx_xvmax_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_du:.*xvmax\\.du.*lasx_xvmax_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_bu:.*xvmaxi\\.bu.*lasx_xvmaxi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_hu:.*xvmaxi\\.hu.*lasx_xvmaxi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_wu:.*xvmaxi\\.wu.*lasx_xvmaxi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_du:.*xvmaxi\\.du.*lasx_xvmaxi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_b:.*xvmin\\.b.*lasx_xvmin_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_h:.*xvmin\\.h.*lasx_xvmin_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_w:.*xvmin\\.w.*lasx_xvmin_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_d:.*xvmin\\.d.*lasx_xvmin_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_b:.*xvmini\\.b.*lasx_xvmini_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_h:.*xvmini\\.h.*lasx_xvmini_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_w:.*xvmini\\.w.*lasx_xvmini_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_d:.*xvmini\\.d.*lasx_xvmini_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_bu:.*xvmin\\.bu.*lasx_xvmin_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_hu:.*xvmin\\.hu.*lasx_xvmin_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_wu:.*xvmin\\.wu.*lasx_xvmin_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_du:.*xvmin\\.du.*lasx_xvmin_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_bu:.*xvmini\\.bu.*lasx_xvmini_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_hu:.*xvmini\\.hu.*lasx_xvmini_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_wu:.*xvmini\\.wu.*lasx_xvmini_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_du:.*xvmini\\.du.*lasx_xvmini_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_b:.*xvseq\\.b.*lasx_xvseq_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_h:.*xvseq\\.h.*lasx_xvseq_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_w:.*xvseq\\.w.*lasx_xvseq_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_d:.*xvseq\\.d.*lasx_xvseq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_b:.*xvseqi\\.b.*lasx_xvseqi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_h:.*xvseqi\\.h.*lasx_xvseqi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_w:.*xvseqi\\.w.*lasx_xvseqi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_d:.*xvseqi\\.d.*lasx_xvseqi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_b:.*xvslt\\.b.*lasx_xvslt_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_h:.*xvslt\\.h.*lasx_xvslt_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_w:.*xvslt\\.w.*lasx_xvslt_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_d:.*xvslt\\.d.*lasx_xvslt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_b:.*xvslti\\.b.*lasx_xvslti_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_h:.*xvslti\\.h.*lasx_xvslti_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_w:.*xvslti\\.w.*lasx_xvslti_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_d:.*xvslti\\.d.*lasx_xvslti_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_bu:.*xvslt\\.bu.*lasx_xvslt_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_hu:.*xvslt\\.hu.*lasx_xvslt_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_wu:.*xvslt\\.wu.*lasx_xvslt_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_du:.*xvslt\\.du.*lasx_xvslt_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_bu:.*xvslti\\.bu.*lasx_xvslti_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_hu:.*xvslti\\.hu.*lasx_xvslti_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_wu:.*xvslti\\.wu.*lasx_xvslti_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_du:.*xvslti\\.du.*lasx_xvslti_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_b:.*xvsle\\.b.*lasx_xvsle_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_h:.*xvsle\\.h.*lasx_xvsle_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_w:.*xvsle\\.w.*lasx_xvsle_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_d:.*xvsle\\.d.*lasx_xvsle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_b:.*xvslei\\.b.*lasx_xvslei_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_h:.*xvslei\\.h.*lasx_xvslei_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_w:.*xvslei\\.w.*lasx_xvslei_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_d:.*xvslei\\.d.*lasx_xvslei_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_bu:.*xvsle\\.bu.*lasx_xvsle_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_hu:.*xvsle\\.hu.*lasx_xvsle_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_wu:.*xvsle\\.wu.*lasx_xvsle_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_du:.*xvsle\\.du.*lasx_xvsle_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_bu:.*xvslei\\.bu.*lasx_xvslei_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_hu:.*xvslei\\.hu.*lasx_xvslei_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_wu:.*xvslei\\.wu.*lasx_xvslei_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_du:.*xvslei\\.du.*lasx_xvslei_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_b:.*xvsat\\.b.*lasx_xvsat_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_h:.*xvsat\\.h.*lasx_xvsat_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_w:.*xvsat\\.w.*lasx_xvsat_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_d:.*xvsat\\.d.*lasx_xvsat_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_bu:.*xvsat\\.bu.*lasx_xvsat_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_hu:.*xvsat\\.hu.*lasx_xvsat_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_wu:.*xvsat\\.wu.*lasx_xvsat_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_du:.*xvsat\\.du.*lasx_xvsat_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_b:.*xvadda\\.b.*lasx_xvadda_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_h:.*xvadda\\.h.*lasx_xvadda_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_w:.*xvadda\\.w.*lasx_xvadda_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_d:.*xvadda\\.d.*lasx_xvadda_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_b:.*xvsadd\\.b.*lasx_xvsadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_h:.*xvsadd\\.h.*lasx_xvsadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_w:.*xvsadd\\.w.*lasx_xvsadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_d:.*xvsadd\\.d.*lasx_xvsadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_bu:.*xvsadd\\.bu.*lasx_xvsadd_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_hu:.*xvsadd\\.hu.*lasx_xvsadd_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_wu:.*xvsadd\\.wu.*lasx_xvsadd_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_du:.*xvsadd\\.du.*lasx_xvsadd_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_b:.*xvavg\\.b.*lasx_xvavg_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_h:.*xvavg\\.h.*lasx_xvavg_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_w:.*xvavg\\.w.*lasx_xvavg_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_d:.*xvavg\\.d.*lasx_xvavg_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_bu:.*xvavg\\.bu.*lasx_xvavg_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_hu:.*xvavg\\.hu.*lasx_xvavg_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_wu:.*xvavg\\.wu.*lasx_xvavg_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_du:.*xvavg\\.du.*lasx_xvavg_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_b:.*xvavgr\\.b.*lasx_xvavgr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_h:.*xvavgr\\.h.*lasx_xvavgr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_w:.*xvavgr\\.w.*lasx_xvavgr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_d:.*xvavgr\\.d.*lasx_xvavgr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_bu:.*xvavgr\\.bu.*lasx_xvavgr_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_hu:.*xvavgr\\.hu.*lasx_xvavgr_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_wu:.*xvavgr\\.wu.*lasx_xvavgr_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_du:.*xvavgr\\.du.*lasx_xvavgr_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_b:.*xvssub\\.b.*lasx_xvssub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_h:.*xvssub\\.h.*lasx_xvssub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_w:.*xvssub\\.w.*lasx_xvssub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_d:.*xvssub\\.d.*lasx_xvssub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_bu:.*xvssub\\.bu.*lasx_xvssub_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_hu:.*xvssub\\.hu.*lasx_xvssub_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_wu:.*xvssub\\.wu.*lasx_xvssub_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_du:.*xvssub\\.du.*lasx_xvssub_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_b:.*xvabsd\\.b.*lasx_xvabsd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_h:.*xvabsd\\.h.*lasx_xvabsd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_w:.*xvabsd\\.w.*lasx_xvabsd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_d:.*xvabsd\\.d.*lasx_xvabsd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_bu:.*xvabsd\\.bu.*lasx_xvabsd_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_hu:.*xvabsd\\.hu.*lasx_xvabsd_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_wu:.*xvabsd\\.wu.*lasx_xvabsd_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_du:.*xvabsd\\.du.*lasx_xvabsd_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_b:.*xvmul\\.b.*lasx_xvmul_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_h:.*xvmul\\.h.*lasx_xvmul_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_w:.*xvmul\\.w.*lasx_xvmul_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_d:.*xvmul\\.d.*lasx_xvmul_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_b:.*xvmadd\\.b.*lasx_xvmadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_h:.*xvmadd\\.h.*lasx_xvmadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_w:.*xvmadd\\.w.*lasx_xvmadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_d:.*xvmadd\\.d.*lasx_xvmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_b:.*xvmsub\\.b.*lasx_xvmsub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_h:.*xvmsub\\.h.*lasx_xvmsub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_w:.*xvmsub\\.w.*lasx_xvmsub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_d:.*xvmsub\\.d.*lasx_xvmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_b:.*xvdiv\\.b.*lasx_xvdiv_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_h:.*xvdiv\\.h.*lasx_xvdiv_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_w:.*xvdiv\\.w.*lasx_xvdiv_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_d:.*xvdiv\\.d.*lasx_xvdiv_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_bu:.*xvdiv\\.bu.*lasx_xvdiv_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_hu:.*xvdiv\\.hu.*lasx_xvdiv_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_wu:.*xvdiv\\.wu.*lasx_xvdiv_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_du:.*xvdiv\\.du.*lasx_xvdiv_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_h_b:.*xvhaddw\\.h\\.b.*lasx_xvhaddw_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_w_h:.*xvhaddw\\.w\\.h.*lasx_xvhaddw_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_d_w:.*xvhaddw\\.d\\.w.*lasx_xvhaddw_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_hu_bu:.*xvhaddw\\.hu\\.bu.*lasx_xvhaddw_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_wu_hu:.*xvhaddw\\.wu\\.hu.*lasx_xvhaddw_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_du_wu:.*xvhaddw\\.du\\.wu.*lasx_xvhaddw_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_h_b:.*xvhsubw\\.h\\.b.*lasx_xvhsubw_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_w_h:.*xvhsubw\\.w\\.h.*lasx_xvhsubw_w_h" 1 } } */
"lasx_xvhsubw_d_w:.*xvhsubw\\.d\\.w.*lasx_xvhsubw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_hu_bu:.*xvhsubw\\.hu\\.bu.*lasx_xvhsubw_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_wu_hu:.*xvhsubw\\.wu\\.hu.*lasx_xvhsubw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_du_wu:.*xvhsubw\\.du\\.wu.*lasx_xvhsubw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_b:.*xvmod\\.b.*lasx_xvmod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_h:.*xvmod\\.h.*lasx_xvmod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_w:.*xvmod\\.w.*lasx_xvmod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_d:.*xvmod\\.d.*lasx_xvmod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_bu:.*xvmod\\.bu.*lasx_xvmod_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_hu:.*xvmod\\.hu.*lasx_xvmod_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_wu:.*xvmod\\.wu.*lasx_xvmod_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmod_du:.*xvmod\\.du.*lasx_xvmod_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_b:.*xvrepl128vei\\.b.*lasx_xvrepl128vei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_h:.*xvrepl128vei\\.h.*lasx_xvrepl128vei_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_w:.*xvrepl128vei\\.w.*lasx_xvrepl128vei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_d:.*xvrepl128vei\\.d.*lasx_xvrepl128vei_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_b:.*xvpickev\\.b.*lasx_xvpickev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_h:.*xvpickev\\.h.*lasx_xvpickev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_w:.*xvpickev\\.w.*lasx_xvpickev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickev_d:.*xvilvl\\.d.*lasx_xvpickev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_b:.*xvpickod\\.b.*lasx_xvpickod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_h:.*xvpickod\\.h.*lasx_xvpickod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_w:.*xvpickod\\.w.*lasx_xvpickod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickod_d:.*xvilvh\\.d.*lasx_xvpickod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_b:.*xvilvh\\.b.*lasx_xvilvh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_h:.*xvilvh\\.h.*lasx_xvilvh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_w:.*xvilvh\\.w.*lasx_xvilvh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvh_d:.*xvilvh\\.d.*lasx_xvilvh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_b:.*xvilvl\\.b.*lasx_xvilvl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_h:.*xvilvl\\.h.*lasx_xvilvl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_w:.*xvilvl\\.w.*lasx_xvilvl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvilvl_d:.*xvilvl\\.d.*lasx_xvilvl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_b:.*xvpackev\\.b.*lasx_xvpackev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_h:.*xvpackev\\.h.*lasx_xvpackev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_w:.*xvpackev\\.w.*lasx_xvpackev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackev_d:.*xvilvl\\.d.*lasx_xvpackev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_b:.*xvpackod\\.b.*lasx_xvpackod_b" 1 } 
} */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_h:.*xvpackod\\.h.*lasx_xvpackod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_w:.*xvpackod\\.w.*lasx_xvpackod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpackod_d:.*xvilvh\\.d.*lasx_xvpackod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_b:.*xvshuf\\.b.*lasx_xvshuf_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_h:.*xvshuf\\.h.*lasx_xvshuf_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_w:.*xvshuf\\.w.*lasx_xvshuf_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf_d:.*xvshuf\\.d.*lasx_xvshuf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvand_v:.*xvand\\.v.*lasx_xvand_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvandi_b:.*xvandi\\.b.*lasx_xvandi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvor_v:.*xvor\\.v.*lasx_xvor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvori_b:.*xvbitseti\\.b.*lasx_xvori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvnor_v:.*xvnor\\.v.*lasx_xvnor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvnori_b:.*xvnori\\.b.*lasx_xvnori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvxor_v:.*xvxor\\.v.*lasx_xvxor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvxori_b:.*xvbitrevi\\.b.*lasx_xvxori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitsel_v:.*xvbitsel\\.v.*lasx_xvbitsel_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvbitseli_b:.*xvbitseli\\.b.*lasx_xvbitseli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_b:.*xvshuf4i\\.b.*lasx_xvshuf4i_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_h:.*xvshuf4i\\.h.*lasx_xvshuf4i_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_w:.*xvshuf4i\\.w.*lasx_xvshuf4i_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_b:.*xvreplgr2vr\\.b.*lasx_xvreplgr2vr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_h:.*xvreplgr2vr\\.h.*lasx_xvreplgr2vr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_w:.*xvreplgr2vr\\.w.*lasx_xvreplgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_d:.*xvreplgr2vr\\.d.*lasx_xvreplgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_b:.*xvpcnt\\.b.*lasx_xvpcnt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_h:.*xvpcnt\\.h.*lasx_xvpcnt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_w:.*xvpcnt\\.w.*lasx_xvpcnt_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpcnt_d:.*xvpcnt\\.d.*lasx_xvpcnt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_b:.*xvclo\\.b.*lasx_xvclo_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_h:.*xvclo\\.h.*lasx_xvclo_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_w:.*xvclo\\.w.*lasx_xvclo_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclo_d:.*xvclo\\.d.*lasx_xvclo_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_b:.*xvclz\\.b.*lasx_xvclz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_h:.*xvclz\\.h.*lasx_xvclz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_w:.*xvclz\\.w.*lasx_xvclz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvclz_d:.*xvclz\\.d.*lasx_xvclz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfadd_s:.*xvfadd\\.s.*lasx_xvfadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvfadd_d:.*xvfadd\\.d.*lasx_xvfadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsub_s:.*xvfsub\\.s.*lasx_xvfsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsub_d:.*xvfsub\\.d.*lasx_xvfsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmul_s:.*xvfmul\\.s.*lasx_xvfmul_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmul_d:.*xvfmul\\.d.*lasx_xvfmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfdiv_s:.*xvfdiv\\.s.*lasx_xvfdiv_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfdiv_d:.*xvfdiv\\.d.*lasx_xvfdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvt_h_s:.*xvfcvt\\.h\\.s.*lasx_xvfcvt_h_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvt_s_d:.*xvfcvt\\.s\\.d.*lasx_xvfcvt_s_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmin_s:.*xvfmin\\.s.*lasx_xvfmin_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmin_d:.*xvfmin\\.d.*lasx_xvfmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmina_s:.*xvfmina\\.s.*lasx_xvfmina_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmina_d:.*xvfmina\\.d.*lasx_xvfmina_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmax_s:.*xvfmax\\.s.*lasx_xvfmax_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmax_d:.*xvfmax\\.d.*lasx_xvfmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmaxa_s:.*xvfmaxa\\.s.*lasx_xvfmaxa_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfmaxa_d:.*xvfmaxa\\.d.*lasx_xvfmaxa_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfclass_s:.*xvfclass\\.s.*lasx_xvfclass_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfclass_d:.*xvfclass\\.d.*lasx_xvfclass_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsqrt_s:.*xvfsqrt\\.s.*lasx_xvfsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfsqrt_d:.*xvfsqrt\\.d.*lasx_xvfsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrecip_s:.*xvfrecip\\.s.*lasx_xvfrecip_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrecip_d:.*xvfrecip\\.d.*lasx_xvfrecip_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrint_s:.*xvfrint\\.s.*lasx_xvfrint_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrint_d:.*xvfrint\\.d.*lasx_xvfrint_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_s:.*xvfrsqrt\\.s.*lasx_xvfrsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_d:.*xvfrsqrt\\.d.*lasx_xvfrsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvflogb_s:.*xvflogb\\.s.*lasx_xvflogb_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvflogb_d:.*xvflogb\\.d.*lasx_xvflogb_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvth_s_h:.*xvfcvth\\.s\\.h.*lasx_xvfcvth_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvth_d_s:.*xvfcvth\\.d\\.s.*lasx_xvfcvth_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvtl_s_h:.*xvfcvtl\\.s\\.h.*lasx_xvfcvtl_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcvtl_d_s:.*xvfcvtl\\.d\\.s.*lasx_xvfcvtl_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_w_s:.*xvftint\\.w\\.s.*lasx_xvftint_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_l_d:.*xvftint\\.l\\.d.*lasx_xvftint_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_wu_s:.*xvftint\\.wu\\.s.*lasx_xvftint_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvftint_lu_d:.*xvftint\\.lu\\.d.*lasx_xvftint_lu_d" 1 } } */ ++/* { 
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_s:.*xvftintrz\\.w\\.s.*lasx_xvftintrz_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_l_d:.*xvftintrz\\.l\\.d.*lasx_xvftintrz_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_wu_s:.*xvftintrz\\.wu\\.s.*lasx_xvftintrz_wu_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_lu_d:.*xvftintrz\\.lu\\.d.*lasx_xvftintrz_lu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_s_w:.*xvffint\\.s\\.w.*lasx_xvffint_s_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_d_l:.*xvffint\\.d\\.l.*lasx_xvffint_d_l" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_s_wu:.*xvffint\\.s\\.wu.*lasx_xvffint_s_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_d_lu:.*xvffint\\.d\\.lu.*lasx_xvffint_d_lu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_b:.*xvreplve\\.b.*lasx_xvreplve_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_h:.*xvreplve\\.h.*lasx_xvreplve_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_w:.*xvreplve\\.w.*lasx_xvreplve_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_d:.*xvreplve\\.d.*lasx_xvreplve_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpermi_w:.*xvpermi\\.w.*lasx_xvpermi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvandn_v:.*xvandn\\.v.*lasx_xvandn_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_b:.*xvneg\\.b.*lasx_xvneg_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_h:.*xvneg\\.h.*lasx_xvneg_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_w:.*xvneg\\.w.*lasx_xvneg_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_d:.*xvneg\\.d.*lasx_xvneg_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_b:.*xvmuh\\.b.*lasx_xvmuh_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_h:.*xvmuh\\.h.*lasx_xvmuh_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_w:.*xvmuh\\.w.*lasx_xvmuh_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_d:.*xvmuh\\.d.*lasx_xvmuh_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_bu:.*xvmuh\\.bu.*lasx_xvmuh_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_hu:.*xvmuh\\.hu.*lasx_xvmuh_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_wu:.*xvmuh\\.wu.*lasx_xvmuh_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_du:.*xvmuh\\.du.*lasx_xvmuh_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_h_b:.*xvsllwil\\.h\\.b.*lasx_xvsllwil_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_w_h:.*xvsllwil\\.w\\.h.*lasx_xvsllwil_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_d_w:.*xvsllwil\\.d\\.w.*lasx_xvsllwil_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_hu_bu:.*xvsllwil\\.hu\\.bu.*lasx_xvsllwil_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_wu_hu:.*xvsllwil\\.wu\\.hu.*lasx_xvsllwil_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_du_wu:.*xvsllwil\\.du\\.wu.*lasx_xvsllwil_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsran_b_h:.*xvsran\\.b\\.h.*lasx_xvsran_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsran_h_w:.*xvsran\\.h\\.w.*lasx_xvsran_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsran_w_d:.*xvsran\\.w\\.d.*lasx_xvsran_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_b_h:.*xvssran\\.b\\.h.*lasx_xvssran_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_h_w:.*xvssran\\.h\\.w.*lasx_xvssran_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_w_d:.*xvssran\\.w\\.d.*lasx_xvssran_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_bu_h:.*xvssran\\.bu\\.h.*lasx_xvssran_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_hu_w:.*xvssran\\.hu\\.w.*lasx_xvssran_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_wu_d:.*xvssran\\.wu\\.d.*lasx_xvssran_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarn_b_h:.*xvsrarn\\.b\\.h.*lasx_xvsrarn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarn_h_w:.*xvsrarn\\.h\\.w.*lasx_xvsrarn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarn_w_d:.*xvsrarn\\.w\\.d.*lasx_xvsrarn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_b_h:.*xvssrarn\\.b\\.h.*lasx_xvssrarn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_h_w:.*xvssrarn\\.h\\.w.*lasx_xvssrarn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_w_d:.*xvssrarn\\.w\\.d.*lasx_xvssrarn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_bu_h:.*xvssrarn\\.bu\\.h.*lasx_xvssrarn_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_hu_w:.*xvssrarn\\.hu\\.w.*lasx_xvssrarn_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_wu_d:.*xvssrarn\\.wu\\.d.*lasx_xvssrarn_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrln_b_h:.*xvsrln\\.b\\.h.*lasx_xvsrln_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrln_h_w:.*xvsrln\\.h\\.w.*lasx_xvsrln_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrln_w_d:.*xvsrln\\.w\\.d.*lasx_xvsrln_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_bu_h:.*xvssrln\\.bu\\.h.*lasx_xvssrln_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_hu_w:.*xvssrln\\.hu\\.w.*lasx_xvssrln_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_wu_d:.*xvssrln\\.wu\\.d.*lasx_xvssrln_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_b_h:.*xvsrlrn\\.b\\.h.*lasx_xvsrlrn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_h_w:.*xvsrlrn\\.h\\.w.*lasx_xvsrlrn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_w_d:.*xvsrlrn\\.w\\.d.*lasx_xvsrlrn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_bu_h:.*xvssrlrn\\.bu\\.h.*lasx_xvssrlrn_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_hu_w:.*xvssrlrn\\.hu\\.w.*lasx_xvssrlrn_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_wu_d:.*xvssrlrn\\.wu\\.d.*lasx_xvssrlrn_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstpi_b:.*xvfrstpi\\.b.*lasx_xvfrstpi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstpi_h:.*xvfrstpi\\.h.*lasx_xvfrstpi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstp_b:.*xvfrstp\\.b.*lasx_xvfrstp_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstp_h:.*xvfrstp\\.h.*lasx_xvfrstp_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_d:.*xvshuf4i\\.d.*lasx_xvshuf4i_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbsrl_v:.*xvbsrl\\.v.*lasx_xvbsrl_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbsll_v:.*xvbsll\\.v.*lasx_xvbsll_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_b:.*xvextrins\\.b.*lasx_xvextrins_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_h:.*xvextrins\\.h.*lasx_xvextrins_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_w:.*xvextrins\\.w.*lasx_xvextrins_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_d:.*xvextrins\\.d.*lasx_xvextrins_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_b:.*xvmskltz\\.b.*lasx_xvmskltz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_h:.*xvmskltz\\.h.*lasx_xvmskltz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_w:.*xvmskltz\\.w.*lasx_xvmskltz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_d:.*xvmskltz\\.d.*lasx_xvmskltz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_b:.*xvsigncov\\.b.*lasx_xvsigncov_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_h:.*xvsigncov\\.h.*lasx_xvsigncov_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_w:.*xvsigncov\\.w.*lasx_xvsigncov_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_d:.*xvsigncov\\.d.*lasx_xvsigncov_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmadd_s:.*xvfmadd\\.s.*lasx_xvfmadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmadd_d:.*xvfmadd\\.d.*lasx_xvfmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmsub_s:.*xvfmsub\\.s.*lasx_xvfmsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmsub_d:.*xvfmsub\\.d.*lasx_xvfmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmadd_s:.*xvfnmadd\\.s.*lasx_xvfnmadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmadd_d:.*xvfnmadd\\.d.*lasx_xvfnmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmsub_s:.*xvfnmsub\\.s.*lasx_xvfnmsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmsub_d:.*xvfnmsub\\.d.*lasx_xvfnmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_s:.*xvftintrne\\.w\\.s.*lasx_xvftintrne_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrne_l_d:.*xvftintrne\\.l\\.d.*lasx_xvftintrne_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_s:.*xvftintrp\\.w\\.s.*lasx_xvftintrp_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrp_l_d:.*xvftintrp\\.l\\.d.*lasx_xvftintrp_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_s:.*xvftintrm\\.w\\.s.*lasx_xvftintrm_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrm_l_d:.*xvftintrm\\.l\\.d.*lasx_xvftintrm_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftint_w_d:.*xvftint\\.w\\.d.*lasx_xvftint_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_s_l:.*xvffint\\.s\\.l.*lasx_xvffint_s_l" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_d:.*xvftintrz\\.w\\.d.*lasx_xvftintrz_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_d:.*xvftintrp\\.w\\.d.*lasx_xvftintrp_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_d:.*xvftintrm\\.w\\.d.*lasx_xvftintrm_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_d:.*xvftintrne\\.w\\.d.*lasx_xvftintrne_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftinth_l_s:.*xvftinth\\.l\\.s.*lasx_xvftinth_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintl_l_s:.*xvftintl\\.l\\.s.*lasx_xvftintl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffinth_d_w:.*xvffinth\\.d\\.w.*lasx_xvffinth_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffintl_d_w:.*xvffintl\\.d\\.w.*lasx_xvffintl_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrzh_l_s:.*xvftintrzh\\.l\\.s.*lasx_xvftintrzh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrzl_l_s:.*xvftintrzl\\.l\\.s.*lasx_xvftintrzl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrph_l_s:.*xvftintrph\\.l\\.s.*lasx_xvftintrph_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrpl_l_s:.*xvftintrpl\\.l\\.s.*lasx_xvftintrpl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrmh_l_s:.*xvftintrmh\\.l\\.s.*lasx_xvftintrmh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrml_l_s:.*xvftintrml\\.l\\.s.*lasx_xvftintrml_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrneh_l_s:.*xvftintrneh\\.l\\.s.*lasx_xvftintrneh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrnel_l_s:.*xvftintrnel\\.l\\.s.*lasx_xvftintrnel_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrne_s:.*xvfrintrne\\.s.*lasx_xvfrintrne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrne_d:.*xvfrintrne\\.d.*lasx_xvfrintrne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrz_s:.*xvfrintrz\\.s.*lasx_xvfrintrz_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrz_d:.*xvfrintrz\\.d.*lasx_xvfrintrz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrp_s:.*xvfrintrp\\.s.*lasx_xvfrintrp_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrp_d:.*xvfrintrp\\.d.*lasx_xvfrintrp_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrm_s:.*xvfrintrm\\.s.*lasx_xvfrintrm_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrm_d:.*xvfrintrm\\.d.*lasx_xvfrintrm_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvld:.*xvld.*lasx_xvld" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvst:.*xvst.*lasx_xvst" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_b:.*xvstelm\\.b.*lasx_xvstelm_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_h:.*xvstelm\\.h.*lasx_xvstelm_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_w:.*xvstelm\\.w.*lasx_xvstelm_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_d:.*xvstelm\\.d.*lasx_xvstelm_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsve0_w:.*xvinsve0\\.w.*lasx_xvinsve0_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsve0_d:.*xvinsve0\\.d.*lasx_xvinsve0_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve_w:.*xvpickve\\.w.*lasx_xvpickve_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve_d:.*xvpickve\\.d.*lasx_xvpickve_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_b_h:.*xvssrlrn\\.b\\.h.*lasx_xvssrlrn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_h_w:.*xvssrlrn\\.h\\.w.*lasx_xvssrlrn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_w_d:.*xvssrlrn\\.w\\.d.*lasx_xvssrlrn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_b_h:.*xvssrln\\.b\\.h.*lasx_xvssrln_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_h_w:.*xvssrln\\.h\\.w.*lasx_xvssrln_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_w_d:.*xvssrln\\.w\\.d.*lasx_xvssrln_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvorn_v:.*xvorn\\.v.*lasx_xvorn_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldi:.*xvldi.*lasx_xvldi" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldx:.*xvldx.*lasx_xvldx" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstx:.*xvstx.*lasx_xvstx" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextl_qu_du:.*xvextl\\.qu\\.du.*lasx_xvextl_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_w:.*xvinsgr2vr\\.w.*lasx_xvinsgr2vr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_d:.*xvinsgr2vr\\.d.*lasx_xvinsgr2vr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_b:.*xvreplve0\\.b.*lasx_xvreplve0_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_h:.*xvreplve0\\.h.*lasx_xvreplve0_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_w:.*xvreplve0\\.w.*lasx_xvreplve0_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_d:.*xvreplve0\\.d.*lasx_xvreplve0_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_q:.*xvreplve0\\.q.*lasx_xvreplve0_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_h_b:.*vext2xv\\.h\\.b.*lasx_vext2xv_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_w_h:.*vext2xv\\.w\\.h.*lasx_vext2xv_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_w:.*vext2xv\\.d\\.w.*lasx_vext2xv_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_w_b:.*vext2xv\\.w\\.b.*lasx_vext2xv_w_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_h:.*vext2xv\\.d\\.h.*lasx_vext2xv_d_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_b:.*vext2xv\\.d\\.b.*lasx_vext2xv_d_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_hu_bu:.*vext2xv\\.hu\\.bu.*lasx_vext2xv_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_hu:.*vext2xv\\.wu\\.hu.*lasx_vext2xv_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_wu:.*vext2xv\\.du\\.wu.*lasx_vext2xv_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_bu:.*vext2xv\\.wu\\.bu.*lasx_vext2xv_wu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_hu:.*vext2xv\\.du\\.hu.*lasx_vext2xv_du_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_bu:.*vext2xv\\.du\\.bu.*lasx_vext2xv_du_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpermi_q:.*xvpermi\\.q.*lasx_xvpermi_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpermi_d:.*xvpermi\\.d.*lasx_xvpermi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvperm_w:.*xvperm\\.w.*lasx_xvperm_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_b:.*xvldrepl\\.b.*lasx_xvldrepl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_h:.*xvldrepl\\.h.*lasx_xvldrepl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_w:.*xvldrepl\\.w.*lasx_xvldrepl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_d:.*xvldrepl\\.d.*lasx_xvldrepl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_w:.*xvpickve2gr\\.w.*lasx_xvpickve2gr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_wu:.*xvpickve2gr\\.wu.*lasx_xvpickve2gr_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_d:.*xvpickve2gr\\.d.*lasx_xvpickve2gr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_du:.*xvpickve2gr\\.du.*lasx_xvpickve2gr_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_d:.*xvaddwev\\.q\\.d.*lasx_xvaddwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_w:.*xvaddwev\\.d\\.w.*lasx_xvaddwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_h:.*xvaddwev\\.w\\.h.*lasx_xvaddwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_b:.*xvaddwev\\.h\\.b.*lasx_xvaddwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du:.*xvaddwev\\.q\\.du.*lasx_xvaddwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu:.*xvaddwev\\.d\\.wu.*lasx_xvaddwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu:.*xvaddwev\\.w\\.hu.*lasx_xvaddwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu:.*xvaddwev\\.h\\.bu.*lasx_xvaddwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_d:.*xvsubwev\\.q\\.d.*lasx_xvsubwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_w:.*xvsubwev\\.d\\.w.*lasx_xvsubwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_h:.*xvsubwev\\.w\\.h.*lasx_xvsubwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_b:.*xvsubwev\\.h\\.b.*lasx_xvsubwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_du:.*xvsubwev\\.q\\.du.*lasx_xvsubwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_wu:.*xvsubwev\\.d\\.wu.*lasx_xvsubwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_hu:.*xvsubwev\\.w\\.hu.*lasx_xvsubwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_bu:.*xvsubwev\\.h\\.bu.*lasx_xvsubwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_d:.*xvmulwev\\.q\\.d.*lasx_xvmulwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_w:.*xvmulwev\\.d\\.w.*lasx_xvmulwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_h:.*xvmulwev\\.w\\.h.*lasx_xvmulwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_b:.*xvmulwev\\.h\\.b.*lasx_xvmulwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du:.*xvmulwev\\.q\\.du.*lasx_xvmulwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu:.*xvmulwev\\.d\\.wu.*lasx_xvmulwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu:.*xvmulwev\\.w\\.hu.*lasx_xvmulwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu:.*xvmulwev\\.h\\.bu.*lasx_xvmulwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_d:.*xvaddwod\\.q\\.d.*lasx_xvaddwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_w:.*xvaddwod\\.d\\.w.*lasx_xvaddwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_h:.*xvaddwod\\.w\\.h.*lasx_xvaddwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_b:.*xvaddwod\\.h\\.b.*lasx_xvaddwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du:.*xvaddwod\\.q\\.du.*lasx_xvaddwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu:.*xvaddwod\\.d\\.wu.*lasx_xvaddwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu:.*xvaddwod\\.w\\.hu.*lasx_xvaddwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu:.*xvaddwod\\.h\\.bu.*lasx_xvaddwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_d:.*xvsubwod\\.q\\.d.*lasx_xvsubwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_w:.*xvsubwod\\.d\\.w.*lasx_xvsubwod_d_w" 1 } } */
"lasx_xvsubwod_w_h:.*xvsubwod\\.w\\.h.*lasx_xvsubwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_b:.*xvsubwod\\.h\\.b.*lasx_xvsubwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_du:.*xvsubwod\\.q\\.du.*lasx_xvsubwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_wu:.*xvsubwod\\.d\\.wu.*lasx_xvsubwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_w_hu:.*xvsubwod\\.w\\.hu.*lasx_xvsubwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_bu:.*xvsubwod\\.h\\.bu.*lasx_xvsubwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_d:.*xvmulwod\\.q\\.d.*lasx_xvmulwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_w:.*xvmulwod\\.d\\.w.*lasx_xvmulwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_h:.*xvmulwod\\.w\\.h.*lasx_xvmulwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_b:.*xvmulwod\\.h\\.b.*lasx_xvmulwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du:.*xvmulwod\\.q\\.du.*lasx_xvmulwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu:.*xvmulwod\\.d\\.wu.*lasx_xvmulwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu:.*xvmulwod\\.w\\.hu.*lasx_xvmulwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu:.*xvmulwod\\.h\\.bu.*lasx_xvmulwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu_w:.*xvaddwev\\.d\\.wu\\.w.*lasx_xvaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu_h:.*xvaddwev\\.w\\.hu\\.h.*lasx_xvaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu_b:.*xvaddwev\\.h\\.bu\\.b.*lasx_xvaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu_w:.*xvmulwev\\.d\\.wu\\.w.*lasx_xvmulwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu_h:.*xvmulwev\\.w\\.hu\\.h.*lasx_xvmulwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu_b:.*xvmulwev\\.h\\.bu\\.b.*lasx_xvmulwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu_w:.*xvaddwod\\.d\\.wu\\.w.*lasx_xvaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu_h:.*xvaddwod\\.w\\.hu\\.h.*lasx_xvaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu_b:.*xvaddwod\\.h\\.bu\\.b.*lasx_xvaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu_w:.*xvmulwod\\.d\\.wu\\.w.*lasx_xvmulwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu_h:.*xvmulwod\\.w\\.hu\\.h.*lasx_xvmulwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu_b:.*xvmulwod\\.h\\.bu\\.b.*lasx_xvmulwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_q_d:.*xvhaddw\\.q\\.d.*lasx_xvhaddw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhaddw_qu_du:.*xvhaddw\\.qu\\.du.*lasx_xvhaddw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_q_d:.*xvhsubw\\.q\\.d.*lasx_xvhsubw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvhsubw_qu_du:.*xvhsubw\\.qu\\.du.*lasx_xvhsubw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_d:.*xvmaddwev\\.q\\.d.*lasx_xvmaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_w:.*xvmaddwev\\.d\\.w.*lasx_xvmaddwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvmaddwev_w_h:.*xvmaddwev\\.w\\.h.*lasx_xvmaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_b:.*xvmaddwev\\.h\\.b.*lasx_xvmaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du:.*xvmaddwev\\.q\\.du.*lasx_xvmaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu:.*xvmaddwev\\.d\\.wu.*lasx_xvmaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu:.*xvmaddwev\\.w\\.hu.*lasx_xvmaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu:.*xvmaddwev\\.h\\.bu.*lasx_xvmaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_d:.*xvmaddwod\\.q\\.d.*lasx_xvmaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_w:.*xvmaddwod\\.d\\.w.*lasx_xvmaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_h:.*xvmaddwod\\.w\\.h.*lasx_xvmaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_b:.*xvmaddwod\\.h\\.b.*lasx_xvmaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du:.*xvmaddwod\\.q\\.du.*lasx_xvmaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu:.*xvmaddwod\\.d\\.wu.*lasx_xvmaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu:.*xvmaddwod\\.w\\.hu.*lasx_xvmaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu:.*xvmaddwod\\.h\\.bu.*lasx_xvmaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du_d:.*xvmaddwev\\.q\\.du\\.d.*lasx_xvmaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu_w:.*xvmaddwev\\.d\\.wu\\.w.*lasx_xvmaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu_h:.*xvmaddwev\\.w\\.hu\\.h.*lasx_xvmaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu_b:.*xvmaddwev\\.h\\.bu\\.b.*lasx_xvmaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du_d:.*xvmaddwod\\.q\\.du\\.d.*lasx_xvmaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu_w:.*xvmaddwod\\.d\\.wu\\.w.*lasx_xvmaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu_h:.*xvmaddwod\\.w\\.hu\\.h.*lasx_xvmaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu_b:.*xvmaddwod\\.h\\.bu\\.b.*lasx_xvmaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_b:.*xvrotr\\.b.*lasx_xvrotr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_h:.*xvrotr\\.h.*lasx_xvrotr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_w:.*xvrotr\\.w.*lasx_xvrotr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotr_d:.*xvrotr\\.d.*lasx_xvrotr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvadd_q:.*xvadd\\.q.*lasx_xvadd_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsub_q:.*xvsub\\.q.*lasx_xvsub_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du_d:.*xvaddwev\\.q\\.du\\.d.*lasx_xvaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du_d:.*xvaddwod\\.q\\.du\\.d.*lasx_xvaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du_d:.*xvmulwev\\.q\\.du\\.d.*lasx_xvmulwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du_d:.*xvmulwod\\.q\\.du\\.d.*lasx_xvmulwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvmskgez_b:.*xvmskgez\\.b.*lasx_xvmskgez_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvmsknz_b:.*xvmsknz\\.b.*lasx_xvmsknz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_h_b:.*xvexth\\.h\\.b.*lasx_xvexth_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_w_h:.*xvexth\\.w\\.h.*lasx_xvexth_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_d_w:.*xvexth\\.d\\.w.*lasx_xvexth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_q_d:.*xvexth\\.q\\.d.*lasx_xvexth_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_hu_bu:.*xvexth\\.hu\\.bu.*lasx_xvexth_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_wu_hu:.*xvexth\\.wu\\.hu.*lasx_xvexth_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_du_wu:.*xvexth\\.du\\.wu.*lasx_xvexth_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvexth_qu_du:.*xvexth\\.qu\\.du.*lasx_xvexth_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_b:.*xvrotri\\.b.*lasx_xvrotri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_h:.*xvrotri\\.h.*lasx_xvrotri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_w:.*xvrotri\\.w.*lasx_xvrotri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrotri_d:.*xvrotri\\.d.*lasx_xvrotri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvextl_q_d:.*xvextl\\.q\\.d.*lasx_xvextl_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_b_h:.*xvsrlni\\.b\\.h.*lasx_xvsrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_h_w:.*xvsrlni\\.h\\.w.*lasx_xvsrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_w_d:.*xvsrlni\\.w\\.d.*lasx_xvsrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlni_d_q:.*xvsrlni\\.d\\.q.*lasx_xvsrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_b_h:.*xvsrlrni\\.b\\.h.*lasx_xvsrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_h_w:.*xvsrlrni\\.h\\.w.*lasx_xvsrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_w_d:.*xvsrlrni\\.w\\.d.*lasx_xvsrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_d_q:.*xvsrlrni\\.d\\.q.*lasx_xvsrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_b_h:.*xvssrlni\\.b\\.h.*lasx_xvssrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_h_w:.*xvssrlni\\.h\\.w.*lasx_xvssrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_w_d:.*xvssrlni\\.w\\.d.*lasx_xvssrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_d_q:.*xvssrlni\\.d\\.q.*lasx_xvssrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_bu_h:.*xvssrlni\\.bu\\.h.*lasx_xvssrlni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_hu_w:.*xvssrlni\\.hu\\.w.*lasx_xvssrlni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_wu_d:.*xvssrlni\\.wu\\.d.*lasx_xvssrlni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlni_du_q:.*xvssrlni\\.du\\.q.*lasx_xvssrlni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_b_h:.*xvssrlrni\\.b\\.h.*lasx_xvssrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_h_w:.*xvssrlrni\\.h\\.w.*lasx_xvssrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_w_d:.*xvssrlrni\\.w\\.d.*lasx_xvssrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lasx_xvssrlrni_d_q:.*xvssrlrni\\.d\\.q.*lasx_xvssrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_bu_h:.*xvssrlrni\\.bu\\.h.*lasx_xvssrlrni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_hu_w:.*xvssrlrni\\.hu\\.w.*lasx_xvssrlrni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_wu_d:.*xvssrlrni\\.wu\\.d.*lasx_xvssrlrni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_du_q:.*xvssrlrni\\.du\\.q.*lasx_xvssrlrni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_b_h:.*xvsrani\\.b\\.h.*lasx_xvsrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_h_w:.*xvsrani\\.h\\.w.*lasx_xvsrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_w_d:.*xvsrani\\.w\\.d.*lasx_xvsrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrani_d_q:.*xvsrani\\.d\\.q.*lasx_xvsrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_b_h:.*xvsrarni\\.b\\.h.*lasx_xvsrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_h_w:.*xvsrarni\\.h\\.w.*lasx_xvsrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_w_d:.*xvsrarni\\.w\\.d.*lasx_xvsrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvsrarni_d_q:.*xvsrarni\\.d\\.q.*lasx_xvsrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_b_h:.*xvssrani\\.b\\.h.*lasx_xvssrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_h_w:.*xvssrani\\.h\\.w.*lasx_xvssrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_w_d:.*xvssrani\\.w\\.d.*lasx_xvssrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_d_q:.*xvssrani\\.d\\.q.*lasx_xvssrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_bu_h:.*xvssrani\\.bu\\.h.*lasx_xvssrani_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_hu_w:.*xvssrani\\.hu\\.w.*lasx_xvssrani_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_wu_d:.*xvssrani\\.wu\\.d.*lasx_xvssrani_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrani_du_q:.*xvssrani\\.du\\.q.*lasx_xvssrani_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_b_h:.*xvssrarni\\.b\\.h.*lasx_xvssrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_h_w:.*xvssrarni\\.h\\.w.*lasx_xvssrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_w_d:.*xvssrarni\\.w\\.d.*lasx_xvssrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_d_q:.*xvssrarni\\.d\\.q.*lasx_xvssrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_bu_h:.*xvssrarni\\.bu\\.h.*lasx_xvssrarni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_hu_w:.*xvssrarni\\.hu\\.w.*lasx_xvssrarni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_wu_d:.*xvssrarni\\.wu\\.d.*lasx_xvssrarni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvssrarni_du_q:.*xvssrarni\\.du\\.q.*lasx_xvssrarni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_b:.*xvsetanyeqz\\.b.*lasx_xbnz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_d:.*xvsetanyeqz\\.d.*lasx_xbnz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_h:.*xvsetanyeqz\\.h.*lasx_xbnz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_v:.*xvseteqz\\.v.*lasx_xbnz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbnz_w:.*xvsetanyeqz\\.w.*lasx_xbnz_w" 1 } 
} */ ++/* { dg-final { scan-assembler-times "lasx_xbz_b:.*xvsetallnez\\.b.*lasx_xbz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_d:.*xvsetallnez\\.d.*lasx_xbz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_h:.*xvsetallnez\\.h.*lasx_xbz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_v:.*xvsetnez\\.v.*lasx_xbz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xbz_w:.*xvsetallnez\\.w.*lasx_xbz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_d:.*xvfcmp\\.caf\\.d.*lasx_xvfcmp_caf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_s:.*xvfcmp\\.caf\\.s.*lasx_xvfcmp_caf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_d:.*xvfcmp\\.ceq\\.d.*lasx_xvfcmp_ceq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_s:.*xvfcmp\\.ceq\\.s.*lasx_xvfcmp_ceq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_d:.*xvfcmp\\.cle\\.d.*lasx_xvfcmp_cle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_s:.*xvfcmp\\.cle\\.s.*lasx_xvfcmp_cle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_d:.*xvfcmp\\.clt\\.d.*lasx_xvfcmp_clt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_s:.*xvfcmp\\.clt\\.s.*lasx_xvfcmp_clt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_d:.*xvfcmp\\.cne\\.d.*lasx_xvfcmp_cne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_s:.*xvfcmp\\.cne\\.s.*lasx_xvfcmp_cne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_d:.*xvfcmp\\.cor\\.d.*lasx_xvfcmp_cor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_s:.*xvfcmp\\.cor\\.s.*lasx_xvfcmp_cor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_d:.*xvfcmp\\.cueq\\.d.*lasx_xvfcmp_cueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_s:.*xvfcmp\\.cueq\\.s.*lasx_xvfcmp_cueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_d:.*xvfcmp\\.cule\\.d.*lasx_xvfcmp_cule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_s:.*xvfcmp\\.cule\\.s.*lasx_xvfcmp_cule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_d:.*xvfcmp\\.cult\\.d.*lasx_xvfcmp_cult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_s:.*xvfcmp\\.cult\\.s.*lasx_xvfcmp_cult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_d:.*xvfcmp\\.cun\\.d.*lasx_xvfcmp_cun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_d:.*xvfcmp\\.cune\\.d.*lasx_xvfcmp_cune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_s:.*xvfcmp\\.cune\\.s.*lasx_xvfcmp_cune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_s:.*xvfcmp\\.cun\\.s.*lasx_xvfcmp_cun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_d:.*xvfcmp\\.saf\\.d.*lasx_xvfcmp_saf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_s:.*xvfcmp\\.saf\\.s.*lasx_xvfcmp_saf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_d:.*xvfcmp\\.seq\\.d.*lasx_xvfcmp_seq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_s:.*xvfcmp\\.seq\\.s.*lasx_xvfcmp_seq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_d:.*xvfcmp\\.sle\\.d.*lasx_xvfcmp_sle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_s:.*xvfcmp\\.sle\\.s.*lasx_xvfcmp_sle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_d:.*xvfcmp\\.slt\\.d.*lasx_xvfcmp_slt_d" 1 } } */ 
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_s:.*xvfcmp\\.slt\\.s.*lasx_xvfcmp_slt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_d:.*xvfcmp\\.sne\\.d.*lasx_xvfcmp_sne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_s:.*xvfcmp\\.sne\\.s.*lasx_xvfcmp_sne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_d:.*xvfcmp\\.sor\\.d.*lasx_xvfcmp_sor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_s:.*xvfcmp\\.sor\\.s.*lasx_xvfcmp_sor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_d:.*xvfcmp\\.sueq\\.d.*lasx_xvfcmp_sueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_s:.*xvfcmp\\.sueq\\.s.*lasx_xvfcmp_sueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_d:.*xvfcmp\\.sule\\.d.*lasx_xvfcmp_sule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_s:.*xvfcmp\\.sule\\.s.*lasx_xvfcmp_sule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_d:.*xvfcmp\\.sult\\.d.*lasx_xvfcmp_sult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_s:.*xvfcmp\\.sult\\.s.*lasx_xvfcmp_sult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_d:.*xvfcmp\\.sun\\.d.*lasx_xvfcmp_sun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_d:.*xvfcmp\\.sune\\.d.*lasx_xvfcmp_sune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_s:.*xvfcmp\\.sune\\.s.*lasx_xvfcmp_sune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_s:.*xvfcmp\\.sun\\.s.*lasx_xvfcmp_sun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve_d_f:.*xvpickve\\.d.*lasx_xvpickve_d_f" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvpickve_w_f:.*xvpickve\\.w.*lasx_xvpickve_w_f" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_b:.*xvrepli\\.b.*lasx_xvrepli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_d:.*xvrepli\\.d.*lasx_xvrepli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_h:.*xvrepli\\.h.*lasx_xvrepli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lasx_xvrepli_w:.*xvrepli\\.w.*lasx_xvrepli_w" 1 } } */ ++ ++typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); ++typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); ++typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); ++typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); ++typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); ++typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); ++typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); ++typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); ++typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32))); ++typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); ++typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); ++typedef float v8f32_w __attribute__ ((vector_size(32), 
aligned(4))); ++typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); ++typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); ++ ++typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); ++typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); ++ ++typedef float __m256 __attribute__ ((__vector_size__ (32), __may_alias__)); ++typedef long long __m256i __attribute__ ((__vector_size__ (32), __may_alias__)); ++typedef double __m256d __attribute__ ((__vector_size__ (32), __may_alias__)); ++ ++/* Unaligned version of the same types. */ ++typedef float __m256_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef long long __m256i_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef double __m256d_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++ ++v32i8 __lasx_xvsll_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsll_b(_1, _2);} ++v16i16 __lasx_xvsll_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsll_h(_1, _2);} ++v8i32 __lasx_xvsll_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsll_w(_1, _2);} ++v4i64 __lasx_xvsll_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsll_d(_1, _2);} ++v32i8 __lasx_xvslli_b(v32i8 _1){return __builtin_lasx_xvslli_b(_1, 1);} ++v16i16 __lasx_xvslli_h(v16i16 _1){return __builtin_lasx_xvslli_h(_1, 1);} ++v8i32 __lasx_xvslli_w(v8i32 _1){return __builtin_lasx_xvslli_w(_1, 1);} ++v4i64 __lasx_xvslli_d(v4i64 _1){return __builtin_lasx_xvslli_d(_1, 1);} ++v32i8 __lasx_xvsra_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsra_b(_1, _2);} ++v16i16 __lasx_xvsra_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsra_h(_1, _2);} ++v8i32 __lasx_xvsra_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsra_w(_1, _2);} ++v4i64 __lasx_xvsra_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsra_d(_1, _2);} ++v32i8 __lasx_xvsrai_b(v32i8 _1){return __builtin_lasx_xvsrai_b(_1, 1);} ++v16i16 __lasx_xvsrai_h(v16i16 _1){return __builtin_lasx_xvsrai_h(_1, 1);} ++v8i32 __lasx_xvsrai_w(v8i32 _1){return __builtin_lasx_xvsrai_w(_1, 1);} ++v4i64 __lasx_xvsrai_d(v4i64 _1){return __builtin_lasx_xvsrai_d(_1, 1);} ++v32i8 __lasx_xvsrar_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrar_b(_1, _2);} ++v16i16 __lasx_xvsrar_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrar_h(_1, _2);} ++v8i32 __lasx_xvsrar_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrar_w(_1, _2);} ++v4i64 __lasx_xvsrar_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrar_d(_1, _2);} ++v32i8 __lasx_xvsrari_b(v32i8 _1){return __builtin_lasx_xvsrari_b(_1, 1);} ++v16i16 __lasx_xvsrari_h(v16i16 _1){return __builtin_lasx_xvsrari_h(_1, 1);} ++v8i32 __lasx_xvsrari_w(v8i32 _1){return __builtin_lasx_xvsrari_w(_1, 1);} ++v4i64 __lasx_xvsrari_d(v4i64 _1){return __builtin_lasx_xvsrari_d(_1, 1);} ++v32i8 __lasx_xvsrl_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrl_b(_1, _2);} ++v16i16 __lasx_xvsrl_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrl_h(_1, _2);} ++v8i32 __lasx_xvsrl_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrl_w(_1, _2);} ++v4i64 __lasx_xvsrl_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrl_d(_1, _2);} ++v32i8 __lasx_xvsrli_b(v32i8 _1){return __builtin_lasx_xvsrli_b(_1, 1);} ++v16i16 __lasx_xvsrli_h(v16i16 _1){return __builtin_lasx_xvsrli_h(_1, 1);} ++v8i32 __lasx_xvsrli_w(v8i32 _1){return __builtin_lasx_xvsrli_w(_1, 1);} ++v4i64 __lasx_xvsrli_d(v4i64 _1){return __builtin_lasx_xvsrli_d(_1, 1);} ++v32i8 __lasx_xvsrlr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlr_b(_1, _2);} ++v16i16 
__lasx_xvsrlr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlr_h(_1, _2);} ++v8i32 __lasx_xvsrlr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlr_w(_1, _2);} ++v4i64 __lasx_xvsrlr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlr_d(_1, _2);} ++v32i8 __lasx_xvsrlri_b(v32i8 _1){return __builtin_lasx_xvsrlri_b(_1, 1);} ++v16i16 __lasx_xvsrlri_h(v16i16 _1){return __builtin_lasx_xvsrlri_h(_1, 1);} ++v8i32 __lasx_xvsrlri_w(v8i32 _1){return __builtin_lasx_xvsrlri_w(_1, 1);} ++v4i64 __lasx_xvsrlri_d(v4i64 _1){return __builtin_lasx_xvsrlri_d(_1, 1);} ++v32u8 __lasx_xvbitclr_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitclr_b(_1, _2);} ++v16u16 __lasx_xvbitclr_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitclr_h(_1, _2);} ++v8u32 __lasx_xvbitclr_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitclr_w(_1, _2);} ++v4u64 __lasx_xvbitclr_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitclr_d(_1, _2);} ++v32u8 __lasx_xvbitclri_b(v32u8 _1){return __builtin_lasx_xvbitclri_b(_1, 1);} ++v16u16 __lasx_xvbitclri_h(v16u16 _1){return __builtin_lasx_xvbitclri_h(_1, 1);} ++v8u32 __lasx_xvbitclri_w(v8u32 _1){return __builtin_lasx_xvbitclri_w(_1, 1);} ++v4u64 __lasx_xvbitclri_d(v4u64 _1){return __builtin_lasx_xvbitclri_d(_1, 1);} ++v32u8 __lasx_xvbitset_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitset_b(_1, _2);} ++v16u16 __lasx_xvbitset_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitset_h(_1, _2);} ++v8u32 __lasx_xvbitset_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitset_w(_1, _2);} ++v4u64 __lasx_xvbitset_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitset_d(_1, _2);} ++v32u8 __lasx_xvbitseti_b(v32u8 _1){return __builtin_lasx_xvbitseti_b(_1, 1);} ++v16u16 __lasx_xvbitseti_h(v16u16 _1){return __builtin_lasx_xvbitseti_h(_1, 1);} ++v8u32 __lasx_xvbitseti_w(v8u32 _1){return __builtin_lasx_xvbitseti_w(_1, 1);} ++v4u64 __lasx_xvbitseti_d(v4u64 _1){return __builtin_lasx_xvbitseti_d(_1, 1);} ++v32u8 __lasx_xvbitrev_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitrev_b(_1, _2);} ++v16u16 __lasx_xvbitrev_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitrev_h(_1, _2);} ++v8u32 __lasx_xvbitrev_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitrev_w(_1, _2);} ++v4u64 __lasx_xvbitrev_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitrev_d(_1, _2);} ++v32u8 __lasx_xvbitrevi_b(v32u8 _1){return __builtin_lasx_xvbitrevi_b(_1, 1);} ++v16u16 __lasx_xvbitrevi_h(v16u16 _1){return __builtin_lasx_xvbitrevi_h(_1, 1);} ++v8u32 __lasx_xvbitrevi_w(v8u32 _1){return __builtin_lasx_xvbitrevi_w(_1, 1);} ++v4u64 __lasx_xvbitrevi_d(v4u64 _1){return __builtin_lasx_xvbitrevi_d(_1, 1);} ++v32i8 __lasx_xvadd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvadd_b(_1, _2);} ++v16i16 __lasx_xvadd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvadd_h(_1, _2);} ++v8i32 __lasx_xvadd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvadd_w(_1, _2);} ++v4i64 __lasx_xvadd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadd_d(_1, _2);} ++v32i8 __lasx_xvaddi_bu(v32i8 _1){return __builtin_lasx_xvaddi_bu(_1, 1);} ++v16i16 __lasx_xvaddi_hu(v16i16 _1){return __builtin_lasx_xvaddi_hu(_1, 1);} ++v8i32 __lasx_xvaddi_wu(v8i32 _1){return __builtin_lasx_xvaddi_wu(_1, 1);} ++v4i64 __lasx_xvaddi_du(v4i64 _1){return __builtin_lasx_xvaddi_du(_1, 1);} ++v32i8 __lasx_xvsub_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsub_b(_1, _2);} ++v16i16 __lasx_xvsub_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsub_h(_1, _2);} ++v8i32 __lasx_xvsub_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsub_w(_1, _2);} ++v4i64 __lasx_xvsub_d(v4i64 _1, v4i64 _2){return 
__builtin_lasx_xvsub_d(_1, _2);} ++v32i8 __lasx_xvsubi_bu(v32i8 _1){return __builtin_lasx_xvsubi_bu(_1, 1);} ++v16i16 __lasx_xvsubi_hu(v16i16 _1){return __builtin_lasx_xvsubi_hu(_1, 1);} ++v8i32 __lasx_xvsubi_wu(v8i32 _1){return __builtin_lasx_xvsubi_wu(_1, 1);} ++v4i64 __lasx_xvsubi_du(v4i64 _1){return __builtin_lasx_xvsubi_du(_1, 1);} ++v32i8 __lasx_xvmax_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmax_b(_1, _2);} ++v16i16 __lasx_xvmax_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmax_h(_1, _2);} ++v8i32 __lasx_xvmax_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmax_w(_1, _2);} ++v4i64 __lasx_xvmax_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmax_d(_1, _2);} ++v32i8 __lasx_xvmaxi_b(v32i8 _1){return __builtin_lasx_xvmaxi_b(_1, 1);} ++v16i16 __lasx_xvmaxi_h(v16i16 _1){return __builtin_lasx_xvmaxi_h(_1, 1);} ++v8i32 __lasx_xvmaxi_w(v8i32 _1){return __builtin_lasx_xvmaxi_w(_1, 1);} ++v4i64 __lasx_xvmaxi_d(v4i64 _1){return __builtin_lasx_xvmaxi_d(_1, 1);} ++v32u8 __lasx_xvmax_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmax_bu(_1, _2);} ++v16u16 __lasx_xvmax_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmax_hu(_1, _2);} ++v8u32 __lasx_xvmax_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmax_wu(_1, _2);} ++v4u64 __lasx_xvmax_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmax_du(_1, _2);} ++v32u8 __lasx_xvmaxi_bu(v32u8 _1){return __builtin_lasx_xvmaxi_bu(_1, 1);} ++v16u16 __lasx_xvmaxi_hu(v16u16 _1){return __builtin_lasx_xvmaxi_hu(_1, 1);} ++v8u32 __lasx_xvmaxi_wu(v8u32 _1){return __builtin_lasx_xvmaxi_wu(_1, 1);} ++v4u64 __lasx_xvmaxi_du(v4u64 _1){return __builtin_lasx_xvmaxi_du(_1, 1);} ++v32i8 __lasx_xvmin_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmin_b(_1, _2);} ++v16i16 __lasx_xvmin_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmin_h(_1, _2);} ++v8i32 __lasx_xvmin_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmin_w(_1, _2);} ++v4i64 __lasx_xvmin_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmin_d(_1, _2);} ++v32i8 __lasx_xvmini_b(v32i8 _1){return __builtin_lasx_xvmini_b(_1, 1);} ++v16i16 __lasx_xvmini_h(v16i16 _1){return __builtin_lasx_xvmini_h(_1, 1);} ++v8i32 __lasx_xvmini_w(v8i32 _1){return __builtin_lasx_xvmini_w(_1, 1);} ++v4i64 __lasx_xvmini_d(v4i64 _1){return __builtin_lasx_xvmini_d(_1, 1);} ++v32u8 __lasx_xvmin_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmin_bu(_1, _2);} ++v16u16 __lasx_xvmin_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmin_hu(_1, _2);} ++v8u32 __lasx_xvmin_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmin_wu(_1, _2);} ++v4u64 __lasx_xvmin_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmin_du(_1, _2);} ++v32u8 __lasx_xvmini_bu(v32u8 _1){return __builtin_lasx_xvmini_bu(_1, 1);} ++v16u16 __lasx_xvmini_hu(v16u16 _1){return __builtin_lasx_xvmini_hu(_1, 1);} ++v8u32 __lasx_xvmini_wu(v8u32 _1){return __builtin_lasx_xvmini_wu(_1, 1);} ++v4u64 __lasx_xvmini_du(v4u64 _1){return __builtin_lasx_xvmini_du(_1, 1);} ++v32i8 __lasx_xvseq_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvseq_b(_1, _2);} ++v16i16 __lasx_xvseq_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvseq_h(_1, _2);} ++v8i32 __lasx_xvseq_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvseq_w(_1, _2);} ++v4i64 __lasx_xvseq_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvseq_d(_1, _2);} ++v32i8 __lasx_xvseqi_b(v32i8 _1){return __builtin_lasx_xvseqi_b(_1, 1);} ++v16i16 __lasx_xvseqi_h(v16i16 _1){return __builtin_lasx_xvseqi_h(_1, 1);} ++v8i32 __lasx_xvseqi_w(v8i32 _1){return __builtin_lasx_xvseqi_w(_1, 1);} ++v4i64 __lasx_xvseqi_d(v4i64 _1){return __builtin_lasx_xvseqi_d(_1, 1);} ++v32i8 
__lasx_xvslt_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvslt_b(_1, _2);} ++v16i16 __lasx_xvslt_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvslt_h(_1, _2);} ++v8i32 __lasx_xvslt_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvslt_w(_1, _2);} ++v4i64 __lasx_xvslt_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvslt_d(_1, _2);} ++v32i8 __lasx_xvslti_b(v32i8 _1){return __builtin_lasx_xvslti_b(_1, 1);} ++v16i16 __lasx_xvslti_h(v16i16 _1){return __builtin_lasx_xvslti_h(_1, 1);} ++v8i32 __lasx_xvslti_w(v8i32 _1){return __builtin_lasx_xvslti_w(_1, 1);} ++v4i64 __lasx_xvslti_d(v4i64 _1){return __builtin_lasx_xvslti_d(_1, 1);} ++v32i8 __lasx_xvslt_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvslt_bu(_1, _2);} ++v16i16 __lasx_xvslt_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvslt_hu(_1, _2);} ++v8i32 __lasx_xvslt_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvslt_wu(_1, _2);} ++v4i64 __lasx_xvslt_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvslt_du(_1, _2);} ++v32i8 __lasx_xvslti_bu(v32u8 _1){return __builtin_lasx_xvslti_bu(_1, 1);} ++v16i16 __lasx_xvslti_hu(v16u16 _1){return __builtin_lasx_xvslti_hu(_1, 1);} ++v8i32 __lasx_xvslti_wu(v8u32 _1){return __builtin_lasx_xvslti_wu(_1, 1);} ++v4i64 __lasx_xvslti_du(v4u64 _1){return __builtin_lasx_xvslti_du(_1, 1);} ++v32i8 __lasx_xvsle_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsle_b(_1, _2);} ++v16i16 __lasx_xvsle_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsle_h(_1, _2);} ++v8i32 __lasx_xvsle_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsle_w(_1, _2);} ++v4i64 __lasx_xvsle_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsle_d(_1, _2);} ++v32i8 __lasx_xvslei_b(v32i8 _1){return __builtin_lasx_xvslei_b(_1, 1);} ++v16i16 __lasx_xvslei_h(v16i16 _1){return __builtin_lasx_xvslei_h(_1, 1);} ++v8i32 __lasx_xvslei_w(v8i32 _1){return __builtin_lasx_xvslei_w(_1, 1);} ++v4i64 __lasx_xvslei_d(v4i64 _1){return __builtin_lasx_xvslei_d(_1, 1);} ++v32i8 __lasx_xvsle_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsle_bu(_1, _2);} ++v16i16 __lasx_xvsle_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsle_hu(_1, _2);} ++v8i32 __lasx_xvsle_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsle_wu(_1, _2);} ++v4i64 __lasx_xvsle_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsle_du(_1, _2);} ++v32i8 __lasx_xvslei_bu(v32u8 _1){return __builtin_lasx_xvslei_bu(_1, 1);} ++v16i16 __lasx_xvslei_hu(v16u16 _1){return __builtin_lasx_xvslei_hu(_1, 1);} ++v8i32 __lasx_xvslei_wu(v8u32 _1){return __builtin_lasx_xvslei_wu(_1, 1);} ++v4i64 __lasx_xvslei_du(v4u64 _1){return __builtin_lasx_xvslei_du(_1, 1);} ++v32i8 __lasx_xvsat_b(v32i8 _1){return __builtin_lasx_xvsat_b(_1, 1);} ++v16i16 __lasx_xvsat_h(v16i16 _1){return __builtin_lasx_xvsat_h(_1, 1);} ++v8i32 __lasx_xvsat_w(v8i32 _1){return __builtin_lasx_xvsat_w(_1, 1);} ++v4i64 __lasx_xvsat_d(v4i64 _1){return __builtin_lasx_xvsat_d(_1, 1);} ++v32u8 __lasx_xvsat_bu(v32u8 _1){return __builtin_lasx_xvsat_bu(_1, 1);} ++v16u16 __lasx_xvsat_hu(v16u16 _1){return __builtin_lasx_xvsat_hu(_1, 1);} ++v8u32 __lasx_xvsat_wu(v8u32 _1){return __builtin_lasx_xvsat_wu(_1, 1);} ++v4u64 __lasx_xvsat_du(v4u64 _1){return __builtin_lasx_xvsat_du(_1, 1);} ++v32i8 __lasx_xvadda_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvadda_b(_1, _2);} ++v16i16 __lasx_xvadda_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvadda_h(_1, _2);} ++v8i32 __lasx_xvadda_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvadda_w(_1, _2);} ++v4i64 __lasx_xvadda_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadda_d(_1, _2);} ++v32i8 __lasx_xvsadd_b(v32i8 _1, v32i8 _2){return 
__builtin_lasx_xvsadd_b(_1, _2);} ++v16i16 __lasx_xvsadd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsadd_h(_1, _2);} ++v8i32 __lasx_xvsadd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsadd_w(_1, _2);} ++v4i64 __lasx_xvsadd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsadd_d(_1, _2);} ++v32u8 __lasx_xvsadd_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsadd_bu(_1, _2);} ++v16u16 __lasx_xvsadd_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsadd_hu(_1, _2);} ++v8u32 __lasx_xvsadd_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsadd_wu(_1, _2);} ++v4u64 __lasx_xvsadd_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsadd_du(_1, _2);} ++v32i8 __lasx_xvavg_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvavg_b(_1, _2);} ++v16i16 __lasx_xvavg_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvavg_h(_1, _2);} ++v8i32 __lasx_xvavg_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvavg_w(_1, _2);} ++v4i64 __lasx_xvavg_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvavg_d(_1, _2);} ++v32u8 __lasx_xvavg_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvavg_bu(_1, _2);} ++v16u16 __lasx_xvavg_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvavg_hu(_1, _2);} ++v8u32 __lasx_xvavg_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvavg_wu(_1, _2);} ++v4u64 __lasx_xvavg_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvavg_du(_1, _2);} ++v32i8 __lasx_xvavgr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvavgr_b(_1, _2);} ++v16i16 __lasx_xvavgr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvavgr_h(_1, _2);} ++v8i32 __lasx_xvavgr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvavgr_w(_1, _2);} ++v4i64 __lasx_xvavgr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvavgr_d(_1, _2);} ++v32u8 __lasx_xvavgr_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvavgr_bu(_1, _2);} ++v16u16 __lasx_xvavgr_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvavgr_hu(_1, _2);} ++v8u32 __lasx_xvavgr_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvavgr_wu(_1, _2);} ++v4u64 __lasx_xvavgr_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvavgr_du(_1, _2);} ++v32i8 __lasx_xvssub_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssub_b(_1, _2);} ++v16i16 __lasx_xvssub_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssub_h(_1, _2);} ++v8i32 __lasx_xvssub_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssub_w(_1, _2);} ++v4i64 __lasx_xvssub_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssub_d(_1, _2);} ++v32u8 __lasx_xvssub_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvssub_bu(_1, _2);} ++v16u16 __lasx_xvssub_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssub_hu(_1, _2);} ++v8u32 __lasx_xvssub_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssub_wu(_1, _2);} ++v4u64 __lasx_xvssub_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssub_du(_1, _2);} ++v32i8 __lasx_xvabsd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvabsd_b(_1, _2);} ++v16i16 __lasx_xvabsd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvabsd_h(_1, _2);} ++v8i32 __lasx_xvabsd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvabsd_w(_1, _2);} ++v4i64 __lasx_xvabsd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvabsd_d(_1, _2);} ++v32u8 __lasx_xvabsd_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvabsd_bu(_1, _2);} ++v16u16 __lasx_xvabsd_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvabsd_hu(_1, _2);} ++v8u32 __lasx_xvabsd_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvabsd_wu(_1, _2);} ++v4u64 __lasx_xvabsd_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvabsd_du(_1, _2);} ++v32i8 __lasx_xvmul_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmul_b(_1, _2);} ++v16i16 __lasx_xvmul_h(v16i16 _1, v16i16 _2){return 
__builtin_lasx_xvmul_h(_1, _2);} ++v8i32 __lasx_xvmul_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmul_w(_1, _2);} ++v4i64 __lasx_xvmul_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmul_d(_1, _2);} ++v32i8 __lasx_xvmadd_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmadd_b(_1, _2, _3);} ++v16i16 __lasx_xvmadd_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmadd_h(_1, _2, _3);} ++v8i32 __lasx_xvmadd_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmadd_w(_1, _2, _3);} ++v4i64 __lasx_xvmadd_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmadd_d(_1, _2, _3);} ++v32i8 __lasx_xvmsub_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmsub_b(_1, _2, _3);} ++v16i16 __lasx_xvmsub_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmsub_h(_1, _2, _3);} ++v8i32 __lasx_xvmsub_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmsub_w(_1, _2, _3);} ++v4i64 __lasx_xvmsub_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmsub_d(_1, _2, _3);} ++v32i8 __lasx_xvdiv_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvdiv_b(_1, _2);} ++v16i16 __lasx_xvdiv_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvdiv_h(_1, _2);} ++v8i32 __lasx_xvdiv_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvdiv_w(_1, _2);} ++v4i64 __lasx_xvdiv_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvdiv_d(_1, _2);} ++v32u8 __lasx_xvdiv_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvdiv_bu(_1, _2);} ++v16u16 __lasx_xvdiv_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvdiv_hu(_1, _2);} ++v8u32 __lasx_xvdiv_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvdiv_wu(_1, _2);} ++v4u64 __lasx_xvdiv_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvdiv_du(_1, _2);} ++v16i16 __lasx_xvhaddw_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvhaddw_h_b(_1, _2);} ++v8i32 __lasx_xvhaddw_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvhaddw_w_h(_1, _2);} ++v4i64 __lasx_xvhaddw_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvhaddw_d_w(_1, _2);} ++v16u16 __lasx_xvhaddw_hu_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvhaddw_hu_bu(_1, _2);} ++v8u32 __lasx_xvhaddw_wu_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvhaddw_wu_hu(_1, _2);} ++v4u64 __lasx_xvhaddw_du_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvhaddw_du_wu(_1, _2);} ++v16i16 __lasx_xvhsubw_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvhsubw_h_b(_1, _2);} ++v8i32 __lasx_xvhsubw_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvhsubw_w_h(_1, _2);} ++v4i64 __lasx_xvhsubw_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvhsubw_d_w(_1, _2);} ++v16i16 __lasx_xvhsubw_hu_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvhsubw_hu_bu(_1, _2);} ++v8i32 __lasx_xvhsubw_wu_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvhsubw_wu_hu(_1, _2);} ++v4i64 __lasx_xvhsubw_du_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvhsubw_du_wu(_1, _2);} ++v32i8 __lasx_xvmod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmod_b(_1, _2);} ++v16i16 __lasx_xvmod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmod_h(_1, _2);} ++v8i32 __lasx_xvmod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmod_w(_1, _2);} ++v4i64 __lasx_xvmod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmod_d(_1, _2);} ++v32u8 __lasx_xvmod_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmod_bu(_1, _2);} ++v16u16 __lasx_xvmod_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmod_hu(_1, _2);} ++v8u32 __lasx_xvmod_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmod_wu(_1, _2);} ++v4u64 __lasx_xvmod_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmod_du(_1, _2);} ++v32i8 __lasx_xvrepl128vei_b(v32i8 _1){return 
__builtin_lasx_xvrepl128vei_b(_1, 1);} ++v16i16 __lasx_xvrepl128vei_h(v16i16 _1){return __builtin_lasx_xvrepl128vei_h(_1, 1);} ++v8i32 __lasx_xvrepl128vei_w(v8i32 _1){return __builtin_lasx_xvrepl128vei_w(_1, 1);} ++v4i64 __lasx_xvrepl128vei_d(v4i64 _1){return __builtin_lasx_xvrepl128vei_d(_1, 1);} ++v32i8 __lasx_xvpickev_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpickev_b(_1, _2);} ++v16i16 __lasx_xvpickev_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpickev_h(_1, _2);} ++v8i32 __lasx_xvpickev_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpickev_w(_1, _2);} ++v4i64 __lasx_xvpickev_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpickev_d(_1, _2);} ++v32i8 __lasx_xvpickod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpickod_b(_1, _2);} ++v16i16 __lasx_xvpickod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpickod_h(_1, _2);} ++v8i32 __lasx_xvpickod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpickod_w(_1, _2);} ++v4i64 __lasx_xvpickod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpickod_d(_1, _2);} ++v32i8 __lasx_xvilvh_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvilvh_b(_1, _2);} ++v16i16 __lasx_xvilvh_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvilvh_h(_1, _2);} ++v8i32 __lasx_xvilvh_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvilvh_w(_1, _2);} ++v4i64 __lasx_xvilvh_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvilvh_d(_1, _2);} ++v32i8 __lasx_xvilvl_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvilvl_b(_1, _2);} ++v16i16 __lasx_xvilvl_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvilvl_h(_1, _2);} ++v8i32 __lasx_xvilvl_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvilvl_w(_1, _2);} ++v4i64 __lasx_xvilvl_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvilvl_d(_1, _2);} ++v32i8 __lasx_xvpackev_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpackev_b(_1, _2);} ++v16i16 __lasx_xvpackev_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpackev_h(_1, _2);} ++v8i32 __lasx_xvpackev_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpackev_w(_1, _2);} ++v4i64 __lasx_xvpackev_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpackev_d(_1, _2);} ++v32i8 __lasx_xvpackod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpackod_b(_1, _2);} ++v16i16 __lasx_xvpackod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpackod_h(_1, _2);} ++v8i32 __lasx_xvpackod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpackod_w(_1, _2);} ++v4i64 __lasx_xvpackod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpackod_d(_1, _2);} ++v32i8 __lasx_xvshuf_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvshuf_b(_1, _2, _3);} ++v16i16 __lasx_xvshuf_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvshuf_h(_1, _2, _3);} ++v8i32 __lasx_xvshuf_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvshuf_w(_1, _2, _3);} ++v4i64 __lasx_xvshuf_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvshuf_d(_1, _2, _3);} ++v32u8 __lasx_xvand_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvand_v(_1, _2);} ++v32u8 __lasx_xvandi_b(v32u8 _1){return __builtin_lasx_xvandi_b(_1, 1);} ++v32u8 __lasx_xvor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvor_v(_1, _2);} ++v32u8 __lasx_xvori_b(v32u8 _1){return __builtin_lasx_xvori_b(_1, 1);} ++v32u8 __lasx_xvnor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvnor_v(_1, _2);} ++v32u8 __lasx_xvnori_b(v32u8 _1){return __builtin_lasx_xvnori_b(_1, 1);} ++v32u8 __lasx_xvxor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvxor_v(_1, _2);} ++v32u8 __lasx_xvxori_b(v32u8 _1){return __builtin_lasx_xvxori_b(_1, 1);} ++v32u8 __lasx_xvbitsel_v(v32u8 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvbitsel_v(_1, _2, _3);} 
++v32u8 __lasx_xvbitseli_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitseli_b(_1, _2, 1);} ++v32i8 __lasx_xvshuf4i_b(v32i8 _1){return __builtin_lasx_xvshuf4i_b(_1, 1);} ++v16i16 __lasx_xvshuf4i_h(v16i16 _1){return __builtin_lasx_xvshuf4i_h(_1, 1);} ++v8i32 __lasx_xvshuf4i_w(v8i32 _1){return __builtin_lasx_xvshuf4i_w(_1, 1);} ++v32i8 __lasx_xvreplgr2vr_b(int _1){return __builtin_lasx_xvreplgr2vr_b(_1);} ++v16i16 __lasx_xvreplgr2vr_h(int _1){return __builtin_lasx_xvreplgr2vr_h(_1);} ++v8i32 __lasx_xvreplgr2vr_w(int _1){return __builtin_lasx_xvreplgr2vr_w(_1);} ++v4i64 __lasx_xvreplgr2vr_d(int _1){return __builtin_lasx_xvreplgr2vr_d(_1);} ++v32i8 __lasx_xvpcnt_b(v32i8 _1){return __builtin_lasx_xvpcnt_b(_1);} ++v16i16 __lasx_xvpcnt_h(v16i16 _1){return __builtin_lasx_xvpcnt_h(_1);} ++v8i32 __lasx_xvpcnt_w(v8i32 _1){return __builtin_lasx_xvpcnt_w(_1);} ++v4i64 __lasx_xvpcnt_d(v4i64 _1){return __builtin_lasx_xvpcnt_d(_1);} ++v32i8 __lasx_xvclo_b(v32i8 _1){return __builtin_lasx_xvclo_b(_1);} ++v16i16 __lasx_xvclo_h(v16i16 _1){return __builtin_lasx_xvclo_h(_1);} ++v8i32 __lasx_xvclo_w(v8i32 _1){return __builtin_lasx_xvclo_w(_1);} ++v4i64 __lasx_xvclo_d(v4i64 _1){return __builtin_lasx_xvclo_d(_1);} ++v32i8 __lasx_xvclz_b(v32i8 _1){return __builtin_lasx_xvclz_b(_1);} ++v16i16 __lasx_xvclz_h(v16i16 _1){return __builtin_lasx_xvclz_h(_1);} ++v8i32 __lasx_xvclz_w(v8i32 _1){return __builtin_lasx_xvclz_w(_1);} ++v4i64 __lasx_xvclz_d(v4i64 _1){return __builtin_lasx_xvclz_d(_1);} ++v8f32 __lasx_xvfadd_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfadd_s(_1, _2);} ++v4f64 __lasx_xvfadd_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfadd_d(_1, _2);} ++v8f32 __lasx_xvfsub_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfsub_s(_1, _2);} ++v4f64 __lasx_xvfsub_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfsub_d(_1, _2);} ++v8f32 __lasx_xvfmul_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmul_s(_1, _2);} ++v4f64 __lasx_xvfmul_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmul_d(_1, _2);} ++v8f32 __lasx_xvfdiv_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfdiv_s(_1, _2);} ++v4f64 __lasx_xvfdiv_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfdiv_d(_1, _2);} ++v16i16 __lasx_xvfcvt_h_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcvt_h_s(_1, _2);} ++v8f32 __lasx_xvfcvt_s_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcvt_s_d(_1, _2);} ++v8f32 __lasx_xvfmin_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmin_s(_1, _2);} ++v4f64 __lasx_xvfmin_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmin_d(_1, _2);} ++v8f32 __lasx_xvfmina_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmina_s(_1, _2);} ++v4f64 __lasx_xvfmina_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmina_d(_1, _2);} ++v8f32 __lasx_xvfmax_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmax_s(_1, _2);} ++v4f64 __lasx_xvfmax_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmax_d(_1, _2);} ++v8f32 __lasx_xvfmaxa_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmaxa_s(_1, _2);} ++v4f64 __lasx_xvfmaxa_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmaxa_d(_1, _2);} ++v8i32 __lasx_xvfclass_s(v8f32 _1){return __builtin_lasx_xvfclass_s(_1);} ++v4i64 __lasx_xvfclass_d(v4f64 _1){return __builtin_lasx_xvfclass_d(_1);} ++v8f32 __lasx_xvfsqrt_s(v8f32 _1){return __builtin_lasx_xvfsqrt_s(_1);} ++v4f64 __lasx_xvfsqrt_d(v4f64 _1){return __builtin_lasx_xvfsqrt_d(_1);} ++v8f32 __lasx_xvfrecip_s(v8f32 _1){return __builtin_lasx_xvfrecip_s(_1);} ++v4f64 __lasx_xvfrecip_d(v4f64 _1){return __builtin_lasx_xvfrecip_d(_1);} ++v8f32 __lasx_xvfrint_s(v8f32 _1){return __builtin_lasx_xvfrint_s(_1);} 
++v4f64 __lasx_xvfrint_d(v4f64 _1){return __builtin_lasx_xvfrint_d(_1);} ++v8f32 __lasx_xvfrsqrt_s(v8f32 _1){return __builtin_lasx_xvfrsqrt_s(_1);} ++v4f64 __lasx_xvfrsqrt_d(v4f64 _1){return __builtin_lasx_xvfrsqrt_d(_1);} ++v8f32 __lasx_xvflogb_s(v8f32 _1){return __builtin_lasx_xvflogb_s(_1);} ++v4f64 __lasx_xvflogb_d(v4f64 _1){return __builtin_lasx_xvflogb_d(_1);} ++v8f32 __lasx_xvfcvth_s_h(v16i16 _1){return __builtin_lasx_xvfcvth_s_h(_1);} ++v4f64 __lasx_xvfcvth_d_s(v8f32 _1){return __builtin_lasx_xvfcvth_d_s(_1);} ++v8f32 __lasx_xvfcvtl_s_h(v16i16 _1){return __builtin_lasx_xvfcvtl_s_h(_1);} ++v4f64 __lasx_xvfcvtl_d_s(v8f32 _1){return __builtin_lasx_xvfcvtl_d_s(_1);} ++v8i32 __lasx_xvftint_w_s(v8f32 _1){return __builtin_lasx_xvftint_w_s(_1);} ++v4i64 __lasx_xvftint_l_d(v4f64 _1){return __builtin_lasx_xvftint_l_d(_1);} ++v8u32 __lasx_xvftint_wu_s(v8f32 _1){return __builtin_lasx_xvftint_wu_s(_1);} ++v4u64 __lasx_xvftint_lu_d(v4f64 _1){return __builtin_lasx_xvftint_lu_d(_1);} ++v8i32 __lasx_xvftintrz_w_s(v8f32 _1){return __builtin_lasx_xvftintrz_w_s(_1);} ++v4i64 __lasx_xvftintrz_l_d(v4f64 _1){return __builtin_lasx_xvftintrz_l_d(_1);} ++v8u32 __lasx_xvftintrz_wu_s(v8f32 _1){return __builtin_lasx_xvftintrz_wu_s(_1);} ++v4u64 __lasx_xvftintrz_lu_d(v4f64 _1){return __builtin_lasx_xvftintrz_lu_d(_1);} ++v8f32 __lasx_xvffint_s_w(v8i32 _1){return __builtin_lasx_xvffint_s_w(_1);} ++v4f64 __lasx_xvffint_d_l(v4i64 _1){return __builtin_lasx_xvffint_d_l(_1);} ++v8f32 __lasx_xvffint_s_wu(v8u32 _1){return __builtin_lasx_xvffint_s_wu(_1);} ++v4f64 __lasx_xvffint_d_lu(v4u64 _1){return __builtin_lasx_xvffint_d_lu(_1);} ++v32i8 __lasx_xvreplve_b(v32i8 _1, int _2){return __builtin_lasx_xvreplve_b(_1, _2);} ++v16i16 __lasx_xvreplve_h(v16i16 _1, int _2){return __builtin_lasx_xvreplve_h(_1, _2);} ++v8i32 __lasx_xvreplve_w(v8i32 _1, int _2){return __builtin_lasx_xvreplve_w(_1, _2);} ++v4i64 __lasx_xvreplve_d(v4i64 _1, int _2){return __builtin_lasx_xvreplve_d(_1, _2);} ++v8i32 __lasx_xvpermi_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpermi_w(_1, _2, 1);} ++v32u8 __lasx_xvandn_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvandn_v(_1, _2);} ++v32i8 __lasx_xvneg_b(v32i8 _1){return __builtin_lasx_xvneg_b(_1);} ++v16i16 __lasx_xvneg_h(v16i16 _1){return __builtin_lasx_xvneg_h(_1);} ++v8i32 __lasx_xvneg_w(v8i32 _1){return __builtin_lasx_xvneg_w(_1);} ++v4i64 __lasx_xvneg_d(v4i64 _1){return __builtin_lasx_xvneg_d(_1);} ++v32i8 __lasx_xvmuh_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmuh_b(_1, _2);} ++v16i16 __lasx_xvmuh_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmuh_h(_1, _2);} ++v8i32 __lasx_xvmuh_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmuh_w(_1, _2);} ++v4i64 __lasx_xvmuh_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmuh_d(_1, _2);} ++v32u8 __lasx_xvmuh_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmuh_bu(_1, _2);} ++v16u16 __lasx_xvmuh_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmuh_hu(_1, _2);} ++v8u32 __lasx_xvmuh_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmuh_wu(_1, _2);} ++v4u64 __lasx_xvmuh_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmuh_du(_1, _2);} ++v16i16 __lasx_xvsllwil_h_b(v32i8 _1){return __builtin_lasx_xvsllwil_h_b(_1, 1);} ++v8i32 __lasx_xvsllwil_w_h(v16i16 _1){return __builtin_lasx_xvsllwil_w_h(_1, 1);} ++v4i64 __lasx_xvsllwil_d_w(v8i32 _1){return __builtin_lasx_xvsllwil_d_w(_1, 1);} ++v16u16 __lasx_xvsllwil_hu_bu(v32u8 _1){return __builtin_lasx_xvsllwil_hu_bu(_1, 1);} ++v8u32 __lasx_xvsllwil_wu_hu(v16u16 _1){return __builtin_lasx_xvsllwil_wu_hu(_1, 1);} ++v4u64 
__lasx_xvsllwil_du_wu(v8u32 _1){return __builtin_lasx_xvsllwil_du_wu(_1, 1);} ++v32i8 __lasx_xvsran_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsran_b_h(_1, _2);} ++v16i16 __lasx_xvsran_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsran_h_w(_1, _2);} ++v8i32 __lasx_xvsran_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsran_w_d(_1, _2);} ++v32i8 __lasx_xvssran_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssran_b_h(_1, _2);} ++v16i16 __lasx_xvssran_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssran_h_w(_1, _2);} ++v8i32 __lasx_xvssran_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssran_w_d(_1, _2);} ++v32u8 __lasx_xvssran_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssran_bu_h(_1, _2);} ++v16u16 __lasx_xvssran_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssran_hu_w(_1, _2);} ++v8u32 __lasx_xvssran_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssran_wu_d(_1, _2);} ++v32i8 __lasx_xvsrarn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrarn_b_h(_1, _2);} ++v16i16 __lasx_xvsrarn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrarn_h_w(_1, _2);} ++v8i32 __lasx_xvsrarn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrarn_w_d(_1, _2);} ++v32i8 __lasx_xvssrarn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrarn_b_h(_1, _2);} ++v16i16 __lasx_xvssrarn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrarn_h_w(_1, _2);} ++v8i32 __lasx_xvssrarn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrarn_w_d(_1, _2);} ++v32u8 __lasx_xvssrarn_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrarn_bu_h(_1, _2);} ++v16u16 __lasx_xvssrarn_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrarn_hu_w(_1, _2);} ++v8u32 __lasx_xvssrarn_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrarn_wu_d(_1, _2);} ++v32i8 __lasx_xvsrln_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrln_b_h(_1, _2);} ++v16i16 __lasx_xvsrln_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrln_h_w(_1, _2);} ++v8i32 __lasx_xvsrln_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrln_w_d(_1, _2);} ++v32u8 __lasx_xvssrln_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrln_bu_h(_1, _2);} ++v16u16 __lasx_xvssrln_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrln_hu_w(_1, _2);} ++v8u32 __lasx_xvssrln_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrln_wu_d(_1, _2);} ++v32i8 __lasx_xvsrlrn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlrn_b_h(_1, _2);} ++v16i16 __lasx_xvsrlrn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlrn_h_w(_1, _2);} ++v8i32 __lasx_xvsrlrn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlrn_w_d(_1, _2);} ++v32u8 __lasx_xvssrlrn_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrlrn_bu_h(_1, _2);} ++v16u16 __lasx_xvssrlrn_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrlrn_hu_w(_1, _2);} ++v8u32 __lasx_xvssrlrn_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrlrn_wu_d(_1, _2);} ++v32i8 __lasx_xvfrstpi_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvfrstpi_b(_1, _2, 1);} ++v16i16 __lasx_xvfrstpi_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvfrstpi_h(_1, _2, 1);} ++v32i8 __lasx_xvfrstp_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvfrstp_b(_1, _2, _3);} ++v16i16 __lasx_xvfrstp_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvfrstp_h(_1, _2, _3);} ++v4i64 __lasx_xvshuf4i_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvshuf4i_d(_1, _2, 1);} ++v32i8 __lasx_xvbsrl_v(v32i8 _1){return __builtin_lasx_xvbsrl_v(_1, 1);} ++v32i8 __lasx_xvbsll_v(v32i8 _1){return __builtin_lasx_xvbsll_v(_1, 1);} ++v32i8 __lasx_xvextrins_b(v32i8 _1, v32i8 _2){return 
__builtin_lasx_xvextrins_b(_1, _2, 1);} ++v16i16 __lasx_xvextrins_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvextrins_h(_1, _2, 1);} ++v8i32 __lasx_xvextrins_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvextrins_w(_1, _2, 1);} ++v4i64 __lasx_xvextrins_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvextrins_d(_1, _2, 1);} ++v32i8 __lasx_xvmskltz_b(v32i8 _1){return __builtin_lasx_xvmskltz_b(_1);} ++v16i16 __lasx_xvmskltz_h(v16i16 _1){return __builtin_lasx_xvmskltz_h(_1);} ++v8i32 __lasx_xvmskltz_w(v8i32 _1){return __builtin_lasx_xvmskltz_w(_1);} ++v4i64 __lasx_xvmskltz_d(v4i64 _1){return __builtin_lasx_xvmskltz_d(_1);} ++v32i8 __lasx_xvsigncov_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsigncov_b(_1, _2);} ++v16i16 __lasx_xvsigncov_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsigncov_h(_1, _2);} ++v8i32 __lasx_xvsigncov_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsigncov_w(_1, _2);} ++v4i64 __lasx_xvsigncov_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsigncov_d(_1, _2);} ++v8f32 __lasx_xvfmadd_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfmadd_s(_1, _2, _3);} ++v4f64 __lasx_xvfmadd_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfmadd_d(_1, _2, _3);} ++v8f32 __lasx_xvfmsub_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfmsub_s(_1, _2, _3);} ++v4f64 __lasx_xvfmsub_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfmsub_d(_1, _2, _3);} ++v8f32 __lasx_xvfnmadd_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfnmadd_s(_1, _2, _3);} ++v4f64 __lasx_xvfnmadd_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfnmadd_d(_1, _2, _3);} ++v8f32 __lasx_xvfnmsub_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfnmsub_s(_1, _2, _3);} ++v4f64 __lasx_xvfnmsub_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfnmsub_d(_1, _2, _3);} ++v8i32 __lasx_xvftintrne_w_s(v8f32 _1){return __builtin_lasx_xvftintrne_w_s(_1);} ++v4i64 __lasx_xvftintrne_l_d(v4f64 _1){return __builtin_lasx_xvftintrne_l_d(_1);} ++v8i32 __lasx_xvftintrp_w_s(v8f32 _1){return __builtin_lasx_xvftintrp_w_s(_1);} ++v4i64 __lasx_xvftintrp_l_d(v4f64 _1){return __builtin_lasx_xvftintrp_l_d(_1);} ++v8i32 __lasx_xvftintrm_w_s(v8f32 _1){return __builtin_lasx_xvftintrm_w_s(_1);} ++v4i64 __lasx_xvftintrm_l_d(v4f64 _1){return __builtin_lasx_xvftintrm_l_d(_1);} ++v8i32 __lasx_xvftint_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftint_w_d(_1, _2);} ++v8f32 __lasx_xvffint_s_l(v4i64 _1, v4i64 _2){return __builtin_lasx_xvffint_s_l(_1, _2);} ++v8i32 __lasx_xvftintrz_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrz_w_d(_1, _2);} ++v8i32 __lasx_xvftintrp_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrp_w_d(_1, _2);} ++v8i32 __lasx_xvftintrm_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrm_w_d(_1, _2);} ++v8i32 __lasx_xvftintrne_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrne_w_d(_1, _2);} ++v4i64 __lasx_xvftinth_l_s(v8f32 _1){return __builtin_lasx_xvftinth_l_s(_1);} ++v4i64 __lasx_xvftintl_l_s(v8f32 _1){return __builtin_lasx_xvftintl_l_s(_1);} ++v4f64 __lasx_xvffinth_d_w(v8i32 _1){return __builtin_lasx_xvffinth_d_w(_1);} ++v4f64 __lasx_xvffintl_d_w(v8i32 _1){return __builtin_lasx_xvffintl_d_w(_1);} ++v4i64 __lasx_xvftintrzh_l_s(v8f32 _1){return __builtin_lasx_xvftintrzh_l_s(_1);} ++v4i64 __lasx_xvftintrzl_l_s(v8f32 _1){return __builtin_lasx_xvftintrzl_l_s(_1);} ++v4i64 __lasx_xvftintrph_l_s(v8f32 _1){return __builtin_lasx_xvftintrph_l_s(_1);} ++v4i64 __lasx_xvftintrpl_l_s(v8f32 _1){return __builtin_lasx_xvftintrpl_l_s(_1);} ++v4i64 __lasx_xvftintrmh_l_s(v8f32 
_1){return __builtin_lasx_xvftintrmh_l_s(_1);} ++v4i64 __lasx_xvftintrml_l_s(v8f32 _1){return __builtin_lasx_xvftintrml_l_s(_1);} ++v4i64 __lasx_xvftintrneh_l_s(v8f32 _1){return __builtin_lasx_xvftintrneh_l_s(_1);} ++v4i64 __lasx_xvftintrnel_l_s(v8f32 _1){return __builtin_lasx_xvftintrnel_l_s(_1);} ++v8i32 __lasx_xvfrintrne_s(v8f32 _1){return __builtin_lasx_xvfrintrne_s(_1);} ++v4i64 __lasx_xvfrintrne_d(v4f64 _1){return __builtin_lasx_xvfrintrne_d(_1);} ++v8i32 __lasx_xvfrintrz_s(v8f32 _1){return __builtin_lasx_xvfrintrz_s(_1);} ++v4i64 __lasx_xvfrintrz_d(v4f64 _1){return __builtin_lasx_xvfrintrz_d(_1);} ++v8i32 __lasx_xvfrintrp_s(v8f32 _1){return __builtin_lasx_xvfrintrp_s(_1);} ++v4i64 __lasx_xvfrintrp_d(v4f64 _1){return __builtin_lasx_xvfrintrp_d(_1);} ++v8i32 __lasx_xvfrintrm_s(v8f32 _1){return __builtin_lasx_xvfrintrm_s(_1);} ++v4i64 __lasx_xvfrintrm_d(v4f64 _1){return __builtin_lasx_xvfrintrm_d(_1);} ++v32i8 __lasx_xvld(void * _1){return __builtin_lasx_xvld(_1, 1);} ++void __lasx_xvst(v32i8 _1, void * _2){return __builtin_lasx_xvst(_1, _2, 1);} ++void __lasx_xvstelm_b(v32i8 _1, void * _2){return __builtin_lasx_xvstelm_b(_1, _2, 1, 1);} ++void __lasx_xvstelm_h(v16i16 _1, void * _2){return __builtin_lasx_xvstelm_h(_1, _2, 2, 1);} ++void __lasx_xvstelm_w(v8i32 _1, void * _2){return __builtin_lasx_xvstelm_w(_1, _2, 4, 1);} ++void __lasx_xvstelm_d(v4i64 _1, void * _2){return __builtin_lasx_xvstelm_d(_1, _2, 8, 1);} ++v8i32 __lasx_xvinsve0_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvinsve0_w(_1, _2, 1);} ++v4i64 __lasx_xvinsve0_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvinsve0_d(_1, _2, 1);} ++v8i32 __lasx_xvpickve_w(v8i32 _1){return __builtin_lasx_xvpickve_w(_1, 1);} ++v4i64 __lasx_xvpickve_d(v4i64 _1){return __builtin_lasx_xvpickve_d(_1, 1);} ++v32i8 __lasx_xvssrlrn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlrn_b_h(_1, _2);} ++v16i16 __lasx_xvssrlrn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlrn_h_w(_1, _2);} ++v8i32 __lasx_xvssrlrn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlrn_w_d(_1, _2);} ++v32i8 __lasx_xvssrln_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrln_b_h(_1, _2);} ++v16i16 __lasx_xvssrln_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrln_h_w(_1, _2);} ++v8i32 __lasx_xvssrln_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrln_w_d(_1, _2);} ++v32i8 __lasx_xvorn_v(v32i8 _1, v32i8 _2){return __builtin_lasx_xvorn_v(_1, _2);} ++v4i64 __lasx_xvldi(){return __builtin_lasx_xvldi(1);} ++v32i8 __lasx_xvldx(void * _1){return __builtin_lasx_xvldx(_1, 1);} ++void __lasx_xvstx(v32i8 _1, void * _2){return __builtin_lasx_xvstx(_1, _2, 1);} ++v4u64 __lasx_xvextl_qu_du(v4u64 _1){return __builtin_lasx_xvextl_qu_du(_1);} ++v8i32 __lasx_xvinsgr2vr_w(v8i32 _1){return __builtin_lasx_xvinsgr2vr_w(_1, 1, 1);} ++v4i64 __lasx_xvinsgr2vr_d(v4i64 _1){return __builtin_lasx_xvinsgr2vr_d(_1, 1, 1);} ++v32i8 __lasx_xvreplve0_b(v32i8 _1){return __builtin_lasx_xvreplve0_b(_1);} ++v16i16 __lasx_xvreplve0_h(v16i16 _1){return __builtin_lasx_xvreplve0_h(_1);} ++v8i32 __lasx_xvreplve0_w(v8i32 _1){return __builtin_lasx_xvreplve0_w(_1);} ++v4i64 __lasx_xvreplve0_d(v4i64 _1){return __builtin_lasx_xvreplve0_d(_1);} ++v32i8 __lasx_xvreplve0_q(v32i8 _1){return __builtin_lasx_xvreplve0_q(_1);} ++v16i16 __lasx_vext2xv_h_b(v32i8 _1){return __builtin_lasx_vext2xv_h_b(_1);} ++v8i32 __lasx_vext2xv_w_h(v16i16 _1){return __builtin_lasx_vext2xv_w_h(_1);} ++v4i64 __lasx_vext2xv_d_w(v8i32 _1){return __builtin_lasx_vext2xv_d_w(_1);} ++v8i32 __lasx_vext2xv_w_b(v32i8 _1){return 
__builtin_lasx_vext2xv_w_b(_1);} ++v4i64 __lasx_vext2xv_d_h(v16i16 _1){return __builtin_lasx_vext2xv_d_h(_1);} ++v4i64 __lasx_vext2xv_d_b(v32i8 _1){return __builtin_lasx_vext2xv_d_b(_1);} ++v16i16 __lasx_vext2xv_hu_bu(v32i8 _1){return __builtin_lasx_vext2xv_hu_bu(_1);} ++v8i32 __lasx_vext2xv_wu_hu(v16i16 _1){return __builtin_lasx_vext2xv_wu_hu(_1);} ++v4i64 __lasx_vext2xv_du_wu(v8i32 _1){return __builtin_lasx_vext2xv_du_wu(_1);} ++v8i32 __lasx_vext2xv_wu_bu(v32i8 _1){return __builtin_lasx_vext2xv_wu_bu(_1);} ++v4i64 __lasx_vext2xv_du_hu(v16i16 _1){return __builtin_lasx_vext2xv_du_hu(_1);} ++v4i64 __lasx_vext2xv_du_bu(v32i8 _1){return __builtin_lasx_vext2xv_du_bu(_1);} ++v32i8 __lasx_xvpermi_q(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpermi_q(_1, _2, 1);} ++v4i64 __lasx_xvpermi_d(v4i64 _1){return __builtin_lasx_xvpermi_d(_1, 1);} ++v8i32 __lasx_xvperm_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvperm_w(_1, _2);} ++v32i8 __lasx_xvldrepl_b(void * _1){return __builtin_lasx_xvldrepl_b(_1, 1);} ++v16i16 __lasx_xvldrepl_h(void * _1){return __builtin_lasx_xvldrepl_h(_1, 2);} ++v8i32 __lasx_xvldrepl_w(void * _1){return __builtin_lasx_xvldrepl_w(_1, 4);} ++v4i64 __lasx_xvldrepl_d(void * _1){return __builtin_lasx_xvldrepl_d(_1, 8);} ++int __lasx_xvpickve2gr_w(v8i32 _1){return __builtin_lasx_xvpickve2gr_w(_1, 1);} ++unsigned int __lasx_xvpickve2gr_wu(v8i32 _1){return __builtin_lasx_xvpickve2gr_wu(_1, 1);} ++long __lasx_xvpickve2gr_d(v4i64 _1){return __builtin_lasx_xvpickve2gr_d(_1, 1);} ++unsigned long int __lasx_xvpickve2gr_du(v4i64 _1){return __builtin_lasx_xvpickve2gr_du(_1, 1);} ++v4i64 __lasx_xvaddwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvaddwev_q_d(_1, _2);} ++v4i64 __lasx_xvaddwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvaddwev_d_w(_1, _2);} ++v8i32 __lasx_xvaddwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvaddwev_w_h(_1, _2);} ++v16i16 __lasx_xvaddwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvaddwev_h_b(_1, _2);} ++v4i64 __lasx_xvaddwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvaddwev_q_du(_1, _2);} ++v4i64 __lasx_xvaddwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvaddwev_d_wu(_1, _2);} ++v8i32 __lasx_xvaddwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvaddwev_w_hu(_1, _2);} ++v16i16 __lasx_xvaddwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvaddwev_h_bu(_1, _2);} ++v4i64 __lasx_xvsubwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsubwev_q_d(_1, _2);} ++v4i64 __lasx_xvsubwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsubwev_d_w(_1, _2);} ++v8i32 __lasx_xvsubwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsubwev_w_h(_1, _2);} ++v16i16 __lasx_xvsubwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsubwev_h_b(_1, _2);} ++v4i64 __lasx_xvsubwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsubwev_q_du(_1, _2);} ++v4i64 __lasx_xvsubwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsubwev_d_wu(_1, _2);} ++v8i32 __lasx_xvsubwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsubwev_w_hu(_1, _2);} ++v16i16 __lasx_xvsubwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsubwev_h_bu(_1, _2);} ++v4i64 __lasx_xvmulwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmulwev_q_d(_1, _2);} ++v4i64 __lasx_xvmulwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmulwev_d_w(_1, _2);} ++v8i32 __lasx_xvmulwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmulwev_w_h(_1, _2);} ++v16i16 __lasx_xvmulwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmulwev_h_b(_1, _2);} ++v4i64 __lasx_xvmulwev_q_du(v4u64 _1, v4u64 _2){return 
__builtin_lasx_xvmulwev_q_du(_1, _2);} ++v4i64 __lasx_xvmulwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmulwev_d_wu(_1, _2);} ++v8i32 __lasx_xvmulwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmulwev_w_hu(_1, _2);} ++v16i16 __lasx_xvmulwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmulwev_h_bu(_1, _2);} ++v4i64 __lasx_xvaddwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvaddwod_q_d(_1, _2);} ++v4i64 __lasx_xvaddwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvaddwod_d_w(_1, _2);} ++v8i32 __lasx_xvaddwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvaddwod_w_h(_1, _2);} ++v16i16 __lasx_xvaddwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvaddwod_h_b(_1, _2);} ++v4i64 __lasx_xvaddwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvaddwod_q_du(_1, _2);} ++v4i64 __lasx_xvaddwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvaddwod_d_wu(_1, _2);} ++v8i32 __lasx_xvaddwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvaddwod_w_hu(_1, _2);} ++v16i16 __lasx_xvaddwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvaddwod_h_bu(_1, _2);} ++v4i64 __lasx_xvsubwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsubwod_q_d(_1, _2);} ++v4i64 __lasx_xvsubwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsubwod_d_w(_1, _2);} ++v8i32 __lasx_xvsubwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsubwod_w_h(_1, _2);} ++v16i16 __lasx_xvsubwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsubwod_h_b(_1, _2);} ++v4i64 __lasx_xvsubwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsubwod_q_du(_1, _2);} ++v4i64 __lasx_xvsubwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsubwod_d_wu(_1, _2);} ++v8i32 __lasx_xvsubwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsubwod_w_hu(_1, _2);} ++v16i16 __lasx_xvsubwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsubwod_h_bu(_1, _2);} ++v4i64 __lasx_xvmulwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmulwod_q_d(_1, _2);} ++v4i64 __lasx_xvmulwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmulwod_d_w(_1, _2);} ++v8i32 __lasx_xvmulwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmulwod_w_h(_1, _2);} ++v16i16 __lasx_xvmulwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmulwod_h_b(_1, _2);} ++v4i64 __lasx_xvmulwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmulwod_q_du(_1, _2);} ++v4i64 __lasx_xvmulwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmulwod_d_wu(_1, _2);} ++v8i32 __lasx_xvmulwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmulwod_w_hu(_1, _2);} ++v16i16 __lasx_xvmulwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmulwod_h_bu(_1, _2);} ++v4i64 __lasx_xvaddwev_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvaddwev_d_wu_w(_1, _2);} ++v8i32 __lasx_xvaddwev_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvaddwev_w_hu_h(_1, _2);} ++v16i16 __lasx_xvaddwev_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvaddwev_h_bu_b(_1, _2);} ++v4i64 __lasx_xvmulwev_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvmulwev_d_wu_w(_1, _2);} ++v8i32 __lasx_xvmulwev_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvmulwev_w_hu_h(_1, _2);} ++v16i16 __lasx_xvmulwev_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvmulwev_h_bu_b(_1, _2);} ++v4i64 __lasx_xvaddwod_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvaddwod_d_wu_w(_1, _2);} ++v8i32 __lasx_xvaddwod_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvaddwod_w_hu_h(_1, _2);} ++v16i16 __lasx_xvaddwod_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvaddwod_h_bu_b(_1, _2);} ++v4i64 __lasx_xvmulwod_d_wu_w(v8u32 _1, v8i32 
_2){return __builtin_lasx_xvmulwod_d_wu_w(_1, _2);} ++v8i32 __lasx_xvmulwod_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvmulwod_w_hu_h(_1, _2);} ++v16i16 __lasx_xvmulwod_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvmulwod_h_bu_b(_1, _2);} ++v4i64 __lasx_xvhaddw_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvhaddw_q_d(_1, _2);} ++v4u64 __lasx_xvhaddw_qu_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvhaddw_qu_du(_1, _2);} ++v4i64 __lasx_xvhsubw_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvhsubw_q_d(_1, _2);} ++v4u64 __lasx_xvhsubw_qu_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvhsubw_qu_du(_1, _2);} ++v4i64 __lasx_xvmaddwev_q_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmaddwev_q_d(_1, _2, _3);} ++v4i64 __lasx_xvmaddwev_d_w(v4i64 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmaddwev_d_w(_1, _2, _3);} ++v8i32 __lasx_xvmaddwev_w_h(v8i32 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmaddwev_w_h(_1, _2, _3);} ++v16i16 __lasx_xvmaddwev_h_b(v16i16 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmaddwev_h_b(_1, _2, _3);} ++v4u64 __lasx_xvmaddwev_q_du(v4u64 _1, v4u64 _2, v4u64 _3){return __builtin_lasx_xvmaddwev_q_du(_1, _2, _3);} ++v4u64 __lasx_xvmaddwev_d_wu(v4u64 _1, v8u32 _2, v8u32 _3){return __builtin_lasx_xvmaddwev_d_wu(_1, _2, _3);} ++v8u32 __lasx_xvmaddwev_w_hu(v8u32 _1, v16u16 _2, v16u16 _3){return __builtin_lasx_xvmaddwev_w_hu(_1, _2, _3);} ++v16u16 __lasx_xvmaddwev_h_bu(v16u16 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvmaddwev_h_bu(_1, _2, _3);} ++v4i64 __lasx_xvmaddwod_q_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmaddwod_q_d(_1, _2, _3);} ++v4i64 __lasx_xvmaddwod_d_w(v4i64 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmaddwod_d_w(_1, _2, _3);} ++v8i32 __lasx_xvmaddwod_w_h(v8i32 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmaddwod_w_h(_1, _2, _3);} ++v16i16 __lasx_xvmaddwod_h_b(v16i16 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmaddwod_h_b(_1, _2, _3);} ++v4u64 __lasx_xvmaddwod_q_du(v4u64 _1, v4u64 _2, v4u64 _3){return __builtin_lasx_xvmaddwod_q_du(_1, _2, _3);} ++v4u64 __lasx_xvmaddwod_d_wu(v4u64 _1, v8u32 _2, v8u32 _3){return __builtin_lasx_xvmaddwod_d_wu(_1, _2, _3);} ++v8u32 __lasx_xvmaddwod_w_hu(v8u32 _1, v16u16 _2, v16u16 _3){return __builtin_lasx_xvmaddwod_w_hu(_1, _2, _3);} ++v16u16 __lasx_xvmaddwod_h_bu(v16u16 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvmaddwod_h_bu(_1, _2, _3);} ++v4i64 __lasx_xvmaddwev_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3){return __builtin_lasx_xvmaddwev_q_du_d(_1, _2, _3);} ++v4i64 __lasx_xvmaddwev_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3){return __builtin_lasx_xvmaddwev_d_wu_w(_1, _2, _3);} ++v8i32 __lasx_xvmaddwev_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3){return __builtin_lasx_xvmaddwev_w_hu_h(_1, _2, _3);} ++v16i16 __lasx_xvmaddwev_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3){return __builtin_lasx_xvmaddwev_h_bu_b(_1, _2, _3);} ++v4i64 __lasx_xvmaddwod_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3){return __builtin_lasx_xvmaddwod_q_du_d(_1, _2, _3);} ++v4i64 __lasx_xvmaddwod_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3){return __builtin_lasx_xvmaddwod_d_wu_w(_1, _2, _3);} ++v8i32 __lasx_xvmaddwod_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3){return __builtin_lasx_xvmaddwod_w_hu_h(_1, _2, _3);} ++v16i16 __lasx_xvmaddwod_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3){return __builtin_lasx_xvmaddwod_h_bu_b(_1, _2, _3);} ++v32i8 __lasx_xvrotr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvrotr_b(_1, _2);} ++v16i16 __lasx_xvrotr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvrotr_h(_1, _2);} ++v8i32 __lasx_xvrotr_w(v8i32 _1, v8i32 _2){return 
__builtin_lasx_xvrotr_w(_1, _2);} ++v4i64 __lasx_xvrotr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvrotr_d(_1, _2);} ++v4i64 __lasx_xvadd_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadd_q(_1, _2);} ++v4i64 __lasx_xvsub_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsub_q(_1, _2);} ++v4i64 __lasx_xvaddwev_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvaddwev_q_du_d(_1, _2);} ++v4i64 __lasx_xvaddwod_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvaddwod_q_du_d(_1, _2);} ++v4i64 __lasx_xvmulwev_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvmulwev_q_du_d(_1, _2);} ++v4i64 __lasx_xvmulwod_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvmulwod_q_du_d(_1, _2);} ++v32i8 __lasx_xvmskgez_b(v32i8 _1){return __builtin_lasx_xvmskgez_b(_1);} ++v32i8 __lasx_xvmsknz_b(v32i8 _1){return __builtin_lasx_xvmsknz_b(_1);} ++v16i16 __lasx_xvexth_h_b(v32i8 _1){return __builtin_lasx_xvexth_h_b(_1);} ++v8i32 __lasx_xvexth_w_h(v16i16 _1){return __builtin_lasx_xvexth_w_h(_1);} ++v4i64 __lasx_xvexth_d_w(v8i32 _1){return __builtin_lasx_xvexth_d_w(_1);} ++v4i64 __lasx_xvexth_q_d(v4i64 _1){return __builtin_lasx_xvexth_q_d(_1);} ++v16u16 __lasx_xvexth_hu_bu(v32u8 _1){return __builtin_lasx_xvexth_hu_bu(_1);} ++v8u32 __lasx_xvexth_wu_hu(v16u16 _1){return __builtin_lasx_xvexth_wu_hu(_1);} ++v4u64 __lasx_xvexth_du_wu(v8u32 _1){return __builtin_lasx_xvexth_du_wu(_1);} ++v4u64 __lasx_xvexth_qu_du(v4u64 _1){return __builtin_lasx_xvexth_qu_du(_1);} ++v32i8 __lasx_xvrotri_b(v32i8 _1){return __builtin_lasx_xvrotri_b(_1, 1);} ++v16i16 __lasx_xvrotri_h(v16i16 _1){return __builtin_lasx_xvrotri_h(_1, 1);} ++v8i32 __lasx_xvrotri_w(v8i32 _1){return __builtin_lasx_xvrotri_w(_1, 1);} ++v4i64 __lasx_xvrotri_d(v4i64 _1){return __builtin_lasx_xvrotri_d(_1, 1);} ++v4i64 __lasx_xvextl_q_d(v4i64 _1){return __builtin_lasx_xvextl_q_d(_1);} ++v32i8 __lasx_xvsrlni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlni_b_h(_1, _2, 1);} ++v16i16 __lasx_xvsrlni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlni_h_w(_1, _2, 1);} ++v8i32 __lasx_xvsrlni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlni_w_d(_1, _2, 1);} ++v4i64 __lasx_xvsrlni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlni_d_q(_1, _2, 1);} ++v32i8 __lasx_xvsrlrni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlrni_b_h(_1, _2, 1);} ++v16i16 __lasx_xvsrlrni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlrni_h_w(_1, _2, 1);} ++v8i32 __lasx_xvsrlrni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlrni_w_d(_1, _2, 1);} ++v4i64 __lasx_xvsrlrni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlrni_d_q(_1, _2, 1);} ++v32i8 __lasx_xvssrlni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrlni_b_h(_1, _2, 1);} ++v16i16 __lasx_xvssrlni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlni_h_w(_1, _2, 1);} ++v8i32 __lasx_xvssrlni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlni_w_d(_1, _2, 1);} ++v4i64 __lasx_xvssrlni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlni_d_q(_1, _2, 1);} ++v32u8 __lasx_xvssrlni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrlni_bu_h(_1, _2, 1);} ++v16u16 __lasx_xvssrlni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrlni_hu_w(_1, _2, 1);} ++v8u32 __lasx_xvssrlni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrlni_wu_d(_1, _2, 1);} ++v4u64 __lasx_xvssrlni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrlni_du_q(_1, _2, 1);} ++v32i8 __lasx_xvssrlrni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrlrni_b_h(_1, _2, 1);} ++v16i16 __lasx_xvssrlrni_h_w(v16i16 _1, v16i16 _2){return 
__builtin_lasx_xvssrlrni_h_w(_1, _2, 1);} ++v8i32 __lasx_xvssrlrni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlrni_w_d(_1, _2, 1);} ++v4i64 __lasx_xvssrlrni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlrni_d_q(_1, _2, 1);} ++v32u8 __lasx_xvssrlrni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrlrni_bu_h(_1, _2, 1);} ++v16u16 __lasx_xvssrlrni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrlrni_hu_w(_1, _2, 1);} ++v8u32 __lasx_xvssrlrni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrlrni_wu_d(_1, _2, 1);} ++v4u64 __lasx_xvssrlrni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrlrni_du_q(_1, _2, 1);} ++v32i8 __lasx_xvsrani_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrani_b_h(_1, _2, 1);} ++v16i16 __lasx_xvsrani_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrani_h_w(_1, _2, 1);} ++v8i32 __lasx_xvsrani_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrani_w_d(_1, _2, 1);} ++v4i64 __lasx_xvsrani_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrani_d_q(_1, _2, 1);} ++v32i8 __lasx_xvsrarni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrarni_b_h(_1, _2, 1);} ++v16i16 __lasx_xvsrarni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrarni_h_w(_1, _2, 1);} ++v8i32 __lasx_xvsrarni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrarni_w_d(_1, _2, 1);} ++v4i64 __lasx_xvsrarni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrarni_d_q(_1, _2, 1);} ++v32i8 __lasx_xvssrani_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrani_b_h(_1, _2, 1);} ++v16i16 __lasx_xvssrani_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrani_h_w(_1, _2, 1);} ++v8i32 __lasx_xvssrani_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrani_w_d(_1, _2, 1);} ++v4i64 __lasx_xvssrani_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrani_d_q(_1, _2, 1);} ++v32u8 __lasx_xvssrani_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrani_bu_h(_1, _2, 1);} ++v16u16 __lasx_xvssrani_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrani_hu_w(_1, _2, 1);} ++v8u32 __lasx_xvssrani_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrani_wu_d(_1, _2, 1);} ++v4u64 __lasx_xvssrani_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrani_du_q(_1, _2, 1);} ++v32i8 __lasx_xvssrarni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrarni_b_h(_1, _2, 1);} ++v16i16 __lasx_xvssrarni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrarni_h_w(_1, _2, 1);} ++v8i32 __lasx_xvssrarni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrarni_w_d(_1, _2, 1);} ++v4i64 __lasx_xvssrarni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrarni_d_q(_1, _2, 1);} ++v32u8 __lasx_xvssrarni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrarni_bu_h(_1, _2, 1);} ++v16u16 __lasx_xvssrarni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrarni_hu_w(_1, _2, 1);} ++v8u32 __lasx_xvssrarni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrarni_wu_d(_1, _2, 1);} ++v4u64 __lasx_xvssrarni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrarni_du_q(_1, _2, 1);} ++int __lasx_xbnz_b(v32u8 _1){return __builtin_lasx_xbnz_b(_1);} ++int __lasx_xbnz_d(v4u64 _1){return __builtin_lasx_xbnz_d(_1);} ++int __lasx_xbnz_h(v16u16 _1){return __builtin_lasx_xbnz_h(_1);} ++int __lasx_xbnz_v(v32u8 _1){return __builtin_lasx_xbnz_v(_1);} ++int __lasx_xbnz_w(v8u32 _1){return __builtin_lasx_xbnz_w(_1);} ++int __lasx_xbz_b(v32u8 _1){return __builtin_lasx_xbz_b(_1);} ++int __lasx_xbz_d(v4u64 _1){return __builtin_lasx_xbz_d(_1);} ++int __lasx_xbz_h(v16u16 _1){return __builtin_lasx_xbz_h(_1);} ++int __lasx_xbz_v(v32u8 _1){return 
__builtin_lasx_xbz_v(_1);} ++int __lasx_xbz_w(v8u32 _1){return __builtin_lasx_xbz_w(_1);} ++v4i64 __lasx_xvfcmp_caf_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_caf_d(_1, _2);} ++v8i32 __lasx_xvfcmp_caf_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_caf_s(_1, _2);} ++v4i64 __lasx_xvfcmp_ceq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_ceq_d(_1, _2);} ++v8i32 __lasx_xvfcmp_ceq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_ceq_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cle_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cle_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cle_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cle_s(_1, _2);} ++v4i64 __lasx_xvfcmp_clt_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_clt_d(_1, _2);} ++v8i32 __lasx_xvfcmp_clt_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_clt_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cne_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cne_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cne_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cne_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cor_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cor_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cor_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cor_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cueq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cueq_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cueq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cueq_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cule_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cule_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cule_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cule_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cult_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cult_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cult_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cult_s(_1, _2);} ++v4i64 __lasx_xvfcmp_cun_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cun_d(_1, _2);} ++v4i64 __lasx_xvfcmp_cune_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cune_d(_1, _2);} ++v8i32 __lasx_xvfcmp_cune_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cune_s(_1, _2);} ++v8i32 __lasx_xvfcmp_cun_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cun_s(_1, _2);} ++v4i64 __lasx_xvfcmp_saf_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_saf_d(_1, _2);} ++v8i32 __lasx_xvfcmp_saf_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_saf_s(_1, _2);} ++v4i64 __lasx_xvfcmp_seq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_seq_d(_1, _2);} ++v8i32 __lasx_xvfcmp_seq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_seq_s(_1, _2);} ++v4i64 __lasx_xvfcmp_sle_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sle_d(_1, _2);} ++v8i32 __lasx_xvfcmp_sle_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sle_s(_1, _2);} ++v4i64 __lasx_xvfcmp_slt_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_slt_d(_1, _2);} ++v8i32 __lasx_xvfcmp_slt_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_slt_s(_1, _2);} ++v4i64 __lasx_xvfcmp_sne_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sne_d(_1, _2);} ++v8i32 __lasx_xvfcmp_sne_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sne_s(_1, _2);} ++v4i64 __lasx_xvfcmp_sor_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sor_d(_1, _2);} ++v8i32 __lasx_xvfcmp_sor_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sor_s(_1, _2);} ++v4i64 __lasx_xvfcmp_sueq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sueq_d(_1, _2);} ++v8i32 __lasx_xvfcmp_sueq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sueq_s(_1, _2);} ++v4i64 __lasx_xvfcmp_sule_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sule_d(_1, _2);} 
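/* Editorial note, not part of the patch: the __lasx_xvfcmp_* wrappers in
   this block only check that each builtin expands to the corresponding
   xvfcmp instruction.  The c* conditions are quiet compares, the s*
   conditions additionally signal an invalid-operation exception on NaN
   operands, and every variant returns a per-lane mask (all ones where the
   condition holds, all zeros elsewhere).  A minimal usage sketch, assuming
   the v8f32/v8i32 typedefs declared earlier in this file; the helper name
   is hypothetical: */
v8f32 editorial_zero_lanes_at_or_below (v8f32 x, v8f32 limit)
{
  v8i32 keep = __lasx_xvfcmp_clt_s (limit, x);  /* -1 in lanes where limit < x */
  return (v8f32) ((v8i32) x & keep);            /* clear all other lanes */
}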
++v8i32 __lasx_xvfcmp_sule_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sule_s(_1, _2);}
++v4i64 __lasx_xvfcmp_sult_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sult_d(_1, _2);}
++v8i32 __lasx_xvfcmp_sult_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sult_s(_1, _2);}
++v4i64 __lasx_xvfcmp_sun_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sun_d(_1, _2);}
++v4i64 __lasx_xvfcmp_sune_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sune_d(_1, _2);}
++v8i32 __lasx_xvfcmp_sune_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sune_s(_1, _2);}
++v8i32 __lasx_xvfcmp_sun_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sun_s(_1, _2);}
++v4f64 __lasx_xvpickve_d_f(v4f64 _1){return __builtin_lasx_xvpickve_d_f(_1, 1);}
++v8f32 __lasx_xvpickve_w_f(v8f32 _1){return __builtin_lasx_xvpickve_w_f(_1, 1);}
++v32i8 __lasx_xvrepli_b(){return __builtin_lasx_xvrepli_b(1);}
++v4i64 __lasx_xvrepli_d(){return __builtin_lasx_xvrepli_d(1);}
++v16i16 __lasx_xvrepli_h(){return __builtin_lasx_xvrepli_h(1);}
++v8i32 __lasx_xvrepli_w(){return __builtin_lasx_xvrepli_w(1);}
+diff --git a/gcc/testsuite/gcc.target/loongarch/loongarch.exp b/gcc/testsuite/gcc.target/loongarch/loongarch.exp
+new file mode 100644
+index 000000000..be9543d38
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/loongarch.exp
+@@ -0,0 +1,40 @@
++# Copyright (C) 2017-2018 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3. If not see
++# <http://www.gnu.org/licenses/>.
++
++# GCC testsuite that uses the `dg.exp' driver.
++
++# Exit immediately if this isn't a LoongArch target.
++if ![istarget loongarch*-*-*] then {
++    return
++}
++
++# Load support procs.
++load_lib gcc-dg.exp
++
++# If a testcase doesn't have special options, use these.
++global DEFAULT_CFLAGS
++if ![info exists DEFAULT_CFLAGS] then {
++    set DEFAULT_CFLAGS " "
++}
++
++# Initialize `dg'.
++dg-init
++
++# Main loop.
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] \
++	"" $DEFAULT_CFLAGS
++# All done.
++dg-finish +diff --git a/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c b/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c +new file mode 100644 +index 000000000..296869dc5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c +@@ -0,0 +1,1461 @@ ++/* Test builtins for LOONGARCH LSX ASE instructions */ ++/* { dg-do compile } */ ++/* { dg-options "-mlsx" } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_b:.*vsll\\.b.*lsx_vsll_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_h:.*vsll\\.h.*lsx_vsll_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_w:.*vsll\\.w.*lsx_vsll_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsll_d:.*vsll\\.d.*lsx_vsll_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_b:.*vslli\\.b.*lsx_vslli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_h:.*vslli\\.h.*lsx_vslli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_w:.*vslli\\.w.*lsx_vslli_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslli_d:.*vslli\\.d.*lsx_vslli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_b:.*vsra\\.b.*lsx_vsra_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_h:.*vsra\\.h.*lsx_vsra_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_w:.*vsra\\.w.*lsx_vsra_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsra_d:.*vsra\\.d.*lsx_vsra_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_b:.*vsrai\\.b.*lsx_vsrai_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_h:.*vsrai\\.h.*lsx_vsrai_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_w:.*vsrai\\.w.*lsx_vsrai_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrai_d:.*vsrai\\.d.*lsx_vsrai_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_b:.*vsrar\\.b.*lsx_vsrar_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_h:.*vsrar\\.h.*lsx_vsrar_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_w:.*vsrar\\.w.*lsx_vsrar_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrar_d:.*vsrar\\.d.*lsx_vsrar_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_b:.*vsrari\\.b.*lsx_vsrari_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_h:.*vsrari\\.h.*lsx_vsrari_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_w:.*vsrari\\.w.*lsx_vsrari_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrari_d:.*vsrari\\.d.*lsx_vsrari_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_b:.*vsrl\\.b.*lsx_vsrl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_h:.*vsrl\\.h.*lsx_vsrl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_w:.*vsrl\\.w.*lsx_vsrl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrl_d:.*vsrl\\.d.*lsx_vsrl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_b:.*vsrli\\.b.*lsx_vsrli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_h:.*vsrli\\.h.*lsx_vsrli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_w:.*vsrli\\.w.*lsx_vsrli_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrli_d:.*vsrli\\.d.*lsx_vsrli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_b:.*vsrlr\\.b.*lsx_vsrlr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_h:.*vsrlr\\.h.*lsx_vsrlr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_w:.*vsrlr\\.w.*lsx_vsrlr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlr_d:.*vsrlr\\.d.*lsx_vsrlr_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vsrlri_b:.*vsrlri\\.b.*lsx_vsrlri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_h:.*vsrlri\\.h.*lsx_vsrlri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_w:.*vsrlri\\.w.*lsx_vsrlri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlri_d:.*vsrlri\\.d.*lsx_vsrlri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_b:.*vbitclr\\.b.*lsx_vbitclr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_h:.*vbitclr\\.h.*lsx_vbitclr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_w:.*vbitclr\\.w.*lsx_vbitclr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclr_d:.*vbitclr\\.d.*lsx_vbitclr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_b:.*vbitclri\\.b.*lsx_vbitclri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_h:.*vbitclri\\.h.*lsx_vbitclri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_w:.*vbitclri\\.w.*lsx_vbitclri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitclri_d:.*vbitclri\\.d.*lsx_vbitclri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_b:.*vbitset\\.b.*lsx_vbitset_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_h:.*vbitset\\.h.*lsx_vbitset_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_w:.*vbitset\\.w.*lsx_vbitset_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitset_d:.*vbitset\\.d.*lsx_vbitset_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_b:.*vbitseti\\.b.*lsx_vbitseti_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_h:.*vbitseti\\.h.*lsx_vbitseti_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_w:.*vbitseti\\.w.*lsx_vbitseti_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseti_d:.*vbitseti\\.d.*lsx_vbitseti_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_b:.*vbitrev\\.b.*lsx_vbitrev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_h:.*vbitrev\\.h.*lsx_vbitrev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_w:.*vbitrev\\.w.*lsx_vbitrev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrev_d:.*vbitrev\\.d.*lsx_vbitrev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_b:.*vbitrevi\\.b.*lsx_vbitrevi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_h:.*vbitrevi\\.h.*lsx_vbitrevi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_w:.*vbitrevi\\.w.*lsx_vbitrevi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitrevi_d:.*vbitrevi\\.d.*lsx_vbitrevi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_b:.*vadd\\.b.*lsx_vadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_h:.*vadd\\.h.*lsx_vadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_w:.*vadd\\.w.*lsx_vadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_d:.*vadd\\.d.*lsx_vadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_bu:.*vaddi\\.bu.*lsx_vaddi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_hu:.*vaddi\\.hu.*lsx_vaddi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_wu:.*vaddi\\.wu.*lsx_vaddi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddi_du:.*vaddi\\.du.*lsx_vaddi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_b:.*vsub\\.b.*lsx_vsub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_h:.*vsub\\.h.*lsx_vsub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_w:.*vsub\\.w.*lsx_vsub_w" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vsub_d:.*vsub\\.d.*lsx_vsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_bu:.*vsubi\\.bu.*lsx_vsubi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_hu:.*vsubi\\.hu.*lsx_vsubi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_wu:.*vsubi\\.wu.*lsx_vsubi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubi_du:.*vsubi\\.du.*lsx_vsubi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_b:.*vmax\\.b.*lsx_vmax_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_h:.*vmax\\.h.*lsx_vmax_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_w:.*vmax\\.w.*lsx_vmax_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_d:.*vmax\\.d.*lsx_vmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_b:.*vmaxi\\.b.*lsx_vmaxi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_h:.*vmaxi\\.h.*lsx_vmaxi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_w:.*vmaxi\\.w.*lsx_vmaxi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_d:.*vmaxi\\.d.*lsx_vmaxi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_bu:.*vmax\\.bu.*lsx_vmax_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_hu:.*vmax\\.hu.*lsx_vmax_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_wu:.*vmax\\.wu.*lsx_vmax_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmax_du:.*vmax\\.du.*lsx_vmax_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_bu:.*vmaxi\\.bu.*lsx_vmaxi_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_hu:.*vmaxi\\.hu.*lsx_vmaxi_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_wu:.*vmaxi\\.wu.*lsx_vmaxi_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaxi_du:.*vmaxi\\.du.*lsx_vmaxi_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_b:.*vmin\\.b.*lsx_vmin_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_h:.*vmin\\.h.*lsx_vmin_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_w:.*vmin\\.w.*lsx_vmin_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_d:.*vmin\\.d.*lsx_vmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_b:.*vmini\\.b.*lsx_vmini_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_h:.*vmini\\.h.*lsx_vmini_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_w:.*vmini\\.w.*lsx_vmini_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_d:.*vmini\\.d.*lsx_vmini_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_bu:.*vmin\\.bu.*lsx_vmin_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_hu:.*vmin\\.hu.*lsx_vmin_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_wu:.*vmin\\.wu.*lsx_vmin_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmin_du:.*vmin\\.du.*lsx_vmin_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_bu:.*vmini\\.bu.*lsx_vmini_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_hu:.*vmini\\.hu.*lsx_vmini_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_wu:.*vmini\\.wu.*lsx_vmini_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmini_du:.*vmini\\.du.*lsx_vmini_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_b:.*vseq\\.b.*lsx_vseq_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_h:.*vseq\\.h.*lsx_vseq_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_w:.*vseq\\.w.*lsx_vseq_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseq_d:.*vseq\\.d.*lsx_vseq_d" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lsx_vseqi_b:.*vseqi\\.b.*lsx_vseqi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_h:.*vseqi\\.h.*lsx_vseqi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_w:.*vseqi\\.w.*lsx_vseqi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vseqi_d:.*vseqi\\.d.*lsx_vseqi_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_b:.*vslti\\.b.*lsx_vslti_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_b:.*vslt\\.b.*lsx_vslt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_h:.*vslt\\.h.*lsx_vslt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_w:.*vslt\\.w.*lsx_vslt_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_d:.*vslt\\.d.*lsx_vslt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_h:.*vslti\\.h.*lsx_vslti_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_w:.*vslti\\.w.*lsx_vslti_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_d:.*vslti\\.d.*lsx_vslti_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_bu:.*vslt\\.bu.*lsx_vslt_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_hu:.*vslt\\.hu.*lsx_vslt_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_wu:.*vslt\\.wu.*lsx_vslt_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslt_du:.*vslt\\.du.*lsx_vslt_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_bu:.*vslti\\.bu.*lsx_vslti_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_hu:.*vslti\\.hu.*lsx_vslti_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_wu:.*vslti\\.wu.*lsx_vslti_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslti_du:.*vslti\\.du.*lsx_vslti_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_b:.*vsle\\.b.*lsx_vsle_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_h:.*vsle\\.h.*lsx_vsle_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_w:.*vsle\\.w.*lsx_vsle_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_d:.*vsle\\.d.*lsx_vsle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_b:.*vslei\\.b.*lsx_vslei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_h:.*vslei\\.h.*lsx_vslei_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_w:.*vslei\\.w.*lsx_vslei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_d:.*vslei\\.d.*lsx_vslei_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_bu:.*vsle\\.bu.*lsx_vsle_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_hu:.*vsle\\.hu.*lsx_vsle_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_wu:.*vsle\\.wu.*lsx_vsle_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsle_du:.*vsle\\.du.*lsx_vsle_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_bu:.*vslei\\.bu.*lsx_vslei_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_hu:.*vslei\\.hu.*lsx_vslei_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_wu:.*vslei\\.wu.*lsx_vslei_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vslei_du:.*vslei\\.du.*lsx_vslei_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_b:.*vsat\\.b.*lsx_vsat_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_h:.*vsat\\.h.*lsx_vsat_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_w:.*vsat\\.w.*lsx_vsat_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_d:.*vsat\\.d.*lsx_vsat_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_bu:.*vsat\\.bu.*lsx_vsat_bu" 1 } } */ ++/* { 
dg-final { scan-assembler-times "lsx_vsat_hu:.*vsat\\.hu.*lsx_vsat_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_wu:.*vsat\\.wu.*lsx_vsat_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsat_du:.*vsat\\.du.*lsx_vsat_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_b:.*vadda\\.b.*lsx_vadda_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_h:.*vadda\\.h.*lsx_vadda_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_w:.*vadda\\.w.*lsx_vadda_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadda_d:.*vadda\\.d.*lsx_vadda_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_b:.*vsadd\\.b.*lsx_vsadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_h:.*vsadd\\.h.*lsx_vsadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_w:.*vsadd\\.w.*lsx_vsadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_d:.*vsadd\\.d.*lsx_vsadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_bu:.*vsadd\\.bu.*lsx_vsadd_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_hu:.*vsadd\\.hu.*lsx_vsadd_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_wu:.*vsadd\\.wu.*lsx_vsadd_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsadd_du:.*vsadd\\.du.*lsx_vsadd_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_b:.*vavg\\.b.*lsx_vavg_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_h:.*vavg\\.h.*lsx_vavg_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_w:.*vavg\\.w.*lsx_vavg_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_d:.*vavg\\.d.*lsx_vavg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_bu:.*vavg\\.bu.*lsx_vavg_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_hu:.*vavg\\.hu.*lsx_vavg_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_wu:.*vavg\\.wu.*lsx_vavg_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavg_du:.*vavg\\.du.*lsx_vavg_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_b:.*vavgr\\.b.*lsx_vavgr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_h:.*vavgr\\.h.*lsx_vavgr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_w:.*vavgr\\.w.*lsx_vavgr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_d:.*vavgr\\.d.*lsx_vavgr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_bu:.*vavgr\\.bu.*lsx_vavgr_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_hu:.*vavgr\\.hu.*lsx_vavgr_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_wu:.*vavgr\\.wu.*lsx_vavgr_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vavgr_du:.*vavgr\\.du.*lsx_vavgr_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_b:.*vssub\\.b.*lsx_vssub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_h:.*vssub\\.h.*lsx_vssub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_w:.*vssub\\.w.*lsx_vssub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_d:.*vssub\\.d.*lsx_vssub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_bu:.*vssub\\.bu.*lsx_vssub_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_hu:.*vssub\\.hu.*lsx_vssub_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_wu:.*vssub\\.wu.*lsx_vssub_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssub_du:.*vssub\\.du.*lsx_vssub_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_b:.*vabsd\\.b.*lsx_vabsd_b" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vabsd_h:.*vabsd\\.h.*lsx_vabsd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_w:.*vabsd\\.w.*lsx_vabsd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_d:.*vabsd\\.d.*lsx_vabsd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_bu:.*vabsd\\.bu.*lsx_vabsd_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_hu:.*vabsd\\.hu.*lsx_vabsd_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_wu:.*vabsd\\.wu.*lsx_vabsd_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vabsd_du:.*vabsd\\.du.*lsx_vabsd_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_b:.*vmul\\.b.*lsx_vmul_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_h:.*vmul\\.h.*lsx_vmul_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_w:.*vmul\\.w.*lsx_vmul_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmul_d:.*vmul\\.d.*lsx_vmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_b:.*vmadd\\.b.*lsx_vmadd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_h:.*vmadd\\.h.*lsx_vmadd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_w:.*vmadd\\.w.*lsx_vmadd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmadd_d:.*vmadd\\.d.*lsx_vmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_b:.*vmsub\\.b.*lsx_vmsub_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_h:.*vmsub\\.h.*lsx_vmsub_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_w:.*vmsub\\.w.*lsx_vmsub_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsub_d:.*vmsub\\.d.*lsx_vmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_b:.*vdiv\\.b.*lsx_vdiv_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_h:.*vdiv\\.h.*lsx_vdiv_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_w:.*vdiv\\.w.*lsx_vdiv_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_d:.*vdiv\\.d.*lsx_vdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_bu:.*vdiv\\.bu.*lsx_vdiv_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_hu:.*vdiv\\.hu.*lsx_vdiv_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_wu:.*vdiv\\.wu.*lsx_vdiv_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vdiv_du:.*vdiv\\.du.*lsx_vdiv_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_h_b:.*vhaddw\\.h\\.b.*lsx_vhaddw_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_w_h:.*vhaddw\\.w\\.h.*lsx_vhaddw_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_d_w:.*vhaddw\\.d\\.w.*lsx_vhaddw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_hu_bu:.*vhaddw\\.hu\\.bu.*lsx_vhaddw_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_wu_hu:.*vhaddw\\.wu\\.hu.*lsx_vhaddw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_du_wu:.*vhaddw\\.du\\.wu.*lsx_vhaddw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_h_b:.*vhsubw\\.h\\.b.*lsx_vhsubw_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_w_h:.*vhsubw\\.w\\.h.*lsx_vhsubw_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_d_w:.*vhsubw\\.d\\.w.*lsx_vhsubw_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_hu_bu:.*vhsubw\\.hu\\.bu.*lsx_vhsubw_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_wu_hu:.*vhsubw\\.wu\\.hu.*lsx_vhsubw_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_du_wu:.*vhsubw\\.du\\.wu.*lsx_vhsubw_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vmod_b:.*vmod\\.b.*lsx_vmod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_h:.*vmod\\.h.*lsx_vmod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_w:.*vmod\\.w.*lsx_vmod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_d:.*vmod\\.d.*lsx_vmod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_bu:.*vmod\\.bu.*lsx_vmod_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_hu:.*vmod\\.hu.*lsx_vmod_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_wu:.*vmod\\.wu.*lsx_vmod_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmod_du:.*vmod\\.du.*lsx_vmod_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_b:.*vreplve\\.b.*lsx_vreplve_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_h:.*vreplve\\.h.*lsx_vreplve_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_w:.*vreplve\\.w.*lsx_vreplve_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplve_d:.*vreplve\\.d.*lsx_vreplve_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_b:.*vreplvei\\.b.*lsx_vreplvei_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_h:.*vreplvei\\.h.*lsx_vreplvei_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_w:.*vreplvei\\.w.*lsx_vreplvei_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplvei_d:.*vreplvei\\.d.*lsx_vreplvei_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_b:.*vpickev\\.b.*lsx_vpickev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_h:.*vpickev\\.h.*lsx_vpickev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_w:.*vpickev\\.w.*lsx_vpickev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickev_d:.*vilvl\\.d.*lsx_vpickev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_b:.*vpickod\\.b.*lsx_vpickod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_h:.*vpickod\\.h.*lsx_vpickod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_w:.*vpickod\\.w.*lsx_vpickod_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickod_d:.*vilvh\\.d.*lsx_vpickod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_b:.*vilvh\\.b.*lsx_vilvh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_h:.*vilvh\\.h.*lsx_vilvh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_w:.*vilvh\\.w.*lsx_vilvh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvh_d:.*vilvh\\.d.*lsx_vilvh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_b:.*vilvl\\.b.*lsx_vilvl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_h:.*vilvl\\.h.*lsx_vilvl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_w:.*vilvl\\.w.*lsx_vilvl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vilvl_d:.*vilvl\\.d.*lsx_vilvl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_b:.*vpackev\\.b.*lsx_vpackev_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_h:.*vpackev\\.h.*lsx_vpackev_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_w:.*vpackev\\.w.*lsx_vpackev_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackev_d:.*vilvl\\.d.*lsx_vpackev_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_b:.*vpackod\\.b.*lsx_vpackod_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_h:.*vpackod\\.h.*lsx_vpackod_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpackod_w:.*vpackod\\.w.*lsx_vpackod_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vpackod_d:.*vilvh\\.d.*lsx_vpackod_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_h:.*vshuf\\.h.*lsx_vshuf_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_w:.*vshuf\\.w.*lsx_vshuf_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_d:.*vshuf\\.d.*lsx_vshuf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vand_v:.*vand\\.v.*lsx_vand_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vandi_b:.*vandi\\.b.*lsx_vandi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vor_v:.*vor\\.v.*lsx_vor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vori_b:.*vbitseti\\.b.*lsx_vori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vnor_v:.*vnor\\.v.*lsx_vnor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vnori_b:.*vnori\\.b.*lsx_vnori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vxor_v:.*vxor\\.v.*lsx_vxor_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vxori_b:.*vbitrevi\\.b.*lsx_vxori_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitsel_v:.*vbitsel\\.v.*lsx_vbitsel_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbitseli_b:.*vbitseli\\.b.*lsx_vbitseli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_b:.*vshuf4i\\.b.*lsx_vshuf4i_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_h:.*vshuf4i\\.h.*lsx_vshuf4i_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_w:.*vshuf4i\\.w.*lsx_vshuf4i_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_b:.*vreplgr2vr\\.b.*lsx_vreplgr2vr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_h:.*vreplgr2vr\\.h.*lsx_vreplgr2vr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_w:.*vreplgr2vr\\.w.*lsx_vreplgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_d:.*vreplgr2vr\\.d.*lsx_vreplgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_b:.*vpcnt\\.b.*lsx_vpcnt_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_h:.*vpcnt\\.h.*lsx_vpcnt_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_w:.*vpcnt\\.w.*lsx_vpcnt_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpcnt_d:.*vpcnt\\.d.*lsx_vpcnt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_b:.*vclo\\.b.*lsx_vclo_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_h:.*vclo\\.h.*lsx_vclo_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_w:.*vclo\\.w.*lsx_vclo_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclo_d:.*vclo\\.d.*lsx_vclo_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_b:.*vclz\\.b.*lsx_vclz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_h:.*vclz\\.h.*lsx_vclz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_w:.*vclz\\.w.*lsx_vclz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vclz_d:.*vclz\\.d.*lsx_vclz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_b:.*vpickve2gr\\.b.*lsx_vpickve2gr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_h:.*vpickve2gr\\.h.*lsx_vpickve2gr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_w:.*vpickve2gr\\.w.*lsx_vpickve2gr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_d:.*vpickve2gr\\.d.*lsx_vpickve2gr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_bu:.*vpickve2gr\\.bu.*lsx_vpickve2gr_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_hu:.*vpickve2gr\\.hu.*lsx_vpickve2gr_hu" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vpickve2gr_wu:.*vpickve2gr\\.wu.*lsx_vpickve2gr_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_du:.*vpickve2gr\\.du.*lsx_vpickve2gr_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_b:.*vinsgr2vr\\.b.*lsx_vinsgr2vr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_h:.*vinsgr2vr\\.h.*lsx_vinsgr2vr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_w:.*vinsgr2vr\\.w.*lsx_vinsgr2vr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_d:.*vinsgr2vr\\.d.*lsx_vinsgr2vr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfadd_s:.*vfadd\\.s.*lsx_vfadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfadd_d:.*vfadd\\.d.*lsx_vfadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsub_s:.*vfsub\\.s.*lsx_vfsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsub_d:.*vfsub\\.d.*lsx_vfsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmul_s:.*vfmul\\.s.*lsx_vfmul_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmul_d:.*vfmul\\.d.*lsx_vfmul_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfdiv_s:.*vfdiv\\.s.*lsx_vfdiv_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfdiv_d:.*vfdiv\\.d.*lsx_vfdiv_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvt_h_s:.*vfcvt\\.h\\.s.*lsx_vfcvt_h_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvt_s_d:.*vfcvt\\.s\\.d.*lsx_vfcvt_s_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmin_s:.*vfmin\\.s.*lsx_vfmin_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmin_d:.*vfmin\\.d.*lsx_vfmin_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmina_s:.*vfmina\\.s.*lsx_vfmina_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmina_d:.*vfmina\\.d.*lsx_vfmina_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmax_s:.*vfmax\\.s.*lsx_vfmax_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmax_d:.*vfmax\\.d.*lsx_vfmax_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmaxa_s:.*vfmaxa\\.s.*lsx_vfmaxa_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmaxa_d:.*vfmaxa\\.d.*lsx_vfmaxa_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfclass_s:.*vfclass\\.s.*lsx_vfclass_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfclass_d:.*vfclass\\.d.*lsx_vfclass_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsqrt_s:.*vfsqrt\\.s.*lsx_vfsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfsqrt_d:.*vfsqrt\\.d.*lsx_vfsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrecip_s:.*vfrecip\\.s.*lsx_vfrecip_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrecip_d:.*vfrecip\\.d.*lsx_vfrecip_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrint_s:.*vfrint\\.s.*lsx_vfrint_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrint_d:.*vfrint\\.d.*lsx_vfrint_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrsqrt_s:.*vfrsqrt\\.s.*lsx_vfrsqrt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrsqrt_d:.*vfrsqrt\\.d.*lsx_vfrsqrt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vflogb_s:.*vflogb\\.s.*lsx_vflogb_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vflogb_d:.*vflogb\\.d.*lsx_vflogb_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvth_s_h:.*vfcvth\\.s\\.h.*lsx_vfcvth_s_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvth_d_s:.*vfcvth\\.d\\.s.*lsx_vfcvth_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcvtl_s_h:.*vfcvtl\\.s\\.h.*lsx_vfcvtl_s_h" 1 } } */ 
++/* { dg-final { scan-assembler-times "lsx_vfcvtl_d_s:.*vfcvtl\\.d\\.s.*lsx_vfcvtl_d_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_w_s:.*vftint\\.w\\.s.*lsx_vftint_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_l_d:.*vftint\\.l\\.d.*lsx_vftint_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_wu_s:.*vftint\\.wu\\.s.*lsx_vftint_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_lu_d:.*vftint\\.lu\\.d.*lsx_vftint_lu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_w_s:.*vftintrz\\.w\\.s.*lsx_vftintrz_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_l_d:.*vftintrz\\.l\\.d.*lsx_vftintrz_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_wu_s:.*vftintrz\\.wu\\.s.*lsx_vftintrz_wu_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_lu_d:.*vftintrz\\.lu\\.d.*lsx_vftintrz_lu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_s_w:.*vffint\\.s\\.w.*lsx_vffint_s_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_d_l:.*vffint\\.d\\.l.*lsx_vffint_d_l" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_s_wu:.*vffint\\.s\\.wu.*lsx_vffint_s_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_d_lu:.*vffint\\.d\\.lu.*lsx_vffint_d_lu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vandn_v:.*vandn\\.v.*lsx_vandn_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_b:.*vneg\\.b.*lsx_vneg_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_h:.*vneg\\.h.*lsx_vneg_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_w:.*vneg\\.w.*lsx_vneg_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vneg_d:.*vneg\\.d.*lsx_vneg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_b:.*vmuh\\.b.*lsx_vmuh_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_h:.*vmuh\\.h.*lsx_vmuh_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_w:.*vmuh\\.w.*lsx_vmuh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_d:.*vmuh\\.d.*lsx_vmuh_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_bu:.*vmuh\\.bu.*lsx_vmuh_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_hu:.*vmuh\\.hu.*lsx_vmuh_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_wu:.*vmuh\\.wu.*lsx_vmuh_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmuh_du:.*vmuh\\.du.*lsx_vmuh_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_h_b:.*vsllwil\\.h\\.b.*lsx_vsllwil_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_w_h:.*vsllwil\\.w\\.h.*lsx_vsllwil_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_d_w:.*vsllwil\\.d\\.w.*lsx_vsllwil_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_hu_bu:.*vsllwil\\.hu\\.bu.*lsx_vsllwil_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_wu_hu:.*vsllwil\\.wu\\.hu.*lsx_vsllwil_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsllwil_du_wu:.*vsllwil\\.du\\.wu.*lsx_vsllwil_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsran_b_h:.*vsran\\.b\\.h.*lsx_vsran_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsran_h_w:.*vsran\\.h\\.w.*lsx_vsran_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsran_w_d:.*vsran\\.w\\.d.*lsx_vsran_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_b_h:.*vssran\\.b\\.h.*lsx_vssran_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_h_w:.*vssran\\.h\\.w.*lsx_vssran_h_w" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vssran_w_d:.*vssran\\.w\\.d.*lsx_vssran_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_bu_h:.*vssran\\.bu\\.h.*lsx_vssran_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_hu_w:.*vssran\\.hu\\.w.*lsx_vssran_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssran_wu_d:.*vssran\\.wu\\.d.*lsx_vssran_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarn_b_h:.*vsrarn\\.b\\.h.*lsx_vsrarn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarn_h_w:.*vsrarn\\.h\\.w.*lsx_vsrarn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarn_w_d:.*vsrarn\\.w\\.d.*lsx_vsrarn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_b_h:.*vssrarn\\.b\\.h.*lsx_vssrarn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_h_w:.*vssrarn\\.h\\.w.*lsx_vssrarn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_w_d:.*vssrarn\\.w\\.d.*lsx_vssrarn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_bu_h:.*vssrarn\\.bu\\.h.*lsx_vssrarn_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_hu_w:.*vssrarn\\.hu\\.w.*lsx_vssrarn_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarn_wu_d:.*vssrarn\\.wu\\.d.*lsx_vssrarn_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrln_b_h:.*vsrln\\.b\\.h.*lsx_vsrln_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrln_h_w:.*vsrln\\.h\\.w.*lsx_vsrln_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrln_w_d:.*vsrln\\.w\\.d.*lsx_vsrln_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_bu_h:.*vssrln\\.bu\\.h.*lsx_vssrln_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_hu_w:.*vssrln\\.hu\\.w.*lsx_vssrln_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_wu_d:.*vssrln\\.wu\\.d.*lsx_vssrln_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrn_b_h:.*vsrlrn\\.b\\.h.*lsx_vsrlrn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrn_h_w:.*vsrlrn\\.h\\.w.*lsx_vsrlrn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrn_w_d:.*vsrlrn\\.w\\.d.*lsx_vsrlrn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_bu_h:.*vssrlrn\\.bu\\.h.*lsx_vssrlrn_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_hu_w:.*vssrlrn\\.hu\\.w.*lsx_vssrlrn_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_wu_d:.*vssrlrn\\.wu\\.d.*lsx_vssrlrn_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstpi_b:.*vfrstpi\\.b.*lsx_vfrstpi_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstpi_h:.*vfrstpi\\.h.*lsx_vfrstpi_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstp_b:.*vfrstp\\.b.*lsx_vfrstp_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrstp_h:.*vfrstp\\.h.*lsx_vfrstp_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf4i_d:.*vshuf4i\\.d.*lsx_vshuf4i_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbsrl_v:.*vbsrl\\.v.*lsx_vbsrl_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vbsll_v:.*vbsll\\.v.*lsx_vbsll_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_b:.*vextrins\\.b.*lsx_vextrins_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_h:.*vextrins\\.h.*lsx_vextrins_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_w:.*vextrins\\.w.*lsx_vextrins_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextrins_d:.*vextrins\\.d.*lsx_vextrins_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vmskltz_b:.*vmskltz\\.b.*lsx_vmskltz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_h:.*vmskltz\\.h.*lsx_vmskltz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_w:.*vmskltz\\.w.*lsx_vmskltz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskltz_d:.*vmskltz\\.d.*lsx_vmskltz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_b:.*vsigncov\\.b.*lsx_vsigncov_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_h:.*vsigncov\\.h.*lsx_vsigncov_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_w:.*vsigncov\\.w.*lsx_vsigncov_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsigncov_d:.*vsigncov\\.d.*lsx_vsigncov_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmadd_s:.*vfmadd\\.s.*lsx_vfmadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmadd_d:.*vfmadd\\.d.*lsx_vfmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmsub_s:.*vfmsub\\.s.*lsx_vfmsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfmsub_d:.*vfmsub\\.d.*lsx_vfmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmadd_s:.*vfnmadd\\.s.*lsx_vfnmadd_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmadd_d:.*vfnmadd\\.d.*lsx_vfnmadd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmsub_s:.*vfnmsub\\.s.*lsx_vfnmsub_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfnmsub_d:.*vfnmsub\\.d.*lsx_vfnmsub_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrne_w_s:.*vftintrne\\.w\\.s.*lsx_vftintrne_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrne_l_d:.*vftintrne\\.l\\.d.*lsx_vftintrne_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrp_w_s:.*vftintrp\\.w\\.s.*lsx_vftintrp_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrp_l_d:.*vftintrp\\.l\\.d.*lsx_vftintrp_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrm_w_s:.*vftintrm\\.w\\.s.*lsx_vftintrm_w_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrm_l_d:.*vftintrm\\.l\\.d.*lsx_vftintrm_l_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftint_w_d:.*vftint\\.w\\.d.*lsx_vftint_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffint_s_l:.*vffint\\.s\\.l.*lsx_vffint_s_l" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrz_w_d:.*vftintrz\\.w\\.d.*lsx_vftintrz_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrp_w_d:.*vftintrp\\.w\\.d.*lsx_vftintrp_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrm_w_d:.*vftintrm\\.w\\.d.*lsx_vftintrm_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrne_w_d:.*vftintrne\\.w\\.d.*lsx_vftintrne_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintl_l_s:.*vftintl\\.l\\.s.*lsx_vftintl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftinth_l_s:.*vftinth\\.l\\.s.*lsx_vftinth_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffinth_d_w:.*vffinth\\.d\\.w.*lsx_vffinth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vffintl_d_w:.*vffintl\\.d\\.w.*lsx_vffintl_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrzl_l_s:.*vftintrzl\\.l\\.s.*lsx_vftintrzl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrzh_l_s:.*vftintrzh\\.l\\.s.*lsx_vftintrzh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrpl_l_s:.*vftintrpl\\.l\\.s.*lsx_vftintrpl_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrph_l_s:.*vftintrph\\.l\\.s.*lsx_vftintrph_l_s" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vftintrml_l_s:.*vftintrml\\.l\\.s.*lsx_vftintrml_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrmh_l_s:.*vftintrmh\\.l\\.s.*lsx_vftintrmh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrnel_l_s:.*vftintrnel\\.l\\.s.*lsx_vftintrnel_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vftintrneh_l_s:.*vftintrneh\\.l\\.s.*lsx_vftintrneh_l_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrne_s:.*vfrintrne\\.s.*lsx_vfrintrne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrne_d:.*vfrintrne\\.d.*lsx_vfrintrne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrz_s:.*vfrintrz\\.s.*lsx_vfrintrz_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrz_d:.*vfrintrz\\.d.*lsx_vfrintrz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrp_s:.*vfrintrp\\.s.*lsx_vfrintrp_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrp_d:.*vfrintrp\\.d.*lsx_vfrintrp_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrm_s:.*vfrintrm\\.s.*lsx_vfrintrm_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfrintrm_d:.*vfrintrm\\.d.*lsx_vfrintrm_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_b:.*vstelm\\.b.*lsx_vstelm_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_h:.*vstelm\\.h.*lsx_vstelm_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_w:.*vstelm\\.w.*lsx_vstelm_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstelm_d:.*vstelm\\.d.*lsx_vstelm_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_w:.*vaddwev\\.d\\.w.*lsx_vaddwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_h:.*vaddwev\\.w\\.h.*lsx_vaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_b:.*vaddwev\\.h\\.b.*lsx_vaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_w:.*vaddwod\\.d\\.w.*lsx_vaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_h:.*vaddwod\\.w\\.h.*lsx_vaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_b:.*vaddwod\\.h\\.b.*lsx_vaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu:.*vaddwev\\.d\\.wu.*lsx_vaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu:.*vaddwev\\.w\\.hu.*lsx_vaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu:.*vaddwev\\.h\\.bu.*lsx_vaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu:.*vaddwod\\.d\\.wu.*lsx_vaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu:.*vaddwod\\.w\\.hu.*lsx_vaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu:.*vaddwod\\.h\\.bu.*lsx_vaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu_w:.*vaddwev\\.d\\.wu\\.w.*lsx_vaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu_h:.*vaddwev\\.w\\.hu\\.h.*lsx_vaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu_b:.*vaddwev\\.h\\.bu\\.b.*lsx_vaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu_w:.*vaddwod\\.d\\.wu\\.w.*lsx_vaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu_h:.*vaddwod\\.w\\.hu\\.h.*lsx_vaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu_b:.*vaddwod\\.h\\.bu\\.b.*lsx_vaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_d_w:.*vsubwev\\.d\\.w.*lsx_vsubwev_d_w" 1 } } */ 
++/* { dg-final { scan-assembler-times "lsx_vsubwev_w_h:.*vsubwev\\.w\\.h.*lsx_vsubwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_h_b:.*vsubwev\\.h\\.b.*lsx_vsubwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_d_w:.*vsubwod\\.d\\.w.*lsx_vsubwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_w_h:.*vsubwod\\.w\\.h.*lsx_vsubwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_h_b:.*vsubwod\\.h\\.b.*lsx_vsubwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_d_wu:.*vsubwev\\.d\\.wu.*lsx_vsubwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_w_hu:.*vsubwev\\.w\\.hu.*lsx_vsubwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_h_bu:.*vsubwev\\.h\\.bu.*lsx_vsubwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_d_wu:.*vsubwod\\.d\\.wu.*lsx_vsubwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_w_hu:.*vsubwod\\.w\\.hu.*lsx_vsubwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_h_bu:.*vsubwod\\.h\\.bu.*lsx_vsubwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_d:.*vaddwev\\.q\\.d.*lsx_vaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_d:.*vaddwod\\.q\\.d.*lsx_vaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du:.*vaddwev\\.q\\.du.*lsx_vaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du:.*vaddwod\\.q\\.du.*lsx_vaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_q_d:.*vsubwev\\.q\\.d.*lsx_vsubwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_q_d:.*vsubwod\\.q\\.d.*lsx_vsubwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwev_q_du:.*vsubwev\\.q\\.du.*lsx_vsubwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsubwod_q_du:.*vsubwod\\.q\\.du.*lsx_vsubwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du_d:.*vaddwev\\.q\\.du\\.d.*lsx_vaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du_d:.*vaddwod\\.q\\.du\\.d.*lsx_vaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_w:.*vmulwev\\.d\\.w.*lsx_vmulwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_h:.*vmulwev\\.w\\.h.*lsx_vmulwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_b:.*vmulwev\\.h\\.b.*lsx_vmulwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_w:.*vmulwod\\.d\\.w.*lsx_vmulwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_h:.*vmulwod\\.w\\.h.*lsx_vmulwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_b:.*vmulwod\\.h\\.b.*lsx_vmulwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu:.*vmulwev\\.d\\.wu.*lsx_vmulwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_hu:.*vmulwev\\.w\\.hu.*lsx_vmulwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu:.*vmulwev\\.h\\.bu.*lsx_vmulwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu:.*vmulwod\\.d\\.wu.*lsx_vmulwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu:.*vmulwod\\.w\\.hu.*lsx_vmulwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu:.*vmulwod\\.h\\.bu.*lsx_vmulwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu_w:.*vmulwev\\.d\\.wu\\.w.*lsx_vmulwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vmulwev_w_hu_h:.*vmulwev\\.w\\.hu\\.h.*lsx_vmulwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu_b:.*vmulwev\\.h\\.bu\\.b.*lsx_vmulwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu_w:.*vmulwod\\.d\\.wu\\.w.*lsx_vmulwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu_h:.*vmulwod\\.w\\.hu\\.h.*lsx_vmulwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu_b:.*vmulwod\\.h\\.bu\\.b.*lsx_vmulwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_d:.*vmulwev\\.q\\.d.*lsx_vmulwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_d:.*vmulwod\\.q\\.d.*lsx_vmulwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du:.*vmulwev\\.q\\.du.*lsx_vmulwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du:.*vmulwod\\.q\\.du.*lsx_vmulwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du_d:.*vmulwev\\.q\\.du\\.d.*lsx_vmulwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du_d:.*vmulwod\\.q\\.du\\.d.*lsx_vmulwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_q_d:.*vhaddw\\.q\\.d.*lsx_vhaddw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhaddw_qu_du:.*vhaddw\\.qu\\.du.*lsx_vhaddw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_q_d:.*vhsubw\\.q\\.d.*lsx_vhsubw_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vhsubw_qu_du:.*vhsubw\\.qu\\.du.*lsx_vhsubw_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_w:.*vmaddwev\\.d\\.w.*lsx_vmaddwev_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_h:.*vmaddwev\\.w\\.h.*lsx_vmaddwev_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_b:.*vmaddwev\\.h\\.b.*lsx_vmaddwev_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu:.*vmaddwev\\.d\\.wu.*lsx_vmaddwev_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu:.*vmaddwev\\.w\\.hu.*lsx_vmaddwev_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu:.*vmaddwev\\.h\\.bu.*lsx_vmaddwev_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_w:.*vmaddwod\\.d\\.w.*lsx_vmaddwod_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_h:.*vmaddwod\\.w\\.h.*lsx_vmaddwod_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_b:.*vmaddwod\\.h\\.b.*lsx_vmaddwod_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu:.*vmaddwod\\.d\\.wu.*lsx_vmaddwod_d_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu:.*vmaddwod\\.w\\.hu.*lsx_vmaddwod_w_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu:.*vmaddwod\\.h\\.bu.*lsx_vmaddwod_h_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu_w:.*vmaddwev\\.d\\.wu\\.w.*lsx_vmaddwev_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu_h:.*vmaddwev\\.w\\.hu\\.h.*lsx_vmaddwev_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu_b:.*vmaddwev\\.h\\.bu\\.b.*lsx_vmaddwev_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu_w:.*vmaddwod\\.d\\.wu\\.w.*lsx_vmaddwod_d_wu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu_h:.*vmaddwod\\.w\\.hu\\.h.*lsx_vmaddwod_w_hu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu_b:.*vmaddwod\\.h\\.bu\\.b.*lsx_vmaddwod_h_bu_b" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vmaddwev_q_d:.*vmaddwev\\.q\\.d.*lsx_vmaddwev_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_d:.*vmaddwod\\.q\\.d.*lsx_vmaddwod_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du:.*vmaddwev\\.q\\.du.*lsx_vmaddwev_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du:.*vmaddwod\\.q\\.du.*lsx_vmaddwod_q_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du_d:.*vmaddwev\\.q\\.du\\.d.*lsx_vmaddwev_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du_d:.*vmaddwod\\.q\\.du\\.d.*lsx_vmaddwod_q_du_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_b:.*vrotr\\.b.*lsx_vrotr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_h:.*vrotr\\.h.*lsx_vrotr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_w:.*vrotr\\.w.*lsx_vrotr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotr_d:.*vrotr\\.d.*lsx_vrotr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vadd_q:.*vadd\\.q.*lsx_vadd_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsub_q:.*vsub\\.q.*lsx_vsub_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_b:.*vldrepl\\.b.*lsx_vldrepl_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_h:.*vldrepl\\.h.*lsx_vldrepl_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_w:.*vldrepl\\.w.*lsx_vldrepl_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldrepl_d:.*vldrepl\\.d.*lsx_vldrepl_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmskgez_b:.*vmskgez\\.b.*lsx_vmskgez_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vmsknz_b:.*vmsknz\\.b.*lsx_vmsknz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_h_b:.*vexth\\.h\\.b.*lsx_vexth_h_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_w_h:.*vexth\\.w\\.h.*lsx_vexth_w_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_d_w:.*vexth\\.d\\.w.*lsx_vexth_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_q_d:.*vexth\\.q\\.d.*lsx_vexth_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_hu_bu:.*vexth\\.hu\\.bu.*lsx_vexth_hu_bu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_wu_hu:.*vexth\\.wu\\.hu.*lsx_vexth_wu_hu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_du_wu:.*vexth\\.du\\.wu.*lsx_vexth_du_wu" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vexth_qu_du:.*vexth\\.qu\\.du.*lsx_vexth_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_b:.*vrotri\\.b.*lsx_vrotri_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_h:.*vrotri\\.h.*lsx_vrotri_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_w:.*vrotri\\.w.*lsx_vrotri_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrotri_d:.*vrotri\\.d.*lsx_vrotri_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextl_q_d:.*vextl\\.q\\.d.*lsx_vextl_q_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_b_h:.*vsrlni\\.b\\.h.*lsx_vsrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_h_w:.*vsrlni\\.h\\.w.*lsx_vsrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_w_d:.*vsrlni\\.w\\.d.*lsx_vsrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlni_d_q:.*vsrlni\\.d\\.q.*lsx_vsrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrni_b_h:.*vsrlrni\\.b\\.h.*lsx_vsrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrni_h_w:.*vsrlrni\\.h\\.w.*lsx_vsrlrni_h_w" 1 } } */ ++/* { dg-final { 
scan-assembler-times "lsx_vsrlrni_w_d:.*vsrlrni\\.w\\.d.*lsx_vsrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrlrni_d_q:.*vsrlrni\\.d\\.q.*lsx_vsrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_b_h:.*vssrlni\\.b\\.h.*lsx_vssrlni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_h_w:.*vssrlni\\.h\\.w.*lsx_vssrlni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_w_d:.*vssrlni\\.w\\.d.*lsx_vssrlni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_d_q:.*vssrlni\\.d\\.q.*lsx_vssrlni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_bu_h:.*vssrlni\\.bu\\.h.*lsx_vssrlni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_hu_w:.*vssrlni\\.hu\\.w.*lsx_vssrlni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_wu_d:.*vssrlni\\.wu\\.d.*lsx_vssrlni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlni_du_q:.*vssrlni\\.du\\.q.*lsx_vssrlni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_b_h:.*vssrlrni\\.b\\.h.*lsx_vssrlrni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_h_w:.*vssrlrni\\.h\\.w.*lsx_vssrlrni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_w_d:.*vssrlrni\\.w\\.d.*lsx_vssrlrni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_d_q:.*vssrlrni\\.d\\.q.*lsx_vssrlrni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_bu_h:.*vssrlrni\\.bu\\.h.*lsx_vssrlrni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_hu_w:.*vssrlrni\\.hu\\.w.*lsx_vssrlrni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_wu_d:.*vssrlrni\\.wu\\.d.*lsx_vssrlrni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrni_du_q:.*vssrlrni\\.du\\.q.*lsx_vssrlrni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_b_h:.*vsrani\\.b\\.h.*lsx_vsrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_h_w:.*vsrani\\.h\\.w.*lsx_vsrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_w_d:.*vsrani\\.w\\.d.*lsx_vsrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrani_d_q:.*vsrani\\.d\\.q.*lsx_vsrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_b_h:.*vsrarni\\.b\\.h.*lsx_vsrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_h_w:.*vsrarni\\.h\\.w.*lsx_vsrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_w_d:.*vsrarni\\.w\\.d.*lsx_vsrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vsrarni_d_q:.*vsrarni\\.d\\.q.*lsx_vsrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_b_h:.*vssrani\\.b\\.h.*lsx_vssrani_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_h_w:.*vssrani\\.h\\.w.*lsx_vssrani_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_w_d:.*vssrani\\.w\\.d.*lsx_vssrani_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_d_q:.*vssrani\\.d\\.q.*lsx_vssrani_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_bu_h:.*vssrani\\.bu\\.h.*lsx_vssrani_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_hu_w:.*vssrani\\.hu\\.w.*lsx_vssrani_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_wu_d:.*vssrani\\.wu\\.d.*lsx_vssrani_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrani_du_q:.*vssrani\\.du\\.q.*lsx_vssrani_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vssrarni_b_h:.*vssrarni\\.b\\.h.*lsx_vssrarni_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_h_w:.*vssrarni\\.h\\.w.*lsx_vssrarni_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_w_d:.*vssrarni\\.w\\.d.*lsx_vssrarni_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_d_q:.*vssrarni\\.d\\.q.*lsx_vssrarni_d_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_bu_h:.*vssrarni\\.bu\\.h.*lsx_vssrarni_bu_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_hu_w:.*vssrarni\\.hu\\.w.*lsx_vssrarni_hu_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_wu_d:.*vssrarni\\.wu\\.d.*lsx_vssrarni_wu_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrarni_du_q:.*vssrarni\\.du\\.q.*lsx_vssrarni_du_q" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vpermi_w:.*vpermi\\.w.*lsx_vpermi_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vld:.*vld.*lsx_vld" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vst:.*vst.*lsx_vst" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_b_h:.*vssrlrn\\.b\\.h.*lsx_vssrlrn_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_h_w:.*vssrlrn\\.h\\.w.*lsx_vssrlrn_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrlrn_w_d:.*vssrlrn\\.w\\.d.*lsx_vssrlrn_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_b_h:.*vssrln\\.b\\.h.*lsx_vssrln_b_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_h_w:.*vssrln\\.h\\.w.*lsx_vssrln_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vssrln_w_d:.*vssrln\\.w\\.d.*lsx_vssrln_w_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vorn_v:.*vorn\\.v.*lsx_vorn_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldi:.*vldi.*lsx_vldi" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vshuf_b:.*vshuf\\.b.*lsx_vshuf_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vldx:.*vldx.*lsx_vldx" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vstx:.*vstx.*lsx_vstx" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vextl_qu_du:.*vextl\\.qu\\.du.*lsx_vextl_qu_du" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_b:.*vsetanyeqz\\.b.*lsx_bnz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_d:.*vsetanyeqz\\.d.*lsx_bnz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_h:.*vsetanyeqz\\.h.*lsx_bnz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_v:.*vseteqz\\.v.*lsx_bnz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bnz_w:.*vsetanyeqz\\.w.*lsx_bnz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_b:.*vsetallnez\\.b.*lsx_bz_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_d:.*vsetallnez\\.d.*lsx_bz_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_h:.*vsetallnez\\.h.*lsx_bz_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_v:.*vsetnez\\.v.*lsx_bz_v" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_bz_w:.*vsetallnez\\.w.*lsx_bz_w" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_d:.*vfcmp\\.caf\\.d.*lsx_vfcmp_caf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_s:.*vfcmp\\.caf\\.s.*lsx_vfcmp_caf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_d:.*vfcmp\\.ceq\\.d.*lsx_vfcmp_ceq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_s:.*vfcmp\\.ceq\\.s.*lsx_vfcmp_ceq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cle_d:.*vfcmp\\.cle\\.d.*lsx_vfcmp_cle_d" 1 } } */ ++/* { dg-final { scan-assembler-times 
"lsx_vfcmp_cle_s:.*vfcmp\\.cle\\.s.*lsx_vfcmp_cle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_d:.*vfcmp\\.clt\\.d.*lsx_vfcmp_clt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_s:.*vfcmp\\.clt\\.s.*lsx_vfcmp_clt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_d:.*vfcmp\\.cne\\.d.*lsx_vfcmp_cne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_s:.*vfcmp\\.cne\\.s.*lsx_vfcmp_cne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_d:.*vfcmp\\.cor\\.d.*lsx_vfcmp_cor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_s:.*vfcmp\\.cor\\.s.*lsx_vfcmp_cor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_d:.*vfcmp\\.cueq\\.d.*lsx_vfcmp_cueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_s:.*vfcmp\\.cueq\\.s.*lsx_vfcmp_cueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_d:.*vfcmp\\.cule\\.d.*lsx_vfcmp_cule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_s:.*vfcmp\\.cule\\.s.*lsx_vfcmp_cule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_d:.*vfcmp\\.cult\\.d.*lsx_vfcmp_cult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_s:.*vfcmp\\.cult\\.s.*lsx_vfcmp_cult_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_d:.*vfcmp\\.cun\\.d.*lsx_vfcmp_cun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_d:.*vfcmp\\.cune\\.d.*lsx_vfcmp_cune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_s:.*vfcmp\\.cune\\.s.*lsx_vfcmp_cune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_s:.*vfcmp\\.cun\\.s.*lsx_vfcmp_cun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_d:.*vfcmp\\.saf\\.d.*lsx_vfcmp_saf_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_s:.*vfcmp\\.saf\\.s.*lsx_vfcmp_saf_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_d:.*vfcmp\\.seq\\.d.*lsx_vfcmp_seq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_s:.*vfcmp\\.seq\\.s.*lsx_vfcmp_seq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_d:.*vfcmp\\.sle\\.d.*lsx_vfcmp_sle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_s:.*vfcmp\\.sle\\.s.*lsx_vfcmp_sle_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_d:.*vfcmp\\.slt\\.d.*lsx_vfcmp_slt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_s:.*vfcmp\\.slt\\.s.*lsx_vfcmp_slt_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_d:.*vfcmp\\.sne\\.d.*lsx_vfcmp_sne_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_s:.*vfcmp\\.sne\\.s.*lsx_vfcmp_sne_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_d:.*vfcmp\\.sor\\.d.*lsx_vfcmp_sor_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_s:.*vfcmp\\.sor\\.s.*lsx_vfcmp_sor_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_d:.*vfcmp\\.sueq\\.d.*lsx_vfcmp_sueq_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_s:.*vfcmp\\.sueq\\.s.*lsx_vfcmp_sueq_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_d:.*vfcmp\\.sule\\.d.*lsx_vfcmp_sule_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_s:.*vfcmp\\.sule\\.s.*lsx_vfcmp_sule_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_d:.*vfcmp\\.sult\\.d.*lsx_vfcmp_sult_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_s:.*vfcmp\\.sult\\.s.*lsx_vfcmp_sult_s" 1 } } */ ++/* { dg-final 
{ scan-assembler-times "lsx_vfcmp_sun_d:.*vfcmp\\.sun\\.d.*lsx_vfcmp_sun_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_d:.*vfcmp\\.sune\\.d.*lsx_vfcmp_sune_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_s:.*vfcmp\\.sune\\.s.*lsx_vfcmp_sune_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vfcmp_sun_s:.*vfcmp\\.sun\\.s.*lsx_vfcmp_sun_s" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_b:.*vrepli\\.b.*lsx_vrepli_b" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_d:.*vrepli\\.d.*lsx_vrepli_d" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_h:.*vrepli\\.h.*lsx_vrepli_h" 1 } } */ ++/* { dg-final { scan-assembler-times "lsx_vrepli_w:.*vrepli\\.w.*lsx_vrepli_w" 1 } } */ ++ ++typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); ++typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); ++typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); ++typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); ++typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); ++typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); ++typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); ++typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); ++typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); ++typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); ++typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); ++typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); ++typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); ++ ++typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); ++ ++v16i8 __lsx_vsll_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsll_b(_1, _2);} ++v8i16 __lsx_vsll_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsll_h(_1, _2);} ++v4i32 __lsx_vsll_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsll_w(_1, _2);} ++v2i64 __lsx_vsll_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsll_d(_1, _2);} ++v16i8 __lsx_vslli_b(v16i8 _1){return __builtin_lsx_vslli_b(_1, 1);} ++v8i16 __lsx_vslli_h(v8i16 _1){return __builtin_lsx_vslli_h(_1, 1);} ++v4i32 __lsx_vslli_w(v4i32 _1){return __builtin_lsx_vslli_w(_1, 1);} ++v2i64 __lsx_vslli_d(v2i64 _1){return __builtin_lsx_vslli_d(_1, 1);} ++v16i8 __lsx_vsra_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsra_b(_1, _2);} ++v8i16 __lsx_vsra_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsra_h(_1, _2);} ++v4i32 __lsx_vsra_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsra_w(_1, _2);} ++v2i64 __lsx_vsra_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsra_d(_1, _2);} ++v16i8 __lsx_vsrai_b(v16i8 _1){return __builtin_lsx_vsrai_b(_1, 1);} ++v8i16 __lsx_vsrai_h(v8i16 _1){return __builtin_lsx_vsrai_h(_1, 
1);} ++v4i32 __lsx_vsrai_w(v4i32 _1){return __builtin_lsx_vsrai_w(_1, 1);} ++v2i64 __lsx_vsrai_d(v2i64 _1){return __builtin_lsx_vsrai_d(_1, 1);} ++v16i8 __lsx_vsrar_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrar_b(_1, _2);} ++v8i16 __lsx_vsrar_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrar_h(_1, _2);} ++v4i32 __lsx_vsrar_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrar_w(_1, _2);} ++v2i64 __lsx_vsrar_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrar_d(_1, _2);} ++v16i8 __lsx_vsrari_b(v16i8 _1){return __builtin_lsx_vsrari_b(_1, 1);} ++v8i16 __lsx_vsrari_h(v8i16 _1){return __builtin_lsx_vsrari_h(_1, 1);} ++v4i32 __lsx_vsrari_w(v4i32 _1){return __builtin_lsx_vsrari_w(_1, 1);} ++v2i64 __lsx_vsrari_d(v2i64 _1){return __builtin_lsx_vsrari_d(_1, 1);} ++v16i8 __lsx_vsrl_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrl_b(_1, _2);} ++v8i16 __lsx_vsrl_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrl_h(_1, _2);} ++v4i32 __lsx_vsrl_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrl_w(_1, _2);} ++v2i64 __lsx_vsrl_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrl_d(_1, _2);} ++v16i8 __lsx_vsrli_b(v16i8 _1){return __builtin_lsx_vsrli_b(_1, 1);} ++v8i16 __lsx_vsrli_h(v8i16 _1){return __builtin_lsx_vsrli_h(_1, 1);} ++v4i32 __lsx_vsrli_w(v4i32 _1){return __builtin_lsx_vsrli_w(_1, 1);} ++v2i64 __lsx_vsrli_d(v2i64 _1){return __builtin_lsx_vsrli_d(_1, 1);} ++v16i8 __lsx_vsrlr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlr_b(_1, _2);} ++v8i16 __lsx_vsrlr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlr_h(_1, _2);} ++v4i32 __lsx_vsrlr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlr_w(_1, _2);} ++v2i64 __lsx_vsrlr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlr_d(_1, _2);} ++v16i8 __lsx_vsrlri_b(v16i8 _1){return __builtin_lsx_vsrlri_b(_1, 1);} ++v8i16 __lsx_vsrlri_h(v8i16 _1){return __builtin_lsx_vsrlri_h(_1, 1);} ++v4i32 __lsx_vsrlri_w(v4i32 _1){return __builtin_lsx_vsrlri_w(_1, 1);} ++v2i64 __lsx_vsrlri_d(v2i64 _1){return __builtin_lsx_vsrlri_d(_1, 1);} ++v16u8 __lsx_vbitclr_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitclr_b(_1, _2);} ++v8u16 __lsx_vbitclr_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitclr_h(_1, _2);} ++v4u32 __lsx_vbitclr_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitclr_w(_1, _2);} ++v2u64 __lsx_vbitclr_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitclr_d(_1, _2);} ++v16u8 __lsx_vbitclri_b(v16u8 _1){return __builtin_lsx_vbitclri_b(_1, 1);} ++v8u16 __lsx_vbitclri_h(v8u16 _1){return __builtin_lsx_vbitclri_h(_1, 1);} ++v4u32 __lsx_vbitclri_w(v4u32 _1){return __builtin_lsx_vbitclri_w(_1, 1);} ++v2u64 __lsx_vbitclri_d(v2u64 _1){return __builtin_lsx_vbitclri_d(_1, 1);} ++v16u8 __lsx_vbitset_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitset_b(_1, _2);} ++v8u16 __lsx_vbitset_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitset_h(_1, _2);} ++v4u32 __lsx_vbitset_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitset_w(_1, _2);} ++v2u64 __lsx_vbitset_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitset_d(_1, _2);} ++v16u8 __lsx_vbitseti_b(v16u8 _1){return __builtin_lsx_vbitseti_b(_1, 1);} ++v8u16 __lsx_vbitseti_h(v8u16 _1){return __builtin_lsx_vbitseti_h(_1, 1);} ++v4u32 __lsx_vbitseti_w(v4u32 _1){return __builtin_lsx_vbitseti_w(_1, 1);} ++v2u64 __lsx_vbitseti_d(v2u64 _1){return __builtin_lsx_vbitseti_d(_1, 1);} ++v16u8 __lsx_vbitrev_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitrev_b(_1, _2);} ++v8u16 __lsx_vbitrev_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitrev_h(_1, _2);} ++v4u32 __lsx_vbitrev_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitrev_w(_1, _2);} ++v2u64 __lsx_vbitrev_d(v2u64 _1, v2u64 
_2){return __builtin_lsx_vbitrev_d(_1, _2);} ++v16u8 __lsx_vbitrevi_b(v16u8 _1){return __builtin_lsx_vbitrevi_b(_1, 1);} ++v8u16 __lsx_vbitrevi_h(v8u16 _1){return __builtin_lsx_vbitrevi_h(_1, 1);} ++v4u32 __lsx_vbitrevi_w(v4u32 _1){return __builtin_lsx_vbitrevi_w(_1, 1);} ++v2u64 __lsx_vbitrevi_d(v2u64 _1){return __builtin_lsx_vbitrevi_d(_1, 1);} ++v16i8 __lsx_vadd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vadd_b(_1, _2);} ++v8i16 __lsx_vadd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vadd_h(_1, _2);} ++v4i32 __lsx_vadd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vadd_w(_1, _2);} ++v2i64 __lsx_vadd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vadd_d(_1, _2);} ++v16i8 __lsx_vaddi_bu(v16i8 _1){return __builtin_lsx_vaddi_bu(_1, 1);} ++v8i16 __lsx_vaddi_hu(v8i16 _1){return __builtin_lsx_vaddi_hu(_1, 1);} ++v4i32 __lsx_vaddi_wu(v4i32 _1){return __builtin_lsx_vaddi_wu(_1, 1);} ++v2i64 __lsx_vaddi_du(v2i64 _1){return __builtin_lsx_vaddi_du(_1, 1);} ++v16i8 __lsx_vsub_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsub_b(_1, _2);} ++v8i16 __lsx_vsub_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsub_h(_1, _2);} ++v4i32 __lsx_vsub_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsub_w(_1, _2);} ++v2i64 __lsx_vsub_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsub_d(_1, _2);} ++v16i8 __lsx_vsubi_bu(v16i8 _1){return __builtin_lsx_vsubi_bu(_1, 1);} ++v8i16 __lsx_vsubi_hu(v8i16 _1){return __builtin_lsx_vsubi_hu(_1, 1);} ++v4i32 __lsx_vsubi_wu(v4i32 _1){return __builtin_lsx_vsubi_wu(_1, 1);} ++v2i64 __lsx_vsubi_du(v2i64 _1){return __builtin_lsx_vsubi_du(_1, 1);} ++v16i8 __lsx_vmax_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmax_b(_1, _2);} ++v8i16 __lsx_vmax_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmax_h(_1, _2);} ++v4i32 __lsx_vmax_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmax_w(_1, _2);} ++v2i64 __lsx_vmax_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmax_d(_1, _2);} ++v16i8 __lsx_vmaxi_b(v16i8 _1){return __builtin_lsx_vmaxi_b(_1, 1);} ++v8i16 __lsx_vmaxi_h(v8i16 _1){return __builtin_lsx_vmaxi_h(_1, 1);} ++v4i32 __lsx_vmaxi_w(v4i32 _1){return __builtin_lsx_vmaxi_w(_1, 1);} ++v2i64 __lsx_vmaxi_d(v2i64 _1){return __builtin_lsx_vmaxi_d(_1, 1);} ++v16u8 __lsx_vmax_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmax_bu(_1, _2);} ++v8u16 __lsx_vmax_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmax_hu(_1, _2);} ++v4u32 __lsx_vmax_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmax_wu(_1, _2);} ++v2u64 __lsx_vmax_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmax_du(_1, _2);} ++v16u8 __lsx_vmaxi_bu(v16u8 _1){return __builtin_lsx_vmaxi_bu(_1, 1);} ++v8u16 __lsx_vmaxi_hu(v8u16 _1){return __builtin_lsx_vmaxi_hu(_1, 1);} ++v4u32 __lsx_vmaxi_wu(v4u32 _1){return __builtin_lsx_vmaxi_wu(_1, 1);} ++v2u64 __lsx_vmaxi_du(v2u64 _1){return __builtin_lsx_vmaxi_du(_1, 1);} ++v16i8 __lsx_vmin_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmin_b(_1, _2);} ++v8i16 __lsx_vmin_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmin_h(_1, _2);} ++v4i32 __lsx_vmin_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmin_w(_1, _2);} ++v2i64 __lsx_vmin_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmin_d(_1, _2);} ++v16i8 __lsx_vmini_b(v16i8 _1){return __builtin_lsx_vmini_b(_1, 1);} ++v8i16 __lsx_vmini_h(v8i16 _1){return __builtin_lsx_vmini_h(_1, 1);} ++v4i32 __lsx_vmini_w(v4i32 _1){return __builtin_lsx_vmini_w(_1, 1);} ++v2i64 __lsx_vmini_d(v2i64 _1){return __builtin_lsx_vmini_d(_1, 1);} ++v16u8 __lsx_vmin_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmin_bu(_1, _2);} ++v8u16 __lsx_vmin_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmin_hu(_1, _2);} ++v4u32 __lsx_vmin_wu(v4u32 _1, 
v4u32 _2){return __builtin_lsx_vmin_wu(_1, _2);} ++v2u64 __lsx_vmin_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmin_du(_1, _2);} ++v16u8 __lsx_vmini_bu(v16u8 _1){return __builtin_lsx_vmini_bu(_1, 1);} ++v8u16 __lsx_vmini_hu(v8u16 _1){return __builtin_lsx_vmini_hu(_1, 1);} ++v4u32 __lsx_vmini_wu(v4u32 _1){return __builtin_lsx_vmini_wu(_1, 1);} ++v2u64 __lsx_vmini_du(v2u64 _1){return __builtin_lsx_vmini_du(_1, 1);} ++v16i8 __lsx_vseq_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vseq_b(_1, _2);} ++v8i16 __lsx_vseq_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vseq_h(_1, _2);} ++v4i32 __lsx_vseq_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vseq_w(_1, _2);} ++v2i64 __lsx_vseq_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vseq_d(_1, _2);} ++v16i8 __lsx_vseqi_b(v16i8 _1){return __builtin_lsx_vseqi_b(_1, 1);} ++v8i16 __lsx_vseqi_h(v8i16 _1){return __builtin_lsx_vseqi_h(_1, 1);} ++v4i32 __lsx_vseqi_w(v4i32 _1){return __builtin_lsx_vseqi_w(_1, 1);} ++v2i64 __lsx_vseqi_d(v2i64 _1){return __builtin_lsx_vseqi_d(_1, 1);} ++v16i8 __lsx_vslti_b(v16i8 _1){return __builtin_lsx_vslti_b(_1, 1);} ++v16i8 __lsx_vslt_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vslt_b(_1, _2);} ++v8i16 __lsx_vslt_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vslt_h(_1, _2);} ++v4i32 __lsx_vslt_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vslt_w(_1, _2);} ++v2i64 __lsx_vslt_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vslt_d(_1, _2);} ++v8i16 __lsx_vslti_h(v8i16 _1){return __builtin_lsx_vslti_h(_1, 1);} ++v4i32 __lsx_vslti_w(v4i32 _1){return __builtin_lsx_vslti_w(_1, 1);} ++v2i64 __lsx_vslti_d(v2i64 _1){return __builtin_lsx_vslti_d(_1, 1);} ++v16i8 __lsx_vslt_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vslt_bu(_1, _2);} ++v8i16 __lsx_vslt_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vslt_hu(_1, _2);} ++v4i32 __lsx_vslt_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vslt_wu(_1, _2);} ++v2i64 __lsx_vslt_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vslt_du(_1, _2);} ++v16i8 __lsx_vslti_bu(v16u8 _1){return __builtin_lsx_vslti_bu(_1, 1);} ++v8i16 __lsx_vslti_hu(v8u16 _1){return __builtin_lsx_vslti_hu(_1, 1);} ++v4i32 __lsx_vslti_wu(v4u32 _1){return __builtin_lsx_vslti_wu(_1, 1);} ++v2i64 __lsx_vslti_du(v2u64 _1){return __builtin_lsx_vslti_du(_1, 1);} ++v16i8 __lsx_vsle_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsle_b(_1, _2);} ++v8i16 __lsx_vsle_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsle_h(_1, _2);} ++v4i32 __lsx_vsle_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsle_w(_1, _2);} ++v2i64 __lsx_vsle_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsle_d(_1, _2);} ++v16i8 __lsx_vslei_b(v16i8 _1){return __builtin_lsx_vslei_b(_1, 1);} ++v8i16 __lsx_vslei_h(v8i16 _1){return __builtin_lsx_vslei_h(_1, 1);} ++v4i32 __lsx_vslei_w(v4i32 _1){return __builtin_lsx_vslei_w(_1, 1);} ++v2i64 __lsx_vslei_d(v2i64 _1){return __builtin_lsx_vslei_d(_1, 1);} ++v16i8 __lsx_vsle_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsle_bu(_1, _2);} ++v8i16 __lsx_vsle_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsle_hu(_1, _2);} ++v4i32 __lsx_vsle_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsle_wu(_1, _2);} ++v2i64 __lsx_vsle_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsle_du(_1, _2);} ++v16i8 __lsx_vslei_bu(v16u8 _1){return __builtin_lsx_vslei_bu(_1, 1);} ++v8i16 __lsx_vslei_hu(v8u16 _1){return __builtin_lsx_vslei_hu(_1, 1);} ++v4i32 __lsx_vslei_wu(v4u32 _1){return __builtin_lsx_vslei_wu(_1, 1);} ++v2i64 __lsx_vslei_du(v2u64 _1){return __builtin_lsx_vslei_du(_1, 1);} ++v16i8 __lsx_vsat_b(v16i8 _1){return __builtin_lsx_vsat_b(_1, 1);} ++v8i16 __lsx_vsat_h(v8i16 _1){return 
__builtin_lsx_vsat_h(_1, 1);} ++v4i32 __lsx_vsat_w(v4i32 _1){return __builtin_lsx_vsat_w(_1, 1);} ++v2i64 __lsx_vsat_d(v2i64 _1){return __builtin_lsx_vsat_d(_1, 1);} ++v16u8 __lsx_vsat_bu(v16u8 _1){return __builtin_lsx_vsat_bu(_1, 1);} ++v8u16 __lsx_vsat_hu(v8u16 _1){return __builtin_lsx_vsat_hu(_1, 1);} ++v4u32 __lsx_vsat_wu(v4u32 _1){return __builtin_lsx_vsat_wu(_1, 1);} ++v2u64 __lsx_vsat_du(v2u64 _1){return __builtin_lsx_vsat_du(_1, 1);} ++v16i8 __lsx_vadda_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vadda_b(_1, _2);} ++v8i16 __lsx_vadda_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vadda_h(_1, _2);} ++v4i32 __lsx_vadda_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vadda_w(_1, _2);} ++v2i64 __lsx_vadda_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vadda_d(_1, _2);} ++v16i8 __lsx_vsadd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsadd_b(_1, _2);} ++v8i16 __lsx_vsadd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsadd_h(_1, _2);} ++v4i32 __lsx_vsadd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsadd_w(_1, _2);} ++v2i64 __lsx_vsadd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsadd_d(_1, _2);} ++v16u8 __lsx_vsadd_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsadd_bu(_1, _2);} ++v8u16 __lsx_vsadd_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsadd_hu(_1, _2);} ++v4u32 __lsx_vsadd_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsadd_wu(_1, _2);} ++v2u64 __lsx_vsadd_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsadd_du(_1, _2);} ++v16i8 __lsx_vavg_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vavg_b(_1, _2);} ++v8i16 __lsx_vavg_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vavg_h(_1, _2);} ++v4i32 __lsx_vavg_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vavg_w(_1, _2);} ++v2i64 __lsx_vavg_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vavg_d(_1, _2);} ++v16u8 __lsx_vavg_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vavg_bu(_1, _2);} ++v8u16 __lsx_vavg_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vavg_hu(_1, _2);} ++v4u32 __lsx_vavg_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vavg_wu(_1, _2);} ++v2u64 __lsx_vavg_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vavg_du(_1, _2);} ++v16i8 __lsx_vavgr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vavgr_b(_1, _2);} ++v8i16 __lsx_vavgr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vavgr_h(_1, _2);} ++v4i32 __lsx_vavgr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vavgr_w(_1, _2);} ++v2i64 __lsx_vavgr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vavgr_d(_1, _2);} ++v16u8 __lsx_vavgr_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vavgr_bu(_1, _2);} ++v8u16 __lsx_vavgr_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vavgr_hu(_1, _2);} ++v4u32 __lsx_vavgr_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vavgr_wu(_1, _2);} ++v2u64 __lsx_vavgr_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vavgr_du(_1, _2);} ++v16i8 __lsx_vssub_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vssub_b(_1, _2);} ++v8i16 __lsx_vssub_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssub_h(_1, _2);} ++v4i32 __lsx_vssub_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssub_w(_1, _2);} ++v2i64 __lsx_vssub_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssub_d(_1, _2);} ++v16u8 __lsx_vssub_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vssub_bu(_1, _2);} ++v8u16 __lsx_vssub_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vssub_hu(_1, _2);} ++v4u32 __lsx_vssub_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vssub_wu(_1, _2);} ++v2u64 __lsx_vssub_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vssub_du(_1, _2);} ++v16i8 __lsx_vabsd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vabsd_b(_1, _2);} ++v8i16 __lsx_vabsd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vabsd_h(_1, _2);} ++v4i32 
__lsx_vabsd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vabsd_w(_1, _2);} ++v2i64 __lsx_vabsd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vabsd_d(_1, _2);} ++v16u8 __lsx_vabsd_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vabsd_bu(_1, _2);} ++v8u16 __lsx_vabsd_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vabsd_hu(_1, _2);} ++v4u32 __lsx_vabsd_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vabsd_wu(_1, _2);} ++v2u64 __lsx_vabsd_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vabsd_du(_1, _2);} ++v16i8 __lsx_vmul_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmul_b(_1, _2);} ++v8i16 __lsx_vmul_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmul_h(_1, _2);} ++v4i32 __lsx_vmul_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmul_w(_1, _2);} ++v2i64 __lsx_vmul_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmul_d(_1, _2);} ++v16i8 __lsx_vmadd_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmadd_b(_1, _2, _3);} ++v8i16 __lsx_vmadd_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmadd_h(_1, _2, _3);} ++v4i32 __lsx_vmadd_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmadd_w(_1, _2, _3);} ++v2i64 __lsx_vmadd_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmadd_d(_1, _2, _3);} ++v16i8 __lsx_vmsub_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmsub_b(_1, _2, _3);} ++v8i16 __lsx_vmsub_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmsub_h(_1, _2, _3);} ++v4i32 __lsx_vmsub_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmsub_w(_1, _2, _3);} ++v2i64 __lsx_vmsub_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmsub_d(_1, _2, _3);} ++v16i8 __lsx_vdiv_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vdiv_b(_1, _2);} ++v8i16 __lsx_vdiv_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vdiv_h(_1, _2);} ++v4i32 __lsx_vdiv_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vdiv_w(_1, _2);} ++v2i64 __lsx_vdiv_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vdiv_d(_1, _2);} ++v16u8 __lsx_vdiv_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vdiv_bu(_1, _2);} ++v8u16 __lsx_vdiv_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vdiv_hu(_1, _2);} ++v4u32 __lsx_vdiv_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vdiv_wu(_1, _2);} ++v2u64 __lsx_vdiv_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vdiv_du(_1, _2);} ++v8i16 __lsx_vhaddw_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vhaddw_h_b(_1, _2);} ++v4i32 __lsx_vhaddw_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vhaddw_w_h(_1, _2);} ++v2i64 __lsx_vhaddw_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vhaddw_d_w(_1, _2);} ++v8u16 __lsx_vhaddw_hu_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vhaddw_hu_bu(_1, _2);} ++v4u32 __lsx_vhaddw_wu_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vhaddw_wu_hu(_1, _2);} ++v2u64 __lsx_vhaddw_du_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vhaddw_du_wu(_1, _2);} ++v8i16 __lsx_vhsubw_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vhsubw_h_b(_1, _2);} ++v4i32 __lsx_vhsubw_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vhsubw_w_h(_1, _2);} ++v2i64 __lsx_vhsubw_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vhsubw_d_w(_1, _2);} ++v8i16 __lsx_vhsubw_hu_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vhsubw_hu_bu(_1, _2);} ++v4i32 __lsx_vhsubw_wu_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vhsubw_wu_hu(_1, _2);} ++v2i64 __lsx_vhsubw_du_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vhsubw_du_wu(_1, _2);} ++v16i8 __lsx_vmod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmod_b(_1, _2);} ++v8i16 __lsx_vmod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmod_h(_1, _2);} ++v4i32 __lsx_vmod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmod_w(_1, _2);} ++v2i64 __lsx_vmod_d(v2i64 _1, v2i64 _2){return 
__builtin_lsx_vmod_d(_1, _2);} ++v16u8 __lsx_vmod_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmod_bu(_1, _2);} ++v8u16 __lsx_vmod_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmod_hu(_1, _2);} ++v4u32 __lsx_vmod_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmod_wu(_1, _2);} ++v2u64 __lsx_vmod_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmod_du(_1, _2);} ++v16i8 __lsx_vreplve_b(v16i8 _1, int _2){return __builtin_lsx_vreplve_b(_1, _2);} ++v8i16 __lsx_vreplve_h(v8i16 _1, int _2){return __builtin_lsx_vreplve_h(_1, _2);} ++v4i32 __lsx_vreplve_w(v4i32 _1, int _2){return __builtin_lsx_vreplve_w(_1, _2);} ++v2i64 __lsx_vreplve_d(v2i64 _1, int _2){return __builtin_lsx_vreplve_d(_1, _2);} ++v16i8 __lsx_vreplvei_b(v16i8 _1){return __builtin_lsx_vreplvei_b(_1, 1);} ++v8i16 __lsx_vreplvei_h(v8i16 _1){return __builtin_lsx_vreplvei_h(_1, 1);} ++v4i32 __lsx_vreplvei_w(v4i32 _1){return __builtin_lsx_vreplvei_w(_1, 1);} ++v2i64 __lsx_vreplvei_d(v2i64 _1){return __builtin_lsx_vreplvei_d(_1, 1);} ++v16i8 __lsx_vpickev_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpickev_b(_1, _2);} ++v8i16 __lsx_vpickev_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpickev_h(_1, _2);} ++v4i32 __lsx_vpickev_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpickev_w(_1, _2);} ++v2i64 __lsx_vpickev_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpickev_d(_1, _2);} ++v16i8 __lsx_vpickod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpickod_b(_1, _2);} ++v8i16 __lsx_vpickod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpickod_h(_1, _2);} ++v4i32 __lsx_vpickod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpickod_w(_1, _2);} ++v2i64 __lsx_vpickod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpickod_d(_1, _2);} ++v16i8 __lsx_vilvh_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vilvh_b(_1, _2);} ++v8i16 __lsx_vilvh_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vilvh_h(_1, _2);} ++v4i32 __lsx_vilvh_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vilvh_w(_1, _2);} ++v2i64 __lsx_vilvh_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vilvh_d(_1, _2);} ++v16i8 __lsx_vilvl_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vilvl_b(_1, _2);} ++v8i16 __lsx_vilvl_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vilvl_h(_1, _2);} ++v4i32 __lsx_vilvl_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vilvl_w(_1, _2);} ++v2i64 __lsx_vilvl_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vilvl_d(_1, _2);} ++v16i8 __lsx_vpackev_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpackev_b(_1, _2);} ++v8i16 __lsx_vpackev_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpackev_h(_1, _2);} ++v4i32 __lsx_vpackev_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpackev_w(_1, _2);} ++v2i64 __lsx_vpackev_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpackev_d(_1, _2);} ++v16i8 __lsx_vpackod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpackod_b(_1, _2);} ++v8i16 __lsx_vpackod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpackod_h(_1, _2);} ++v4i32 __lsx_vpackod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpackod_w(_1, _2);} ++v2i64 __lsx_vpackod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpackod_d(_1, _2);} ++v8i16 __lsx_vshuf_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vshuf_h(_1, _2, _3);} ++v4i32 __lsx_vshuf_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vshuf_w(_1, _2, _3);} ++v2i64 __lsx_vshuf_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vshuf_d(_1, _2, _3);} ++v16u8 __lsx_vand_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vand_v(_1, _2);} ++v16u8 __lsx_vandi_b(v16u8 _1){return __builtin_lsx_vandi_b(_1, 1);} ++v16u8 __lsx_vor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vor_v(_1, _2);} ++v16u8 __lsx_vori_b(v16u8 _1){return 
__builtin_lsx_vori_b(_1, 1);} ++v16u8 __lsx_vnor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vnor_v(_1, _2);} ++v16u8 __lsx_vnori_b(v16u8 _1){return __builtin_lsx_vnori_b(_1, 1);} ++v16u8 __lsx_vxor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vxor_v(_1, _2);} ++v16u8 __lsx_vxori_b(v16u8 _1){return __builtin_lsx_vxori_b(_1, 1);} ++v16u8 __lsx_vbitsel_v(v16u8 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vbitsel_v(_1, _2, _3);} ++v16u8 __lsx_vbitseli_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitseli_b(_1, _2, 1);} ++v16i8 __lsx_vshuf4i_b(v16i8 _1){return __builtin_lsx_vshuf4i_b(_1, 1);} ++v8i16 __lsx_vshuf4i_h(v8i16 _1){return __builtin_lsx_vshuf4i_h(_1, 1);} ++v4i32 __lsx_vshuf4i_w(v4i32 _1){return __builtin_lsx_vshuf4i_w(_1, 1);} ++v16i8 __lsx_vreplgr2vr_b(int _1){return __builtin_lsx_vreplgr2vr_b(_1);} ++v8i16 __lsx_vreplgr2vr_h(int _1){return __builtin_lsx_vreplgr2vr_h(_1);} ++v4i32 __lsx_vreplgr2vr_w(int _1){return __builtin_lsx_vreplgr2vr_w(_1);} ++v2i64 __lsx_vreplgr2vr_d(long _1){return __builtin_lsx_vreplgr2vr_d(_1);} ++v16i8 __lsx_vpcnt_b(v16i8 _1){return __builtin_lsx_vpcnt_b(_1);} ++v8i16 __lsx_vpcnt_h(v8i16 _1){return __builtin_lsx_vpcnt_h(_1);} ++v4i32 __lsx_vpcnt_w(v4i32 _1){return __builtin_lsx_vpcnt_w(_1);} ++v2i64 __lsx_vpcnt_d(v2i64 _1){return __builtin_lsx_vpcnt_d(_1);} ++v16i8 __lsx_vclo_b(v16i8 _1){return __builtin_lsx_vclo_b(_1);} ++v8i16 __lsx_vclo_h(v8i16 _1){return __builtin_lsx_vclo_h(_1);} ++v4i32 __lsx_vclo_w(v4i32 _1){return __builtin_lsx_vclo_w(_1);} ++v2i64 __lsx_vclo_d(v2i64 _1){return __builtin_lsx_vclo_d(_1);} ++v16i8 __lsx_vclz_b(v16i8 _1){return __builtin_lsx_vclz_b(_1);} ++v8i16 __lsx_vclz_h(v8i16 _1){return __builtin_lsx_vclz_h(_1);} ++v4i32 __lsx_vclz_w(v4i32 _1){return __builtin_lsx_vclz_w(_1);} ++v2i64 __lsx_vclz_d(v2i64 _1){return __builtin_lsx_vclz_d(_1);} ++int __lsx_vpickve2gr_b(v16i8 _1){return __builtin_lsx_vpickve2gr_b(_1, 1);} ++int __lsx_vpickve2gr_h(v8i16 _1){return __builtin_lsx_vpickve2gr_h(_1, 1);} ++int __lsx_vpickve2gr_w(v4i32 _1){return __builtin_lsx_vpickve2gr_w(_1, 1);} ++long __lsx_vpickve2gr_d(v2i64 _1){return __builtin_lsx_vpickve2gr_d(_1, 1);} ++unsigned int __lsx_vpickve2gr_bu(v16i8 _1){return __builtin_lsx_vpickve2gr_bu(_1, 1);} ++unsigned int __lsx_vpickve2gr_hu(v8i16 _1){return __builtin_lsx_vpickve2gr_hu(_1, 1);} ++unsigned int __lsx_vpickve2gr_wu(v4i32 _1){return __builtin_lsx_vpickve2gr_wu(_1, 1);} ++unsigned long int __lsx_vpickve2gr_du(v2i64 _1){return __builtin_lsx_vpickve2gr_du(_1, 1);} ++v16i8 __lsx_vinsgr2vr_b(v16i8 _1){return __builtin_lsx_vinsgr2vr_b(_1, 1, 1);} ++v8i16 __lsx_vinsgr2vr_h(v8i16 _1){return __builtin_lsx_vinsgr2vr_h(_1, 1, 1);} ++v4i32 __lsx_vinsgr2vr_w(v4i32 _1){return __builtin_lsx_vinsgr2vr_w(_1, 1, 1);} ++v2i64 __lsx_vinsgr2vr_d(v2i64 _1){return __builtin_lsx_vinsgr2vr_d(_1, 1, 1);} ++v4f32 __lsx_vfadd_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfadd_s(_1, _2);} ++v2f64 __lsx_vfadd_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfadd_d(_1, _2);} ++v4f32 __lsx_vfsub_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfsub_s(_1, _2);} ++v2f64 __lsx_vfsub_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfsub_d(_1, _2);} ++v4f32 __lsx_vfmul_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmul_s(_1, _2);} ++v2f64 __lsx_vfmul_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmul_d(_1, _2);} ++v4f32 __lsx_vfdiv_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfdiv_s(_1, _2);} ++v2f64 __lsx_vfdiv_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfdiv_d(_1, _2);} ++v8i16 __lsx_vfcvt_h_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcvt_h_s(_1, 
_2);} ++v4f32 __lsx_vfcvt_s_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcvt_s_d(_1, _2);} ++v4f32 __lsx_vfmin_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmin_s(_1, _2);} ++v2f64 __lsx_vfmin_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmin_d(_1, _2);} ++v4f32 __lsx_vfmina_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmina_s(_1, _2);} ++v2f64 __lsx_vfmina_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmina_d(_1, _2);} ++v4f32 __lsx_vfmax_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmax_s(_1, _2);} ++v2f64 __lsx_vfmax_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmax_d(_1, _2);} ++v4f32 __lsx_vfmaxa_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmaxa_s(_1, _2);} ++v2f64 __lsx_vfmaxa_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmaxa_d(_1, _2);} ++v4i32 __lsx_vfclass_s(v4f32 _1){return __builtin_lsx_vfclass_s(_1);} ++v2i64 __lsx_vfclass_d(v2f64 _1){return __builtin_lsx_vfclass_d(_1);} ++v4f32 __lsx_vfsqrt_s(v4f32 _1){return __builtin_lsx_vfsqrt_s(_1);} ++v2f64 __lsx_vfsqrt_d(v2f64 _1){return __builtin_lsx_vfsqrt_d(_1);} ++v4f32 __lsx_vfrecip_s(v4f32 _1){return __builtin_lsx_vfrecip_s(_1);} ++v2f64 __lsx_vfrecip_d(v2f64 _1){return __builtin_lsx_vfrecip_d(_1);} ++v4f32 __lsx_vfrint_s(v4f32 _1){return __builtin_lsx_vfrint_s(_1);} ++v2f64 __lsx_vfrint_d(v2f64 _1){return __builtin_lsx_vfrint_d(_1);} ++v4f32 __lsx_vfrsqrt_s(v4f32 _1){return __builtin_lsx_vfrsqrt_s(_1);} ++v2f64 __lsx_vfrsqrt_d(v2f64 _1){return __builtin_lsx_vfrsqrt_d(_1);} ++v4f32 __lsx_vflogb_s(v4f32 _1){return __builtin_lsx_vflogb_s(_1);} ++v2f64 __lsx_vflogb_d(v2f64 _1){return __builtin_lsx_vflogb_d(_1);} ++v4f32 __lsx_vfcvth_s_h(v8i16 _1){return __builtin_lsx_vfcvth_s_h(_1);} ++v2f64 __lsx_vfcvth_d_s(v4f32 _1){return __builtin_lsx_vfcvth_d_s(_1);} ++v4f32 __lsx_vfcvtl_s_h(v8i16 _1){return __builtin_lsx_vfcvtl_s_h(_1);} ++v2f64 __lsx_vfcvtl_d_s(v4f32 _1){return __builtin_lsx_vfcvtl_d_s(_1);} ++v4i32 __lsx_vftint_w_s(v4f32 _1){return __builtin_lsx_vftint_w_s(_1);} ++v2i64 __lsx_vftint_l_d(v2f64 _1){return __builtin_lsx_vftint_l_d(_1);} ++v4u32 __lsx_vftint_wu_s(v4f32 _1){return __builtin_lsx_vftint_wu_s(_1);} ++v2u64 __lsx_vftint_lu_d(v2f64 _1){return __builtin_lsx_vftint_lu_d(_1);} ++v4i32 __lsx_vftintrz_w_s(v4f32 _1){return __builtin_lsx_vftintrz_w_s(_1);} ++v2i64 __lsx_vftintrz_l_d(v2f64 _1){return __builtin_lsx_vftintrz_l_d(_1);} ++v4u32 __lsx_vftintrz_wu_s(v4f32 _1){return __builtin_lsx_vftintrz_wu_s(_1);} ++v2u64 __lsx_vftintrz_lu_d(v2f64 _1){return __builtin_lsx_vftintrz_lu_d(_1);} ++v4f32 __lsx_vffint_s_w(v4i32 _1){return __builtin_lsx_vffint_s_w(_1);} ++v2f64 __lsx_vffint_d_l(v2i64 _1){return __builtin_lsx_vffint_d_l(_1);} ++v4f32 __lsx_vffint_s_wu(v4u32 _1){return __builtin_lsx_vffint_s_wu(_1);} ++v2f64 __lsx_vffint_d_lu(v2u64 _1){return __builtin_lsx_vffint_d_lu(_1);} ++v16u8 __lsx_vandn_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vandn_v(_1, _2);} ++v16i8 __lsx_vneg_b(v16i8 _1){return __builtin_lsx_vneg_b(_1);} ++v8i16 __lsx_vneg_h(v8i16 _1){return __builtin_lsx_vneg_h(_1);} ++v4i32 __lsx_vneg_w(v4i32 _1){return __builtin_lsx_vneg_w(_1);} ++v2i64 __lsx_vneg_d(v2i64 _1){return __builtin_lsx_vneg_d(_1);} ++v16i8 __lsx_vmuh_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmuh_b(_1, _2);} ++v8i16 __lsx_vmuh_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmuh_h(_1, _2);} ++v4i32 __lsx_vmuh_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmuh_w(_1, _2);} ++v2i64 __lsx_vmuh_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmuh_d(_1, _2);} ++v16u8 __lsx_vmuh_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmuh_bu(_1, _2);} ++v8u16 __lsx_vmuh_hu(v8u16 _1, 
v8u16 _2){return __builtin_lsx_vmuh_hu(_1, _2);} ++v4u32 __lsx_vmuh_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmuh_wu(_1, _2);} ++v2u64 __lsx_vmuh_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmuh_du(_1, _2);} ++v8i16 __lsx_vsllwil_h_b(v16i8 _1){return __builtin_lsx_vsllwil_h_b(_1, 1);} ++v4i32 __lsx_vsllwil_w_h(v8i16 _1){return __builtin_lsx_vsllwil_w_h(_1, 1);} ++v2i64 __lsx_vsllwil_d_w(v4i32 _1){return __builtin_lsx_vsllwil_d_w(_1, 1);} ++v8u16 __lsx_vsllwil_hu_bu(v16u8 _1){return __builtin_lsx_vsllwil_hu_bu(_1, 1);} ++v4u32 __lsx_vsllwil_wu_hu(v8u16 _1){return __builtin_lsx_vsllwil_wu_hu(_1, 1);} ++v2u64 __lsx_vsllwil_du_wu(v4u32 _1){return __builtin_lsx_vsllwil_du_wu(_1, 1);} ++v16i8 __lsx_vsran_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsran_b_h(_1, _2);} ++v8i16 __lsx_vsran_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsran_h_w(_1, _2);} ++v4i32 __lsx_vsran_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsran_w_d(_1, _2);} ++v16i8 __lsx_vssran_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssran_b_h(_1, _2);} ++v8i16 __lsx_vssran_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssran_h_w(_1, _2);} ++v4i32 __lsx_vssran_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssran_w_d(_1, _2);} ++v16u8 __lsx_vssran_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssran_bu_h(_1, _2);} ++v8u16 __lsx_vssran_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssran_hu_w(_1, _2);} ++v4u32 __lsx_vssran_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssran_wu_d(_1, _2);} ++v16i8 __lsx_vsrarn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrarn_b_h(_1, _2);} ++v8i16 __lsx_vsrarn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrarn_h_w(_1, _2);} ++v4i32 __lsx_vsrarn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrarn_w_d(_1, _2);} ++v16i8 __lsx_vssrarn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrarn_b_h(_1, _2);} ++v8i16 __lsx_vssrarn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrarn_h_w(_1, _2);} ++v4i32 __lsx_vssrarn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrarn_w_d(_1, _2);} ++v16u8 __lsx_vssrarn_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrarn_bu_h(_1, _2);} ++v8u16 __lsx_vssrarn_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrarn_hu_w(_1, _2);} ++v4u32 __lsx_vssrarn_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrarn_wu_d(_1, _2);} ++v16i8 __lsx_vsrln_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrln_b_h(_1, _2);} ++v8i16 __lsx_vsrln_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrln_h_w(_1, _2);} ++v4i32 __lsx_vsrln_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrln_w_d(_1, _2);} ++v16u8 __lsx_vssrln_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrln_bu_h(_1, _2);} ++v8u16 __lsx_vssrln_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrln_hu_w(_1, _2);} ++v4u32 __lsx_vssrln_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrln_wu_d(_1, _2);} ++v16i8 __lsx_vsrlrn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlrn_b_h(_1, _2);} ++v8i16 __lsx_vsrlrn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlrn_h_w(_1, _2);} ++v4i32 __lsx_vsrlrn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlrn_w_d(_1, _2);} ++v16u8 __lsx_vssrlrn_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrlrn_bu_h(_1, _2);} ++v8u16 __lsx_vssrlrn_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrlrn_hu_w(_1, _2);} ++v4u32 __lsx_vssrlrn_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrlrn_wu_d(_1, _2);} ++v16i8 __lsx_vfrstpi_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vfrstpi_b(_1, _2, 1);} ++v8i16 __lsx_vfrstpi_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vfrstpi_h(_1, _2, 1);} ++v16i8 __lsx_vfrstp_b(v16i8 _1, v16i8 _2, 
v16i8 _3){return __builtin_lsx_vfrstp_b(_1, _2, _3);} ++v8i16 __lsx_vfrstp_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vfrstp_h(_1, _2, _3);} ++v2i64 __lsx_vshuf4i_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vshuf4i_d(_1, _2, 1);} ++v16i8 __lsx_vbsrl_v(v16i8 _1){return __builtin_lsx_vbsrl_v(_1, 1);} ++v16i8 __lsx_vbsll_v(v16i8 _1){return __builtin_lsx_vbsll_v(_1, 1);} ++v16i8 __lsx_vextrins_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vextrins_b(_1, _2, 1);} ++v8i16 __lsx_vextrins_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vextrins_h(_1, _2, 1);} ++v4i32 __lsx_vextrins_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vextrins_w(_1, _2, 1);} ++v2i64 __lsx_vextrins_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vextrins_d(_1, _2, 1);} ++v16i8 __lsx_vmskltz_b(v16i8 _1){return __builtin_lsx_vmskltz_b(_1);} ++v8i16 __lsx_vmskltz_h(v8i16 _1){return __builtin_lsx_vmskltz_h(_1);} ++v4i32 __lsx_vmskltz_w(v4i32 _1){return __builtin_lsx_vmskltz_w(_1);} ++v2i64 __lsx_vmskltz_d(v2i64 _1){return __builtin_lsx_vmskltz_d(_1);} ++v16i8 __lsx_vsigncov_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsigncov_b(_1, _2);} ++v8i16 __lsx_vsigncov_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsigncov_h(_1, _2);} ++v4i32 __lsx_vsigncov_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsigncov_w(_1, _2);} ++v2i64 __lsx_vsigncov_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsigncov_d(_1, _2);} ++v4f32 __lsx_vfmadd_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfmadd_s(_1, _2, _3);} ++v2f64 __lsx_vfmadd_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfmadd_d(_1, _2, _3);} ++v4f32 __lsx_vfmsub_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfmsub_s(_1, _2, _3);} ++v2f64 __lsx_vfmsub_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfmsub_d(_1, _2, _3);} ++v4f32 __lsx_vfnmadd_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfnmadd_s(_1, _2, _3);} ++v2f64 __lsx_vfnmadd_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfnmadd_d(_1, _2, _3);} ++v4f32 __lsx_vfnmsub_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfnmsub_s(_1, _2, _3);} ++v2f64 __lsx_vfnmsub_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfnmsub_d(_1, _2, _3);} ++v4i32 __lsx_vftintrne_w_s(v4f32 _1){return __builtin_lsx_vftintrne_w_s(_1);} ++v2i64 __lsx_vftintrne_l_d(v2f64 _1){return __builtin_lsx_vftintrne_l_d(_1);} ++v4i32 __lsx_vftintrp_w_s(v4f32 _1){return __builtin_lsx_vftintrp_w_s(_1);} ++v2i64 __lsx_vftintrp_l_d(v2f64 _1){return __builtin_lsx_vftintrp_l_d(_1);} ++v4i32 __lsx_vftintrm_w_s(v4f32 _1){return __builtin_lsx_vftintrm_w_s(_1);} ++v2i64 __lsx_vftintrm_l_d(v2f64 _1){return __builtin_lsx_vftintrm_l_d(_1);} ++v4i32 __lsx_vftint_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftint_w_d(_1, _2);} ++v4f32 __lsx_vffint_s_l(v2i64 _1, v2i64 _2){return __builtin_lsx_vffint_s_l(_1, _2);} ++v4i32 __lsx_vftintrz_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrz_w_d(_1, _2);} ++v4i32 __lsx_vftintrp_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrp_w_d(_1, _2);} ++v4i32 __lsx_vftintrm_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrm_w_d(_1, _2);} ++v4i32 __lsx_vftintrne_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrne_w_d(_1, _2);} ++v2i64 __lsx_vftintl_l_s(v4f32 _1){return __builtin_lsx_vftintl_l_s(_1);} ++v2i64 __lsx_vftinth_l_s(v4f32 _1){return __builtin_lsx_vftinth_l_s(_1);} ++v2f64 __lsx_vffinth_d_w(v4i32 _1){return __builtin_lsx_vffinth_d_w(_1);} ++v2f64 __lsx_vffintl_d_w(v4i32 _1){return __builtin_lsx_vffintl_d_w(_1);} ++v2i64 __lsx_vftintrzl_l_s(v4f32 _1){return __builtin_lsx_vftintrzl_l_s(_1);} ++v2i64 
__lsx_vftintrzh_l_s(v4f32 _1){return __builtin_lsx_vftintrzh_l_s(_1);} ++v2i64 __lsx_vftintrpl_l_s(v4f32 _1){return __builtin_lsx_vftintrpl_l_s(_1);} ++v2i64 __lsx_vftintrph_l_s(v4f32 _1){return __builtin_lsx_vftintrph_l_s(_1);} ++v2i64 __lsx_vftintrml_l_s(v4f32 _1){return __builtin_lsx_vftintrml_l_s(_1);} ++v2i64 __lsx_vftintrmh_l_s(v4f32 _1){return __builtin_lsx_vftintrmh_l_s(_1);} ++v2i64 __lsx_vftintrnel_l_s(v4f32 _1){return __builtin_lsx_vftintrnel_l_s(_1);} ++v2i64 __lsx_vftintrneh_l_s(v4f32 _1){return __builtin_lsx_vftintrneh_l_s(_1);} ++v4i32 __lsx_vfrintrne_s(v4f32 _1){return __builtin_lsx_vfrintrne_s(_1);} ++v2i64 __lsx_vfrintrne_d(v2f64 _1){return __builtin_lsx_vfrintrne_d(_1);} ++v4i32 __lsx_vfrintrz_s(v4f32 _1){return __builtin_lsx_vfrintrz_s(_1);} ++v2i64 __lsx_vfrintrz_d(v2f64 _1){return __builtin_lsx_vfrintrz_d(_1);} ++v4i32 __lsx_vfrintrp_s(v4f32 _1){return __builtin_lsx_vfrintrp_s(_1);} ++v2i64 __lsx_vfrintrp_d(v2f64 _1){return __builtin_lsx_vfrintrp_d(_1);} ++v4i32 __lsx_vfrintrm_s(v4f32 _1){return __builtin_lsx_vfrintrm_s(_1);} ++v2i64 __lsx_vfrintrm_d(v2f64 _1){return __builtin_lsx_vfrintrm_d(_1);} ++void __lsx_vstelm_b(v16i8 _1, void * _2){return __builtin_lsx_vstelm_b(_1, _2, 1, 1);} ++void __lsx_vstelm_h(v8i16 _1, void * _2){return __builtin_lsx_vstelm_h(_1, _2, 2, 1);} ++void __lsx_vstelm_w(v4i32 _1, void * _2){return __builtin_lsx_vstelm_w(_1, _2, 4, 1);} ++void __lsx_vstelm_d(v2i64 _1, void * _2){return __builtin_lsx_vstelm_d(_1, _2, 8, 1);} ++v2i64 __lsx_vaddwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vaddwev_d_w(_1, _2);} ++v4i32 __lsx_vaddwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vaddwev_w_h(_1, _2);} ++v8i16 __lsx_vaddwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vaddwev_h_b(_1, _2);} ++v2i64 __lsx_vaddwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vaddwod_d_w(_1, _2);} ++v4i32 __lsx_vaddwod_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vaddwod_w_h(_1, _2);} ++v8i16 __lsx_vaddwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vaddwod_h_b(_1, _2);} ++v2i64 __lsx_vaddwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vaddwev_d_wu(_1, _2);} ++v4i32 __lsx_vaddwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vaddwev_w_hu(_1, _2);} ++v8i16 __lsx_vaddwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vaddwev_h_bu(_1, _2);} ++v2i64 __lsx_vaddwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vaddwod_d_wu(_1, _2);} ++v4i32 __lsx_vaddwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vaddwod_w_hu(_1, _2);} ++v8i16 __lsx_vaddwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vaddwod_h_bu(_1, _2);} ++v2i64 __lsx_vaddwev_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vaddwev_d_wu_w(_1, _2);} ++v4i32 __lsx_vaddwev_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vaddwev_w_hu_h(_1, _2);} ++v8i16 __lsx_vaddwev_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vaddwev_h_bu_b(_1, _2);} ++v2i64 __lsx_vaddwod_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vaddwod_d_wu_w(_1, _2);} ++v4i32 __lsx_vaddwod_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vaddwod_w_hu_h(_1, _2);} ++v8i16 __lsx_vaddwod_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vaddwod_h_bu_b(_1, _2);} ++v2i64 __lsx_vsubwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsubwev_d_w(_1, _2);} ++v4i32 __lsx_vsubwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsubwev_w_h(_1, _2);} ++v8i16 __lsx_vsubwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsubwev_h_b(_1, _2);} ++v2i64 __lsx_vsubwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsubwod_d_w(_1, _2);} ++v4i32 __lsx_vsubwod_w_h(v8i16 _1, v8i16 _2){return 
__builtin_lsx_vsubwod_w_h(_1, _2);} ++v8i16 __lsx_vsubwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsubwod_h_b(_1, _2);} ++v2i64 __lsx_vsubwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsubwev_d_wu(_1, _2);} ++v4i32 __lsx_vsubwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsubwev_w_hu(_1, _2);} ++v8i16 __lsx_vsubwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsubwev_h_bu(_1, _2);} ++v2i64 __lsx_vsubwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsubwod_d_wu(_1, _2);} ++v4i32 __lsx_vsubwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsubwod_w_hu(_1, _2);} ++v8i16 __lsx_vsubwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsubwod_h_bu(_1, _2);} ++v2i64 __lsx_vaddwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vaddwev_q_d(_1, _2);} ++v2i64 __lsx_vaddwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vaddwod_q_d(_1, _2);} ++v2i64 __lsx_vaddwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vaddwev_q_du(_1, _2);} ++v2i64 __lsx_vaddwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vaddwod_q_du(_1, _2);} ++v2i64 __lsx_vsubwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsubwev_q_d(_1, _2);} ++v2i64 __lsx_vsubwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsubwod_q_d(_1, _2);} ++v2i64 __lsx_vsubwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsubwev_q_du(_1, _2);} ++v2i64 __lsx_vsubwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsubwod_q_du(_1, _2);} ++v2i64 __lsx_vaddwev_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vaddwev_q_du_d(_1, _2);} ++v2i64 __lsx_vaddwod_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vaddwod_q_du_d(_1, _2);} ++v2i64 __lsx_vmulwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmulwev_d_w(_1, _2);} ++v4i32 __lsx_vmulwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmulwev_w_h(_1, _2);} ++v8i16 __lsx_vmulwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmulwev_h_b(_1, _2);} ++v2i64 __lsx_vmulwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmulwod_d_w(_1, _2);} ++v4i32 __lsx_vmulwod_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmulwod_w_h(_1, _2);} ++v8i16 __lsx_vmulwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmulwod_h_b(_1, _2);} ++v2i64 __lsx_vmulwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmulwev_d_wu(_1, _2);} ++v4i32 __lsx_vmulwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmulwev_w_hu(_1, _2);} ++v8i16 __lsx_vmulwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmulwev_h_bu(_1, _2);} ++v2i64 __lsx_vmulwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmulwod_d_wu(_1, _2);} ++v4i32 __lsx_vmulwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmulwod_w_hu(_1, _2);} ++v8i16 __lsx_vmulwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmulwod_h_bu(_1, _2);} ++v2i64 __lsx_vmulwev_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vmulwev_d_wu_w(_1, _2);} ++v4i32 __lsx_vmulwev_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vmulwev_w_hu_h(_1, _2);} ++v8i16 __lsx_vmulwev_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vmulwev_h_bu_b(_1, _2);} ++v2i64 __lsx_vmulwod_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vmulwod_d_wu_w(_1, _2);} ++v4i32 __lsx_vmulwod_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vmulwod_w_hu_h(_1, _2);} ++v8i16 __lsx_vmulwod_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vmulwod_h_bu_b(_1, _2);} ++v2i64 __lsx_vmulwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmulwev_q_d(_1, _2);} ++v2i64 __lsx_vmulwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmulwod_q_d(_1, _2);} ++v2i64 __lsx_vmulwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmulwev_q_du(_1, _2);} ++v2i64 __lsx_vmulwod_q_du(v2u64 _1, v2u64 _2){return 
__builtin_lsx_vmulwod_q_du(_1, _2);} ++v2i64 __lsx_vmulwev_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vmulwev_q_du_d(_1, _2);} ++v2i64 __lsx_vmulwod_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vmulwod_q_du_d(_1, _2);} ++v2i64 __lsx_vhaddw_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vhaddw_q_d(_1, _2);} ++v2u64 __lsx_vhaddw_qu_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vhaddw_qu_du(_1, _2);} ++v2i64 __lsx_vhsubw_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vhsubw_q_d(_1, _2);} ++v2u64 __lsx_vhsubw_qu_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vhsubw_qu_du(_1, _2);} ++v2i64 __lsx_vmaddwev_d_w(v2i64 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmaddwev_d_w(_1, _2, _3);} ++v4i32 __lsx_vmaddwev_w_h(v4i32 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmaddwev_w_h(_1, _2, _3);} ++v8i16 __lsx_vmaddwev_h_b(v8i16 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmaddwev_h_b(_1, _2, _3);} ++v2u64 __lsx_vmaddwev_d_wu(v2u64 _1, v4u32 _2, v4u32 _3){return __builtin_lsx_vmaddwev_d_wu(_1, _2, _3);} ++v4u32 __lsx_vmaddwev_w_hu(v4u32 _1, v8u16 _2, v8u16 _3){return __builtin_lsx_vmaddwev_w_hu(_1, _2, _3);} ++v8u16 __lsx_vmaddwev_h_bu(v8u16 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vmaddwev_h_bu(_1, _2, _3);} ++v2i64 __lsx_vmaddwod_d_w(v2i64 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmaddwod_d_w(_1, _2, _3);} ++v4i32 __lsx_vmaddwod_w_h(v4i32 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmaddwod_w_h(_1, _2, _3);} ++v8i16 __lsx_vmaddwod_h_b(v8i16 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmaddwod_h_b(_1, _2, _3);} ++v2u64 __lsx_vmaddwod_d_wu(v2u64 _1, v4u32 _2, v4u32 _3){return __builtin_lsx_vmaddwod_d_wu(_1, _2, _3);} ++v4u32 __lsx_vmaddwod_w_hu(v4u32 _1, v8u16 _2, v8u16 _3){return __builtin_lsx_vmaddwod_w_hu(_1, _2, _3);} ++v8u16 __lsx_vmaddwod_h_bu(v8u16 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vmaddwod_h_bu(_1, _2, _3);} ++v2i64 __lsx_vmaddwev_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3){return __builtin_lsx_vmaddwev_d_wu_w(_1, _2, _3);} ++v4i32 __lsx_vmaddwev_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3){return __builtin_lsx_vmaddwev_w_hu_h(_1, _2, _3);} ++v8i16 __lsx_vmaddwev_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3){return __builtin_lsx_vmaddwev_h_bu_b(_1, _2, _3);} ++v2i64 __lsx_vmaddwod_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3){return __builtin_lsx_vmaddwod_d_wu_w(_1, _2, _3);} ++v4i32 __lsx_vmaddwod_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3){return __builtin_lsx_vmaddwod_w_hu_h(_1, _2, _3);} ++v8i16 __lsx_vmaddwod_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3){return __builtin_lsx_vmaddwod_h_bu_b(_1, _2, _3);} ++v2i64 __lsx_vmaddwev_q_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmaddwev_q_d(_1, _2, _3);} ++v2i64 __lsx_vmaddwod_q_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmaddwod_q_d(_1, _2, _3);} ++v2u64 __lsx_vmaddwev_q_du(v2u64 _1, v2u64 _2, v2u64 _3){return __builtin_lsx_vmaddwev_q_du(_1, _2, _3);} ++v2u64 __lsx_vmaddwod_q_du(v2u64 _1, v2u64 _2, v2u64 _3){return __builtin_lsx_vmaddwod_q_du(_1, _2, _3);} ++v2i64 __lsx_vmaddwev_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3){return __builtin_lsx_vmaddwev_q_du_d(_1, _2, _3);} ++v2i64 __lsx_vmaddwod_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3){return __builtin_lsx_vmaddwod_q_du_d(_1, _2, _3);} ++v16i8 __lsx_vrotr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vrotr_b(_1, _2);} ++v8i16 __lsx_vrotr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vrotr_h(_1, _2);} ++v4i32 __lsx_vrotr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vrotr_w(_1, _2);} ++v2i64 __lsx_vrotr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vrotr_d(_1, _2);} ++v2i64 __lsx_vadd_q(v2i64 _1, v2i64 _2){return 
__builtin_lsx_vadd_q(_1, _2);} ++v2i64 __lsx_vsub_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsub_q(_1, _2);} ++v16i8 __lsx_vldrepl_b(void * _1){return __builtin_lsx_vldrepl_b(_1, 1);} ++v8i16 __lsx_vldrepl_h(void * _1){return __builtin_lsx_vldrepl_h(_1, 2);} ++v4i32 __lsx_vldrepl_w(void * _1){return __builtin_lsx_vldrepl_w(_1, 4);} ++v2i64 __lsx_vldrepl_d(void * _1){return __builtin_lsx_vldrepl_d(_1, 8);} ++v16i8 __lsx_vmskgez_b(v16i8 _1){return __builtin_lsx_vmskgez_b(_1);} ++v16i8 __lsx_vmsknz_b(v16i8 _1){return __builtin_lsx_vmsknz_b(_1);} ++v8i16 __lsx_vexth_h_b(v16i8 _1){return __builtin_lsx_vexth_h_b(_1);} ++v4i32 __lsx_vexth_w_h(v8i16 _1){return __builtin_lsx_vexth_w_h(_1);} ++v2i64 __lsx_vexth_d_w(v4i32 _1){return __builtin_lsx_vexth_d_w(_1);} ++v2i64 __lsx_vexth_q_d(v2i64 _1){return __builtin_lsx_vexth_q_d(_1);} ++v8u16 __lsx_vexth_hu_bu(v16u8 _1){return __builtin_lsx_vexth_hu_bu(_1);} ++v4u32 __lsx_vexth_wu_hu(v8u16 _1){return __builtin_lsx_vexth_wu_hu(_1);} ++v2u64 __lsx_vexth_du_wu(v4u32 _1){return __builtin_lsx_vexth_du_wu(_1);} ++v2u64 __lsx_vexth_qu_du(v2u64 _1){return __builtin_lsx_vexth_qu_du(_1);} ++v16i8 __lsx_vrotri_b(v16i8 _1){return __builtin_lsx_vrotri_b(_1, 1);} ++v8i16 __lsx_vrotri_h(v8i16 _1){return __builtin_lsx_vrotri_h(_1, 1);} ++v4i32 __lsx_vrotri_w(v4i32 _1){return __builtin_lsx_vrotri_w(_1, 1);} ++v2i64 __lsx_vrotri_d(v2i64 _1){return __builtin_lsx_vrotri_d(_1, 1);} ++v2i64 __lsx_vextl_q_d(v2i64 _1){return __builtin_lsx_vextl_q_d(_1);} ++v16i8 __lsx_vsrlni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlni_b_h(_1, _2, 1);} ++v8i16 __lsx_vsrlni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlni_h_w(_1, _2, 1);} ++v4i32 __lsx_vsrlni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlni_w_d(_1, _2, 1);} ++v2i64 __lsx_vsrlni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlni_d_q(_1, _2, 1);} ++v16i8 __lsx_vsrlrni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlrni_b_h(_1, _2, 1);} ++v8i16 __lsx_vsrlrni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlrni_h_w(_1, _2, 1);} ++v4i32 __lsx_vsrlrni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlrni_w_d(_1, _2, 1);} ++v2i64 __lsx_vsrlrni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlrni_d_q(_1, _2, 1);} ++v16i8 __lsx_vssrlni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrlni_b_h(_1, _2, 1);} ++v8i16 __lsx_vssrlni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlni_h_w(_1, _2, 1);} ++v4i32 __lsx_vssrlni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlni_w_d(_1, _2, 1);} ++v2i64 __lsx_vssrlni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlni_d_q(_1, _2, 1);} ++v16u8 __lsx_vssrlni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrlni_bu_h(_1, _2, 1);} ++v8u16 __lsx_vssrlni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrlni_hu_w(_1, _2, 1);} ++v4u32 __lsx_vssrlni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrlni_wu_d(_1, _2, 1);} ++v2u64 __lsx_vssrlni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrlni_du_q(_1, _2, 1);} ++v16i8 __lsx_vssrlrni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrlrni_b_h(_1, _2, 1);} ++v8i16 __lsx_vssrlrni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlrni_h_w(_1, _2, 1);} ++v4i32 __lsx_vssrlrni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlrni_w_d(_1, _2, 1);} ++v2i64 __lsx_vssrlrni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlrni_d_q(_1, _2, 1);} ++v16u8 __lsx_vssrlrni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrlrni_bu_h(_1, _2, 1);} ++v8u16 __lsx_vssrlrni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrlrni_hu_w(_1, _2, 1);} ++v4u32 
__lsx_vssrlrni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrlrni_wu_d(_1, _2, 1);} ++v2u64 __lsx_vssrlrni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrlrni_du_q(_1, _2, 1);} ++v16i8 __lsx_vsrani_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrani_b_h(_1, _2, 1);} ++v8i16 __lsx_vsrani_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrani_h_w(_1, _2, 1);} ++v4i32 __lsx_vsrani_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrani_w_d(_1, _2, 1);} ++v2i64 __lsx_vsrani_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrani_d_q(_1, _2, 1);} ++v16i8 __lsx_vsrarni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrarni_b_h(_1, _2, 1);} ++v8i16 __lsx_vsrarni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrarni_h_w(_1, _2, 1);} ++v4i32 __lsx_vsrarni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrarni_w_d(_1, _2, 1);} ++v2i64 __lsx_vsrarni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrarni_d_q(_1, _2, 1);} ++v16i8 __lsx_vssrani_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrani_b_h(_1, _2, 1);} ++v8i16 __lsx_vssrani_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrani_h_w(_1, _2, 1);} ++v4i32 __lsx_vssrani_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrani_w_d(_1, _2, 1);} ++v2i64 __lsx_vssrani_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrani_d_q(_1, _2, 1);} ++v16u8 __lsx_vssrani_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrani_bu_h(_1, _2, 1);} ++v8u16 __lsx_vssrani_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrani_hu_w(_1, _2, 1);} ++v4u32 __lsx_vssrani_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrani_wu_d(_1, _2, 1);} ++v2u64 __lsx_vssrani_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrani_du_q(_1, _2, 1);} ++v16i8 __lsx_vssrarni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrarni_b_h(_1, _2, 1);} ++v8i16 __lsx_vssrarni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrarni_h_w(_1, _2, 1);} ++v4i32 __lsx_vssrarni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrarni_w_d(_1, _2, 1);} ++v2i64 __lsx_vssrarni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrarni_d_q(_1, _2, 1);} ++v16u8 __lsx_vssrarni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrarni_bu_h(_1, _2, 1);} ++v8u16 __lsx_vssrarni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrarni_hu_w(_1, _2, 1);} ++v4u32 __lsx_vssrarni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrarni_wu_d(_1, _2, 1);} ++v2u64 __lsx_vssrarni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrarni_du_q(_1, _2, 1);} ++v4i32 __lsx_vpermi_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpermi_w(_1, _2, 1);} ++v16i8 __lsx_vld(void * _1){return __builtin_lsx_vld(_1, 1);} ++void __lsx_vst(v16i8 _1, void * _2){return __builtin_lsx_vst(_1, _2, 1);} ++v16i8 __lsx_vssrlrn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlrn_b_h(_1, _2);} ++v8i16 __lsx_vssrlrn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlrn_h_w(_1, _2);} ++v4i32 __lsx_vssrlrn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlrn_w_d(_1, _2);} ++v16i8 __lsx_vssrln_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrln_b_h(_1, _2);} ++v8i16 __lsx_vssrln_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrln_h_w(_1, _2);} ++v4i32 __lsx_vssrln_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrln_w_d(_1, _2);} ++v16i8 __lsx_vorn_v(v16i8 _1, v16i8 _2){return __builtin_lsx_vorn_v(_1, _2);} ++v2i64 __lsx_vldi(){return __builtin_lsx_vldi(1);} ++v16i8 __lsx_vshuf_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vshuf_b(_1, _2, _3);} ++v16i8 __lsx_vldx(void * _1){return __builtin_lsx_vldx(_1, 1);} ++void __lsx_vstx(v16i8 _1, void * _2){return __builtin_lsx_vstx(_1, _2, 1);} ++v2u64 
__lsx_vextl_qu_du(v2u64 _1){return __builtin_lsx_vextl_qu_du(_1);} ++int __lsx_bnz_b(v16u8 _1){return __builtin_lsx_bnz_b(_1);} ++int __lsx_bnz_d(v2u64 _1){return __builtin_lsx_bnz_d(_1);} ++int __lsx_bnz_h(v8u16 _1){return __builtin_lsx_bnz_h(_1);} ++int __lsx_bnz_v(v16u8 _1){return __builtin_lsx_bnz_v(_1);} ++int __lsx_bnz_w(v4u32 _1){return __builtin_lsx_bnz_w(_1);} ++int __lsx_bz_b(v16u8 _1){return __builtin_lsx_bz_b(_1);} ++int __lsx_bz_d(v2u64 _1){return __builtin_lsx_bz_d(_1);} ++int __lsx_bz_h(v8u16 _1){return __builtin_lsx_bz_h(_1);} ++int __lsx_bz_v(v16u8 _1){return __builtin_lsx_bz_v(_1);} ++int __lsx_bz_w(v4u32 _1){return __builtin_lsx_bz_w(_1);} ++v2i64 __lsx_vfcmp_caf_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_caf_d(_1, _2);} ++v4i32 __lsx_vfcmp_caf_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_caf_s(_1, _2);} ++v2i64 __lsx_vfcmp_ceq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_ceq_d(_1, _2);} ++v4i32 __lsx_vfcmp_ceq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_ceq_s(_1, _2);} ++v2i64 __lsx_vfcmp_cle_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cle_d(_1, _2);} ++v4i32 __lsx_vfcmp_cle_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cle_s(_1, _2);} ++v2i64 __lsx_vfcmp_clt_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_clt_d(_1, _2);} ++v4i32 __lsx_vfcmp_clt_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_clt_s(_1, _2);} ++v2i64 __lsx_vfcmp_cne_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cne_d(_1, _2);} ++v4i32 __lsx_vfcmp_cne_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cne_s(_1, _2);} ++v2i64 __lsx_vfcmp_cor_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cor_d(_1, _2);} ++v4i32 __lsx_vfcmp_cor_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cor_s(_1, _2);} ++v2i64 __lsx_vfcmp_cueq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cueq_d(_1, _2);} ++v4i32 __lsx_vfcmp_cueq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cueq_s(_1, _2);} ++v2i64 __lsx_vfcmp_cule_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cule_d(_1, _2);} ++v4i32 __lsx_vfcmp_cule_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cule_s(_1, _2);} ++v2i64 __lsx_vfcmp_cult_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cult_d(_1, _2);} ++v4i32 __lsx_vfcmp_cult_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cult_s(_1, _2);} ++v2i64 __lsx_vfcmp_cun_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cun_d(_1, _2);} ++v2i64 __lsx_vfcmp_cune_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cune_d(_1, _2);} ++v4i32 __lsx_vfcmp_cune_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cune_s(_1, _2);} ++v4i32 __lsx_vfcmp_cun_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cun_s(_1, _2);} ++v2i64 __lsx_vfcmp_saf_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_saf_d(_1, _2);} ++v4i32 __lsx_vfcmp_saf_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_saf_s(_1, _2);} ++v2i64 __lsx_vfcmp_seq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_seq_d(_1, _2);} ++v4i32 __lsx_vfcmp_seq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_seq_s(_1, _2);} ++v2i64 __lsx_vfcmp_sle_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sle_d(_1, _2);} ++v4i32 __lsx_vfcmp_sle_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sle_s(_1, _2);} ++v2i64 __lsx_vfcmp_slt_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_slt_d(_1, _2);} ++v4i32 __lsx_vfcmp_slt_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_slt_s(_1, _2);} ++v2i64 __lsx_vfcmp_sne_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sne_d(_1, _2);} ++v4i32 __lsx_vfcmp_sne_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sne_s(_1, _2);} ++v2i64 
__lsx_vfcmp_sor_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sor_d(_1, _2);} ++v4i32 __lsx_vfcmp_sor_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sor_s(_1, _2);} ++v2i64 __lsx_vfcmp_sueq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sueq_d(_1, _2);} ++v4i32 __lsx_vfcmp_sueq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sueq_s(_1, _2);} ++v2i64 __lsx_vfcmp_sule_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sule_d(_1, _2);} ++v4i32 __lsx_vfcmp_sule_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sule_s(_1, _2);} ++v2i64 __lsx_vfcmp_sult_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sult_d(_1, _2);} ++v4i32 __lsx_vfcmp_sult_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sult_s(_1, _2);} ++v2i64 __lsx_vfcmp_sun_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sun_d(_1, _2);} ++v2i64 __lsx_vfcmp_sune_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sune_d(_1, _2);} ++v4i32 __lsx_vfcmp_sune_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sune_s(_1, _2);} ++v4i32 __lsx_vfcmp_sun_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sun_s(_1, _2);} ++v16i8 __lsx_vrepli_b(){return __builtin_lsx_vrepli_b(1);} ++v2i64 __lsx_vrepli_d(){return __builtin_lsx_vrepli_d(1);} ++v8i16 __lsx_vrepli_h(){return __builtin_lsx_vrepli_h(1);} ++v4i32 __lsx_vrepli_w(){return __builtin_lsx_vrepli_w(1);} +diff --git a/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c b/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c +new file mode 100644 +index 000000000..2e04b99e3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c +@@ -0,0 +1,16 @@ ++/* Test asm const. */ ++/* { dg-do compile } */ ++/* { dg-final { scan-assembler-times "foo:.*\\.long 1061109567.*\\.long 52" 1 } } */ ++int foo () ++{ ++ __asm__ volatile ( ++ "foo:" ++ "\n\t" ++ ".long %a0\n\t" ++ ".long %a1\n\t" ++ : ++ :"i"(0x3f3f3f3f), "i"(52) ++ : ++ ); ++} ++ +diff --git a/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c b/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c +index baed48cf5..6e22c0e11 100644 +--- a/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c ++++ b/gcc/testsuite/gcc.target/mips/loongson-shift-count-truncated-1.c +@@ -4,11 +4,11 @@ + /* loongson.h does not handle or check for MIPS16ness. There doesn't + seem any good reason for it to, given that the Loongson processors + do not support MIPS16. */ +-/* { dg-options "isa=loongson -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */ ++/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */ + /* See PR 52155. */ +-/* { dg-options "isa=loongson -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */ ++/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */ + +-#include "loongson.h" ++#include "loongson-mmiintrin.h" + #include + + typedef union { int32x2_t v; int32_t a[2]; } int32x2_encap_t; +diff --git a/gcc/testsuite/gcc.target/mips/loongson-simd.c b/gcc/testsuite/gcc.target/mips/loongson-simd.c +index f263b4393..34fdcecc6 100644 +--- a/gcc/testsuite/gcc.target/mips/loongson-simd.c ++++ b/gcc/testsuite/gcc.target/mips/loongson-simd.c +@@ -26,9 +26,9 @@ along with GCC; see the file COPYING3. If not see + because inclusion of some system headers e.g. stdint.h will fail due to not + finding stubs-o32_hard.h. 
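The generated lsx-builtin.c wrappers above exist only to force every LSX builtin through the compiler once; immediate operands are pinned to literal constants (1, or the element size for loads/stores) because the builtins reject non-constant immediates. A minimal stand-alone sketch of the same pattern, assuming the vector typedefs used by the test and the vadd.w/vsrai.w builtins this patch registers (built with -mlsx):

    /* Hedged sketch, not part of the patch: __builtin_lsx_vadd_w and
       __builtin_lsx_vsrai_w are assumed to be registered as in the
       listing above.  */
    typedef int v4i32 __attribute__ ((vector_size (16), aligned (16)));

    v4i32
    add_then_halve (v4i32 a, v4i32 b)
    {
      v4i32 sum = __builtin_lsx_vadd_w (a, b); /* element-wise 32-bit add */
      return __builtin_lsx_vsrai_w (sum, 1);   /* shift amount must be a constant */
    }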
*/ + /* { dg-require-effective-target mips_nanlegacy } */ +-/* { dg-options "isa=loongson -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */ ++/* { dg-options "-mloongson-mmi -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */ + +-#include "loongson.h" ++#include "loongson-mmiintrin.h" + #include + #include + #include +diff --git a/gcc/testsuite/gcc.target/mips/mips.exp b/gcc/testsuite/gcc.target/mips/mips.exp +index 9db4fbe29..975c51f82 100644 +--- a/gcc/testsuite/gcc.target/mips/mips.exp ++++ b/gcc/testsuite/gcc.target/mips/mips.exp +@@ -281,6 +281,7 @@ foreach option { + fix-r4000 + fix-r10000 + fix-vr4130 ++ fix-loongson3-llsc + gpopt + local-sdata + long-calls +@@ -296,6 +297,9 @@ foreach option { + mcount-ra-address + odd-spreg + msa ++ loongson-mmi ++ loongson-ext ++ loongson-ext2 + } { + lappend mips_option_groups $option "-m(no-|)$option" + } +@@ -883,6 +887,12 @@ proc mips-dg-init {} { + "-mno-msa" + #endif + ++ #ifdef __mips_loongson_mmi ++ "-mloongson-mmi" ++ #else ++ "-mno-loongson-mmi" ++ #endif ++ + 0 + }; + } 0] +@@ -1046,6 +1056,19 @@ proc mips-dg-options { args } { + mips_option_dependency options "-mabicalls" "-G0" + mips_option_dependency options "-mno-gpopt" "-mexplicit-relocs" + ++ if { [check_configured_with "with-arch=loongson3a"] ++ || [check_configured_with "with-arch=gs464"] ++ || [check_configured_with "with-arch=gs464e"] ++ || [check_configured_with "with-arch=gs264e"] } { ++ mips_option_dependency options "-msoft-float" "-mno-loongson-mmi" ++ mips_option_dependency options "-mips16" "-mno-loongson-mmi" ++ mips_option_dependency options "-mips16" "-mno-loongson-ext" ++ mips_option_dependency options "-mips16" "-mno-loongson-ext2" ++ mips_option_dependency options "-mmicromips" "-mno-loongson-mmi" ++ mips_option_dependency options "-mmicromips" "-mno-loongson-ext" ++ mips_option_dependency options "-mmicromips" "-mno-loongson-ext2" ++ } ++ + # Work out information about the current ABI. 
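The target-supports.exp changes below gate tests on compiler predefines rather than on target triplets. For reference, a probe such as check_effective_target_hard_float (see below) just compiles a small fragment and treats any #error as "feature absent" — a minimal sketch, assuming only the __loongarch_soft_float predefine this port adds:

    /* Compiles only when the hard-float ABI is selected; DejaGnu maps
       compile success/failure to effective-target yes/no.  */
    #if defined __loongarch_soft_float
    # error soft-float ABI selected
    #endif
    int hard_float_probe;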
+ set abi_test_option_p [mips_test_option_p options abi] + set abi [mips_option options abi] +diff --git a/gcc/testsuite/go.test/go-test.exp b/gcc/testsuite/go.test/go-test.exp +index cab0d0e2f..72d7434be 100644 +--- a/gcc/testsuite/go.test/go-test.exp ++++ b/gcc/testsuite/go.test/go-test.exp +@@ -249,6 +249,9 @@ proc go-set-goarch { } { + "riscv64-*-*" { + set goarch "riscv64" + } ++ "loongarch64-*-*" { ++ set goarch "loongarch64" ++ } + "s390*-*-*" { + if [check_effective_target_ilp32] { + set goarch "s390" +diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp +index 3387804fa..74256842f 100644 +--- a/gcc/testsuite/lib/target-supports.exp ++++ b/gcc/testsuite/lib/target-supports.exp +@@ -282,6 +282,10 @@ proc check_weak_available { } { + return 1 + } + ++ if { [ string first "loongarch" $target_cpu ] >= 0 } { ++ return 1 ++ } ++ + # All AIX targets should support it + + if { [istarget *-*-aix*] } { +@@ -676,6 +680,7 @@ proc check_profiling_available { test_what } { + || [istarget m68k-*-elf] + || [istarget m68k-*-uclinux*] + || [istarget mips*-*-elf*] ++ || [istarget loongarch*-*-elf*] + || [istarget mmix-*-*] + || [istarget mn10300-*-elf*] + || [istarget moxie-*-elf*] +@@ -1216,6 +1221,14 @@ proc check_effective_target_hard_float { } { + }] + } + ++ if { [istarget loongarch*-*-*] } { ++ return [check_no_compiler_messages hard_float assembly { ++ #if (defined __loongarch_soft_float) ++ #error __loongarch_soft_float ++ #endif ++ }] ++ } ++ + # This proc is actually checking the availabilty of FPU + # support for doubles, so on the RX we must fail if the + # 64-bit double multilib has been selected. +@@ -1902,20 +1915,20 @@ proc check_mpaired_single_hw_available { } { + # Return 1 if the target supports executing Loongson vector instructions, + # 0 otherwise. Cache the result. + +-proc check_mips_loongson_hw_available { } { +- return [check_cached_effective_target mips_loongson_hw_available { ++proc check_mips_loongson_mmi_hw_available { } { ++ return [check_cached_effective_target mips_loongson_mmi_hw_available { + # If this is not the right target then we can skip the test. + if { !([istarget mips*-*-*]) } { + expr 0 + } else { +- check_runtime_nocache mips_loongson_hw_available { +- #include ++ check_runtime_nocache mips_loongson_mmi_hw_available { ++ #include + int main() + { + asm volatile ("paddw $f2,$f4,$f6"); + return 0; + } +- } "" ++ } "-mloongson-mmi" + } + }] + } +@@ -1969,9 +1982,9 @@ proc check_effective_target_mpaired_single_runtime { } { + + # Return 1 if the target supports running Loongson executables, 0 otherwise. + +-proc check_effective_target_mips_loongson_runtime { } { +- if { [check_effective_target_mips_loongson] +- && [check_mips_loongson_hw_available] } { ++proc check_effective_target_mips_loongson_mmi_runtime { } { ++ if { [check_effective_target_mips_loongson_mmi] ++ && [check_mips_loongson_mmi_hw_available] } { + return 1 + } + return 0 +@@ -3086,7 +3099,7 @@ proc check_effective_target_vect_int { } { + || [istarget aarch64*-*-*] + || [is-effective-target arm_neon] + || ([istarget mips*-*-*] +- && ([et-is-effective-target mips_loongson] ++ && ([et-is-effective-target mips_loongson_mmi] + || [et-is-effective-target mips_msa])) + || ([istarget s390*-*-*] + && [check_effective_target_s390_vx]) } { +@@ -4845,11 +4858,24 @@ proc add_options_for_mips_msa { flags } { + return "$flags -mmsa" + } + ++# Add the options needed for MIPS Loongson MMI Architecture. ++ ++proc add_options_for_mips_loongson_mmi { flags } { ++ if { ! 
[check_effective_target_mips_loongson_mmi] } { ++ return "$flags" ++ } ++ return "$flags -mloongson-mmi" ++} ++ ++ + # Return 1 if this a Loongson-2E or -2F target using an ABI that supports + # the Loongson vector modes. + +-proc check_effective_target_mips_loongson { } { ++proc check_effective_target_mips_loongson_mmi { } { + return [check_no_compiler_messages loongson assembly { ++ #if !defined(__mips_loongson_mmi) ++ #error !__mips_loongson_mmi ++ #endif + #if !defined(__mips_loongson_vector_rev) + #error !__mips_loongson_vector_rev + #endif +@@ -5437,7 +5463,7 @@ proc check_effective_target_vect_shift { } { + || [is-effective-target arm_neon] + || ([istarget mips*-*-*] + && ([et-is-effective-target mips_msa] +- || [et-is-effective-target mips_loongson])) ++ || [et-is-effective-target mips_loongson_mmi])) + || ([istarget s390*-*-*] + && [check_effective_target_s390_vx]) } { + set et_vect_shift_saved($et_index) 1 +@@ -5457,7 +5483,7 @@ proc check_effective_target_whole_vector_shift { } { + || ([is-effective-target arm_neon] + && [check_effective_target_arm_little_endian]) + || ([istarget mips*-*-*] +- && [et-is-effective-target mips_loongson]) ++ && [et-is-effective-target mips_loongson_mmi]) + || ([istarget s390*-*-*] + && [check_effective_target_s390_vx]) } { + set answer 1 +@@ -5559,6 +5585,7 @@ proc check_effective_target_vect_float { } { + || [istarget powerpc*-*-*] + || [istarget spu-*-*] + || [istarget mips-sde-elf] ++ || [istarget loongarch-sde-elf] + || [istarget mipsisa64*-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] +@@ -5663,7 +5690,7 @@ proc check_effective_target_vect_no_int_min_max { } { + || [istarget spu-*-*] + || [istarget alpha*-*-*] + || ([istarget mips*-*-*] +- && [et-is-effective-target mips_loongson]) } { ++ && [et-is-effective-target mips_loongson_mmi]) } { + set et_vect_no_int_min_max_saved($et_index) 1 + } + } +@@ -6434,7 +6461,7 @@ proc check_effective_target_vect_no_align { } { + || [check_effective_target_arm_vect_no_misalign] + || ([istarget powerpc*-*-*] && [check_p8vector_hw_available]) + || ([istarget mips*-*-*] +- && [et-is-effective-target mips_loongson]) } { ++ && [et-is-effective-target mips_loongson_mmi]) } { + set et_vect_no_align_saved($et_index) 1 + } + } +@@ -6764,7 +6791,7 @@ proc check_effective_target_vect_short_mult { } { + || [check_effective_target_arm32] + || ([istarget mips*-*-*] + && ([et-is-effective-target mips_msa] +- || [et-is-effective-target mips_loongson])) ++ || [et-is-effective-target mips_loongson_mmi])) + || ([istarget s390*-*-*] + && [check_effective_target_s390_vx]) } { + set et_vect_short_mult_saved($et_index) 1 +@@ -7502,6 +7529,7 @@ proc check_effective_target_sync_char_short { } { + || ([istarget sparc*-*-*] && [check_effective_target_sparc_v9]) + || [istarget spu-*-*] + || ([istarget arc*-*-*] && [check_effective_target_arc_atomic]) ++ || [istarget loongarch*-*-*] + || [check_effective_target_mips_llsc] } { + set et_sync_char_short_saved 1 + } +@@ -8579,8 +8607,8 @@ proc check_vect_support_and_set_flags { } { + if { [check_effective_target_mpaired_single] } { + lappend EFFECTIVE_TARGETS mpaired_single + } +- if { [check_effective_target_mips_loongson] } { +- lappend EFFECTIVE_TARGETS mips_loongson ++ if { [check_effective_target_mips_loongson_mmi] } { ++ lappend EFFECTIVE_TARGETS mips_loongson_mmi + } + if { [check_effective_target_mips_msa] } { + lappend EFFECTIVE_TARGETS mips_msa +@@ -8813,6 +8841,7 @@ proc check_effective_target_tiny {} { + + proc check_effective_target_logical_op_short_circuit {} { + 
if { [istarget mips*-*-*]
++	 || [istarget loongarch*-*-*]
+	 || [istarget arc*-*-*]
+	 || [istarget avr*-*-*]
+	 || [istarget crisv32-*-*] || [istarget cris-*-*]
+@@ -8837,6 +8866,7 @@ proc check_effective_target_branch_cost {} {
+	 || [istarget frv*-*-*]
+	 || [istarget i?86-*-*] || [istarget x86_64-*-*]
+	 || [istarget mips*-*-*]
++	 || [istarget loongarch*-*-*]
+	 || [istarget s390*-*-*]
+	 || [istarget riscv*-*-*]
+	 || [istarget sh*-*-*]
+diff --git a/libgcc/config.host b/libgcc/config.host
+index 317d735d5..83ca131aa 100644
+--- a/libgcc/config.host
++++ b/libgcc/config.host
+@@ -156,6 +156,23 @@ mips*-*-*)
+ 	fi
+ 	tmake_file="${tmake_file} t-softfp"
+ 	;;
++loongarch*-*-*)
++	# All LoongArch targets provide a full set of FP routines.
++	cpu_type=loongarch
++	tmake_file="loongarch/t-loongarch"
++	if test "${libgcc_cv_loongarch_hard_float}" = yes; then
++		tmake_file="${tmake_file} t-hardfp-sfdf t-hardfp"
++	else
++		tmake_file="${tmake_file} t-softfp-sfdf"
++	fi
++	if test "${ac_cv_sizeof_long_double}" = 16; then
++		tmake_file="${tmake_file} loongarch/t-softfp-tf"
++	fi
++	if test "${host_address}" = 64; then
++		tmake_file="${tmake_file} loongarch/t-loongarch64"
++	fi
++	tmake_file="${tmake_file} t-softfp"
++	;;
+ nds32*-*)
+ 	cpu_type=nds32
+ 	;;
+@@ -906,6 +923,16 @@ mips*-*-linux*)			# Linux MIPS, either endian.
+ 	esac
+ 	md_unwind_header=mips/linux-unwind.h
+ 	;;
++loongarch*-*-linux*)			# Linux LoongArch.
++	extra_parts="$extra_parts crtfastmath.o"
++	tmake_file="${tmake_file} t-crtfm loongarch/t-crtstuff"
++	case ${host} in
++	  *)
++	    tmake_file="${tmake_file} t-slibgcc-libgcc"
++	    ;;
++	esac
++	md_unwind_header=loongarch/linux-unwind.h
++	;;
+ mips*-sde-elf*)
+ 	tmake_file="$tmake_file mips/t-crtstuff mips/t-mips16"
+ 	case "${with_newlib}" in
+@@ -919,6 +946,19 @@ mips*-sde-elf*)
+ 	esac
+ 	extra_parts="$extra_parts crti.o crtn.o"
+ 	;;
++loongarch*-sde-elf*)
++	tmake_file="$tmake_file loongarch/t-crtstuff"
++	case "${with_newlib}" in
++	  yes)
++	    # newlib / libgloss.
++	    ;;
++	  *)
++	    # LoongArch toolkit libraries.
++	    tmake_file="$tmake_file loongarch/t-sdemtk"
++	    ;;
++	esac
++	extra_parts="$extra_parts crti.o crtn.o"
++	;;
+ mipsisa32-*-elf* | mipsisa32el-*-elf* | \
+ mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \
+ mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \
+diff --git a/libgcc/config/loongarch/crtfastmath.c b/libgcc/config/loongarch/crtfastmath.c
+new file mode 100644
+index 000000000..d7371de6d
+--- /dev/null
++++ b/libgcc/config/loongarch/crtfastmath.c
+@@ -0,0 +1,50 @@
++/* Copyright (C) 2010-2018 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GCC is distributed in the hope that it will be useful, but WITHOUT
++   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++   License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License
++   and a copy of the GCC Runtime Library Exception along with this
++   program; see the files COPYING3 and COPYING.RUNTIME respectively.
++   If not, see <http://www.gnu.org/licenses/>.  */
++
++#ifdef __loongarch_hard_float
++
++/* Rounding control.  */
++#define _FPU_RC_NEAREST 0x000     /* RECOMMENDED */
++#define _FPU_RC_ZERO    0x100
++#define _FPU_RC_UP      0x200
++#define _FPU_RC_DOWN    0x300
++
++/* Enable interrupts for IEEE exceptions.  */
++#define _FPU_IEEE   0x0000001F
++
++/* Macros for accessing the hardware control word.  */
++#define _FPU_GETCW(cw) __asm__ ("movfcsr2gr %0,$r1" : "=r" (cw))
++#define _FPU_SETCW(cw) __asm__ ("movgr2fcsr $r1,%0" : : "r" (cw))
++
++static void __attribute__((constructor))
++set_fast_math (void)
++{
++  unsigned int fcr;
++
++  /* round to nearest, IEEE exceptions disabled.  */
++  fcr = _FPU_RC_NEAREST;
++
++  _FPU_SETCW(fcr);
++}
++
++#endif /* __loongarch_hard_float */
+diff --git a/libgcc/config/loongarch/crti.S b/libgcc/config/loongarch/crti.S
+new file mode 100644
+index 000000000..dcd05afea
+--- /dev/null
++++ b/libgcc/config/loongarch/crti.S
+@@ -0,0 +1,43 @@
++/* Copyright (C) 2001-2018 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++<http://www.gnu.org/licenses/>.  */
++
++/* The _init and _fini prologues allocate a 48-byte frame (keeping the
++   stack 16-byte aligned) and spill the return address ($r1) at offset 40.  */
++
++	.section .init,"ax",@progbits
++	.globl	_init
++	.type	_init,@function
++_init:
++	addi.d   $r3,$r3,-48
++	st.d      $r1,$r3,40
++	addi.d   $r3,$r3,48
++	jirl	$r0,$r1,0
++
++	.section .fini,"ax",@progbits
++	.globl	_fini
++	.type	_fini,@function
++_fini:
++	addi.d   $r3,$r3,-48
++	st.d      $r1,$r3,40
++	addi.d   $r3,$r3,48
++	jirl	$r0,$r1,0
+diff --git a/libgcc/config/loongarch/crtn.S b/libgcc/config/loongarch/crtn.S
+new file mode 100644
+index 000000000..91d9d5e7f
+--- /dev/null
++++ b/libgcc/config/loongarch/crtn.S
+@@ -0,0 +1,39 @@
++/* Copyright (C) 2001-2018 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++<http://www.gnu.org/licenses/>.  */
++
++/* Matching epilogues for crti.S: reload the return address from
++   offset 40 and pop the 48-byte frame.  */
++
++
++	.section .init,"ax",@progbits
++init:
++	ld.d   $r1,$r3,40
++	addi.d	$r3,$r3,48
++	jirl	$r0,$r1,0
++
++	.section .fini,"ax",@progbits
++fini:
++	ld.d   $r1,$r3,40
++	addi.d	$r3,$r3,48
++	jirl	$r0,$r1,0
++
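crtfastmath.c above only ever writes FCSR0 (on LoongArch the rounding-mode field is bits 8-9 and the exception enables are bits 0-4, matching _FPU_IEEE). For reference, a read-modify-write through the same macros would look like the hedged sketch below (set_round_up is a hypothetical helper, not part of the patch):

    /* Hedged sketch reusing the _FPU_* macros defined above.  */
    static void
    set_round_up (void)
    {
      unsigned int cw;
      _FPU_GETCW (cw);                  /* read FCSR via movfcsr2gr */
      cw = (cw & ~0x300u) | _FPU_RC_UP; /* replace rounding bits 8-9 */
      _FPU_SETCW (cw);                  /* write back via movgr2fcsr */
    }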
+diff --git a/libgcc/config/loongarch/gthr-loongnixsde.h b/libgcc/config/loongarch/gthr-loongnixsde.h
+new file mode 100644
+index 000000000..f62b57318
+--- /dev/null
++++ b/libgcc/config/loongarch/gthr-loongnixsde.h
+@@ -0,0 +1,237 @@
++/* LARCH SDE threads compatibility routines for libgcc2 and libobjc.  */
++/* Compile this one with gcc.  */
++/* Copyright (C) 2006-2018 Free Software Foundation, Inc.
++   Contributed by Nigel Stephens <nigel@mips.com>
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef GCC_GTHR_LARCHSDE_H
++#define GCC_GTHR_LARCHSDE_H
++
++/* LARCH SDE threading API specific definitions.
++   Easy, since the interface is pretty much one-to-one.  */
++
++#define __GTHREADS 1
++
++#include <sdethread.h>
++#include <unistd.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef __sdethread_key_t __gthread_key_t;
++typedef __sdethread_once_t __gthread_once_t;
++typedef __sdethread_mutex_t __gthread_mutex_t;
++
++typedef struct {
++  long depth;
++  __sdethread_t owner;
++  __sdethread_mutex_t actual;
++} __gthread_recursive_mutex_t;
++
++#define __GTHREAD_MUTEX_INIT __SDETHREAD_MUTEX_INITIALIZER("gthr")
++#define __GTHREAD_ONCE_INIT __SDETHREAD_ONCE_INIT
++static inline int
++__gthread_recursive_mutex_init_function(__gthread_recursive_mutex_t *__mutex);
++#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
++
++#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
++# define __gthrw(name) \
++  static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name)));
++# define __gthrw_(name) __gthrw_ ## name
++#else
++# define __gthrw(name)
++# define __gthrw_(name) name
++#endif
++
++__gthrw(__sdethread_once)
++__gthrw(__sdethread_key_create)
++__gthrw(__sdethread_key_delete)
++__gthrw(__sdethread_getspecific)
++__gthrw(__sdethread_setspecific)
++
++__gthrw(__sdethread_self)
++
++__gthrw(__sdethread_mutex_lock)
++__gthrw(__sdethread_mutex_trylock)
++__gthrw(__sdethread_mutex_unlock)
++
++__gthrw(__sdethread_mutex_init)
++
++__gthrw(__sdethread_threading)
++
++#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
++
++static inline int
++__gthread_active_p (void)
++{
++  return !!(void *)&__sdethread_threading;
++}
++
++#else /* not SUPPORTS_WEAK */
++
++static inline int
++__gthread_active_p (void)
++{
++  return 1;
++}
++
++#endif /* SUPPORTS_WEAK */
++
++static inline int
++__gthread_once (__gthread_once_t *__once, void (*__func) (void))
++{
++  if (__gthread_active_p ())
++    return __gthrw_(__sdethread_once) (__once, __func);
++  else
++    return -1;
++}
++
++static inline int
++__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *))
++{
++  return __gthrw_(__sdethread_key_create) (__key, __dtor);
++}
++
++static inline int
++__gthread_key_delete (__gthread_key_t __key)
++{
++  return __gthrw_(__sdethread_key_delete) (__key);
++}
++
++static inline void *
++__gthread_getspecific (__gthread_key_t __key)
++{
++  return __gthrw_(__sdethread_getspecific) (__key);
++}
++
++static inline int
++__gthread_setspecific (__gthread_key_t __key, const void *__ptr)
++{
++  return __gthrw_(__sdethread_setspecific) (__key, __ptr);
++}
++
++static inline int
++__gthread_mutex_destroy (__gthread_mutex_t * UNUSED(__mutex))
++{
++  return 0;
++}
++
++static inline int
++__gthread_mutex_lock (__gthread_mutex_t *__mutex)
++{
++  if (__gthread_active_p ())
++    return __gthrw_(__sdethread_mutex_lock) (__mutex);
++  else
++    return 0;
++}
++
++static inline int
++__gthread_mutex_trylock (__gthread_mutex_t *__mutex)
++{
++  if (__gthread_active_p ())
++    return __gthrw_(__sdethread_mutex_trylock) (__mutex);
++  else
++    return 0;
++}
++
++static inline int
++__gthread_mutex_unlock (__gthread_mutex_t *__mutex)
++{
++  if (__gthread_active_p ())
++    return __gthrw_(__sdethread_mutex_unlock) (__mutex);
++  else
++    return 0;
++}
++
++static inline int
++__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
++{
++  __mutex->depth = 0;
++  __mutex->owner = __gthrw_(__sdethread_self) ();
++  return __gthrw_(__sdethread_mutex_init) (&__mutex->actual, NULL);
++}
++
++static inline int
++__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex)
++{
++  if (__gthread_active_p ())
++    {
++      __sdethread_t __me = __gthrw_(__sdethread_self) ();
++
++      if (__mutex->owner != __me)
++	{
++	  __gthrw_(__sdethread_mutex_lock) (&__mutex->actual);
++	  __mutex->owner = __me;
++	}
++
++      __mutex->depth++;
++    }
++  return 0;
++}
++
++static inline int
++__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex)
++{
++  if (__gthread_active_p ())
++    {
++      __sdethread_t __me = __gthrw_(__sdethread_self) ();
++
++      if (__mutex->owner != __me)
++	{
++	  if (__gthrw_(__sdethread_mutex_trylock) (&__mutex->actual))
++	    return 1;
++	  __mutex->owner = __me;
++	}
++
++      __mutex->depth++;
++    }
++  return 0;
++}
++
++static inline int
++__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
++{
++  if (__gthread_active_p ())
++    {
++      if (--__mutex->depth == 0)
++	{
++	   __mutex->owner = (__sdethread_t) 0;
++	   __gthrw_(__sdethread_mutex_unlock) (&__mutex->actual);
++	}
++    }
++  return 0;
++}
++
++static inline int
++__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t
++				   * UNUSED(__mutex))
++{
++  return 0;
++}
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* ! GCC_GTHR_LARCHSDE_H */
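The recursive-mutex shim above layers an owner/depth pair over a plain __sdethread mutex: a re-entering owner only bumps the depth, and the underlying mutex is released only when the depth returns to zero. A hedged usage sketch (reenter is a hypothetical caller; the mutex must first be set up with __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION):

    static __gthread_recursive_mutex_t lock; /* init via the function above */

    static void
    reenter (int n)
    {
      __gthread_recursive_mutex_lock (&lock);   /* same owner: depth++ only */
      if (n > 0)
        reenter (n - 1);                        /* no self-deadlock */
      __gthread_recursive_mutex_unlock (&lock); /* releases at depth 0 */
    }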
*/ ++ ++#ifndef inhibit_libc ++/* Do code reading to identify a signal frame, and set the frame ++ state data appropriately. See unwind-dw2.c for the structs. */ ++ ++#include ++#include ++#include ++ ++#define MD_FALLBACK_FRAME_STATE_FOR loongarch_fallback_frame_state ++ ++static _Unwind_Reason_Code ++loongarch_fallback_frame_state (struct _Unwind_Context *context, ++ _Unwind_FrameState *fs) ++{ ++ u_int32_t *pc = (u_int32_t *) context->ra; ++ struct sigcontext *sc; ++ _Unwind_Ptr new_cfa; ++ int i; ++ ++ /* 03822c0b dli a7, 0x8b (sigreturn) */ ++ /* 002b0000 syscall 0 */ ++ if (pc[1] != 0x002b0000) ++ return _URC_END_OF_STACK; ++ if (pc[0] == 0x03822c0b) ++ { ++ struct rt_sigframe { ++ u_int32_t ass[4]; /* Argument save space for o32. */ ++ u_int32_t trampoline[2]; ++ siginfo_t info; ++ ucontext_t uc; ++ } *rt_ = context->cfa; ++ sc = &rt_->uc.uc_mcontext; ++ } ++ else ++ return _URC_END_OF_STACK; ++ ++ new_cfa = (_Unwind_Ptr) sc; ++ fs->regs.cfa_how = CFA_REG_OFFSET; ++ fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__; ++ fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; ++ ++ for (i = 0; i < 32; i++) { ++ fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.reg[i].loc.offset ++ = (_Unwind_Ptr)&(sc->sc_regs[i]) - new_cfa; ++ } ++ ++ fs->signal_frame = 1; ++ fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how ++ = REG_SAVED_VAL_OFFSET; ++ fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset ++ = (_Unwind_Ptr)(sc->sc_pc) - new_cfa; ++ fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__; ++ ++ return _URC_NO_REASON; ++} ++#endif +diff --git a/libgcc/config/loongarch/sfp-machine.h b/libgcc/config/loongarch/sfp-machine.h +new file mode 100644 +index 000000000..f7800a003 +--- /dev/null ++++ b/libgcc/config/loongarch/sfp-machine.h +@@ -0,0 +1,148 @@ ++/* softfp machine description for LARCH. ++ Copyright (C) 2009-2018 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. ++ ++You should have received a copy of the GNU General Public License and ++a copy of the GCC Runtime Library Exception along with this program; ++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++. 
*/ ++ ++#ifdef __loongarch64 ++#define _FP_W_TYPE_SIZE 64 ++#define _FP_W_TYPE unsigned long long ++#define _FP_WS_TYPE signed long long ++#define _FP_I_TYPE long long ++ ++typedef int TItype __attribute__ ((mode (TI))); ++typedef unsigned int UTItype __attribute__ ((mode (TI))); ++#define TI_BITS (__CHAR_BIT__ * (int) sizeof (TItype)) ++ ++#define _FP_MUL_MEAT_S(R,X,Y) \ ++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_D(R,X,Y) \ ++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_Q(R,X,Y) \ ++ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) ++ ++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) ++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) ++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) ++ ++# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) ++# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1) ++# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1 ++#else ++#define _FP_W_TYPE_SIZE 32 ++#define _FP_W_TYPE unsigned int ++#define _FP_WS_TYPE signed int ++#define _FP_I_TYPE int ++ ++#define _FP_MUL_MEAT_S(R,X,Y) \ ++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_D(R,X,Y) \ ++ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_Q(R,X,Y) \ ++ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) ++ ++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) ++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) ++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y) ++ ++# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) ++# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 ++# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 ++#endif ++ ++/* The type of the result of a floating point comparison. This must ++ match __libgcc_cmp_return__ in GCC for the target. */ ++typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__))); ++#define CMPtype __gcc_CMPtype ++ ++#define _FP_NANSIGN_S 0 ++#define _FP_NANSIGN_D 0 ++#define _FP_NANSIGN_Q 0 ++ ++#define _FP_KEEPNANFRACP 1 ++# define _FP_QNANNEGATEDP 0 ++ ++/* NaN payloads should be preserved for NAN2008. 
*/ ++# define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ ++ do \ ++ { \ ++ R##_s = X##_s; \ ++ _FP_FRAC_COPY_##wc (R, X); \ ++ R##_c = FP_CLS_NAN; \ ++ } \ ++ while (0) ++ ++#ifdef __loongarch_hard_float ++#define FP_EX_INVALID 0x100000 ++#define FP_EX_DIVZERO 0x080000 ++#define FP_EX_OVERFLOW 0x040000 ++#define FP_EX_UNDERFLOW 0x020000 ++#define FP_EX_INEXACT 0x010000 ++#define FP_EX_ALL \ ++ (FP_EX_INVALID | FP_EX_DIVZERO | FP_EX_OVERFLOW | FP_EX_UNDERFLOW \ ++ | FP_EX_INEXACT) ++ ++#define FP_EX_ENABLE_SHIFT 16 ++#define FP_EX_CAUSE_SHIFT 8 ++ ++#define FP_RND_NEAREST 0x000 ++#define FP_RND_ZERO 0x100 ++#define FP_RND_PINF 0x200 ++#define FP_RND_MINF 0x300 ++#define FP_RND_MASK 0x300 ++ ++#define _FP_DECL_EX \ ++ unsigned long int _fcsr __attribute__ ((unused)) = FP_RND_NEAREST ++ ++#define FP_INIT_ROUNDMODE \ ++ do { \ ++ _fcsr = __builtin_loongarch_movfcsr2gr (0); \ ++ } while (0) ++ ++#define FP_ROUNDMODE (_fcsr & FP_RND_MASK) ++ ++#define FP_TRAPPING_EXCEPTIONS ((_fcsr << FP_EX_ENABLE_SHIFT) & FP_EX_ALL) ++ ++#define FP_HANDLE_EXCEPTIONS \ ++ do { \ ++ _fcsr &= ~(FP_EX_ALL << FP_EX_CAUSE_SHIFT); \ ++ _fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \ ++ __builtin_loongarch_movgr2fcsr (0, _fcsr); \ ++ } while (0) ++ ++#else ++#define FP_EX_INVALID (1 << 4) ++#define FP_EX_DIVZERO (1 << 3) ++#define FP_EX_OVERFLOW (1 << 2) ++#define FP_EX_UNDERFLOW (1 << 1) ++#define FP_EX_INEXACT (1 << 0) ++#endif ++ ++#define _FP_TININESS_AFTER_ROUNDING 1 ++ ++#define __LITTLE_ENDIAN 1234 ++ ++# define __BYTE_ORDER __LITTLE_ENDIAN ++ ++/* Define ALIASNAME as a strong alias for NAME. */ ++# define strong_alias(name, aliasname) _strong_alias(name, aliasname) ++# define _strong_alias(name, aliasname) \ ++ extern __typeof (name) aliasname __attribute__ ((alias (#name))); +diff --git a/libgcc/config/loongarch/t-crtstuff b/libgcc/config/loongarch/t-crtstuff +new file mode 100644 +index 000000000..b8c36eb66 +--- /dev/null ++++ b/libgcc/config/loongarch/t-crtstuff +@@ -0,0 +1,5 @@ ++# -fasynchronous-unwind-tables is on by default for LoongArch. ++# We turn it off for crt*.o because it would make __EH_FRAME_BEGIN__ point ++# to .eh_frame data from crtbeginT.o instead of the user-defined object ++# during static linking. ++CRTSTUFF_T_CFLAGS += -fno-omit-frame-pointer -fno-asynchronous-unwind-tables +diff --git a/libgcc/config/loongarch/t-elf b/libgcc/config/loongarch/t-elf +new file mode 100644 +index 000000000..651f10a53 +--- /dev/null ++++ b/libgcc/config/loongarch/t-elf +@@ -0,0 +1,3 @@ ++# We must build libgcc2.a with -G 0, in case the user wants to link ++# without the $gp register. 
++HOST_LIBGCC2_CFLAGS += -G 0 +diff --git a/libgcc/config/loongarch/t-loongarch b/libgcc/config/loongarch/t-loongarch +new file mode 100644 +index 000000000..9508cb2fc +--- /dev/null ++++ b/libgcc/config/loongarch/t-loongarch +@@ -0,0 +1,9 @@ ++LIB2_SIDITI_CONV_FUNCS = yes ++ ++softfp_float_modes := ++softfp_int_modes := si di ++softfp_extensions := ++softfp_truncations := ++softfp_exclude_libgcc2 := n ++ ++LIB2ADD_ST += $(srcdir)/config/loongarch/lib2funcs.c +diff --git a/libgcc/config/loongarch/t-loongarch64 b/libgcc/config/loongarch/t-loongarch64 +new file mode 100644 +index 000000000..a1e3513e2 +--- /dev/null ++++ b/libgcc/config/loongarch/t-loongarch64 +@@ -0,0 +1 @@ ++softfp_int_modes += ti +diff --git a/libgcc/config/loongarch/t-sdemtk b/libgcc/config/loongarch/t-sdemtk +new file mode 100644 +index 000000000..a06e828b5 +--- /dev/null ++++ b/libgcc/config/loongarch/t-sdemtk +@@ -0,0 +1,3 @@ ++# Don't build FPBIT and DPBIT; we'll be using the SDE soft-float library. ++FPBIT = ++DPBIT = +diff --git a/libgcc/config/loongarch/t-softfp-tf b/libgcc/config/loongarch/t-softfp-tf +new file mode 100644 +index 000000000..306677b12 +--- /dev/null ++++ b/libgcc/config/loongarch/t-softfp-tf +@@ -0,0 +1,3 @@ ++softfp_float_modes += tf ++softfp_extensions += sftf dftf ++softfp_truncations += tfsf tfdf +diff --git a/libgcc/config/loongarch/t-vr b/libgcc/config/loongarch/t-vr +new file mode 100644 +index 000000000..e69de29bb +diff --git a/libgcc/configure.ac b/libgcc/configure.ac +index 5f0a63ce2..9b78de063 100644 +--- a/libgcc/configure.ac ++++ b/libgcc/configure.ac +@@ -277,7 +277,7 @@ AC_CACHE_CHECK([whether assembler supports CFI directives], [libgcc_cv_cfi], + # word size rather than the address size. + cat > conftest.c < + #include + ++static bool debian_date_format(); ++ + void test01() + { + using namespace std; +@@ -46,7 +48,7 @@ void test01() + 0x5e74, L'1', L'2', 0x6708, L'1', L'7', + 0x65e5 , 0x0 }; + +- iss.str(wstr); ++ iss.str(debian_date_format() ? 
wstr+2 : wstr);
+ iterator_type is_it01(iss);
+ tm time01;
+ tim_get.get_date(is_it01, end, iss, errorstate, &time01);
+@@ -56,6 +58,26 @@
+ VERIFY( time01.tm_year == 103 );
+ }
+
++#include <locale.h>
++#if __has_include(<langinfo.h>)
++# include <langinfo.h>
++#endif
++
++static bool debian_date_format()
++{
++#ifdef D_FMT
++ if (setlocale(LC_TIME, "zh_TW.UTF-8") != NULL)
++ {
++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31413
++ // and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71641#c2
++ if (*nl_langinfo(D_FMT) == '%')
++ return true;
++ setlocale(LC_TIME, "C");
++ }
++#endif
++ return false;
++}
++
+ int main()
+ {
+ test01();
+--
+2.20.1
+
diff --git a/0001-LoongArch-Fixup-configure-file-error.patch b/0001-LoongArch-Fixup-configure-file-error.patch
new file mode 100644
index 0000000..eb8a21a
--- /dev/null
+++ b/0001-LoongArch-Fixup-configure-file-error.patch
@@ -0,0 +1,153 @@
+From 77dd77cc1400180914b26c19704dbe990cb36878 Mon Sep 17 00:00:00 2001
+From: Xing Li
+Date: Mon, 31 Oct 2022 20:12:09 +0800
+Subject: [PATCH] LoongArch: Fixup configure file error
+
+A configure error caused incorrect defaults during compiler initialization
+for macros such as dwarf2out_as_loc_support and dwarf2out_as_locview_support.
+
+Signed-off-by: Xing Li
+---
+ gcc/configure | 67 +++++++++++++++++++++++++++++++++++++++++++++---
+ libgcc/configure | 5 +++-
+ 2 files changed, 67 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/configure b/gcc/configure
+index 56566e3f1..f0b5dbc00 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -5075,6 +5075,9 @@ case "${target}" in
+ # sets the default TLS model and affects inlining.
+ PICFLAG_FOR_TARGET=-fPIC
+ ;;
++ loongarch*-*-*)
++ PICFLAG_FOR_TARGET=-fpic
++ ;;
+ mips-sgi-irix6*)
+ # PIC is the default.
+ ;;
+@@ -7525,6 +7528,9 @@ else
+ enable_fixed_point=yes
+ ;;
+
++ loongarch*-*-*)
++ enable_fixed_point=yes
++ ;;
+ mips*-*-*)
+ enable_fixed_point=yes
+ ;;
+@@ -24085,6 +24091,18 @@ x:
+ tls_first_minor=16
+ tls_as_opt='-32 --fatal-warnings'
+ ;;
++ loongarch*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x: .word 2
++ .text
++ la.tls.gd $a0,x
++ bl __tls_get_addr'
++ tls_first_major=0
++ tls_first_minor=0
++ tls_as_opt='--fatal-warnings'
++ ;;
++
+ m68k-*-*)
+ conftest_s='
+ .section .tdata,"awT",@progbits
+@@ -27644,6 +27662,47 @@ fi
+ as_fn_error "Requesting --with-nan= requires assembler support for -mnan=" "$LINENO" 5
+ fi
+ ;;
++ loongarch*-*-*)
++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .dtprelword support" >&5
++$as_echo_n "checking assembler for .dtprelword support... " >&6; }
++if test "${gcc_cv_as_loongarch_dtprelword+set}" = set; then :
++ $as_echo_n "(cached) " >&6
++else
++ gcc_cv_as_loongarch_dtprelword=no
++ if test $in_tree_gas = yes; then
++ if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 18 \) \* 1000 + 0`
++ then gcc_cv_as_loongarch_dtprelword=yes
++fi
++ elif test x$gcc_cv_as != x; then
++ $as_echo '.section .tdata,"awT",@progbits
++x:
++ .word 2
++ .text
++ .dtprelword x+0x8000' > conftest.s
++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5'
++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
++ (eval $ac_try) 2>&5
++ ac_status=$?
++ $as_echo "$as_me:${as_lineno-$LINENO}: \$?
= $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_loongarch_dtprelword=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_dtprelword" >&5 ++$as_echo "$gcc_cv_as_loongarch_dtprelword" >&6; } ++if test $gcc_cv_as_loongarch_dtprelword = yes; then ++ ++$as_echo "#define HAVE_AS_DTPRELWORD 1" >>confdefs.h ++ ++fi ++ ++ ;; + s390*-*-*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .gnu_attribute support" >&5 + $as_echo_n "checking assembler for .gnu_attribute support... " >&6; } +@@ -27827,7 +27886,7 @@ esac + # Linux on IA64 might be able to use the Intel assembler. + + case "$target" in +- mips*-*-* | *-*-hpux* ) ++ mips*-*-* | loongarch*-*-* | *-*-hpux* ) + if test x$gas_flag = xyes \ + || test x"$host" != x"$build" \ + || test ! -x "$gcc_cv_as" \ +@@ -27847,9 +27906,9 @@ esac + # ??? Once 2.11 is released, probably need to add first known working + # version to the per-target configury. + case "$cpu_type" in +- aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | m32c | m68k \ +- | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu \ +- | tilegx | tilepro | visium | xstormy16 | xtensa) ++ aarch64 | alpha | arc | arm | avr | bfin | cris | i386 | loongarch | m32c \ ++ | m68k | microblaze | mips | nios2 | pa | riscv | rs6000 | score | sparc \ ++ | spu | tilegx | tilepro | visium | xstormy16 | xtensa) + insn="nop" + ;; + ia64 | s390) +diff --git a/libgcc/configure b/libgcc/configure +index aac5e5fb6..170b470dd 100755 +--- a/libgcc/configure ++++ b/libgcc/configure +@@ -2362,6 +2362,9 @@ case "${host}" in + # sets the default TLS model and affects inlining. + PICFLAG=-fPIC + ;; ++ loongarch*-*-*) ++ PICFLAG=-fpic ++ ;; + mips-sgi-irix6*) + # PIC is the default. + ;; +@@ -4938,7 +4941,7 @@ $as_echo "$libgcc_cv_cfi" >&6; } + # word size rather than the address size. + cat > conftest.c < +Date: Tue, 1 Nov 2022 09:41:17 +0800 +Subject: [PATCH 2/2] LoongArch: Rename config file for loongarch + +Signed-off-by: Xing Li +--- + config/{mt-loongnix-gnu => mt-loongarch-gnu} | 0 + 1 file changed, 0 insertions(+), 0 deletions(-) + rename config/{mt-loongnix-gnu => mt-loongarch-gnu} (100%) + +diff --git a/config/mt-loongnix-gnu b/config/mt-loongarch-gnu +similarity index 100% +rename from config/mt-loongnix-gnu +rename to config/mt-loongarch-gnu +-- +2.27.0 + diff --git a/0002-loongarch-fix-multilib-osdirnames-to-lib64.patch b/0002-loongarch-fix-multilib-osdirnames-to-lib64.patch new file mode 100644 index 0000000..9cd6348 --- /dev/null +++ b/0002-loongarch-fix-multilib-osdirnames-to-lib64.patch @@ -0,0 +1,27 @@ +From ea03a4f48828e1ca550f8cd5d3916872de6098d8 Mon Sep 17 00:00:00 2001 +From: Li Xing +Date: Fri, 24 Jun 2022 11:00:23 +0800 +Subject: [PATCH 2/2] loongarch fix multilib osdirnames to lib64 + +Signed-off-by: Li Xing +Signed-off-by: Zhang Wenlong +--- + gcc/config/loongarch/t-linux | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux +index 479f4293e..58f27f89d 100644 +--- a/gcc/config/loongarch/t-linux ++++ b/gcc/config/loongarch/t-linux +@@ -16,7 +16,7 @@ + # along with GCC; see the file COPYING3. If not see + # . 
+ +-MULTILIB_OSDIRNAMES := ../lib$(call if_multiarch,:loongarch64-linux-gnu) ++MULTILIB_OSDIRNAMES := ../lib64$(call if_multiarch,:loongarch64-linux-gnu) + MULTIARCH_DIRNAME := $(call if_multiarch,loongarch64-linux-gnu) + + # haven't supported lp32 yet +-- +2.20.1 + diff --git a/Fix-dwarf2cfi-error.patch b/Fix-dwarf2cfi-error.patch new file mode 100644 index 0000000..4d7e134 --- /dev/null +++ b/Fix-dwarf2cfi-error.patch @@ -0,0 +1,33 @@ +From 685e5318771d90711e331e6192b2d2002076d99e Mon Sep 17 00:00:00 2001 +From: Lixing +Date: Thu, 31 Aug 2023 17:07:11 +0800 +Subject: [PATCH] Fix dwarf2cfi error + +--- + gcc/dwarf2cfi.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c +index c3e69e8b8..938222156 100644 +--- a/gcc/dwarf2cfi.c ++++ b/gcc/dwarf2cfi.c +@@ -1948,6 +1948,16 @@ dwarf2out_frame_debug_expr (rtx expr) + { + unsigned int regno = dwf_regno (XEXP (dest, 0)); + ++ if (fde ++ && fde->stack_realign ++ && REG_P (src) ++ && REGNO (src) == HARD_FRAME_POINTER_REGNUM) ++ { ++ gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum); ++ cur_trace->cfa_store.offset = 0; ++ fde->rule18 = 1; ++ } ++ + if (cur_cfa->reg == regno) + offset = -cur_cfa->offset; + else if (cur_trace->cfa_store.reg == regno) +-- +2.27.0 + diff --git a/Improve-specs-processing-to-allow-in-function-argume.patch b/Improve-specs-processing-to-allow-in-function-argume.patch new file mode 100644 index 0000000..b7504b4 --- /dev/null +++ b/Improve-specs-processing-to-allow-in-function-argume.patch @@ -0,0 +1,220 @@ +From 6e6de5b31ac9b5a523fbf60099d00124d99aa0d0 Mon Sep 17 00:00:00 2001 +From: Lixing +Date: Mon, 31 Jul 2023 10:08:23 +0800 +Subject: [PATCH 2/2] Improve specs processing to allow %* in function + arguments + +2018-07-31 Olivier Hainque + + * gcc.c (handle_spec_function): Accept a soft_matched_part + argument, as do_spec_1. Pass it down to ... + (eval_spec_function): Accept a soft_matched_part argument, + and pass it down to ... + (do_spec_2): Accept a soft_matched_part argument, and pass + it down to do_spec_1. + (do_spec_1): Pass soft_matched_part to handle_spec_function. + (handle_braces): Update call to handle_spec_function. + (driver::set_up_specs): Update calls to do_spec_2. + (compare_debug_dump_opt_spec_function): Likewise. + (compare_debug_self_opt_spec_function): Likewise. 
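In plain terms, the ChangeLog above describes threading one extra piece of state, the text matched by a '*' pattern, down through the recursive spec expansion so that %* keeps its meaning inside spec-function arguments; most do_spec_2 call sites simply pass NULL and keep their old behavior. The standalone C sketch below illustrates only that pattern; the expander and all names in it are hypothetical simplifications, not GCC's actual do_spec_1 machinery.

#include <stdio.h>

/* Expand SPEC, replacing every "%*" with SOFT_MATCHED_PART.  The real
   expander recurses for nested %{...} groups; the point here is only
   that the matched part is passed down explicitly, so "%*" keeps
   working at any recursion depth.  */
static void
expand (const char *spec, const char *soft_matched_part)
{
  for (const char *p = spec; *p != '\0'; p++)
    if (p[0] == '%' && p[1] == '*')
      {
        if (soft_matched_part)
          fputs (soft_matched_part, stdout);
        p++;  /* skip the '*' */
      }
    else
      putchar (*p);
}

int
main (void)
{
  /* Pretend "-fdump-final-insns=foo.gkd" matched the spec pattern
     "fdump-final-insns=*", so the matched part is "foo.gkd".  */
  expand ("dump file: %*\n", "foo.gkd");
  return 0;
}

Because the matched part travels as an explicit argument rather than hidden global state, a %* seen while evaluating a spec function's arguments still refers to the correct match.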
+ +[Upstream] 1102fd64dbb767 (Deleted ChangeLog) +Link: https://gcc.gnu.org/git/?p=gcc.git;a=patch;f=gcc/gcc.cc;h=1102fd64dbb76784ed46ff81bf905f6c52d296fc +--- + gcc/gcc.c | 51 +++++++++++++++++++++++++++++---------------------- + 1 file changed, 29 insertions(+), 22 deletions(-) + +diff --git a/gcc/gcc.c b/gcc/gcc.c +index 3b87e91b6..3c46e0769 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -354,12 +354,12 @@ static inline void mark_matching_switches (const char *, const char *, int); + static inline void process_marked_switches (void); + static const char *process_brace_body (const char *, const char *, const char *, int, int); + static const struct spec_function *lookup_spec_function (const char *); +-static const char *eval_spec_function (const char *, const char *); +-static const char *handle_spec_function (const char *, bool *); ++static const char *eval_spec_function (const char *, const char *, const char *); ++static const char *handle_spec_function (const char *, bool *, const char *); + static char *save_string (const char *, int); + static void set_collect_gcc_options (void); + static int do_spec_1 (const char *, int, const char *); +-static int do_spec_2 (const char *); ++static int do_spec_2 (const char *, const char *); + static void do_option_spec (const char *, const char *); + static void do_self_spec (const char *); + static const char *find_file (const char *); +@@ -4865,7 +4865,7 @@ do_spec (const char *spec) + { + int value; + +- value = do_spec_2 (spec); ++ value = do_spec_2 (spec, NULL); + + /* Force out any unfinished command. + If -pipe, this forces out the last command if it ended in `|'. */ +@@ -4884,8 +4884,11 @@ do_spec (const char *spec) + return value; + } + ++/* Process the spec SPEC, with SOFT_MATCHED_PART designating the current value ++ of a matched * pattern which may be re-injected by way of %*. */ ++ + static int +-do_spec_2 (const char *spec) ++do_spec_2 (const char *spec, const char *soft_matched_part) + { + int result; + +@@ -4898,14 +4901,13 @@ do_spec_2 (const char *spec) + input_from_pipe = 0; + suffix_subst = NULL; + +- result = do_spec_1 (spec, 0, NULL); ++ result = do_spec_1 (spec, 0, soft_matched_part); + + end_going_arg (); + + return result; + } + +- + /* Process the given spec string and add any new options to the end + of the switches/n_switches array. */ + +@@ -4963,7 +4965,7 @@ do_self_spec (const char *spec) + { + int i; + +- do_spec_2 (spec); ++ do_spec_2 (spec, NULL); + do_spec_1 (" ", 0, NULL); + + /* Mark % 1) + error ("spec failure: more than one arg to SYSROOT_SUFFIX_SPEC"); +@@ -7577,7 +7584,7 @@ driver::set_up_specs () const + /* Process sysroot_hdrs_suffix_spec. */ + if (*sysroot_hdrs_suffix_spec != 0 + && !no_sysroot_suffix +- && do_spec_2 (sysroot_hdrs_suffix_spec) == 0) ++ && do_spec_2 (sysroot_hdrs_suffix_spec, NULL) == 0) + { + if (argbuf.length () > 1) + error ("spec failure: more than one arg to SYSROOT_HEADERS_SUFFIX_SPEC"); +@@ -7587,7 +7594,7 @@ driver::set_up_specs () const + + /* Look for startfiles in the standard places. 
*/ + if (*startfile_prefix_spec != 0 +- && do_spec_2 (startfile_prefix_spec) == 0 ++ && do_spec_2 (startfile_prefix_spec, NULL) == 0 + && do_spec_1 (" ", 0, NULL) == 0) + { + const char *arg; +@@ -9717,7 +9724,7 @@ compare_debug_dump_opt_spec_function (int arg, + fatal_error (input_location, + "too many arguments to %%:compare-debug-dump-opt"); + +- do_spec_2 ("%{fdump-final-insns=*:%*}"); ++ do_spec_2 ("%{fdump-final-insns=*:%*}", NULL); + do_spec_1 (" ", 0, NULL); + + if (argbuf.length () > 0 +@@ -9735,13 +9742,13 @@ compare_debug_dump_opt_spec_function (int arg, + + if (argbuf.length () > 0) + { +- do_spec_2 ("%{o*:%*}%{!o:%{!S:%b%O}%{S:%b.s}}"); ++ do_spec_2 ("%{o*:%*}%{!o:%{!S:%b%O}%{S:%b.s}}", NULL); + ext = ".gkd"; + } + else if (!compare_debug) + return NULL; + else +- do_spec_2 ("%g.gkd"); ++ do_spec_2 ("%g.gkd", NULL); + + do_spec_1 (" ", 0, NULL); + +@@ -9793,7 +9800,7 @@ compare_debug_self_opt_spec_function (int arg, + if (compare_debug >= 0) + return NULL; + +- do_spec_2 ("%{c|S:%{o*:%*}}"); ++ do_spec_2 ("%{c|S:%{o*:%*}}", NULL); + do_spec_1 (" ", 0, NULL); + + if (argbuf.length () > 0) +-- +2.39.3 + diff --git a/LoongArch-Add-missing-headers.patch b/LoongArch-Add-missing-headers.patch new file mode 100644 index 0000000..2b06f75 --- /dev/null +++ b/LoongArch-Add-missing-headers.patch @@ -0,0 +1,65 @@ +From abae9df56090904daf1295744322950cd8380f9a Mon Sep 17 00:00:00 2001 +From: Lixing +Date: Thu, 3 Aug 2023 11:17:50 +0800 +Subject: [PATCH] LoongArch: Add missing headers + +--- + gcc/config/loongarch/genopts/loongarch.opt.in | 9 +++++++++ + gcc/config/loongarch/loongarch.opt | 9 +++++++++ + 2 files changed, 18 insertions(+) + +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index 463dfec77..b8aab4ca8 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -20,12 +20,21 @@ + ; . + ; + ++HeaderInclude ++config/loongarch/loongarch-tune.h ++ ++HeaderInclude ++config/loongarch/loongarch-def.h ++ + HeaderInclude + config/loongarch/loongarch-opts.h + + HeaderInclude + config/loongarch/loongarch-str.h + ++HeaderInclude ++config/loongarch/loongarch-driver.h ++ + TargetVariable + unsigned int recip_mask = 0 + +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 075a2d6c7..3dfe5f3cb 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -27,12 +27,21 @@ + ; . 
+ ;
+
++HeaderInclude
++config/loongarch/loongarch-tune.h
++
++HeaderInclude
++config/loongarch/loongarch-def.h
++
+ HeaderInclude
+ config/loongarch/loongarch-opts.h
+
+ HeaderInclude
+ config/loongarch/loongarch-str.h
+
++HeaderInclude
++config/loongarch/loongarch-driver.h
++
+ TargetVariable
+ unsigned int recip_mask = 0
+
+--
+2.39.3
+
diff --git a/LoongArch-Add-sanitizer-support.patch b/LoongArch-Add-sanitizer-support.patch
new file mode 100644
index 0000000..935d3c3
--- /dev/null
+++ b/LoongArch-Add-sanitizer-support.patch
@@ -0,0 +1,1100 @@
+From c985960fa4baae43ed4a1bfcaab9214b78a15020 Mon Sep 17 00:00:00 2001
+From: Xing Li
+Date: Fri, 6 Jan 2023 10:39:21 +0800
+Subject: [PATCH 2/2] LoongArch: Add sanitizer support
+
+Signed-off-by: Xing Li
+Signed-off-by: Peng Fan
+---
+ gcc/config/loongarch/loongarch.c | 14 +-
+ libsanitizer/asan/asan_mapping.h | 6 +
+ libsanitizer/configure.tgt | 10 ++
+ libsanitizer/lsan/lsan_allocator.cc | 2 +-
+ libsanitizer/lsan/lsan_allocator.h | 2 +-
+ libsanitizer/lsan/lsan_common.cc | 2 +
+ libsanitizer/sanitizer_common/Makefile.am | 2 +-
+ libsanitizer/sanitizer_common/Makefile.in | 3 +-
+ .../sanitizer_common_syscalls.inc | 6 +-
+ .../sanitizer_common/sanitizer_linux.cc | 94 ++++++++++-
+ .../sanitizer_common/sanitizer_linux.h | 4 +-
+ .../sanitizer_linux_libcdep.cc | 15 +-
+ .../sanitizer_linux_loongarch64.S | 22 +++
+ .../sanitizer_common/sanitizer_platform.h | 21 ++-
+ .../sanitizer_platform_interceptors.h | 4 +-
+ .../sanitizer_platform_limits_linux.cc | 2 +-
+ .../sanitizer_platform_limits_posix.cc | 22 ++-
+ .../sanitizer_platform_limits_posix.h | 7 +-
+ .../sanitizer_common/sanitizer_stacktrace.cc | 2 +-
+ .../sanitizer_common/sanitizer_stacktrace.h | 5 +-
+ .../sanitizer_stoptheworld_linux_libcdep.cc | 14 +-
+ .../sanitizer_tls_get_addr.cc | 2 +
+ libsanitizer/tsan/Makefile.am | 2 +-
+ libsanitizer/tsan/Makefile.in | 3 +-
+ libsanitizer/tsan/tsan_interceptors.cc | 5 +-
+ libsanitizer/tsan/tsan_platform.h | 38 +++++
+ libsanitizer/tsan/tsan_platform_posix.cc | 3 +
+ libsanitizer/tsan/tsan_rtl.cc | 16 +-
+ libsanitizer/tsan/tsan_rtl.h | 3 +-
+ libsanitizer/tsan/tsan_rtl_loongarch64.S | 156 ++++++++++++++++++
+ 30 files changed, 447 insertions(+), 40 deletions(-)
+ create mode 100644 libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S
+ create mode 100644 libsanitizer/tsan/tsan_rtl_loongarch64.S
+
+diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c
+index a1dde5a0f..82be582ff 100644
+--- a/gcc/config/loongarch/loongarch.c
++++ b/gcc/config/loongarch/loongarch.c
+@@ -10724,7 +10724,16 @@ loongarch_prefetch_cookie (rtx write, rtx locality)
+ gcc_unreachable ();
+ }
+
+-
++/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
++
++static unsigned HOST_WIDE_INT
++loongarch_asan_shadow_offset (void)
++{
++ /* We only have libsanitizer support for LOONGARCH64 at present.
++ This value is taken from the file libsanitizer/asan/asan_mapping.h. */
++ return TARGET_64BIT ? (HOST_WIDE_INT_1 << 37) : (0x0aaa0000);
++}
++
+ /* Initialize the GCC target structure.
*/ + #undef TARGET_ASM_ALIGNED_HI_OP + #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" +@@ -10952,6 +10961,9 @@ loongarch_prefetch_cookie (rtx write, rtx locality) + #undef TARGET_SECONDARY_RELOAD + #define TARGET_SECONDARY_RELOAD loongarch_secondary_reload + ++#undef TARGET_ASAN_SHADOW_OFFSET ++#define TARGET_ASAN_SHADOW_OFFSET loongarch_asan_shadow_offset ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-loongarch.h" +diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h +index 5496df66d..77c8061ad 100644 +--- a/libsanitizer/asan/asan_mapping.h ++++ b/libsanitizer/asan/asan_mapping.h +@@ -141,6 +141,8 @@ static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; + static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; + static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; + static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; ++static const u64 kLoongArch32_ShadowOffset32 = 0x0aaa0000; ++static const u64 kLoongArch64_ShadowOffset64 = 1ULL << 37; + static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; + static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; + static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 +@@ -157,6 +159,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 + # define SHADOW_OFFSET (0) + # elif defined(__mips__) + # define SHADOW_OFFSET kMIPS32_ShadowOffset32 ++# elif defined(__loongarch__) ++# define SHADOW_OFFSET kLoongArch32_ShadowOffset32 + # elif SANITIZER_FREEBSD + # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 + # elif SANITIZER_WINDOWS +@@ -191,6 +195,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 + # define SHADOW_OFFSET kDefaultShadowOffset64 + # elif defined(__mips64) + # define SHADOW_OFFSET kMIPS64_ShadowOffset64 ++# elif defined(__loongarch64) ++# define SHADOW_OFFSET kLoongArch64_ShadowOffset64 + # elif SANITIZER_WINDOWS64 + # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address + # else +diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt +index 573e3b482..8da064c1e 100644 +--- a/libsanitizer/configure.tgt ++++ b/libsanitizer/configure.tgt +@@ -55,6 +55,16 @@ case "${target}" in + x86_64-*-darwin[1]* | i?86-*-darwin[1]*) + TSAN_SUPPORTED=no + ;; ++ loongarch*-*-linux*) ++ if echo "int x = __loongarch64;" | $CC -c -x c -o /dev/null - > /dev/null 2>&1; then ++ SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=sanitizer_linux_loongarch64.lo ++ fi ++ if test x$ac_cv_sizeof_void_p = x8; then ++ TSAN_SUPPORTED=yes ++ LSAN_SUPPORTED=yes ++ TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_loongarch64.lo ++ fi ++ ;; + *) + UNSUPPORTED=1 + ;; +diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc +index 9e1668077..b3ef2400e 100644 +--- a/libsanitizer/lsan/lsan_allocator.cc ++++ b/libsanitizer/lsan/lsan_allocator.cc +@@ -26,7 +26,7 @@ extern "C" void *memset(void *ptr, int value, uptr num); + namespace __lsan { + #if defined(__i386__) || defined(__arm__) + static const uptr kMaxAllowedMallocSize = 1UL << 30; +-#elif defined(__mips64) || defined(__aarch64__) ++#elif defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) + static const uptr kMaxAllowedMallocSize = 4UL << 30; + #else + static const uptr kMaxAllowedMallocSize = 8UL << 30; +diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h +index b0c0ec241..5793dd372 100644 +--- a/libsanitizer/lsan/lsan_allocator.h ++++ b/libsanitizer/lsan/lsan_allocator.h +@@ -49,7 +49,7 @@ struct ChunkMetadata { + }; + + #if 
defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \ +- defined(__arm__) ++ defined(__arm__) || defined(__loongarch64) + static const uptr kRegionSizeLog = 20; + static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; + typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; +diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc +index 4afce9df0..e1dce25c7 100644 +--- a/libsanitizer/lsan/lsan_common.cc ++++ b/libsanitizer/lsan/lsan_common.cc +@@ -136,6 +136,8 @@ static inline bool CanBeAHeapPointer(uptr p) { + return ((p >> 47) == 0); + #elif defined(__mips64) + return ((p >> 40) == 0); ++#elif defined(__loongarch64) ++ return ((p >> 40) == 0); + #elif defined(__aarch64__) + unsigned runtimeVMA = + (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); +diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am +index 246985b99..3b39f5bb0 100644 +--- a/libsanitizer/sanitizer_common/Makefile.am ++++ b/libsanitizer/sanitizer_common/Makefile.am +@@ -71,7 +71,7 @@ sanitizer_common_files = \ + + + libsanitizer_common_la_SOURCES = $(sanitizer_common_files) +-EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S ++EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S sanitizer_linux_loongarch64.S + libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) + libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) + +diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in +index b0f5ac25a..023f633f7 100644 +--- a/libsanitizer/sanitizer_common/Makefile.in ++++ b/libsanitizer/sanitizer_common/Makefile.in +@@ -355,7 +355,7 @@ sanitizer_common_files = \ + sanitizer_win.cc + + libsanitizer_common_la_SOURCES = $(sanitizer_common_files) +-EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S ++EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S sanitizer_linux_loongarch64.S + libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) + libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS) + +@@ -467,6 +467,7 @@ distclean-compile: + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_mips64.Plo@am__quote@ ++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_loongarch64.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_x86_64.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@ +diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc +index 6fd5ef742..f55759106 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc ++++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc +@@ -2295,7 +2295,8 @@ POST_SYSCALL(ni_syscall)(long res) {} + PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { + #if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ +- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__)) ++ 
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ ++ defined(__loongarch64)) + if (data) { + if (request == ptrace_setregs) { + PRE_READ((void *)data, struct_user_regs_struct_sz); +@@ -2316,7 +2317,8 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { + POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { + #if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ +- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__)) ++ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ ++ defined(__loongarch64)) + if (res >= 0 && data) { + // Note that this is different from the interceptor in + // sanitizer_common_interceptors.inc. +diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc +index 2826cc89e..003c38b4f 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc +@@ -12,6 +12,10 @@ + + #include "sanitizer_platform.h" + ++#if defined(__loongarch__) ++#define __ARCH_WANT_RENAMEAT 1 ++#endif ++ + #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD + + #include "sanitizer_common.h" +@@ -127,7 +131,7 @@ const int FUTEX_WAKE = 1; + # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0 + #endif + +-#if defined(__x86_64__) || SANITIZER_MIPS64 ++#if defined(__x86_64__) || SANITIZER_MIPS64 || SANITIZER_LOONGARCH64 + extern "C" { + extern void internal_sigreturn(); + } +@@ -802,7 +806,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { + // Invokes sigaction via a raw syscall with a restorer, but does not support + // all platforms yet. + // We disable for Go simply because we have not yet added to buildgo.sh. 
+-#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO ++#if (defined(__x86_64__) || SANITIZER_MIPS64 || SANITIZER_LOONGARCH64) && !SANITIZER_GO + int internal_sigaction_syscall(int signum, const void *act, void *oldact) { + if (act == nullptr) + return internal_sigaction_norestorer(signum, act, oldact); +@@ -980,6 +984,8 @@ uptr GetMaxVirtualAddress() { + return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; + # elif defined(__mips64) + return (1ULL << 40) - 1; // 0x000000ffffffffffUL; ++# elif defined(__loongarch64) ++ return (1ULL << 40) - 1; // 0x000000ffffffffffUL; + # elif defined(__s390x__) + return (1ULL << 53) - 1; // 0x001fffffffffffffUL; + # else +@@ -1247,6 +1253,61 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "memory", "$29" ); + return res; + } ++#elif defined(__loongarch__) && SANITIZER_LINUX ++uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ++ int *parent_tidptr, void *newtls, int *child_tidptr) { ++ long long res; ++ if (!fn || !child_stack) ++ return -EINVAL; ++ CHECK_EQ(0, (uptr)child_stack % 16); ++ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ++ ((unsigned long long *)child_stack)[0] = (uptr)fn; ++ ((unsigned long long *)child_stack)[1] = (uptr)arg; ++ ++ register int __flags __asm__("r4") = flags; ++ register void *__child_stack __asm__("r5") = child_stack; ++ register int *__parent_tidptr __asm__("r6") = parent_tidptr; ++ register void *__newtls __asm__("r7") = newtls; ++ register int *__child_tidptr __asm__("r8") = child_tidptr; ++ ++ __asm__ __volatile__( ++ /* $a0 = syscall($a7 = SYSCALL(clone), ++ * $a0 = flags, ++ * $a1 = child_stack, ++ * $a2 = parent_tidptr, ++ * $a3 = new_tls, ++ * $a4 = child_tyidptr) ++ */ ++ ++ /* Do the system call */ ++ "addi.d $a7, $r0, %1\n" ++ "syscall 0\n" ++ ++ "move %0, $a0" ++ : "=r"(res) ++ : "i"(__NR_clone), ++ "r"(__flags), "r"(__child_stack), "r"(__parent_tidptr), "r"(__newtls), "r"(__child_tidptr) ++ :"memory" ); ++ if (res != 0) { ++ return res; ++ } ++ __asm__ __volatile__ ( ++ /* In the child, now. Call "fn(arg)". */ ++ "ld.d $a6, $sp, 0\n" ++ "ld.d $a0, $sp, 8\n" ++ ++ "jirl $r1, $a6, 0\n" ++ ++ /* Call _exit($v0) */ ++ "addi.d $a7, $r0, %1\n" ++ "syscall 0\n" ++ ++ "move %0, $a0" ++ : "=r"(res) ++ : "i"(__NR_exit) ++ :"r1", "memory"); ++ return res; ++} + #elif defined(__aarch64__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { +@@ -1676,6 +1737,30 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + u64 esr; + if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN; + return esr & ESR_ELx_WNR ? WRITE : READ; ++#elif defined(__loongarch__) ++ uint32_t *exception_source; ++ uint32_t faulty_instruction; ++ uint32_t op_code; ++ ++ exception_source = (uint32_t *)ucontext->uc_mcontext.__pc; ++ faulty_instruction = (uint32_t)(*exception_source); ++ ++ op_code = (faulty_instruction >> 22) & 0x3ff; ++ switch (op_code) { ++ case 0xa0: //ld.b ++ case 0xa1: //ld.h ++ case 0xa2: //ld.w ++ case 0xa3: //ld.d ++ return SignalContext::READ; ++ case 0xa4: ++ case 0xa5: ++ case 0xa6: ++ return SignalContext::WRITE; ++ case 0xa8: ++ case 0xa9: ++ return SignalContext::READ; ++ } ++ return SignalContext::UNKNOWN; + #else + (void)ucontext; + return UNKNOWN; // FIXME: Implement. 
+@@ -1763,6 +1848,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.gregs[30]; + *sp = ucontext->uc_mcontext.gregs[29]; ++#elif defined(__loongarch__) ++ ucontext_t *ucontext = (ucontext_t*)context; ++ *pc = ucontext->uc_mcontext.__pc; ++ *bp = ucontext->uc_mcontext.__gregs[22]; ++ *sp = ucontext->uc_mcontext.__gregs[3]; + #elif defined(__s390__) + ucontext_t *ucontext = (ucontext_t*)context; + # if defined(__s390x__) +diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h +index 910703d8b..600d2b382 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_linux.h ++++ b/libsanitizer/sanitizer_common/sanitizer_linux.h +@@ -52,14 +52,14 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); + // (like the process-wide error reporting SEGV handler) must use + // internal_sigaction instead. + int internal_sigaction_norestorer(int signum, const void *act, void *oldact); +-#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO ++#if (defined(__x86_64__) || SANITIZER_MIPS64 || SANITIZER_LOONGARCH64) && !SANITIZER_GO + // Uses a raw system call to avoid interceptors. + int internal_sigaction_syscall(int signum, const void *act, void *oldact); + #endif + void internal_sigdelset(__sanitizer_sigset_t *set, int signum); + #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \ + || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \ +- || defined(__arm__) ++ || defined(__arm__) || defined(__loongarch__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr); + #endif +diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc +index 3b1a2174c..43551c0d1 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc +@@ -196,7 +196,7 @@ void InitTlsSize() { } + + #if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \ + || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \ +- || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID ++ || defined(__arm__) || defined(__loongarch__)) && SANITIZER_LINUX && !SANITIZER_ANDROID + // sizeof(struct pthread) from glibc. + static atomic_uintptr_t kThreadDescriptorSize; + +@@ -251,6 +251,9 @@ uptr ThreadDescriptorSize() { + if (val) + atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); + return val; ++#elif defined(__loongarch64) ++ val = 1776; ++ return val; + #elif defined(__aarch64__) + // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. + val = 1776; +@@ -274,12 +277,14 @@ uptr ThreadSelfOffset() { + return kThreadSelfOffset; + } + +-#if defined(__mips__) || defined(__powerpc64__) ++#if defined(__mips__) || defined(__powerpc64__) || defined(__loongarch__) + // TlsPreTcbSize includes size of struct pthread_descr and size of tcb + // head structure. It lies before the static tls blocks. 
+ static uptr TlsPreTcbSize() { + # if defined(__mips__) + const uptr kTcbHead = 16; // sizeof (tcbhead_t) ++# elif defined(__loongarch__) ++ const uptr kTcbHead = 16; // sizeof (tcbhead_t) + # elif defined(__powerpc64__) + const uptr kTcbHead = 88; // sizeof (tcbhead_t) + # endif +@@ -308,6 +313,10 @@ uptr ThreadSelf() { + rdhwr %0,$29;\ + .set pop" : "=r" (thread_pointer)); + descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize(); ++# elif defined(__loongarch__) ++ uptr thread_pointer; ++ asm("or %0,$r2,$r0" : "=r" (thread_pointer)); ++ descr_addr = thread_pointer - TlsPreTcbSize(); + # elif defined(__aarch64__) || defined(__arm__) + descr_addr = reinterpret_cast(__builtin_thread_pointer()) - + ThreadDescriptorSize(); +@@ -360,7 +369,7 @@ static void GetTls(uptr *addr, uptr *size) { + *addr -= *size; + *addr += ThreadDescriptorSize(); + # elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \ +- || defined(__arm__) ++ || defined(__arm__) || defined(__loongarch__) + *addr = ThreadSelf(); + *size = GetTlsSize(); + # else +diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S b/libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S +new file mode 100644 +index 000000000..245816e60 +--- /dev/null ++++ b/libsanitizer/sanitizer_common/sanitizer_linux_loongarch64.S +@@ -0,0 +1,22 @@ ++// This file is dual licensed under the MIT and the University of Illinois Open ++// Source Licenses. See LICENSE.TXT for details. ++ ++// Avoid being marked as needing an executable stack: ++#if defined(__linux__) && defined(__ELF__) ++.section .note.GNU-stack,"",%progbits ++#endif ++ ++// Further contents are loongarch64 only: ++#if defined(__linux__) && defined(__loongarch64) ++ ++.section .text ++.globl internal_sigreturn ++.type internal_sigreturn, @function ++internal_sigreturn: ++ ++ li.d $r11,139 // #139 is for SYS_rt_sigreturn ++ syscall 0 ++ ++.size internal_sigreturn, .-internal_sigreturn ++ ++#endif // defined(__linux__) && defined(__loongarch64) +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h +index 1eb4d0c61..6d91863a5 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform.h ++++ b/libsanitizer/sanitizer_common/sanitizer_platform.h +@@ -187,7 +187,7 @@ + #ifndef SANITIZER_CAN_USE_ALLOCATOR64 + # if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA + # define SANITIZER_CAN_USE_ALLOCATOR64 1 +-# elif defined(__mips64) || defined(__aarch64__) ++# elif defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) + # define SANITIZER_CAN_USE_ALLOCATOR64 0 + # else + # define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) +@@ -197,7 +197,7 @@ + // The range of addresses which can be returned my mmap. + // FIXME: this value should be different on different platforms. Larger values + // will still work but will consume more memory for TwoLevelByteMap. +-#if defined(__mips__) ++#if defined(__mips__) || defined(__loongarch__) + # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) + #elif defined(__aarch64__) + # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48) +@@ -209,7 +209,7 @@ + // the upstream linux community for all new ports. Other ports may still + // use legacy syscalls. 
+ #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +-# if defined(__aarch64__) && SANITIZER_LINUX ++# if (defined(__aarch64__) || defined(__loongarch64)) && SANITIZER_LINUX + # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1 + # else + # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0 +@@ -241,6 +241,21 @@ + # define HAVE_TIRPC_RPC_XDR_H 0 + #endif + ++#if defined(__loongarch__) ++# define SANITIZER_LOONGARCH 1 ++# if defined(__loongarch64) ++# define SANITIZER_LOONGARCH32 0 ++# define SANITIZER_LOONGARCH64 1 ++# else ++# define SANITIZER_LOONGARCH32 1 ++# define SANITIZER_LOONGARCH64 0 ++# endif ++#else ++# define SANITIZER_LOONGARCH 0 ++# define SANITIZER_LOONGARCH32 0 ++# define SANITIZER_LOONGARCH64 0 ++#endif ++ + /// \macro MSC_PREREQ + /// \brief Is the compiler MSVC of at least the specified version? + /// The common \param version values to check for are: +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +index b9eb09ad3..e8f8cfedb 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +@@ -205,7 +205,7 @@ + #if SI_LINUX_NOT_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__)) ++ defined(__s390__) || defined(__loongarch__)) + #define SANITIZER_INTERCEPT_PTRACE 1 + #else + #define SANITIZER_INTERCEPT_PTRACE 0 +@@ -382,7 +382,7 @@ + #define SANITIZER_INTERCEPT_PVALLOC \ + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) + #define SANITIZER_INTERCEPT_CFREE \ +- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) ++ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && !SANITIZER_LOONGARCH) + #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC) + #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC) + #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc +index 23a014823..cf71e922e 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc +@@ -64,7 +64,7 @@ namespace __sanitizer { + + #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\ + && !defined(__mips__) && !defined(__s390__)\ +- && !defined(__sparc__) ++ && !defined(__sparc__) && !defined(__loongarch__) + COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat)); + #endif + +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc +index 5c720b2e7..e0225c4a9 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc +@@ -115,7 +115,8 @@ + #if SANITIZER_LINUX || SANITIZER_FREEBSD + # include + # include +-# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) ++# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \ ++ defined(__loongarch64) + # include + # ifdef __arm__ + typedef struct user_fpregs elf_fpregset_t; +@@ -153,7 +154,7 @@ typedef struct user_fpregs elf_fpregset_t; + #include + #include + #include +-#if defined(__mips64) ++#if defined(__mips64) || defined(__loongarch64) + # include + #endif + 
#include +@@ -253,10 +254,11 @@ namespace __sanitizer { + // has been removed from glibc 2.28. + #if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \ + || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \ +- || defined(__x86_64__) ++ || defined(__x86_64__) || defined(__loongarch64) + #define SIZEOF_STRUCT_USTAT 32 + #elif defined(__arm__) || defined(__i386__) || defined(__mips__) \ +- || defined(__powerpc__) || defined(__s390__) || defined(__sparc__) ++ || defined(__powerpc__) || defined(__s390__) || defined(__sparc__) \ ++ || defined(__loongarch__) + #define SIZEOF_STRUCT_USTAT 20 + #else + #error Unknown size of struct ustat +@@ -326,27 +328,31 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__)) ++ defined(__s390__) || defined(__loongarch64)) + #if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) + unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); + #elif defined(__aarch64__) + unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); ++#elif defined(__loongarch64) ++ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); ++ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state); + #elif defined(__s390__) + unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct); + #else + unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); +-#endif // __mips64 || __powerpc64__ || __aarch64__ ++#endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch64 + #if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ +- defined(__aarch64__) || defined(__arm__) || defined(__s390__) ++ defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ ++ defined(__loongarch64) + unsigned struct_user_fpxregs_struct_sz = 0; + #else + unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); + #endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__ +-// || __s390__ ++// || __s390__ || __loongarch64 + #ifdef __arm__ + unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE; + #else +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +index 9c1429623..0020448cc 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +@@ -77,6 +77,9 @@ namespace __sanitizer { + #elif defined(__aarch64__) + const unsigned struct_kernel_stat_sz = 128; + const unsigned struct_kernel_stat64_sz = 104; ++#elif defined(__loongarch__) ++ const unsigned struct_kernel_stat_sz = 128; ++ const unsigned struct_kernel_stat64_sz = 128; + #elif defined(__powerpc__) && !defined(__powerpc64__) + const unsigned struct_kernel_stat_sz = 72; + const unsigned struct_kernel_stat64_sz = 104; +@@ -659,7 +662,7 @@ namespace __sanitizer { + + #if SANITIZER_FREEBSD + typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t; +-#elif defined(__mips__) ++#elif defined(__mips__) || 
defined(__loongarch__) + struct __sanitizer_kernel_sigset_t { + uptr sig[2]; + }; +@@ -827,7 +830,7 @@ namespace __sanitizer { + #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__)) ++ defined(__s390__) || defined(__loongarch64)) + extern unsigned struct_user_regs_struct_sz; + extern unsigned struct_user_fpregs_struct_sz; + extern unsigned struct_user_fpxregs_struct_sz; +diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc +index 2de585c32..ca79b289d 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc +@@ -18,7 +18,7 @@ namespace __sanitizer { + uptr StackTrace::GetNextInstructionPc(uptr pc) { + #if defined(__mips__) + return pc + 8; +-#elif defined(__powerpc__) ++#elif defined(__powerpc__) || defined(__loongarch__) + return pc + 4; + #else + return pc + 1; +diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h +index 31e99f6b9..3affe4eb7 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h ++++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h +@@ -17,7 +17,8 @@ namespace __sanitizer { + + static const u32 kStackTraceMax = 256; + +-#if SANITIZER_LINUX && (defined(__sparc__) || defined(__mips__)) ++#if SANITIZER_LINUX && (defined(__sparc__) || defined(__mips__)) || \ ++ (SANITIZER_LINUX && defined(__loongarch__)) + # define SANITIZER_CAN_FAST_UNWIND 0 + #elif SANITIZER_WINDOWS + # define SANITIZER_CAN_FAST_UNWIND 0 +@@ -74,7 +75,7 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) { + // Cancel Thumb bit. + pc = pc & (~1); + #endif +-#if defined(__powerpc__) || defined(__powerpc64__) ++#if defined(__powerpc__) || defined(__powerpc64__) || defined(__loongarch__) + // PCs are always 4 byte aligned. 
+ return pc - 4; + #elif defined(__sparc__) || defined(__mips__) +diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc +index d746fa540..4c183efc4 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc +@@ -15,13 +15,17 @@ + #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \ + defined(__aarch64__) || defined(__powerpc64__) || \ + defined(__s390__) || defined(__i386__) || \ +- defined(__arm__)) ++ defined(__arm__) || defined(__loongarch__)) + + #include "sanitizer_stoptheworld.h" + + #include "sanitizer_platform_limits_posix.h" + #include "sanitizer_atomic.h" + ++#if defined(__loongarch__) ++#include ++#endif ++ + #include + #include // for CLONE_* definitions + #include +@@ -35,7 +39,7 @@ + # include + #endif + #include // for user_regs_struct +-#if SANITIZER_ANDROID && SANITIZER_MIPS ++#if SANITIZER_ANDROID && SANITIZER_MIPS || SANITIZER_LOONGARCH + # include // for mips SP register in sys/user.h + #endif + #include // for signal-related stuff +@@ -483,8 +487,14 @@ typedef pt_regs regs_struct; + + #elif defined(__mips__) + typedef struct user regs_struct; ++#elif defined(__loongarch__) ++typedef struct user_regs_struct regs_struct; ++#define ARCH_IOVEC_FOR_GETREGSET ++ + # if SANITIZER_ANDROID + # define REG_SP regs[EF_R29] ++# elif SANITIZER_LOONGARCH ++# define REG_SP gpr[3] + # else + # define REG_SP regs[EF_REG29] + # endif +diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc +index ebf5ec094..c7cdf37df 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc ++++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc +@@ -81,6 +81,8 @@ void DTLS_Destroy() { + // "Dynamic thread vector pointers point 0x8000 past the start of each + // TLS block." 
+ static const uptr kDtvOffset = 0x8000;
++#elif defined(__loongarch__)
++static const uptr kDtvOffset = 0x800;
+ #else
+ static const uptr kDtvOffset = 0;
+ #endif
+diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am
+index 753cb8f4f..ac5ae4117 100644
+--- a/libsanitizer/tsan/Makefile.am
++++ b/libsanitizer/tsan/Makefile.am
+@@ -50,7 +50,7 @@ tsan_files = \
+ tsan_sync.cc
+ 
+ libtsan_la_SOURCES = $(tsan_files)
+-EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S
++EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_loongarch64.S
+ libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
+ libtsan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
+ if LIBBACKTRACE_SUPPORTED
+diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in
+index 629056bf1..6a3477b99 100644
+--- a/libsanitizer/tsan/Makefile.in
++++ b/libsanitizer/tsan/Makefile.in
+@@ -358,7 +358,7 @@ tsan_files = \
+ tsan_sync.cc
+ 
+ libtsan_la_SOURCES = $(tsan_files)
+-EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S
++EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_loongarch64.S
+ libtsan_la_LIBADD = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(top_builddir)/interception/libinterception.la \
+@@ -512,6 +512,7 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_aarch64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mips64.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_loongarch64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_ppc64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_proc.Plo@am__quote@
+diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
+index 15f20d4b6..c6959862b 100644
+--- a/libsanitizer/tsan/tsan_interceptors.cc
++++ b/libsanitizer/tsan/tsan_interceptors.cc
+@@ -71,7 +71,8 @@ struct ucontext_t {
+ };
+ #endif
+ 
+-#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
++#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 \
++ || defined(__loongarch__)
+ #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
+ #elif defined(__aarch64__) || SANITIZER_PPC64V2
+ #define PTHREAD_ABI_BASE "GLIBC_2.17"
+@@ -500,6 +501,8 @@ static void LongJmp(ThreadState *thr, uptr *env) {
+ uptr mangled_sp = env[13];
+ # elif defined(__mips64)
+ uptr mangled_sp = env[1];
++# elif defined(__loongarch64)
++ uptr mangled_sp = env[1];
+ # else
+ uptr mangled_sp = env[6];
+ # endif
+diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
+index 44a3ea991..a50dc6dbe 100644
+--- a/libsanitizer/tsan/tsan_platform.h
++++ b/libsanitizer/tsan/tsan_platform.h
+@@ -129,6 +129,44 @@ struct Mapping {
+ static const uptr kVdsoBeg = 0x7000000000000000ull;
+ };
+ 
++#elif defined(__loongarch64)
++/*
++ * TODO: same as mips64 for now; may need to change in the future.
++C/C++ on linux/loongarch64 (40-bit VMA)
++0000 0000 00 - 0100 0000
00: - (4 GB)
++0100 0000 00 - 0200 0000 00: main binary (4 GB)
++0200 0000 00 - 2000 0000 00: - (120 GB)
++2000 0000 00 - 4000 0000 00: shadow (128 GB)
++4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
++5000 0000 00 - aa00 0000 00: - (360 GB)
++aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
++ab00 0000 00 - b000 0000 00: - (20 GB)
++b000 0000 00 - b200 0000 00: traces (8 GB)
++b200 0000 00 - fe00 0000 00: - (304 GB)
++fe00 0000 00 - ff00 0000 00: heap (4 GB)
++ff00 0000 00 - ff80 0000 00: - (2 GB)
++ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
++*/
++struct Mapping {
++ static const uptr kMetaShadowBeg = 0x4000000000ull;
++ static const uptr kMetaShadowEnd = 0x5000000000ull;
++ static const uptr kTraceMemBeg = 0xb000000000ull;
++ static const uptr kTraceMemEnd = 0xb200000000ull;
++ static const uptr kShadowBeg = 0x2000000000ull;
++ static const uptr kShadowEnd = 0x4000000000ull;
++ static const uptr kHeapMemBeg = 0xfe00000000ull;
++ static const uptr kHeapMemEnd = 0xff00000000ull;
++ static const uptr kLoAppMemBeg = 0x0100000000ull;
++ static const uptr kLoAppMemEnd = 0x0200000000ull;
++ static const uptr kMidAppMemBeg = 0xaa00000000ull;
++ static const uptr kMidAppMemEnd = 0xab00000000ull;
++ static const uptr kHiAppMemBeg = 0xff80000000ull;
++ static const uptr kHiAppMemEnd = 0xffffffffffull;
++ static const uptr kAppMemMsk = 0xf800000000ull;
++ static const uptr kAppMemXor = 0x0800000000ull;
++ static const uptr kVdsoBeg = 0xfffff00000ull;
++};
++
+ #elif defined(__aarch64__)
+ // AArch64 supports multiple VMA which leads to multiple address transformation
+ // functions. To support these multiple VMAS transformations and mappings TSAN
+diff --git a/libsanitizer/tsan/tsan_platform_posix.cc b/libsanitizer/tsan/tsan_platform_posix.cc
+index 6e62575f1..e146d04fb 100644
+--- a/libsanitizer/tsan/tsan_platform_posix.cc
++++ b/libsanitizer/tsan/tsan_platform_posix.cc
+@@ -59,6 +59,9 @@ void InitializeShadowMemory() {
+ } else {
+ DCHECK(0);
+ }
++#elif defined(__loongarch64)
++ const uptr kMadviseRangeBeg = 0xff00000000ull;
++ const uptr kMadviseRangeSize = 0x0100000000ull;
+ #elif defined(__powerpc64__)
+ uptr kMadviseRangeBeg = 0;
+ uptr kMadviseRangeSize = 0;
+diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
+index 4a1f50061..8f9c48867 100644
+--- a/libsanitizer/tsan/tsan_rtl.cc
++++ b/libsanitizer/tsan/tsan_rtl.cc
+@@ -224,7 +224,7 @@ static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+ }
+ 
+-#ifndef __mips__
++#if !defined(__mips__) && !defined(__loongarch__)
+ static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+@@ -401,6 +401,20 @@ void Initialize(ThreadState *thr) {
+ OnInitialize();
+ }
+ 
++void MaybeSpawnBackgroundThread() {
++ // On MIPS, TSan initialization is run before
++ // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
++ // new threads.
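++ // (Editor's note: the LoongArch port mirrors MIPS here, hence its
++ // inclusion in the check below.)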
++#if !SANITIZER_GO && !(defined(__mips__) || defined(__loongarch__)) ++ static atomic_uint32_t bg_thread = {}; ++ if (atomic_load(&bg_thread, memory_order_relaxed) == 0 && ++ atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) { ++ StartBackgroundThread(); ++ SetSandboxingCallback(StopBackgroundThread); ++ } ++#endif ++} ++ + int Finalize(ThreadState *thr) { + bool failed = false; + +diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h +index 7dd9779e4..6f8800003 100644 +--- a/libsanitizer/tsan/tsan_rtl.h ++++ b/libsanitizer/tsan/tsan_rtl.h +@@ -52,7 +52,8 @@ namespace __tsan { + + #if !SANITIZER_GO + struct MapUnmapCallback; +-#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) ++#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) \ ++ || defined(__loongarch64) + static const uptr kAllocatorRegionSizeLog = 20; + static const uptr kAllocatorNumRegions = + SANITIZER_MMAP_RANGE_SIZE >> kAllocatorRegionSizeLog; +diff --git a/libsanitizer/tsan/tsan_rtl_loongarch64.S b/libsanitizer/tsan/tsan_rtl_loongarch64.S +new file mode 100644 +index 000000000..9331e8afa +--- /dev/null ++++ b/libsanitizer/tsan/tsan_rtl_loongarch64.S +@@ -0,0 +1,156 @@ ++.section .text ++ ++.hidden __tsan_setjmp ++.comm _ZN14__interception11real_setjmpE,8,8 ++.globl setjmp ++.type setjmp, @function ++setjmp: ++ ++ # save env parameters ++ addi.d $r3,$r3,-24 ++ st.d $r1,$r3,16 ++ st.d $r22,$r3,8 ++ ++ # save jmp_buf ++ st.d $r4,$r3,0 ++ ++ # obtain $sp ++ add.d $r4,$r0,$r3 ++ ++ # call tsan interceptor ++ addi.d $r5,$r4,24 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf ++ ld.d $r4,$r3,0 ++ ++ # load libc setjmp to r20 ++ la $r20,_ZN14__interception11real_setjmpE ++ # restore env parameters ++ ld.d $r22,$r3,8 ++ ld.d $r1,$r3,16 ++ addi.d $r3,$r3,24 ++ ++ # tail jump to libc setjmp ++ ld.d $r20,$r20,0 ++ jr $r20 ++ ++.size setjmp, .-setjmp ++ ++.hidden __tsan_setjmp ++.globl _setjmp ++.comm _ZN14__interception12real__setjmpE,8,8 ++.type _setjmp, @function ++_setjmp: ++ ++ # Save env parameters ++ addi.d $r3,$r3,-24 ++ st.d $r1,$r3,16 ++ st.d $r22,$r3,8 ++ ++ # save jmp_buf ++ st.d $r4,$r3,0 ++ ++ # obtain $sp ++ add.d $r4,$r0,$r3 ++ ++ # call tsan interceptor ++ addi.d $r5,$r4,24 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf ++ ld.d $r4,$r3,0 ++ ++ # load libc _setjmp to r20 ++ la $r20,_ZN14__interception12real__setjmpE ++ ++ # restore env parameters ++ ld.d $r22,$r3,8 ++ ld.d $r1,$r3,16 ++ addi.d $r3,$r3,24 ++ ++ # tail jump to libc _setjmp ++ ld.d $r20,$r20,0 ++ jr $r20 ++ ++.size _setjmp, .-_setjmp ++ ++.hidden __tsan_setjmp ++.globl sigsetjmp ++.comm _ZN14__interception14real_sigsetjmpE,8,8 ++.type sigsetjmp, @function ++sigsetjmp: ++ ++ # Save env parameters ++ addi.d $r3,$r3,-32 ++ st.d $r1,$r3,24 ++ st.d $r22,$r3,16 ++ ++ # save jmp_buf and savesig ++ st.d $r4,$r3,0 ++ st.d $r5,$r3,8 ++ ++ # obtain $sp ++ add.d $r4,$r0,$r3 ++ ++ # call tsan interceptor ++ addi.d $r5,$r4,32 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf and savesig ++ ld.d $r4,$r3,0 ++ ld.d $r5,$r3,8 ++ ++ # load libc sigsetjmp to r20 ++ la $r20,_ZN14__interception14real_sigsetjmpE ++ ++ # restore env parameters ++ ld.d $r22,$r3,16 ++ ld.d $r1,$r3,24 ++ addi.d $r3,$r3,32 ++ ++ # tail jump to libc sigsetjmp ++ ld.d $r20,$r20,0 ++ jr $r20 ++ ++.size sigsetjmp, .-sigsetjmp ++ ++.hidden __tsan_setjmp ++.comm _ZN14__interception16real___sigsetjmpE,8,8 ++.globl __sigsetjmp ++.type __sigsetjmp, @function ++__sigsetjmp: ++ ++ # Save env parameters ++ addi.d $sp,$sp,-32 ++ st.d $r1,$r3,24 ++ st.d $r22,$r3,16 
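++ # (Editor's note: under the LP64 ABI used here, $r1 is the return-address
++ # register ra, $r3 is the stack pointer sp -- "$sp" above is an alias for
++ # $r3 -- and $r4/$r5 are the first two argument registers a0/a1.)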
++ ++ # save jmp_buf and savesig ++ st.d $r4,$r3,0 ++ st.d $r5,$r3,8 ++ ++ # obtain $sp ++ add.d $r4,$r0,$r3 ++ ++ # call tsan interceptor ++ addi.d $r5,$r4,32 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf and savesig ++ ld.d $r4,$r3,0 ++ ld.d $r5,$r3,8 ++ ++ # load libc __sigsetjmp in r20 ++ la $r20,_ZN14__interception16real___sigsetjmpE ++ ++ # restore env parameters ++ ld.d $r22,$r3,16 ++ ld.d $r1,$r3,24 ++ addi.d $r3,$r3,32 ++ ++ # tail jump to libc __sigsetjmp ++ ld.d $r20,$r20,0 ++ jr $r20 ++ ++.size __sigsetjmp, .-__sigsetjmp +-- +2.39.3 + diff --git a/LoongArch-Fix-atomic_exchange-expanding-PR107713.patch b/LoongArch-Fix-atomic_exchange-expanding-PR107713.patch new file mode 100644 index 0000000..1660289 --- /dev/null +++ b/LoongArch-Fix-atomic_exchange-expanding-PR107713.patch @@ -0,0 +1,164 @@ +From 438fe2208b9a219e3a3d729f39a55c6831082181 Mon Sep 17 00:00:00 2001 +From: Xing Li +Date: Fri, 2 Dec 2022 10:35:54 +0800 +Subject: [PATCH] LoongArch: Fix atomic_exchange expanding [PR107713] + +We used to expand atomic_exchange_n(ptr, new, mem_order) for subword types +into something like: + + { + __typeof__(*ptr) t = atomic_load_n(ptr, mem_order); + atomic_compare_exchange_n(ptr, &t, new, true, mem_order, mem_order); + return t; + } + +It's incorrect because another thread may store a different value into *ptr +after atomic_load_n. Then atomic_compare_exchange_n will not store into +*ptr, but atomic_exchange_n should always perform the store. + +gcc/ChangeLog: + + PR target/107713 + * config/loongarch/sync.md + (atomic_cas_value_exchange_7_): New define_insn. + (atomic_exchange): Use atomic_cas_value_exchange_7_si instead of + atomic_cas_value_cmp_and_7_si. + +gcc/testsuite/ChangeLog: + + PR target/107713 + * gcc.target/loongarch/pr107713-1.c: New test. + * gcc.target/loongarch/pr107713-2.c: New test. 
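
(Editor's illustration, not part of the patch.) The race described above is
visible in plain C11: a load followed by a compare-and-swap cannot implement
an atomic exchange, because a store from another thread may land between the
two steps.

    /* Editor's sketch: why "load, then CAS" is not atomic_exchange.  */
    #include <stdatomic.h>

    _Atomic char x;

    char broken_exchange (char new_val)
    {
      char t = atomic_load (&x);                         /* step 1: load */
      /* Another thread may store to x here, so that x != t below...  */
      atomic_compare_exchange_strong (&x, &t, new_val);  /* step 2: CAS  */
      /* ...in which case the CAS fails and new_val is never stored, yet
         __atomic_exchange_n is required to always perform the store.  */
      return t;
    }

The ll/sc loop emitted by the new atomic_cas_value_exchange_7_<mode> pattern
retries until its store succeeds, so the store can never be lost.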
+ +Signed-off-by: Xing Li +Signed-off-by: Jinyang He +--- + gcc/config/loongarch/sync.md | 27 +++++++++- + .../gcc.target/loongarch/pr107713-1.c | 50 +++++++++++++++++++ + .../gcc.target/loongarch/pr107713-2.c | 9 ++++ + 3 files changed, 84 insertions(+), 2 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr107713-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr107713-2.c + +diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md +index e3eb43e16..5a16c4fa3 100644 +--- a/gcc/config/loongarch/sync.md ++++ b/gcc/config/loongarch/sync.md +@@ -461,6 +461,29 @@ + } + [(set (attr "length") (const_int 32))]) + ++(define_insn "atomic_cas_value_exchange_7_" ++ [(set (match_operand:GPR 0 "register_operand" "=&r") ++ (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (set (match_dup 1) ++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ++ (match_operand:GPR 3 "reg_or_0_operand" "rJ") ++ (match_operand:GPR 4 "reg_or_0_operand" "rJ") ++ (match_operand:GPR 5 "reg_or_0_operand" "rJ") ++ (match_operand:SI 6 "const_int_operand")] ;; model ++ UNSPEC_SYNC_EXCHANGE)) ++ (clobber (match_scratch:GPR 7 "=&r"))] ++ "" ++{ ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%z3\\n\\t" ++ "or%i5\\t%7,%7,%5\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beqz\\t%7,1b\\n\\t"; ++} ++ [(set (attr "length") (const_int 20))]) ++ + (define_expand "atomic_exchange" + [(set (match_operand:SHORT 0 "register_operand") + (unspec_volatile:SHORT +@@ -472,11 +495,11 @@ + "" + { + union loongarch_gen_fn_ptrs generator; +- generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; ++ generator.fn_7 = gen_atomic_cas_value_exchange_7_si; + loongarch_expand_atomic_qihi (generator, + operands[0], + operands[1], +- operands[1], ++ const0_rtx, + operands[2], + operands[3]); + DONE; +diff --git a/gcc/testsuite/gcc.target/loongarch/pr107713-1.c b/gcc/testsuite/gcc.target/loongarch/pr107713-1.c +new file mode 100644 +index 000000000..c307bf87b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr107713-1.c +@@ -0,0 +1,50 @@ ++/* { dg-do run } */ ++/* { dg-require-effective-target pthread } */ ++/* { dg-options "-pthread" } */ ++ ++#include ++ ++char x, x1, x2; ++ ++void * ++work1 (void *none) ++{ ++ for (int i = 0; i < 100; i++) ++ x1 = __atomic_exchange_n (&x, x1, __ATOMIC_SEQ_CST); ++ return NULL; ++} ++ ++void * ++work2 (void *none) ++{ ++ for (int i = 0; i < 100; i++) ++ x2 = __atomic_exchange_n (&x, x2, __ATOMIC_SEQ_CST); ++ return NULL; ++} ++ ++void ++test (void) ++{ ++ x = 0; ++ x1 = 1; ++ x2 = 2; ++ pthread_t w1, w2; ++ if (pthread_create (&w1, NULL, work1, NULL) != 0) ++ __builtin_abort (); ++ if (pthread_create (&w2, NULL, work2, NULL) != 0) ++ __builtin_abort (); ++ if (pthread_join (w1, NULL) != 0) ++ __builtin_abort (); ++ if (pthread_join (w2, NULL) != 0) ++ __builtin_abort (); ++ if ((x ^ x1 ^ x2) != 3) ++ __builtin_abort (); ++} ++ ++int ++main () ++{ ++ int i; ++ for (i = 0; i < 10000; i++) ++ test (); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/pr107713-2.c b/gcc/testsuite/gcc.target/loongarch/pr107713-2.c +new file mode 100644 +index 000000000..82d44db3d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr107713-2.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler-times "beq|bne" 1 } } */ ++ ++char ++t (char *p, char x) ++{ ++ return __atomic_exchange_n (p, x, __ATOMIC_RELAXED); ++} +-- +2.27.0 + diff --git a/LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch 
b/LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch
new file mode 100644
index 0000000..f8de504
--- /dev/null
+++ b/LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch
@@ -0,0 +1,101 @@
+From 08d337cc5186e47949b60e4b3eeebd1f763337e0 Mon Sep 17 00:00:00 2001
+From: Lixing
+Date: Mon, 31 Jul 2023 09:46:12 +0800
+Subject: [PATCH 1/2] LoongArch: Remove NOOP_TRUNCATION and fix extendsidi2
+
+We can safely treat a truncation from INPREC to OUTPREC as a no-op,
+because an explicit extension is emitted whenever one is needed.
+---
+ gcc/config/loongarch/loongarch.c | 11 --------
+ gcc/config/loongarch/loongarch.md | 44 +++++++------------------------
+ 2 files changed, 9 insertions(+), 46 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c
+index a1dde5a0f..f8f96329c 100644
+--- a/gcc/config/loongarch/loongarch.c
++++ b/gcc/config/loongarch/loongarch.c
+@@ -10313,14 +10313,6 @@ loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+ return mode;
+ }
+ 
+-/* Implement TARGET_TRULY_NOOP_TRUNCATION. */
+-
+-static bool
+-loongarch_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec)
+-{
+- return !TARGET_64BIT || inprec <= 32 || outprec > 32;
+-}
+-
+ /* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info
+ for details about the frame layout. */
+ 
+@@ -10940,9 +10932,6 @@ loongarch_prefetch_cookie (rtx write, rtx locality)
+ #undef TARGET_CAN_CHANGE_MODE_CLASS
+ #define TARGET_CAN_CHANGE_MODE_CLASS loongarch_can_change_mode_class
+ 
+-#undef TARGET_TRULY_NOOP_TRUNCATION
+-#define TARGET_TRULY_NOOP_TRUNCATION loongarch_truly_noop_truncation
+-
+ #undef TARGET_CONSTANT_ALIGNMENT
+ #define TARGET_CONSTANT_ALIGNMENT loongarch_constant_alignment
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 097c9f4db..a08c4a62c 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1433,43 +1433,17 @@
+ ;;
+ ;; ....................
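
(Editor's note, not part of the patch.) The rationale is that every 32-bit
operation on LoongArch64 already writes its result sign-extended to 64 bits,
so a truncation needs no instruction at all, and a register-to-register sign
extension is a single shift by zero, as the extendsidi2 rewrite below shows.
A minimal illustration; the expected assembly is an assumption based on the
"slli.w %0,%1,0" template in the following hunk:

    /* Editor's sketch: with extendsidi2 as a single instruction, a
       32-to-64-bit sign extension of a register becomes one slli.w.  */
    long sext_low32 (long x)
    {
      return (int) x;   /* expected codegen: slli.w $r4,$r4,0 */
    }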
+ +-(define_insn_and_split "extendsidi2" ++(define_insn "extendsidi2" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") +- (sign_extend:DI +- (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m,k")))] ++ (sign_extend:DI ++ (match_operand:SI 1 "nonimmediate_operand" "r,ZC,m,k")))] + "TARGET_64BIT" +-{ +- switch (which_alternative) +- { +- case 0: +- return "#"; +- case 1: +- { +- rtx offset = XEXP (operands[1], 0); +- if (GET_CODE (offset) == PLUS) +- offset = XEXP (offset, 1); +- else +- offset = const0_rtx; +- if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) +- return "ld.w\t%0,%1"; +- else +- return "ldptr.w\t%0,%1"; +- } +- case 2: +- return "ld.w\t%0,%1"; +- case 3: +- return "ldx.w\t%0,%1"; +- default: +- gcc_unreachable (); +- } +-} +- "&& reload_completed && register_operand (operands[1], VOIDmode)" +- [(const_int 0)] +-{ +- emit_note (NOTE_INSN_DELETED); +- DONE; +-} +- [(set_attr "move_type" "move,load,load,load") ++ "@ ++ slli.w\t%0,%1,0 ++ ldptr.w\t%0,%1 ++ ld.w\t%0,%1 ++ ldx.w\t%0,%1" ++ [(set_attr "move_type" "sll0,load,load,load") + (set_attr "mode" "DI")]) + + (define_insn "extend2" +-- +2.39.3 + diff --git a/Sync-to-gcc-8-vec-36.patch b/Sync-to-gcc-8-vec-36.patch new file mode 100644 index 0000000..e41b234 --- /dev/null +++ b/Sync-to-gcc-8-vec-36.patch @@ -0,0 +1,30492 @@ +From 474c84c016b0c36c9aace9a41d6d9df8107cf3e8 Mon Sep 17 00:00:00 2001 +From: Lixing +Date: Wed, 19 Jul 2023 10:47:27 +0800 +Subject: [PATCH] Sync to gcc-8-vec-36 + +--- + .../config/loongarch/loongarch-common.c | 41 +- + gcc/config.gcc | 589 +- + gcc/config.host | 12 - + gcc/config/loongarch/constraints.md | 371 +- + gcc/config/loongarch/driver-native.c | 82 - + gcc/config/loongarch/elf.h | 56 +- + gcc/config/loongarch/frame-header-opt.c | 292 - + gcc/config/loongarch/generic.md | 21 +- + gcc/config/loongarch/genopt.sh | 110 - + gcc/config/loongarch/genopts/genstr.sh | 104 + + .../loongarch/genopts/loongarch-strings | 68 + + gcc/config/loongarch/genopts/loongarch.opt.in | 242 + + gcc/config/loongarch/gnu-user.h | 135 +- + gcc/config/loongarch/la464.md | 132 + + gcc/config/loongarch/larchintrin.h | 495 +- + gcc/config/loongarch/lasx.md | 684 +- + gcc/config/loongarch/lasxintrin.h | 46 +- + gcc/config/loongarch/linux-common.h | 68 - + gcc/config/loongarch/linux.h | 37 +- + gcc/config/loongarch/loongarch-builtins.c | 549 +- + gcc/config/loongarch/loongarch-c.c | 158 +- + gcc/config/loongarch/loongarch-cpu.c | 291 + + .../{loongarch-d.c => loongarch-cpu.h} | 30 +- + gcc/config/loongarch/loongarch-cpus.def | 38 - + gcc/config/loongarch/loongarch-def.c | 232 + + gcc/config/loongarch/loongarch-def.h | 161 + + gcc/config/loongarch/loongarch-driver.c | 206 + + gcc/config/loongarch/loongarch-driver.h | 72 + + gcc/config/loongarch/loongarch-ftypes.def | 173 +- + gcc/config/loongarch/loongarch-modes.def | 6 +- + gcc/config/loongarch/loongarch-opts.c | 725 ++ + gcc/config/loongarch/loongarch-opts.h | 86 +- + gcc/config/loongarch/loongarch-protos.h | 155 +- + gcc/config/loongarch/loongarch-str.h | 68 + + gcc/config/loongarch/loongarch-tables.opt | 34 - + gcc/config/loongarch/loongarch-tune.h | 51 + + gcc/config/loongarch/loongarch.c | 8440 +++++++++-------- + gcc/config/loongarch/loongarch.h | 1523 +-- + gcc/config/loongarch/loongarch.md | 3658 +++---- + gcc/config/loongarch/loongarch.opt | 252 +- + gcc/config/loongarch/lsx.md | 358 +- + gcc/config/loongarch/lsxintrin.h | 46 +- + gcc/config/loongarch/predicates.md | 250 +- + gcc/config/loongarch/rtems.h | 39 - + gcc/config/loongarch/sde.opt | 
28 - + gcc/config/loongarch/sync.md | 746 +- + gcc/config/loongarch/t-linux | 65 +- + gcc/config/loongarch/t-loongarch | 59 +- + gcc/config/loongarch/x-native | 3 - + libgcc/config/loongarch/crtfastmath.c | 48 +- + libgcc/config/loongarch/crti.S | 43 - + libgcc/config/loongarch/crtn.S | 39 - + libgcc/config/loongarch/gthr-loongnixsde.h | 237 - + libgcc/config/loongarch/linux-unwind.h | 27 +- + libgcc/config/loongarch/sfp-machine.h | 166 +- + libgcc/config/loongarch/t-elf | 3 - + libgcc/config/loongarch/t-loongarch | 2 - + libgcc/config/loongarch/t-sdemtk | 3 - + libgcc/config/loongarch/t-vr | 0 + 59 files changed, 12128 insertions(+), 10527 deletions(-) + delete mode 100644 gcc/config/loongarch/driver-native.c + delete mode 100644 gcc/config/loongarch/frame-header-opt.c + delete mode 100644 gcc/config/loongarch/genopt.sh + create mode 100755 gcc/config/loongarch/genopts/genstr.sh + create mode 100644 gcc/config/loongarch/genopts/loongarch-strings + create mode 100644 gcc/config/loongarch/genopts/loongarch.opt.in + create mode 100644 gcc/config/loongarch/la464.md + delete mode 100644 gcc/config/loongarch/linux-common.h + create mode 100644 gcc/config/loongarch/loongarch-cpu.c + rename gcc/config/loongarch/{loongarch-d.c => loongarch-cpu.h} (59%) + delete mode 100644 gcc/config/loongarch/loongarch-cpus.def + create mode 100644 gcc/config/loongarch/loongarch-def.c + create mode 100644 gcc/config/loongarch/loongarch-def.h + create mode 100644 gcc/config/loongarch/loongarch-driver.c + create mode 100644 gcc/config/loongarch/loongarch-driver.h + create mode 100644 gcc/config/loongarch/loongarch-opts.c + create mode 100644 gcc/config/loongarch/loongarch-str.h + delete mode 100644 gcc/config/loongarch/loongarch-tables.opt + create mode 100644 gcc/config/loongarch/loongarch-tune.h + delete mode 100644 gcc/config/loongarch/rtems.h + delete mode 100644 gcc/config/loongarch/sde.opt + delete mode 100644 gcc/config/loongarch/x-native + delete mode 100644 libgcc/config/loongarch/crti.S + delete mode 100644 libgcc/config/loongarch/crtn.S + delete mode 100644 libgcc/config/loongarch/gthr-loongnixsde.h + delete mode 100644 libgcc/config/loongarch/t-elf + delete mode 100644 libgcc/config/loongarch/t-sdemtk + delete mode 100644 libgcc/config/loongarch/t-vr + +diff --git a/gcc/common/config/loongarch/loongarch-common.c b/gcc/common/config/loongarch/loongarch-common.c +index afbbc3ad0..ccdc8f498 100644 +--- a/gcc/common/config/loongarch/loongarch-common.c ++++ b/gcc/common/config/loongarch/loongarch-common.c +@@ -1,5 +1,5 @@ +-/* Common hooks for LARCH. +- Copyright (C) 1989-2018 Free Software Foundation, Inc. ++/* Common hooks for LoongArch. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + +@@ -25,44 +25,21 @@ along with GCC; see the file COPYING3. If not see + #include "common/common-target-def.h" + #include "opts.h" + #include "flags.h" ++#include "diagnostic-core.h" + +-#undef TARGET_OPTION_OPTIMIZATION_TABLE ++#undef TARGET_OPTION_OPTIMIZATION_TABLE + #define TARGET_OPTION_OPTIMIZATION_TABLE loongarch_option_optimization_table + + /* Set default optimization options. */ + static const struct default_options loongarch_option_optimization_table[] = + { +- { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, +- { OPT_LEVELS_NONE, 0, NULL, 0 } ++ { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 }, ++ /* Enable -fsched-pressure by default when optimizing. 
*/ ++ { OPT_LEVELS_1_PLUS, OPT_fsched_pressure, NULL, 1 }, ++ { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + +-/* Implement TARGET_HANDLE_OPTION. */ +- +-static bool +-loongarch_handle_option (struct gcc_options *opts, +- struct gcc_options *opts_set ATTRIBUTE_UNUSED, +- const struct cl_decoded_option *decoded, +- location_t loc ATTRIBUTE_UNUSED) +-{ +- size_t code = decoded->opt_index; +- +- switch (code) +- { +- case OPT_mno_flush_func: +- opts->x_loongarch_cache_flush_func = NULL; +- return true; +- +- default: +- return true; +- } +-} +- + #undef TARGET_DEFAULT_TARGET_FLAGS +-#define TARGET_DEFAULT_TARGET_FLAGS \ +- (TARGET_DEFAULT \ +- | TARGET_CPU_DEFAULT \ +- | MASK_CHECK_ZERO_DIV) +-#undef TARGET_HANDLE_OPTION +-#define TARGET_HANDLE_OPTION loongarch_handle_option ++#define TARGET_DEFAULT_TARGET_FLAGS MASK_CHECK_ZERO_DIV + + struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER; +diff --git a/gcc/config.gcc b/gcc/config.gcc +index ba061efa4..cca2e6e43 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -427,10 +427,10 @@ lm32*) + ;; + loongarch*-*-*) + cpu_type=loongarch +- d_target_objs="loongarch-d.o" + extra_headers="lasxintrin.h lsxintrin.h larchintrin.h" +- extra_objs="frame-header-opt.o loongarch-c.o loongarch-builtins.o" +- extra_options="${extra_options} g.opt fused-madd.opt loongarch/loongarch-tables.opt" ++ extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" ++ extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o" ++ extra_options="${extra_options} g.opt fused-madd.opt" + ;; + m32r*-*-*) + cpu_type=m32r +@@ -2193,54 +2193,30 @@ mips*-*-linux*) # Linux MIPS, either endian. + fi + ;; + loongarch*-*-linux*) +- case ${with_abi} in +- "") +- echo "not specify ABI, default is lp64 for loongarch64" +- with_abi=lp64 # for default +- ;; +- lpx32) +- ;; +- lp32) +- ;; +- lp64) +- ;; +- *) +- echo "Unknown ABI used in --with-abi=$with_abi" +- exit 1 +- esac +- +- enable_multilib="yes" +- loongarch_multilibs="${with_multilib_list}" +- if test "$loongarch_multilibs" = "default"; then +- loongarch_multilibs="${with_abi}" +- fi +- loongarch_multilibs=`echo $loongarch_multilibs | sed -e 's/,/ /g'` +- for loongarch_multilib in ${loongarch_multilibs}; do +- case ${loongarch_multilib} in +- lp64 | lpx32 | lp32 ) +- TM_MULTILIB_CONFIG="${TM_MULTILIB_CONFIG},${loongarch_multilib}" +- ;; +- *) +- echo "--with-multilib-list=${loongarch_multilib} not supported." +- exit 1 +- esac +- done +- TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'` ++ tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}" ++ tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h" ++ extra_options="${extra_options} linux-android.opt" ++ tmake_file="${tmake_file} loongarch/t-linux" ++ gnu_ld=yes ++ gas=yes + +- if test `for one_abi in ${loongarch_multilibs}; do if [ x\$one_abi = x$with_abi ]; then echo 1; exit 0; fi; done; echo 0;` = "0"; then +- echo "--with-abi=${with_abi} must be one of --with-multilib-list=${with_multilib_list}" +- exit 1 +- fi ++ # Force .init_array support. The configure script cannot always ++ # automatically detect that GAS supports it, yet we require it. 
++ gcc_cv_initfini_array=yes ++ ;; + +- tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/linux-common.h" +- extra_options="${extra_options} linux-android.opt" ++loongarch*-*-elf*) ++ tm_file="elfos.h newlib-stdint.h ${tm_file}" ++ tm_file="${tm_file} loongarch/elf.h loongarch/linux.h" + tmake_file="${tmake_file} loongarch/t-linux" + gnu_ld=yes + gas=yes ++ + # Force .init_array support. The configure script cannot always + # automatically detect that GAS supports it, yet we require it. + gcc_cv_initfini_array=yes + ;; ++ + mips*-mti-elf*) + tm_file="elfos.h newlib-stdint.h ${tm_file} mips/elf.h mips/n32-elf.h mips/sde.h mips/mti-elf.h" + tmake_file="mips/t-mti-elf" +@@ -2295,31 +2271,6 @@ mips*-sde-elf*) + ;; + esac + ;; +-loongarch*-sde-elf*) +- tm_file="elfos.h newlib-stdint.h ${tm_file} loongarch/elf.h loongarch/sde.h" +-# tmake_file="loongarch/t-sde" +- extra_options="${extra_options} loongarch/sde.opt" +- case "${with_newlib}" in +- yes) +- # newlib / libgloss. +- ;; +- *) +- # MIPS toolkit libraries. +- tm_file="$tm_file loongarch/sdemtk.h" +- tmake_file="$tmake_file loongarch/t-sdemtk" +- case ${enable_threads} in +- "" | yes | loongarchsde) +- thread_file='loongarchsde' +- ;; +- esac +- ;; +- esac +- case ${target} in +- loongarch*) +- tm_defines="LARCH_ISA_DEFAULT=0 LARCH_ABI_DEFAULT=ABILP64" +- ;; +- esac +- ;; + mipsisa32-*-elf* | mipsisa32el-*-elf* | \ + mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \ + mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \ +@@ -3259,7 +3210,7 @@ case ${target} in + ;; + *-*-linux* | *-*-gnu*) + case ${target} in +- aarch64*-* | arm*-* | i[34567]86-* | powerpc*-* | s390*-* | sparc*-* | x86_64-*) ++ aarch64*-* | arm*-* | i[34567]86-* | powerpc*-* | s390*-* | sparc*-* | x86_64-* | loongarch*-*) + default_gnu_indirect_function=yes + ;; + esac +@@ -4450,57 +4401,466 @@ case "${target}" in + ;; + + loongarch*-*-*) +- supported_defaults="abi arch float fpu tune" ++ supported_defaults="abi arch tune fpu simd multilib-default" ++ ++ # Local variables ++ unset \ ++ abi_pattern abi_default \ ++ abiext_pattern abiext_default \ ++ arch_pattern arch_default \ ++ fpu_pattern fpu_default \ ++ triplet_os triplet_abi ++ ++ # Infer ABI from the triplet. ++ case ${target} in ++ loongarch64-*-*-*f64) ++ abi_pattern="lp64d" ++ triplet_abi="" ++ ;; ++ loongarch64-*-*-*f32) ++ abi_pattern="lp64f" ++ triplet_abi="f32" ++ ;; ++ loongarch64-*-*-*sf) ++ abi_pattern="lp64s" ++ triplet_abi="sf" ++ ;; ++ loongarch64-*-*) ++ abi_pattern="lp64[dfs]" ++ abi_default="lp64d" ++ triplet_abi="" ++ ;; ++ *) ++ echo "Unsupported target ${target}." 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ abiext_pattern="*" ++ abiext_default="base" ++ ++ # Get the canonical triplet (multiarch specifier). ++ case ${target} in ++ *-linux-gnu*) triplet_os="linux-gnu";; ++ *-linux-musl*) triplet_os="linux-musl";; ++ *-elf*) triplet_os="elf";; ++ *) ++ echo "Unsupported target ${target}." 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}" + ++ ++ # Perform initial sanity checks on --with-* options. + case ${with_arch} in +- loongarch64 | loongarch32) +- # OK +- default_loongarch_arch=$with_arch ++ "" | abi-default | loongarch64 | la[2346]64) ;; # OK, append here. ++ native) ++ if test x${host} != x${target}; then ++ echo "--with-arch=native is illegal for cross-compiler." 
1>&2 ++ exit 1 ++ fi + ;; +- "") +- # fallback +- default_loongarch_arch=loongarch64 ++ *) ++ echo "Unknown arch in --with-arch=$with_arch" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ case ${with_abi} in ++ lp64) ++ # Legacy ++ with_abi=lp64d + ;; ++ ++ "" | lp64d | lp64f | lp64s) ;; # OK, append here. + *) +- echo "Unknown arch given in --with-arch=$with_arch, available choices are: loongarch64" 1>&2 ++ echo "Unsupported ABI given in --with-abi=$with_abi" 1>&2 + exit 1 + ;; + esac + ++ case ${with_abiext} in ++ "" | base) ;; # OK, append here. ++ *) ++ echo "Unsupported ABI extention type $with_abiext" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ case ${with_fpu} in ++ "" | none | 32 | 64) ;; # OK, append here. ++ 0) ++ # Convert "0" to "none" for upcoming checks. ++ with_fpu="none" ++ ;; ++ *) ++ echo "Unknown fpu type in --with-fpu=$with_fpu" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ case ${with_simd} in ++ "" | none) ;; ++ lsx | lasx) # OK, append here. ++ case ${with_fpu} in ++ 64) ;; ++ "") with_fpu=64 ;; ++ *) ++ echo "--with-simd=${with_simd} conflicts with --with-fpu=${with_fpu}" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ;; ++ ++ *) ++ echo "Unknown SIMD extension in --with-simd=$with_simd" 1>&2 ++ exit 1 ++ ;; ++ esac ++ ++ ++ # Set default value for with_abi. + case ${with_abi} in +- lp64 | lp32) +- # OK +- default_loongarch_abi=$with_abi ++ "") ++ if test x${abi_default} != x; then ++ with_abi=${abi_default} ++ else ++ with_abi=${abi_pattern} ++ fi ++ ;; ++ ++ *) ++ if echo "${with_abi}" | grep -E "^${abi_pattern}$" > /dev/null; then ++ : # OK ++ else ++ echo "Incompatible options:" \ ++ "--with-abi=${with_abi} and --target=${target}." 1>&2 ++ exit 1 ++ fi + ;; ++ esac ++ ++ # Set default value for with_abiext (internal) ++ case ${with_abiext} in + "") +- # fallback +- default_loongarch_abi=lp64 ++ if test x${abiext_default} != x; then ++ with_abiext=${abiext_default} ++ else ++ with_abiext=${abiext_pattern} ++ fi ++ ;; ++ ++ *) ++ if echo "${with_abiext}" | grep -E "^${abiext_pattern}$" > /dev/null; then ++ : # OK ++ else ++ echo "The ABI extension type \"${with_abiext}\"" \ ++ "is incompatible with --target=${target}." 1>&2 ++ exit 1 ++ fi ++ ++ ;; ++ esac ++ ++ # Infer ISA-related default options from the ABI: pass 1 ++ case ${with_abi}/${with_abiext} in ++ lp64*/base) ++ # architectures that support lp64* ABI ++ arch_pattern="native|abi-default|loongarch64|la[2346]64" ++ # default architecture for lp64* ABI ++ arch_default="abi-default" + ;; + *) +- echo "Unknown ABI given in --with-abi=$with_abi, available choices are: lp32 lp64" 1>&2 ++ echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 + exit 1 + ;; + esac + +- case ${with_float} in +- "" | soft | hard) +- # OK ++ # Infer ISA-related default options from the ABI: pass 2 ++ case ${with_abi}/${with_abiext} in ++ lp64d/base) ++ fpu_pattern="64" ++ ;; ++ lp64f/base) ++ fpu_pattern="32|64" ++ fpu_default="32" ++ ;; ++ lp64s/base) ++ fpu_pattern="none|32|64" ++ fpu_default="none" + ;; + *) +- echo "Unknown floating point type used in --with-float=$with_float" 1>&2 ++ echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2 + exit 1 + ;; + esac + ++ ## Set default value for with_arch. ++ case ${with_arch} in ++ "") ++ if test x${arch_default} != x; then ++ with_arch=${arch_default} ++ else ++ with_arch=${arch_pattern} ++ fi ++ ;; ++ ++ *) ++ if echo "${with_arch}" | grep -E "^${arch_pattern}$" > /dev/null; then ++ : # OK ++ else ++ echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ ++ "--with-arch=${with_arch}." 
1>&2 ++ exit 1 ++ fi ++ ;; ++ esac ++ ++ ## Set default value for with_fpu. + case ${with_fpu} in +- "" | single | double) +- # OK ++ "") ++ if test x${fpu_default} != x; then ++ with_fpu=${fpu_default} ++ else ++ with_fpu=${fpu_pattern} ++ fi + ;; ++ + *) +- echo "Unknown fpu type used in --with-fpu=$with_fpu" 1>&2 +- exit 1 ++ if echo "${with_fpu}" | grep -E "^${fpu_pattern}$" > /dev/null; then ++ : # OK ++ else ++ echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \ ++ "--with-fpu=${with_fpu}." 1>&2 ++ exit 1 ++ fi ++ ;; ++ esac ++ ++ ++ # Check default with_tune configuration using with_arch. ++ case ${with_arch} in ++ loongarch64) ++ tune_pattern="native|abi-default|loongarch64|la[2346]64" ++ ;; ++ *) ++ # By default, $with_tune == $with_arch ++ tune_pattern="*" ++ ;; ++ esac ++ ++ case ${with_tune} in ++ "") ;; # OK ++ *) ++ if echo "${with_tune}" | grep -E "^${tune_pattern}$" > /dev/null; then ++ : # OK ++ else ++ echo "Incompatible options: --with-tune=${with_tune}" \ ++ "and --with-arch=${with_arch}." 1>&2 ++ exit 1 ++ fi + ;; + esac ++ ++ # Handle --with-multilib-default ++ if echo "${with_multilib_default}" \ ++ | grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then ++ echo "Invalid argument to --with-multilib-default." 1>&2 ++ exit 1 ++ fi ++ ++ if test x${with_multilib_default} = x; then ++ # Use -march=abi-default by default when building libraries. ++ with_multilib_default="/march=abi-default" ++ else ++ unset parse_state component ++ parse_state=arch ++ for component in $(echo "${with_multilib_default}" | tr '/' ' '); do ++ case ${parse_state},${component} in ++ arch,|arch,abi-default) ++ # ABI-default: use the ABI's default ARCH configuration for ++ # multilib library builds, unless otherwise specified ++ # in --with-multilib-list. ++ with_multilib_default="/march=abi-default" ++ parse_state=opts ++ ;; ++ arch,fixed) ++ # Fixed: use the default gcc configuration for all multilib ++ # builds by default. ++ with_multilib_default="" ++ parse_state=opts ++ ;; ++ arch,*) ++ with_multilib_default="/march=abi-default" ++ parse_state=opts ++ ;& ++ opts,*) ++ with_multilib_default="${with_multilib_default}/${component}" ++ ;; ++ esac ++ done ++ unset parse_state component ++ fi ++ ++ # Handle --with-multilib-list. ++ if test x"${with_multilib_list}" = x \ ++ || test x"${with_multilib_list}" = xno \ ++ || test x"${with_multilib_list}" = xdefault \ ++ || test x"${enable_multilib}" != xyes; then ++ ++ with_multilib_list="${with_abi}/${with_abiext}" ++ fi ++ ++ # Check if the configured default ABI combination is included in ++ # ${with_multilib_list}. ++ loongarch_multilib_list_sane=no ++ ++ # This one goes to TM_MULTILIB_CONFIG, for use in t-linux. ++ loongarch_multilib_list_make="" ++ ++ # This one goes to tm_defines, for use in loongarch-driver.c. ++ loongarch_multilib_list_c="" ++ ++ # ${with_multilib_list} should not contain whitespaces, ++ # consecutive commas or slashes. ++ if echo "${with_multilib_list}" \ ++ | grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null 2>&1; then ++ echo "Invalid argument to --with-multilib-list." 
1>&2 ++ exit 1 ++ fi ++ ++ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis ++ for elem in $(echo "${with_multilib_list}" | tr ',' ' '); do ++ unset elem_abi_base elem_abi_ext ++ parse_state="abi-base" ++ ++ for component in $(echo "${elem}" | tr '/' ' '); do ++ case ${parse_state} in ++ abi-base) ++ # Base ABI type ++ case ${component} in ++ lp64 | lp64d) elem_tmp="ABI_BASE_LP64D,";; ++ lp64f) elem_tmp="ABI_BASE_LP64F,";; ++ lp64s) elem_tmp="ABI_BASE_LP64S,";; ++ *) ++ echo "Unknown base ABI \"${component}\" in --with-multilib-list." 1>&2 ++ exit 1 ++ ;; ++ esac ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}" ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}mabi=${component}" ++ elem_abi_base="${component}" ++ ++ parse_state="abi-ext" ++ ;; ++ ++ abi-ext) ++ # ABI extension type ++ case ${component} in ++ base) ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. ++ parse_state="arch" ++ continue; ++ ;; ++ esac ++ ++ # The default ABI extension is "base" if unspecified. ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. ++ parse_state="arch" ++ ;& ++ ++ arch) ++ # -march option ++ case ${component} in ++ abi-default | loongarch64 | la[2346]64) # OK, append here. ++ # Append -march spec for each multilib variant. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}" ++ ;& ++ ++ default) ++ # "/default" is equivalent to --with-multilib-default=fixed ++ parse_state="opts" ++ continue; ++ ;; ++ esac ++ ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ parse_state="opts" ++ ;& ++ ++ opts) ++ # Other compiler options for building libraries. ++ # (no static sanity check performed) ++ case ${component} in ++ *) ++ # Append other components as additional build options ++ # (without the prepending dash). ++ # Their validity should be examined by the compiler. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}/${component}" ++ ;; ++ esac ++ ;; ++ ++ esac ++ done ++ ++ case ${parse_state} in ++ "abi-ext") ++ elem_abi_ext="base" ++ loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. ++ ;& ++ "arch") ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ ;& ++ "opts") ++ ;; ++ esac ++ ++ # Check for repeated configuration of the same multilib variant. ++ if echo "${elem_abi_base}/${elem_abi_ext}" \ ++ | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then ++ echo "Repeated multilib config of \"${elem_abi_base}/${elem_abi_ext}\" in --with-multilib-list." ++ exit 1 ++ fi ++ all_abis+="${elem_abi_base}/${elem_abi_ext}|" ++ ++ ++ # Check if the default ABI configuration of the GCC binary ++ # is included in the enabled multilib variants. 
++ if test x${elem_abi_base} = x${with_abi} \ ++ && test x${elem_abi_ext} = x${with_abiext}; then ++ loongarch_multilib_list_sane=yes ++ fi ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}," ++ done ++ unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis ++ ++ ++ # Check if the default ABI combination is in the default list. ++ if test x${loongarch_multilib_list_sane} = xno; then ++ if test x${with_abiext} = xbase; then ++ with_abiext="" ++ else ++ with_abiext="/${with_abiext}" ++ fi ++ ++ echo "Default ABI combination (${with_abi}${with_abiext})" \ ++ "not found in --with-multilib-list." 1>&2 ++ exit 1 ++ fi ++ ++ # Remove the excessive appending comma. ++ loongarch_multilib_list_c=${loongarch_multilib_list_c%,} ++ loongarch_multilib_list_make=${loongarch_multilib_list_make%,} + ;; + + nds32*-*-*) +@@ -4935,17 +5295,54 @@ case ${target} in + ;; + + loongarch*-*-*) +- case ${default_loongarch_arch} in +- loongarch64) tm_defines="$tm_defines LARCH_ISA_DEFAULT=0" ;; +- loongarch32) tm_defines="$tm_defines LARCH_ISA_DEFAULT=1" ;; ++ # Export canonical triplet. ++ tm_defines="${tm_defines} LA_MULTIARCH_TRIPLET=${la_canonical_triplet}" ++ ++ # Define macro LA_DISABLE_MULTILIB if --disable-multilib ++ tm_defines="${tm_defines} TM_MULTILIB_LIST=${loongarch_multilib_list_c}" ++ if test x$enable_multilib = xyes; then ++ TM_MULTILIB_CONFIG="${loongarch_multilib_list_make}" ++ else ++ tm_defines="${tm_defines} LA_DISABLE_MULTILIB" ++ fi ++ ++ # Let --with- flags initialize the enum variables from loongarch.opt. ++ # See macro definitions from loongarch-opts.h and loongarch-cpu.h. ++ ++ # Architecture ++ tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_$(tr a-z- A-Z_ <<< ${with_arch})" ++ ++ # Base ABI type ++ tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_$(tr a-z- A-Z_ <<< ${with_abi})" ++ ++ # ABI Extension ++ case ${with_abiext} in ++ base) tm_defines="${tm_defines} DEFAULT_ABI_EXT=ABI_EXT_BASE" ;; + esac +- case ${default_loongarch_abi} in +- lp64) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP64" ;; +- lp32) tm_defines="$tm_defines LARCH_ABI_DEFAULT=ABILP32" ;; ++ ++ # Microarchitecture ++ if test x${with_tune} != x; then ++ tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_$(tr a-z- A-Z_ <<< ${with_tune})" ++ fi ++ ++ # FPU adjustment ++ case ${with_fpu} in ++ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NONE" ;; ++ 32) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU32" ;; ++ 64) tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU64" ;; + esac ++ ++ # SIMD extensions ++ case ${with_simd} in ++ none) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_NONE" ;; ++ lsx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LSX" ;; ++ lasx) tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LASX" ;; ++ esac ++ + tmake_file="loongarch/t-loongarch $tmake_file" + ;; + ++ + powerpc*-*-* | rs6000-*-*) + # FIXME: The PowerPC port uses the value set at compile time, + # although it's only cosmetic. 
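
(Editor's note: to make the effect of the configury above concrete, a
hypothetical "./configure --target=loongarch64-linux-gnu --with-abi=lp64d
--with-simd=lsx" run with no explicit --with-fpu would, per the rules above,
infer with_fpu=64 from the SIMD request, default with_arch to abi-default,
and emit tm_defines entries such as DEFAULT_ABI_BASE=ABI_BASE_LP64D,
DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU64 and DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LSX.)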
+diff --git a/gcc/config.host b/gcc/config.host +index d23dae4ac..c65569da2 100644 +--- a/gcc/config.host ++++ b/gcc/config.host +@@ -139,18 +139,6 @@ case ${host} in + host_extra_gcc_objs="driver-native.o" + host_xmake_file="${host_xmake_file} mips/x-native" + ;; +- loongarch*-*-linux*) +- host_extra_gcc_objs="driver-native.o" +- host_xmake_file="${host_xmake_file} loongarch/x-native" +- ;; +- esac +- ;; +- loongarch*-*-linux*) +- case ${target} in +- loongarch*-*-linux*) +- host_extra_gcc_objs="driver-native.o" +- host_xmake_file="${host_xmake_file} loongarch/x-native" +- ;; + esac + ;; + rs6000-*-* \ +diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md +index ae8596107..82c0ccf37 100644 +--- a/gcc/config/loongarch/constraints.md ++++ b/gcc/config/loongarch/constraints.md +@@ -1,5 +1,6 @@ +-;; Constraint definitions for LARCH. +-;; Copyright (C) 2006-2018 Free Software Foundation, Inc. ++;; Constraint definitions for LoongArch. ++;; Copyright (C) 2020-2022 Free Software Foundation, Inc. ++;; Contributed by Loongson Co. Ltd. + ;; + ;; This file is part of GCC. + ;; +@@ -20,160 +21,158 @@ + ;; Register constraints + + ;; "a" A constant call global and noplt address. +-;; "b" ALL_REGS ++;; "b" <-----unused + ;; "c" A constant call local address. +-;; "d" - +-;; "e" JALR_REGS ++;; "d" <-----unused ++;; "e" JIRL_REGS + ;; "f" FP_REGS +-;; "g" * ++;; "g" <-----unused + ;; "h" A constant call plt address. +-;; "i" "Matches a general integer constant." ++;; "i" Matches a general integer constant. (Global non-architectural) + ;; "j" SIBCALL_REGS +-;; "k" * +-;; "l" "A signed 16-bit constant ." +-;; "m" "A memory operand whose address is formed by a base register and offset +-;; that is suitable for use in instructions with the same addressing mode +-;; as @code{st.w} and @code{ld.w}." +-;; "n" "Matches a non-symbolic integer constant." +-;; "o" "Matches an offsettable memory reference." +-;; "p" "Matches a general address." +-;; "q" CSR_REGS +-;; "r" GENERAL_REGS +-;; "s" "Matches a symbolic integer constant." ++;; "k" A memory operand whose address is formed by a base register and ++;; (optionally scaled) index register. ++;; "l" A signed 16-bit constant. ++;; "m" A memory operand whose address is formed by a base register and offset ++;; that is suitable for use in instructions with the same addressing mode ++;; as @code{st.w} and @code{ld.w}. ++;; "n" Matches a non-symbolic integer constant. (Global non-architectural) ++;; "o" Matches an offsettable memory reference. (Global non-architectural) ++;; "p" Matches a general address. (Global non-architectural) ++;; "q" A general-purpose register except for $r0 and $r1 for lcsr. ++;; "r" GENERAL_REGS (Global non-architectural) ++;; "s" Matches a symbolic integer constant. (Global non-architectural) + ;; "t" A constant call weak address +-;; "u" - +-;; "v" - +-;; "w" "Matches any valid memory." +-;; "x" - +-;; "y" GR_REGS +-;; "z" ST_REGS +-;; "A" - +-;; "B" - +-;; "C" - +-;; "D" - +-;; "E" "Matches a floating-point constant." +-;; "F" "Matches a floating-point constant." +-;; "G" "Floating-point zero." +-;; "H" - +-;; "I" "A signed 12-bit constant (for arithmetic instructions)." +-;; "J" "Integer zero." +-;; "K" "An unsigned 12-bit constant (for logic instructions)." +-;; "L" "A signed 32-bit constant in which the lower 12 bits are zero. +-;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu} or @code{ori}." +-;; "N" "A constant in the range -65535 to -1 (inclusive)." 
+-;; "O" "A signed 15-bit constant." +-;; "P" "A constant in the range 1 to 65535 (inclusive)." +-;; "Q" "A signed 12-bit constant" +-;; "R" "An address that can be used in a non-macro load or store." +-;; "S" "A constant call address." +-;; "T" - +-;; "U" - +-;; "V" "Matches a non-offsettable memory reference." +-;; "W" "A memory address based on a member of @code{BASE_REG_CLASS}. This is +-;; true for all references (although it can sometimes be implicit +-;; if @samp{!TARGET_EXPLICIT_RELOCS})." +-;; "X" "Matches anything." ++;; "u" A signed 52bit constant and low 32-bit is zero (for logic instructions) ++;; "v" A signed 64-bit constant and low 44-bit is zero (for logic instructions) ++;; "w" Matches any valid memory. ++;; "x" <-----unused ++;; "y" <-----unused ++;; "z" FCC_REGS ++;; "A" <-----unused ++;; "B" <-----unused ++;; "C" <-----unused ++;; "D" <-----unused ++;; "E" Matches a floating-point constant. (Global non-architectural) ++;; "F" Matches a floating-point constant. (Global non-architectural) ++;; "G" Floating-point zero. ++;; "H" <-----unused ++;; "I" A signed 12-bit constant (for arithmetic instructions). ++;; "J" Integer zero. ++;; "K" An unsigned 12-bit constant (for logic instructions). ++;; "L" <-----unused ++;; "M" <-----unused ++;; "N" <-----unused ++;; "O" <-----unused ++;; "P" <-----unused ++;; "Q" <-----unused ++;; "R" <-----unused ++;; "S" <-----unused ++;; "T" <-----unused ++;; "U" <-----unused ++;; "V" Matches a non-offsettable memory reference. (Global non-architectural) ++;; "W" <-----unused ++;; "X" Matches anything. (Global non-architectural) + ;; "Y" - +-;; "YG" +-;; "A vector zero." +-;; "YA" +-;; "An unsigned 6-bit constant." +-;; "YB" +-;; "A signed 10-bit constant." +-;; "Yb" + ;; "Yd" +-;; "A constant @code{move_operand} that can be safely loaded into @code{$25} +-;; using @code{la}." +-;; "Yh" +-;; "Yw" ++;; A constant @code{move_operand} that can be safely loaded using ++;; @code{la}. ++;; "YG" ++;; A vector zero. + ;; "Yx" +-;; "YI" +-;; "A replicated vector const in which the replicated value is in the range +-;; [-512,511]." + ;; "YC" +-;; "A replicated vector const in which the replicated value has a single +-;; bit set." ++;; A replicated vector const in which the replicated value has a single ++;; bit set. + ;; "YZ" +-;; "A replicated vector const in which the replicated value has a single +-;; bit clear." ++;; A replicated vector const in which the replicated value has a single ++;; bit clear. + ;; "Z" - + ;; "ZC" +-;; "A memory operand whose address is formed by a base register and offset ++;; A memory operand whose address is formed by a base register and offset + ;; that is suitable for use in instructions with the same addressing mode +-;; as @code{ll.w} and @code{sc.w}." +-;; "ZD" +-;; "An address suitable for a @code{prefetch} instruction, or for any other +-;; instruction with the same addressing mode as @code{prefetch}." +-;; "ZR" +-;; "An address valid for loading/storing register exclusive" ++;; as @code{ll.w} and @code{sc.w}. + ;; "ZB" +-;; "An address that is held in a general-purpose register. +-;; The offset is zero" ++;; An address that is held in a general-purpose register. ++;; The offset is zero. ++;; "<" Matches a pre-dec or post-dec operand. (Global non-architectural) ++;; ">" Matches a pre-inc or post-inc operand. (Global non-architectural) + ++(define_constraint "a" ++ "@internal ++ A constant call global and noplt address." 
++ (match_operand 0 "is_const_call_global_noplt_symbol")) + + (define_constraint "c" + "@internal + A constant call local address." + (match_operand 0 "is_const_call_local_symbol")) + +-(define_constraint "a" +- "@internal +- A constant call global and noplt address." +- (match_operand 0 "is_const_call_global_noplt_symbol")) ++(define_register_constraint "e" "JIRL_REGS" ++ "@internal") ++ ++(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" ++ "A floating-point register (if available).") + + (define_constraint "h" + "@internal + A constant call plt address." + (match_operand 0 "is_const_call_plt_symbol")) + +-(define_constraint "t" +- "@internal +- A constant call weak address." +- (match_operand 0 "is_const_call_weak_symbol")) +- +-(define_register_constraint "e" "JALR_REGS" ++(define_register_constraint "j" "SIBCALL_REGS" + "@internal") + +-(define_register_constraint "q" "CSR_REGS" +- "A general-purpose register except for $r0 and $r1 for csr.") ++(define_memory_constraint "k" ++ "A memory operand whose address is formed by a base register and (optionally scaled) ++ index register." ++ (and (match_code "mem") ++ (match_test "loongarch_base_index_address_p (XEXP (op, 0), mode)"))) + +-(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" +- "A floating-point register (if available).") ++(define_constraint "l" ++"A signed 16-bit constant." ++(and (match_code "const_int") ++ (match_test "IMM16_OPERAND (ival)"))) + +-(define_register_constraint "b" "ALL_REGS" +- "@internal") ++(define_memory_constraint "m" ++ "A memory operand whose address is formed by a base register and offset ++ that is suitable for use in instructions with the same addressing mode ++ as @code{st.w} and @code{ld.w}." ++ (and (match_code "mem") ++ (match_test "loongarch_12bit_offset_address_p (XEXP (op, 0), mode)"))) + +-(define_register_constraint "j" "SIBCALL_REGS" +- "@internal") ++(define_register_constraint "q" "CSR_REGS" ++ "A general-purpose register except for $r0 and $r1 for lcsr.") + +-(define_constraint "l" +- "A signed 16-bit constant ." ++(define_constraint "t" ++ "@internal ++ A constant call weak address." ++ (match_operand 0 "is_const_call_weak_symbol")) ++ ++(define_constraint "u" ++ "A signed 52bit constant and low 32-bit is zero (for logic instructions)." + (and (match_code "const_int") +- (match_test "IMM16_OPERAND (ival)"))) ++ (match_test "LU32I_OPERAND (ival)"))) + +-(define_register_constraint "y" "GR_REGS" +- "Equivalent to @code{r}; retained for backwards compatibility.") ++(define_constraint "v" ++ "A signed 64-bit constant and low 52-bit is zero (for logic instructions)." ++ (and (match_code "const_int") ++ (match_test "LU52I_OPERAND (ival)"))) + +-(define_register_constraint "z" "ST_REGS" ++(define_register_constraint "z" "FCC_REGS" + "A floating-point condition code register.") + +-(define_constraint "kf" +- "@internal" +- (match_operand 0 "force_to_mem_operand")) ++;; Floating-point constraints + +-;; This is a normal rather than a register constraint because we can +-;; never use the stack pointer as a reload register. +-(define_constraint "ks" +- "@internal" +- (and (match_code "reg") +- (match_test "REGNO (op) == STACK_POINTER_REGNUM"))) ++(define_constraint "G" ++ "Floating-point zero." ++ (and (match_code "const_double") ++ (match_test "op == CONST0_RTX (mode)"))) + + ;; Integer constraints + + (define_constraint "I" + "A signed 12-bit constant (for arithmetic instructions)." 
+ (and (match_code "const_int") +- (match_test "SMALL_OPERAND (ival)"))) ++ (match_test "IMM12_OPERAND (ival)"))) + + (define_constraint "J" + "Integer zero." +@@ -183,53 +182,7 @@ + (define_constraint "K" + "An unsigned 12-bit constant (for logic instructions)." + (and (match_code "const_int") +- (match_test "SMALL_OPERAND_UNSIGNED (ival)"))) +- +-(define_constraint "u" +- "An unsigned 12-bit constant (for logic instructions)." +- (and (match_code "const_int") +- (match_test "LU32I_OPERAND (ival)"))) +- +-(define_constraint "v" +- "An unsigned 12-bit constant (for logic instructions)." +- (and (match_code "const_int") +- (match_test "LU52I_OPERAND (ival)"))) +- +-(define_constraint "L" +- "A signed 32-bit constant in which the lower 12 bits are zero. +- Such constants can be loaded using @code{lui}." +- (and (match_code "const_int") +- (match_test "LUI_OPERAND (ival)"))) +- +-(define_constraint "M" +- "A constant that cannot be loaded using @code{lui}, @code{addiu} +- or @code{ori}." +- (and (match_code "const_int") +- (not (match_test "SMALL_OPERAND (ival)")) +- (not (match_test "SMALL_OPERAND_UNSIGNED (ival)")) +- (not (match_test "LUI_OPERAND (ival)")))) +- +-(define_constraint "N" +- "A constant in the range -65535 to -1 (inclusive)." +- (and (match_code "const_int") +- (match_test "ival >= -0xffff && ival < 0"))) +- +-(define_constraint "O" +- "A signed 15-bit constant." +- (and (match_code "const_int") +- (match_test "ival >= -0x4000 && ival < 0x4000"))) +- +-(define_constraint "P" +- "A constant in the range 1 to 65535 (inclusive)." +- (and (match_code "const_int") +- (match_test "ival > 0 && ival < 0x10000"))) +- +-;; Floating-point constraints +- +-(define_constraint "G" +- "Floating-point zero." +- (and (match_code "const_double") +- (match_test "op == CONST0_RTX (mode)"))) ++ (match_test "IMM12_OPERAND_UNSIGNED (ival)"))) + + ;; General constraints + +@@ -237,33 +190,35 @@ + "@internal" + (match_operand 0 "const_arith_operand")) + +-(define_memory_constraint "R" +- "An address that can be used in a non-macro load or store." +- (and (match_code "mem") +- (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) ++(define_constraint "Yd" ++ "@internal ++ A constant @code{move_operand} that can be safely loaded using ++ @code{la}." ++ (and (match_operand 0 "move_operand") ++ (match_test "CONSTANT_P (op)"))) + +-(define_memory_constraint "m" ++(define_constraint "Yx" ++ "@internal" ++ (match_operand 0 "low_bitmask_operand")) ++ ++(define_memory_constraint "ZC" + "A memory operand whose address is formed by a base register and offset + that is suitable for use in instructions with the same addressing mode +- as @code{st.w} and @code{ld.w}." ++ as @code{ll.w} and @code{sc.w}." + (and (match_code "mem") +- (match_test "loongarch_12bit_offset_address_p (XEXP (op, 0), mode)"))) ++ (match_test "loongarch_14bit_shifted_offset_address_p (XEXP (op, 0), mode)"))) + +-(define_constraint "S" ++(define_memory_constraint "ZB" + "@internal +- A constant call address." +- (and (match_operand 0 "call_insn_operand") +- (match_test "CONSTANT_P (op)"))) ++ An address that is held in a general-purpose register. ++ The offset is zero" ++ (and (match_code "mem") ++ (match_test "REG_P (XEXP (op, 0))"))) + +-(define_memory_constraint "W" +- "@internal +- A memory address based on a member of @code{BASE_REG_CLASS}. This is +- true for allreferences (although it can sometimes be implicit +- if @samp{!TARGET_EXPLICIT_RELOCS})." 
++(define_memory_constraint "R" ++ "An address that can be used in a non-macro load or store." + (and (match_code "mem") +- (match_operand 0 "memory_operand") +- (and (not (match_operand 0 "stack_operand")) +- (not (match_test "CONSTANT_P (XEXP (op, 0))"))))) ++ (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1"))) + + (define_constraint "YG" + "@internal +@@ -271,41 +226,6 @@ + (and (match_code "const_vector") + (match_test "op == CONST0_RTX (mode)"))) + +-(define_constraint "YA" +- "@internal +- An unsigned 6-bit constant." +- (and (match_code "const_int") +- (match_test "UIMM6_OPERAND (ival)"))) +- +-(define_constraint "YB" +- "@internal +- A signed 10-bit constant." +- (and (match_code "const_int") +- (match_test "IMM10_OPERAND (ival)"))) +- +-(define_constraint "Yb" +- "@internal" +- (match_operand 0 "qi_mask_operand")) +- +-(define_constraint "Yd" +- "@internal +- A constant @code{move_operand} that can be safely loaded into @code{$25} +- using @code{la}." +- (and (match_operand 0 "move_operand") +- (match_test "CONSTANT_P (op)"))) +- +-(define_constraint "Yh" +- "@internal" +- (match_operand 0 "hi_mask_operand")) +- +-(define_constraint "Yw" +- "@internal" +- (match_operand 0 "si_mask_operand")) +- +-(define_constraint "Yx" +- "@internal" +- (match_operand 0 "low_bitmask_operand")) +- + (define_constraint "YI" + "@internal + A replicated vector const in which the replicated value is in the range +@@ -360,30 +280,3 @@ + A replicated vector const with replicated byte values as well as elements" + (and (match_code "const_vector") + (match_test "loongarch_const_vector_same_bytes_p (op, mode)"))) +- +-(define_memory_constraint "ZC" +- "A memory operand whose address is formed by a base register and offset +- that is suitable for use in instructions with the same addressing mode +- as @code{ll.w} and @code{sc.w}." +- (and (match_code "mem") +- (match_test "loongarch_14bit_shifted_offset_address_p (XEXP (op, 0), mode)"))) +- +-;;(define_address_constraint "ZD" +-;; "An address suitable for a @code{prefetch} instruction, or for any other +-;; instruction with the same addressing mode as @code{prefetch}." +-;; (if_then_else (match_test "ISA_HAS_9BIT_DISPLACEMENT") +-;; (match_test "loongarch_9bit_offset_address_p (op, mode)") +-;; (match_test "loongarch_address_insns (op, mode, false)"))) +- +-(define_memory_constraint "ZR" +- "@internal +- An address valid for loading/storing register exclusive" +- (match_operand 0 "mem_noofs_operand")) +- +-(define_memory_constraint "ZB" +- "@internal +- An address that is held in a general-purpose register. +- The offset is zero" +- (and (match_code "mem") +- (match_test "GET_CODE(XEXP(op,0)) == REG"))) +- +diff --git a/gcc/config/loongarch/driver-native.c b/gcc/config/loongarch/driver-native.c +deleted file mode 100644 +index 5484ee502..000000000 +--- a/gcc/config/loongarch/driver-native.c ++++ /dev/null +@@ -1,82 +0,0 @@ +-/* Subroutines for the gcc driver. +- Copyright (C) 2008-2018 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify +-it under the terms of the GNU General Public License as published by +-the Free Software Foundation; either version 3, or (at your option) +-any later version. +- +-GCC is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-GNU General Public License for more details. 
+- +-You should have received a copy of the GNU General Public License +-along with GCC; see the file COPYING3. If not see +-. */ +- +-#define IN_TARGET_CODE 1 +- +-#include "config.h" +-#include "system.h" +-#include "coretypes.h" +-#include "tm.h" +- +- +-/* This function must set to noinline. Otherwise the arg can not be passed. */ +-int loongson_cpucfg (int arg) +-{ +- int ret; +- __asm__ __volatile__ ("cpucfg %0,%1\n\t" /* cpucfg $2,$4. */ +- :"=r"(ret) +- :"r"(arg) +- :); +- return ret; +-} +- +-/* This will be called by the spec parser in gcc.c when it sees +- a %:local_cpu_detect(args) construct. Currently it will be called +- with either "arch" or "tune" as argument depending on if -march=native +- or -mtune=native is to be substituted. +- +- It returns a string containing new command line parameters to be +- put at the place of the above two options, depending on what CPU +- this is executed. E.g. "-march=loongson2f" on a Loongson 2F for +- -march=native. If the routine can't detect a known processor, +- the -march or -mtune option is discarded. +- +- ARGC and ARGV are set depending on the actual arguments given +- in the spec. */ +-const char * +-host_detect_local_cpu (int argc, const char **argv) +-{ +- const char *cpu = NULL; +- bool arch; +- int cpucfg_arg; +- int cpucfg_ret; +- +- if (argc < 1) +- return NULL; +- +- arch = strcmp (argv[0], "arch") == 0; +- if (!arch && strcmp (argv[0], "tune")) +- return NULL; +- +- cpucfg_arg = 0; +- cpucfg_ret = loongson_cpucfg (cpucfg_arg); +- if (((cpucfg_ret >> 16) & 0xff) == 0x14) +- { +- if (((cpucfg_ret >> 8) & 0xff) == 0xc0) +- cpu = "la464"; +- else +- cpu = NULL; +- } +- +- +- if (cpu == NULL) +- return NULL; +- +- return concat ("-m", argv[0], "=", cpu, NULL); +-} +diff --git a/gcc/config/loongarch/elf.h b/gcc/config/loongarch/elf.h +index b7f938e31..edb0e77d2 100644 +--- a/gcc/config/loongarch/elf.h ++++ b/gcc/config/loongarch/elf.h +@@ -1,5 +1,6 @@ +-/* Target macros for loongarch*-elf targets. +- Copyright (C) 1994-2018 Free Software Foundation, Inc. ++/* Definitions for LoongArch systems using GNU (glibc-based) userspace, ++ or other userspace with libc derived from glibc. ++ Copyright (C) 1998-2018 Free Software Foundation, Inc. + + This file is part of GCC. + +@@ -17,34 +18,37 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +-/* LARCH assemblers don't have the usual .set foo,bar construct; +- .set is used for assembler options instead. */ +-#undef SET_ASM_OP +-#define ASM_OUTPUT_DEF(FILE, LABEL1, LABEL2) \ +- do \ +- { \ +- fputc ('\t', FILE); \ +- assemble_name (FILE, LABEL1); \ +- fputs (" = ", FILE); \ +- assemble_name (FILE, LABEL2); \ +- fputc ('\n', FILE); \ +- } \ +- while (0) +- +-#undef ASM_DECLARE_OBJECT_NAME +-#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name +- +-#undef ASM_FINISH_DECLARE_OBJECT +-#define ASM_FINISH_DECLARE_OBJECT loongarch_finish_declare_object +- +-/* Leave the linker script to choose the appropriate libraries. */ ++/* Define the size of the wide character type. */ ++#undef WCHAR_TYPE ++#define WCHAR_TYPE "int" ++ ++#undef WCHAR_TYPE_SIZE ++#define WCHAR_TYPE_SIZE 32 ++ ++ ++/* GNU-specific SPEC definitions. */ ++#define GNU_USER_LINK_EMULATION "elf" ABI_GRLEN_SPEC "loongarch" ++ ++#undef GNU_USER_TARGET_LINK_SPEC ++#define GNU_USER_TARGET_LINK_SPEC \ ++ "%{shared} -m " GNU_USER_LINK_EMULATION ++ ++ ++/* Link against Newlib libraries, because the ELF backend assumes Newlib. 
++ Handle the circular dependence between libc and libgloss. */ + #undef LIB_SPEC +-#define LIB_SPEC "" ++#define LIB_SPEC "--start-group -lc %{!specs=nosys.specs:-lgloss} --end-group" ++ ++#undef LINK_SPEC ++#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC + + #undef STARTFILE_SPEC +-#define STARTFILE_SPEC "crti%O%s crtbegin%O%s" ++#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s" + + #undef ENDFILE_SPEC +-#define ENDFILE_SPEC "crtend%O%s crtn%O%s" ++#define ENDFILE_SPEC "crtend%O%s" + + #define NO_IMPLICIT_EXTERN_C 1 ++#undef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC "%{profile:-p}" ++ +diff --git a/gcc/config/loongarch/frame-header-opt.c b/gcc/config/loongarch/frame-header-opt.c +deleted file mode 100644 +index 86e5d423d..000000000 +--- a/gcc/config/loongarch/frame-header-opt.c ++++ /dev/null +@@ -1,292 +0,0 @@ +-/* Analyze functions to determine if callers need to allocate a frame header +- on the stack. The frame header is used by callees to save their arguments. +- This optimization is specific to TARGET_OLDABI targets. For TARGET_NEWABI +- targets, if a frame header is required, it is allocated by the callee. +- +- +- Copyright (C) 2015-2018 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify it +-under the terms of the GNU General Public License as published by the +-Free Software Foundation; either version 3, or (at your option) any +-later version. +- +-GCC is distributed in the hope that it will be useful, but WITHOUT +-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-for more details. +- +-You should have received a copy of the GNU General Public License +-along with GCC; see the file COPYING3. If not see +-. */ +- +- +-#define IN_TARGET_CODE 1 +- +-#include "config.h" +-#include "system.h" +-#include "context.h" +-#include "coretypes.h" +-#include "tree.h" +-#include "tree-core.h" +-#include "tree-pass.h" +-#include "target.h" +-#include "target-globals.h" +-#include "profile-count.h" +-#include "cfg.h" +-#include "cgraph.h" +-#include "function.h" +-#include "basic-block.h" +-#include "gimple.h" +-#include "gimple-iterator.h" +-#include "gimple-walk.h" +- +-static unsigned int frame_header_opt (void); +- +-namespace { +- +-const pass_data pass_data_ipa_frame_header_opt = +-{ +- IPA_PASS, /* type */ +- "frame-header-opt", /* name */ +- OPTGROUP_NONE, /* optinfo_flags */ +- TV_CGRAPHOPT, /* tv_id */ +- 0, /* properties_required */ +- 0, /* properties_provided */ +- 0, /* properties_destroyed */ +- 0, /* todo_flags_start */ +- 0, /* todo_flags_finish */ +-}; +- +-class pass_ipa_frame_header_opt : public ipa_opt_pass_d +-{ +-public: +- pass_ipa_frame_header_opt (gcc::context *ctxt) +- : ipa_opt_pass_d (pass_data_ipa_frame_header_opt, ctxt, +- NULL, /* generate_summary */ +- NULL, /* write_summary */ +- NULL, /* read_summary */ +- NULL, /* write_optimization_summary */ +- NULL, /* read_optimization_summary */ +- NULL, /* stmt_fixup */ +- 0, /* function_transform_todo_flags_start */ +- NULL, /* function_transform */ +- NULL) /* variable_transform */ +- {} +- +- /* opt_pass methods: */ +- virtual bool gate (function *) +- { +- /* This optimization has no affect if TARGET_NEWABI. If optimize +- is not at least 1 then the data needed for the optimization is +- not available and nothing will be done anyway. 
*/ +- return TARGET_OLDABI && flag_frame_header_optimization && optimize > 0; +- } +- +- virtual unsigned int execute (function *) { return frame_header_opt (); } +- +-}; // class pass_ipa_frame_header_opt +- +-} // anon namespace +- +-static ipa_opt_pass_d * +-make_pass_ipa_frame_header_opt (gcc::context *ctxt) +-{ +- return new pass_ipa_frame_header_opt (ctxt); +-} +- +-void +-loongarch_register_frame_header_opt (void) +-{ +- opt_pass *p = make_pass_ipa_frame_header_opt (g); +- struct register_pass_info f = { p, "comdats", 1, PASS_POS_INSERT_AFTER }; +- register_pass (&f); +-} +- +- +-/* Return true if it is certain that this is a leaf function. False if it is +- not a leaf function or if it is impossible to tell. */ +- +-static bool +-is_leaf_function (function *fn) +-{ +- basic_block bb; +- gimple_stmt_iterator gsi; +- +- /* If we do not have a cfg for this function be conservative and assume +- it is not a leaf function. */ +- if (fn->cfg == NULL) +- return false; +- +- FOR_EACH_BB_FN (bb, fn) +- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) +- if (is_gimple_call (gsi_stmt (gsi))) +- return false; +- return true; +-} +- +-/* Return true if this function has inline assembly code or if we cannot +- be certain that it does not. False if we know that there is no inline +- assembly. */ +- +-static bool +-has_inlined_assembly (function *fn) +-{ +- basic_block bb; +- gimple_stmt_iterator gsi; +- +- /* If we do not have a cfg for this function be conservative and assume +- it is may have inline assembly. */ +- if (fn->cfg == NULL) +- return true; +- +- FOR_EACH_BB_FN (bb, fn) +- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) +- if (gimple_code (gsi_stmt (gsi)) == GIMPLE_ASM) +- return true; +- +- return false; +-} +- +-/* Return true if this function will use the stack space allocated by its +- caller or if we cannot determine for certain that it does not. */ +- +-static bool +-needs_frame_header_p (function *fn) +-{ +- tree t; +- +- if (fn->decl == NULL) +- return true; +- +- if (fn->stdarg) +- return true; +- +- for (t = DECL_ARGUMENTS (fn->decl); t; t = TREE_CHAIN (t)) +- { +- if (!use_register_for_decl (t)) +- return true; +- +- /* Some 64-bit types may get copied to general registers using the frame +- header, see loongarch_output_64bit_xfer. Checking for SImode only may be +- overly restrictive but it is guaranteed to be safe. */ +- if (DECL_MODE (t) != SImode) +- return true; +- } +- +- return false; +-} +- +-/* Return true if the argument stack space allocated by function FN is used. +- Return false if the space is needed or if the need for the space cannot +- be determined. 
*/ +- +-static bool +-callees_functions_use_frame_header (function *fn) +-{ +- basic_block bb; +- gimple_stmt_iterator gsi; +- gimple *stmt; +- tree called_fn_tree; +- function *called_fn; +- +- if (fn->cfg == NULL) +- return true; +- +- FOR_EACH_BB_FN (bb, fn) +- { +- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) +- { +- stmt = gsi_stmt (gsi); +- if (is_gimple_call (stmt)) +- { +- called_fn_tree = gimple_call_fndecl (stmt); +- if (called_fn_tree != NULL) +- { +- called_fn = DECL_STRUCT_FUNCTION (called_fn_tree); +- if (called_fn == NULL +- || DECL_WEAK (called_fn_tree) +- || has_inlined_assembly (called_fn) +- || !is_leaf_function (called_fn) +- || !called_fn->machine->does_not_use_frame_header) +- return true; +- } +- else +- return true; +- } +- } +- } +- return false; +-} +- +-/* Set the callers_may_not_allocate_frame flag for any function which +- function FN calls because FN may not allocate a frame header. */ +- +-static void +-set_callers_may_not_allocate_frame (function *fn) +-{ +- basic_block bb; +- gimple_stmt_iterator gsi; +- gimple *stmt; +- tree called_fn_tree; +- function *called_fn; +- +- if (fn->cfg == NULL) +- return; +- +- FOR_EACH_BB_FN (bb, fn) +- { +- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) +- { +- stmt = gsi_stmt (gsi); +- if (is_gimple_call (stmt)) +- { +- called_fn_tree = gimple_call_fndecl (stmt); +- if (called_fn_tree != NULL) +- { +- called_fn = DECL_STRUCT_FUNCTION (called_fn_tree); +- if (called_fn != NULL) +- called_fn->machine->callers_may_not_allocate_frame = true; +- } +- } +- } +- } +- return; +-} +- +-/* Scan each function to determine those that need its frame headers. Perform +- a second scan to determine if the allocation can be skipped because none of +- their callees require the frame header. */ +- +-static unsigned int +-frame_header_opt () +-{ +- struct cgraph_node *node; +- function *fn; +- +- FOR_EACH_DEFINED_FUNCTION (node) +- { +- fn = node->get_fun (); +- if (fn != NULL) +- fn->machine->does_not_use_frame_header = !needs_frame_header_p (fn); +- } +- +- FOR_EACH_DEFINED_FUNCTION (node) +- { +- fn = node->get_fun (); +- if (fn != NULL) +- fn->machine->optimize_call_stack +- = !callees_functions_use_frame_header (fn) && !is_leaf_function (fn); +- } +- +- FOR_EACH_DEFINED_FUNCTION (node) +- { +- fn = node->get_fun (); +- if (fn != NULL && fn->machine->optimize_call_stack) +- set_callers_may_not_allocate_frame (fn); +- } +- +- return 0; +-} +diff --git a/gcc/config/loongarch/generic.md b/gcc/config/loongarch/generic.md +index 321b8e561..0f6eb3f42 100644 +--- a/gcc/config/loongarch/generic.md ++++ b/gcc/config/loongarch/generic.md +@@ -1,6 +1,8 @@ +-;; Generic DFA-based pipeline description for LARCH targets +-;; Copyright (C) 2004-2018 Free Software Foundation, Inc. +-;; ++;; Generic DFA-based pipeline description for LoongArch targets ++;; Copyright (C) 2020-2022 Free Software Foundation, Inc. ++;; Contributed by Loongson Co. Ltd. ++;; Based on MIPS target for GNU compiler. ++ + ;; This file is part of GCC. + + ;; GCC is free software; you can redistribute it and/or modify it +@@ -17,9 +19,16 @@ + ;; along with GCC; see the file COPYING3. If not see + ;; . + ++(define_automaton "alu,imuldiv") ++ ++(define_cpu_unit "alu" "alu") ++(define_cpu_unit "imuldiv" "imuldiv") + +-;; This file is derived from the old define_function_unit description. +-;; Each reservation can be overridden on a processor-by-processor basis. ++;; Ghost instructions produce no real code. 
++;; They exist purely to express an effect on dataflow. ++(define_insn_reservation "ghost" 0 ++ (eq_attr "type" "ghost") ++ "nothing") + + (define_insn_reservation "generic_alu" 1 + (eq_attr "type" "unknown,prefetch,prefetchx,condmove,const,arith, +@@ -43,7 +52,7 @@ + "alu") + + (define_insn_reservation "generic_imul" 17 +- (eq_attr "type" "imul,imul3") ++ (eq_attr "type" "imul") + "imuldiv*17") + + (define_insn_reservation "generic_fcvt" 1 +diff --git a/gcc/config/loongarch/genopt.sh b/gcc/config/loongarch/genopt.sh +deleted file mode 100644 +index 272aac51d..000000000 +--- a/gcc/config/loongarch/genopt.sh ++++ /dev/null +@@ -1,110 +0,0 @@ +-#!/bin/sh +-# Generate loongarch-tables.opt from the list of CPUs in loongarch-cpus.def. +-# Copyright (C) 2011-2018 Free Software Foundation, Inc. +-# +-# This file is part of GCC. +-# +-# GCC is free software; you can redistribute it and/or modify +-# it under the terms of the GNU General Public License as published by +-# the Free Software Foundation; either version 3, or (at your option) +-# any later version. +-# +-# GCC is distributed in the hope that it will be useful, +-# but WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-# GNU General Public License for more details. +-# +-# You should have received a copy of the GNU General Public License +-# along with GCC; see the file COPYING3. If not see +-# . +- +-cat <. +- +-Enum +-Name(loongarch_arch_opt_value) Type(int) +-Known LARCH CPUs (for use with the -march= and -mtune= options): +- +-EnumValue +-Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly +- +-EOF +- +-awk -F'[(, ]+' ' +-BEGIN { +- value = 0 +-} +- +-# Write an entry for a single string accepted as a -march= argument. +- +-function write_one_arch_value(name, value, flags) +-{ +- print "EnumValue" +- print "Enum(loongarch_arch_opt_value) String(" name ") Value(" value ")" flags +- print "" +-} +- +-# The logic for matching CPU name variants should be the same as in GAS. +- +-# Write an entry for a single string accepted as a -march= argument, +-# plus any variant with a final "000" replaced by "k". +- +-function write_arch_value_maybe_k(name, value, flags) +-{ +- write_one_arch_value(name, value, flags) +- if (name ~ "000$") { +- sub("000$", "k", name) +- write_one_arch_value(name, value, "") +- } +-} +- +-# Write all the entries for a -march= argument. In addition to +-# replacement of a final "000" with "k", an argument starting with +-# "vr", "rm" or "r" followed by a number, or just a plain number, +-# matches a plain number or "r" followed by a plain number. 
+- +-function write_all_arch_values(name, value) +-{ +- write_arch_value_maybe_k(name, value, " Canonical") +- cname = name +- if (cname ~ "^vr") { +- sub("^vr", "", cname) +- } else if (cname ~ "^rm") { +- sub("^rm", "", cname) +- } else if (cname ~ "^r") { +- sub("^r", "", cname) +- } +- if (cname ~ "^[0-9]") { +- if (cname != name) +- write_arch_value_maybe_k(cname, value, "") +- rname = "r" cname +- if (rname != name) +- write_arch_value_maybe_k(rname, value, "") +- } +-} +- +-/^LARCH_CPU/ { +- name = $2 +- gsub("\"", "", name) +- write_all_arch_values(name, value) +- value++ +-}' $1/loongarch-cpus.def +diff --git a/gcc/config/loongarch/genopts/genstr.sh b/gcc/config/loongarch/genopts/genstr.sh +new file mode 100755 +index 000000000..e895f7ec8 +--- /dev/null ++++ b/gcc/config/loongarch/genopts/genstr.sh +@@ -0,0 +1,104 @@ ++#!/bin/sh ++# A simple script that generates loongarch-str.h and loongarch.opt ++# from genopt/loongarch-optstr. ++# ++# Copyright (C) 2020-2022 Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify it under ++# the terms of the GNU General Public License as published by the Free ++# Software Foundation; either version 3, or (at your option) any later ++# version. ++# ++# GCC is distributed in the hope that it will be useful, but WITHOUT ++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++# License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++cd "$(dirname "$0")" ++ ++# Generate a header containing definitions from the string table. ++gen_defines() { ++ cat <. */ ++ ++#ifndef LOONGARCH_STR_H ++#define LOONGARCH_STR_H ++EOF ++ ++ sed -e '/^$/n' -e 's@#.*$@@' -e '/^$/d' \ ++ -e 's@^\([^ \t]\+\)[ \t]*\([^ \t]*\)@#define \1 "\2"@' \ ++ loongarch-strings ++ ++ echo ++ echo "#endif /* LOONGARCH_STR_H */" ++} ++ ++ ++# Substitute all "@@@@" to "" in loongarch.opt.in ++# according to the key-value pairs defined in loongarch-strings. ++ ++gen_options() { ++ ++ sed -e '/^$/n' -e 's@#.*$@@' -e '/^$/d' \ ++ -e 's@^\([^ \t]\+\)[ \t]*\([^ \t]*\)@\1="\2"@' \ ++ loongarch-strings | { \ ++ ++ # read the definitions ++ while read -r line; do ++ eval "$line" ++ done ++ ++ # print a header ++ cat << EOF ++; Generated by "genstr" from the template "loongarch.opt.in" ++; and definitions from "loongarch-strings". ++; ++; Please do not edit this file directly. ++; It will be automatically updated during a gcc build ++; if you change "loongarch.opt.in" or "loongarch-strings". ++; ++EOF ++ ++ # make the substitutions ++ sed -e 's@"@\\"@g' -e 's/@@\([^@]\+\)@@/${\1}/g' loongarch.opt.in | \ ++ while read -r line; do ++ eval "echo \"$line\"" ++ done ++ } ++} ++ ++main() { ++ case "$1" in ++ header) gen_defines;; ++ opt) gen_options;; ++ *) echo "Unknown Command: \"$1\". Available: header, opt"; exit 1;; ++ esac ++} ++ ++main "$@" +diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings +new file mode 100644 +index 000000000..d79e2e791 +--- /dev/null ++++ b/gcc/config/loongarch/genopts/loongarch-strings +@@ -0,0 +1,68 @@ ++# Defines the key strings for LoongArch compiler options. ++# ++# Copyright (C) 2020-2022 Free Software Foundation, Inc. ++# ++# This file is part of GCC. 
++# ++# GCC is free software; you can redistribute it and/or modify it under ++# the terms of the GNU General Public License as published by the Free ++# Software Foundation; either version 3, or (at your option) any later ++# version. ++# ++# GCC is distributed in the hope that it will be useful, but WITHOUT ++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++# License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# -march= / -mtune= ++OPTSTR_ARCH arch ++OPTSTR_TUNE tune ++ ++STR_CPU_NATIVE native ++STR_CPU_ABI_DEFAULT abi-default ++STR_CPU_LOONGARCH64 loongarch64 ++STR_CPU_LA464 la464 ++STR_CPU_LA364 la364 ++STR_CPU_LA264 la264 ++STR_CPU_LA664 la664 ++ ++# Base architecture ++STR_ISA_BASE_LA64V100 la64 ++ ++# -mfpu ++OPTSTR_ISA_EXT_FPU fpu ++STR_NONE none ++STR_ISA_EXT_FPU0 0 ++STR_ISA_EXT_FPU32 32 ++STR_ISA_EXT_FPU64 64 ++ ++OPTSTR_SOFT_FLOAT soft-float ++OPTSTR_SINGLE_FLOAT single-float ++OPTSTR_DOUBLE_FLOAT double-float ++ ++# SIMD extensions ++OPTSTR_ISA_EXT_SIMD simd ++STR_ISA_EXT_LSX lsx ++STR_ISA_EXT_LASX lasx ++ ++# -mabi= ++OPTSTR_ABI_BASE abi ++STR_ABI_BASE_LP64D lp64d ++STR_ABI_BASE_LP64F lp64f ++STR_ABI_BASE_LP64S lp64s ++STR_ABI_BASE_LP64 lp64 ++ ++# ABI extension types ++STR_ABI_EXT_BASE base ++ ++# -mcmodel= ++OPTSTR_CMODEL cmodel ++STR_CMODEL_NORMAL normal ++STR_CMODEL_TINY tiny ++STR_CMODEL_TS tiny-static ++STR_CMODEL_LARGE large ++STR_CMODEL_EXTREME extreme +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +new file mode 100644 +index 000000000..463dfec77 +--- /dev/null ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -0,0 +1,242 @@ ++; Generated by "genstr" from the template "loongarch.opt.in" ++; and definitions from "loongarch-strings". ++; ++; Copyright (C) 2020-2022 Free Software Foundation, Inc. ++; ++; This file is part of GCC. ++; ++; GCC is free software; you can redistribute it and/or modify it under ++; the terms of the GNU General Public License as published by the Free ++; Software Foundation; either version 3, or (at your option) any later ++; version. ++; ++; GCC is distributed in the hope that it will be useful, but WITHOUT ++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++; License for more details. ++; ++; You should have received a copy of the GNU General Public License ++; along with GCC; see the file COPYING3. If not see ++; . ++; ++ ++HeaderInclude ++config/loongarch/loongarch-opts.h ++ ++HeaderInclude ++config/loongarch/loongarch-str.h ++ ++TargetVariable ++unsigned int recip_mask = 0 ++ ++; ISA related options ++;; Base ISA ++Enum ++Name(isa_base) Type(int) ++Basic ISAs of LoongArch: ++ ++EnumValue ++Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100) ++ ++;; ISA extensions / adjustments ++Enum ++Name(isa_ext_fpu) Type(int) ++FPU types of LoongArch: ++ ++EnumValue ++Enum(isa_ext_fpu) String(@@STR_NONE@@) Value(ISA_EXT_NONE) ++ ++EnumValue ++Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU32@@) Value(ISA_EXT_FPU32) ++ ++EnumValue ++Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64) ++ ++m@@OPTSTR_ISA_EXT_FPU@@= ++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) ++-m@@OPTSTR_ISA_EXT_FPU@@=FPU Generate code for the given FPU. 
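[Illustration only, not part of the patch: gen_options () in genstr.sh rewrites each
@@KEY@@ placeholder of loongarch.opt.in into a shell expansion and evaluates it against
the bindings read from loongarch-strings, so with OPTSTR_ISA_EXT_FPU bound to "fpu" the
record above is emitted into loongarch.opt as:

  mfpu=
  Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET)
  -mfpu=FPU	Generate code for the given FPU.

and gen_defines () emits the matching '#define OPTSTR_ISA_EXT_FPU "fpu"' into
loongarch-str.h, keeping the option spelling and the C sources in sync.]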
++ ++m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@ ++Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_NONE@@) ++ ++m@@OPTSTR_SOFT_FLOAT@@ ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SINGLE_FLOAT@@) ++Prevent the use of all hardware floating-point instructions. ++ ++m@@OPTSTR_SINGLE_FLOAT@@ ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_DOUBLE_FLOAT@@) ++Restrict the use of hardware floating-point instructions to 32-bit operations. ++ ++m@@OPTSTR_DOUBLE_FLOAT@@ ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SOFT_FLOAT@@) ++Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations. ++ ++Enum ++Name(isa_ext_simd) Type(int) ++SIMD extension levels of LoongArch: ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_NONE@@) Value(ISA_EXT_NONE) ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LSX@@) Value(ISA_EXT_SIMD_LSX) ++ ++EnumValue ++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX) ++ ++m@@OPTSTR_ISA_EXT_SIMD@@= ++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) ++-m@@OPTSTR_ISA_EXT_SIMD@@=SIMD Generate code for the given SIMD extension. ++ ++m@@STR_ISA_EXT_LSX@@ ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch SIMD Extension (LSX, 128-bit). ++ ++m@@STR_ISA_EXT_LASX@@ ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch Advanced SIMD Extension (LASX, 256-bit). ++ ++;; Base target models (implies ISA & tune parameters) ++Enum ++Name(cpu_type) Type(int) ++LoongArch CPU types: ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_NATIVE@@) Value(CPU_NATIVE) ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_ABI_DEFAULT@@) Value(CPU_ABI_DEFAULT) ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64) ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_LA664@@) Value(CPU_LA664) ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464) ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_LA264@@) Value(CPU_LA264) ++ ++EnumValue ++Enum(cpu_type) String(@@STR_CPU_LA364@@) Value(CPU_LA364) ++ ++m@@OPTSTR_ARCH@@= ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) ++-m@@OPTSTR_ARCH@@=PROCESSOR Generate code for the given PROCESSOR ISA. ++ ++m@@OPTSTR_TUNE@@= ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) ++-m@@OPTSTR_TUNE@@=PROCESSOR Generate optimized code for PROCESSOR. ++ ++ ++; ABI related options ++; (ISA constraints on ABI are handled dynamically) ++ ++;; Base ABI ++Enum ++Name(abi_base) Type(int) ++Base ABI types for LoongArch: ++ ++EnumValue ++Enum(abi_base) String(@@STR_ABI_BASE_LP64D@@) Value(ABI_BASE_LP64D) ++ ++EnumValue ++Enum(abi_base) String(@@STR_ABI_BASE_LP64F@@) Value(ABI_BASE_LP64F) ++ ++EnumValue ++Enum(abi_base) String(@@STR_ABI_BASE_LP64S@@) Value(ABI_BASE_LP64S) ++ ++m@@OPTSTR_ABI_BASE@@= ++Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET) ++-m@@OPTSTR_ABI_BASE@@=BASEABI Generate code that conforms to the given BASEABI. ++ ++;; Legacy option: -mabi=lp64 ++m@@OPTSTR_ABI_BASE@@=@@STR_ABI_BASE_LP64@@ ++Target RejectNegative Mask(LP64) ++-m@@OPTSTR_ABI_BASE@@=@@STR_ABI_BASE_LP64@@ Legacy option that enables the lp64 integer ABI. 
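[Illustration only: after substitution, the records above make the driver accept
spellings such as the following hypothetical invocation,

  gcc -mabi=lp64d -mfpu=64 -march=la464 -msimd=lasx -mcmodel=normal test.c

while -mabi=lp64 is kept only as a legacy spelling that enables the lp64 integer ABI.]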
++ ++;; ABI Extension ++Variable ++int la_opt_abi_ext = M_OPT_UNSET ++ ++mbranch-cost= ++Target RejectNegative Joined UInteger Var(loongarch_branch_cost) ++-mbranch-cost=COST Set the cost of branches to roughly COST instructions. ++ ++mvecarg ++Target Report Var(TARGET_VECARG) Init(1) ++Target pass vect arg uses vector register. ++ ++mmemvec-cost= ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++mmemvec-cost=COST Set the cost of vector memory access instructions. ++ ++mveclibabi= ++Target RejectNegative Joined Var(loongarch_veclibabi_name) ++Vector library ABI to use. ++ ++mstackrealign ++Target Var(loongarch_stack_realign) Init(1) ++Realign stack in prologue. ++ ++mforce-drap ++Target Var(loongarch_force_drap) Init(0) ++Always use Dynamic Realigned Argument Pointer (DRAP) to realign stack. ++ ++mcheck-zero-division ++Target Mask(CHECK_ZERO_DIV) ++Trap on integer divide by zero. ++ ++mcond-move-int ++Target Var(TARGET_COND_MOVE_INT) Init(1) ++Conditional moves for integral are enabled. ++ ++mcond-move-float ++Target Var(TARGET_COND_MOVE_FLOAT) Init(1) ++Conditional moves for float are enabled. ++ ++mmemcpy ++Target Mask(MEMCPY) ++Prevent optimizing block moves, which is also the default behavior of -Os. ++ ++mstrict-align ++Target Var(TARGET_STRICT_ALIGN) Init(0) ++Do not generate unaligned memory accesses. ++ ++mmax-inline-memcpy-size= ++Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) ++-mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. ++ ++mrecip ++Target Report RejectNegative Var(loongarch_recip) ++Generate reciprocals instead of divss and sqrtss. ++ ++mrecip= ++Target Report RejectNegative Joined Var(loongarch_recip_name) ++Control generation of reciprocal estimates. ++ ++; The code model option names for -mcmodel. ++Enum ++Name(cmodel) Type(int) ++The code model option names for -mcmodel: ++ ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_NORMAL@@) Value(CMODEL_NORMAL) ++ ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_TINY@@) Value(CMODEL_TINY) ++ ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_TS@@) Value(CMODEL_TINY_STATIC) ++ ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_LARGE@@) Value(CMODEL_LARGE) ++ ++EnumValue ++Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME) ++ ++mcmodel= ++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) ++Specify the code model. +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index 1304e2e97..603aed5a2 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -1,4 +1,5 @@ +-/* Definitions for LARCH systems using GNU userspace. ++/* Definitions for LoongArch systems using GNU (glibc-based) userspace, ++ or other userspace with libc derived from glibc. + Copyright (C) 1998-2018 Free Software Foundation, Inc. + + This file is part of GCC. +@@ -17,116 +18,66 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + ++/* Define the size of the wide character type. */ + #undef WCHAR_TYPE + #define WCHAR_TYPE "int" + + #undef WCHAR_TYPE_SIZE + #define WCHAR_TYPE_SIZE 32 + +-#undef ASM_DECLARE_OBJECT_NAME +-#define ASM_DECLARE_OBJECT_NAME loongarch_declare_object_name + +-/* If we don't set MASK_ABICALLS, we can't default to PIC. */ +-/* #undef TARGET_DEFAULT */ +-/* #define TARGET_DEFAULT MASK_ABICALLS */ ++/* GNU-specific SPEC definitions. 
*/ ++#define GNU_USER_LINK_EMULATION "elf" ABI_GRLEN_SPEC "loongarch" + +-#define TARGET_OS_CPP_BUILTINS() \ +- do { \ +- GNU_USER_TARGET_OS_CPP_BUILTINS(); \ +- /* The GNU C++ standard library requires this. */ \ +- if (c_dialect_cxx ()) \ +- builtin_define ("_GNU_SOURCE"); \ +- } while (0) ++#undef GLIBC_DYNAMIC_LINKER ++#define GLIBC_DYNAMIC_LINKER \ ++ "/lib" ABI_GRLEN_SPEC "/" \ ++ "%{mabi=lp64d:ld.so.1;" \ ++ "mabi=lp64s:ld-linux-loongarch-lp64s.so.1;" \ ++ "mabi=lp64f:ld-linux-loongarch-lp64f.so.1}" + +-#undef SUBTARGET_CPP_SPEC +-#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" +- +-/* A standard GNU/Linux mapping. On most targets, it is included in +- CC1_SPEC itself by config/linux.h, but loongarch.h overrides CC1_SPEC +- and provides this hook instead. */ +-#undef SUBTARGET_CC1_SPEC +-#define SUBTARGET_CC1_SPEC GNU_USER_TARGET_CC1_SPEC +- +-/* -G is incompatible with -KPIC which is the default, so only allow objects +- in the small data section if the user explicitly asks for it. */ +-#undef LARCH_DEFAULT_GVALUE +-#define LARCH_DEFAULT_GVALUE 0 ++#undef MUSL_DYNAMIC_LINKER ++#define MUSL_DYNAMIC_LINKER \ ++ "/lib" ABI_GRLEN_SPEC "/ld-musl-loongarch-" ABI_SPEC ".so.1" + + #undef GNU_USER_TARGET_LINK_SPEC +-#define GNU_USER_TARGET_LINK_SPEC "\ +- %{G*} %{EB} %{EL} %{shared} \ +- %{!shared: \ +- %{!static: \ +- %{rdynamic:-export-dynamic} \ +- %{mabi=lp32: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP32 "} \ +- %{mabi=lp64: -dynamic-linker " GNU_USER_DYNAMIC_LINKERLP64 "}} \ +- %{static}} \ +- %{mabi=lp32:-m" GNU_USER_LINK_EMULATION32 "} \ +- %{mabi=lp64:-m" GNU_USER_LINK_EMULATION64 "}" ++#define GNU_USER_TARGET_LINK_SPEC \ ++ "%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \ ++ "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \ ++ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}" + +-#undef LINK_SPEC +-#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC + +-/* The LARCH assembler has different syntax for .set. We set it to +- .dummy to trap any errors. */ +-#undef SET_ASM_OP +-#define SET_ASM_OP "\t.dummy\t" +- +-#undef ASM_OUTPUT_DEF +-#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ +- do { \ +- fputc ( '\t', FILE); \ +- assemble_name (FILE, LABEL1); \ +- fputs ( " = ", FILE); \ +- assemble_name (FILE, LABEL2); \ +- fputc ( '\n', FILE); \ +- } while (0) +- +-/* The glibc _mcount stub will save $v0 for us. Don't mess with saving +- it, since ASM_OUTPUT_REG_PUSH/ASM_OUTPUT_REG_POP do not work in the +- presence of $gp-relative calls. */ +-#undef ASM_OUTPUT_REG_PUSH +-#undef ASM_OUTPUT_REG_POP ++/* Similar to standard Linux, but adding -ffast-math support. */ ++#undef GNU_USER_TARGET_MATHFILE_SPEC ++#define GNU_USER_TARGET_MATHFILE_SPEC \ ++ "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}" + + #undef LIB_SPEC + #define LIB_SPEC GNU_USER_TARGET_LIB_SPEC + +-#define NO_SHARED_SPECS "" +- +-/* -march=native handling only makes sense with compiler running on +- a LARCH chip. */ +-#if defined(__loongarch__) +-extern const char *host_detect_local_cpu (int argc, const char **argv); +-# define EXTRA_SPEC_FUNCTIONS \ +- { "local_cpu_detect", host_detect_local_cpu }, +- +-# define MARCH_MTUNE_NATIVE_SPECS \ +- " %{march=native:%. ++ ++;; Uncomment the following line to output automata for debugging. ++;; (automata_option "v") ++ ++;; Automaton for integer instructions. ++(define_automaton "la464_a_alu") ++ ++;; Automaton for floating-point instructions. ++(define_automaton "la464_a_falu") ++ ++;; Automaton for memory operations. 
++(define_automaton "la464_a_mem") ++ ++;; Describe the resources. ++ ++(define_cpu_unit "la464_alu1" "la464_a_alu") ++(define_cpu_unit "la464_alu2" "la464_a_alu") ++(define_cpu_unit "la464_mem1" "la464_a_mem") ++(define_cpu_unit "la464_mem2" "la464_a_mem") ++(define_cpu_unit "la464_falu1" "la464_a_falu") ++(define_cpu_unit "la464_falu2" "la464_a_falu") ++ ++;; Describe instruction reservations. ++ ++(define_insn_reservation "la464_arith" 1 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "arith,clz,const,logical, ++ move,nop,shift,signext,slt")) ++ "la464_alu1 | la464_alu2") ++ ++(define_insn_reservation "la464_branch" 1 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "branch,jump,call,condmove,trap")) ++ "la464_alu1 | la464_alu2") ++ ++(define_insn_reservation "la464_imul" 7 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "imul")) ++ "la464_alu1 | la464_alu2") ++ ++(define_insn_reservation "la464_idiv_si" 12 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (and (eq_attr "type" "idiv") ++ (eq_attr "mode" "SI"))) ++ "la464_alu1 | la464_alu2") ++ ++(define_insn_reservation "la464_idiv_di" 25 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (and (eq_attr "type" "idiv") ++ (eq_attr "mode" "DI"))) ++ "la464_alu1 | la464_alu2") ++ ++(define_insn_reservation "la464_load" 4 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "load")) ++ "la464_mem1 | la464_mem2") ++ ++(define_insn_reservation "la464_gpr_fp" 16 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "mftg,mgtf")) ++ "la464_mem1") ++ ++(define_insn_reservation "la464_fpload" 4 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "fpload")) ++ "la464_mem1 | la464_mem2") ++ ++(define_insn_reservation "la464_prefetch" 0 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "prefetch,prefetchx")) ++ "la464_mem1 | la464_mem2") ++ ++(define_insn_reservation "la464_store" 0 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "store,fpstore,fpidxstore")) ++ "la464_mem1 | la464_mem2") ++ ++(define_insn_reservation "la464_fadd" 4 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "fadd,fmul,fmadd")) ++ "la464_falu1 | la464_falu2") ++ ++(define_insn_reservation "la464_fcmp" 2 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "fabs,fcmp,fmove,fneg")) ++ "la464_falu1 | la464_falu2") ++ ++(define_insn_reservation "la464_fcvt" 4 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "fcvt")) ++ "la464_falu1 | la464_falu2") ++ ++(define_insn_reservation "la464_fdiv_sf" 12 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") ++ (eq_attr "mode" "SF"))) ++ "la464_falu1 | la464_falu2") ++ ++(define_insn_reservation "la464_fdiv_df" 19 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt") ++ (eq_attr "mode" "DF"))) ++ "la464_falu1 | la464_falu2") ++ ++;; Force single-dispatch for unknown or multi. 
++(define_insn_reservation "la464_unknown" 1 ++ (and (match_test "TARGET_uARCH_LA464 || TARGET_uARCH_LA664") ++ (eq_attr "type" "unknown,multi,atomic,syncloop")) ++ "la464_alu1 + la464_alu2 + la464_falu1 ++ + la464_falu2 + la464_mem1 + la464_mem2") ++ ++;; End of DFA-based pipeline description for la464 +diff --git a/gcc/config/loongarch/larchintrin.h b/gcc/config/loongarch/larchintrin.h +index c649bf3f4..8e26ed6f0 100644 +--- a/gcc/config/loongarch/larchintrin.h ++++ b/gcc/config/loongarch/larchintrin.h +@@ -1,384 +1,353 @@ + /* Intrinsics for LoongArch BASE operations. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. + +- Copyright (C) 2019 Free Software Foundation, Inc. +- Contributed by xuchenghua@loongson.cn. ++This file is part of GCC. + +- This file is part of GCC. ++GCC is free software; you can redistribute it and/or modify it ++under the terms of the GNU General Public License as published ++by the Free Software Foundation; either version 3, or (at your ++option) any later version. + +- GCC is free software; you can redistribute it and/or modify it +- under the terms of the GNU General Public License as published +- by the Free Software Foundation; either version 3, or (at your +- option) any later version. ++GCC is distributed in the hope that it will be useful, but WITHOUT ++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++License for more details. + +- GCC is distributed in the hope that it will be useful, but WITHOUT +- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +- License for more details. ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. + +- Under Section 7 of GPL version 3, you are granted additional +- permissions described in the GCC Runtime Library Exception, version +- 3.1, as published by the Free Software Foundation. +- +- You should have received a copy of the GNU General Public License and +- a copy of the GCC Runtime Library Exception along with this program; +- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +- . */ ++You should have received a copy of the GNU General Public License and ++a copy of the GCC Runtime Library Exception along with this program; ++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++. 
*/ + + #ifndef _GCC_LOONGARCH_BASE_INTRIN_H + #define _GCC_LOONGARCH_BASE_INTRIN_H + + #ifdef __cplusplus +-extern "C"{ ++extern "C" { + #endif + +-typedef struct drdtime{ +- unsigned long dvalue; +- unsigned long dtimeid; ++typedef struct drdtime ++{ ++ unsigned long dvalue; ++ unsigned long dtimeid; + } __drdtime_t; + +-typedef struct rdtime{ +- unsigned int value; +- unsigned int timeid; ++typedef struct rdtime ++{ ++ unsigned int value; ++ unsigned int timeid; + } __rdtime_t; + + #ifdef __loongarch64 +-extern __inline __drdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_rdtime_d (void) ++extern __inline __drdtime_t ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__rdtime_d (void) + { +- __drdtime_t drdtime; ++ __drdtime_t __drdtime; + __asm__ volatile ( + "rdtime.d\t%[val],%[tid]\n\t" +- : [val]"=&r"(drdtime.dvalue),[tid]"=&r"(drdtime.dtimeid) +- : +- ); +- return drdtime; ++ : [val]"=&r"(__drdtime.dvalue),[tid]"=&r"(__drdtime.dtimeid) ++ :); ++ return __drdtime; + } +-#define __rdtime_d __builtin_loongarch_rdtime_d + #endif + +-extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_rdtimeh_w (void) ++extern __inline __rdtime_t ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__rdtimeh_w (void) + { +- __rdtime_t rdtime; ++ __rdtime_t __rdtime; + __asm__ volatile ( + "rdtimeh.w\t%[val],%[tid]\n\t" +- : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) +- : +- ); +- return rdtime; ++ : [val]"=&r"(__rdtime.value),[tid]"=&r"(__rdtime.timeid) ++ :); ++ return __rdtime; + } +-#define __rdtimel_w __builtin_loongarch_rdtimel_w + +-extern __inline __rdtime_t __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_rdtimel_w (void) ++extern __inline __rdtime_t ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__rdtimel_w (void) + { +- __rdtime_t rdtime; ++ __rdtime_t __rdtime; + __asm__ volatile ( + "rdtimel.w\t%[val],%[tid]\n\t" +- : [val]"=&r"(rdtime.value),[tid]"=&r"(rdtime.timeid) +- : +- ); +- return rdtime; ++ : [val]"=&r"(__rdtime.value),[tid]"=&r"(__rdtime.timeid) ++ :); ++ return __rdtime; + } +-#define __rdtimeh_w __builtin_loongarch_rdtimeh_w +- +-/* Assembly instruction format: rj, fcsr */ +-/* Data types in instruction templates: USI, UQI */ +-#define __movfcsr2gr(/*ui5*/_1) __builtin_loongarch_movfcsr2gr((_1)); +- +-/* Assembly instruction format: 0, fcsr, rj */ +-/* Data types in instruction templates: VOID, UQI, USI */ +-#define __movgr2fcsr(/*ui5*/ _1, _2) __builtin_loongarch_movgr2fcsr((unsigned short)_1, (unsigned int)_2); +- +-#ifdef __loongarch32 +-/* Assembly instruction format: ui5, rj, si12 */ +-/* Data types in instruction templates: VOID, USI, USI, SI */ +-#define __cacop(/*ui5*/ _1, /*unsigned int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_cacop((_1), (unsigned int)(_2), (_3))) +-#elif defined __loongarch64 +-/* Assembly instruction format: ui5, rj, si12 */ +-/* Data types in instruction templates: VOID, USI, UDI, SI */ +-#define __dcacop(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) ((void)__builtin_loongarch_dcacop((_1), (unsigned long int)(_2), (_3))) ++ ++/* Assembly instruction format: rj, fcsr. */ ++/* Data types in instruction templates: USI, UQI. */ ++#define __movfcsr2gr(/*ui5*/ _1) __builtin_loongarch_movfcsr2gr ((_1)); ++ ++/* Assembly instruction format: fcsr, rj. */ ++/* Data types in instruction templates: VOID, UQI, USI. 
*/ ++#define __movgr2fcsr(/*ui5*/ _1, _2) \ ++ __builtin_loongarch_movgr2fcsr ((_1), (unsigned int) _2); ++ ++#if defined __loongarch64 ++/* Assembly instruction format: ui5, rj, si12. */ ++/* Data types in instruction templates: VOID, USI, UDI, SI. */ ++#define __cacop_d(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) \ ++ ((void) __builtin_loongarch_cacop_d ((_1), (unsigned long int) (_2), (_3))) + #else +-# error "Don't support this ABI." ++#error "Unsupported ABI." + #endif + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: USI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-unsigned int __cpucfg(unsigned int _1) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: USI, USI. */ ++extern __inline unsigned int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__cpucfg (unsigned int _1) + { +- return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1); ++ return (unsigned int) __builtin_loongarch_cpucfg ((unsigned int) _1); + } + + #ifdef __loongarch64 +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: DI, DI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-void __asrtle_d(long int _1, long int _2) ++/* Assembly instruction format: rj, rk. */ ++/* Data types in instruction templates: DI, DI. */ ++extern __inline void ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__asrtle_d (long int _1, long int _2) + { +- __builtin_loongarch_asrtle_d((long int)_1, (long int)_2); ++ __builtin_loongarch_asrtle_d ((long int) _1, (long int) _2); + } + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: DI, DI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-void __asrtgt_d(long int _1, long int _2) ++/* Assembly instruction format: rj, rk. */ ++/* Data types in instruction templates: DI, DI. */ ++extern __inline void ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__asrtgt_d (long int _1, long int _2) + { +- __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2); ++ __builtin_loongarch_asrtgt_d ((long int) _1, (long int) _2); + } + #endif + +-#ifdef __loongarch32 +-/* Assembly instruction format: rd, rj, ui5 */ +-/* Data types in instruction templates: SI, SI, UQI */ +-#define __lddir(/*int*/ _1, /*ui5*/ _2) ((int)__builtin_loongarch_lddir((int)(_1), (_2))) +-#elif defined __loongarch64 +-/* Assembly instruction format: rd, rj, ui5 */ +-/* Data types in instruction templates: DI, DI, UQI */ +-#define __dlddir(/*long int*/ _1, /*ui5*/ _2) ((long int)__builtin_loongarch_dlddir((long int)(_1), (_2))) ++#if defined __loongarch64 ++/* Assembly instruction format: rd, rj, ui5. */ ++/* Data types in instruction templates: DI, DI, UQI. */ ++#define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \ ++ ((long int) __builtin_loongarch_lddir_d ((long int) (_1), (_2))) + #else +-# error "Don't support this ABI." ++#error "Unsupported ABI." 
+ #endif + +-#ifdef __loongarch32 +-/* Assembly instruction format: rj, ui5 */ +-/* Data types in instruction templates: VOID, SI, UQI */ +-#define __ldpte(/*int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_ldpte((int)(_1), (_2))) +-#elif defined __loongarch64 +-/* Assembly instruction format: rj, ui5 */ +-/* Data types in instruction templates: VOID, DI, UQI */ +-#define __dldpte(/*long int*/ _1, /*ui5*/ _2) ((void)__builtin_loongarch_dldpte((long int)(_1), (_2))) ++#if defined __loongarch64 ++/* Assembly instruction format: rj, ui5. */ ++/* Data types in instruction templates: VOID, DI, UQI. */ ++#define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \ ++ ((void) __builtin_loongarch_ldpte_d ((long int) (_1), (_2))) + #else +-# error "Don't support this ABI." ++#error "Unsupported ABI." + #endif + +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, QI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crc_w_b_w(char _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, QI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crc_w_b_w (char _1, int _2) + { +- return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2); ++ return (int) __builtin_loongarch_crc_w_b_w ((char) _1, (int) _2); + } + +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, HI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crc_w_h_w(short _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, HI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crc_w_h_w (short _1, int _2) + { +- return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2); ++ return (int) __builtin_loongarch_crc_w_h_w ((short) _1, (int) _2); + } + +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, SI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crc_w_w_w(int _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, SI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crc_w_w_w (int _1, int _2) + { +- return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2); ++ return (int) __builtin_loongarch_crc_w_w_w ((int) _1, (int) _2); + } + + #ifdef __loongarch64 +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, DI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crc_w_d_w(long int _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, DI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crc_w_d_w (long int _1, int _2) + { +- return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2); ++ return (int) __builtin_loongarch_crc_w_d_w ((long int) _1, (int) _2); + } + #endif + +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, QI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crcc_w_b_w(char _1, int _2) ++/* Assembly instruction format: rd, rj, rk. 
*/ ++/* Data types in instruction templates: SI, QI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crcc_w_b_w (char _1, int _2) + { +- return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2); ++ return (int) __builtin_loongarch_crcc_w_b_w ((char) _1, (int) _2); + } + +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, HI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crcc_w_h_w(short _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, HI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crcc_w_h_w (short _1, int _2) + { +- return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2); ++ return (int) __builtin_loongarch_crcc_w_h_w ((short) _1, (int) _2); + } + +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, SI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crcc_w_w_w(int _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, SI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crcc_w_w_w (int _1, int _2) + { +- return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2); ++ return (int) __builtin_loongarch_crcc_w_w_w ((int) _1, (int) _2); + } + + #ifdef __loongarch64 +-/* Assembly instruction format: rd, rj, rk */ +-/* Data types in instruction templates: SI, DI, SI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-int __crcc_w_d_w(long int _1, int _2) ++/* Assembly instruction format: rd, rj, rk. */ ++/* Data types in instruction templates: SI, DI, SI. */ ++extern __inline int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__crcc_w_d_w (long int _1, int _2) + { +- return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2); ++ return (int) __builtin_loongarch_crcc_w_d_w ((long int) _1, (int) _2); + } + #endif + +-/* Assembly instruction format: rd, ui14 */ +-/* Data types in instruction templates: USI, USI */ +-#define __csrrd(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd((_1))) ++/* Assembly instruction format: rd, ui14. */ ++/* Data types in instruction templates: USI, USI. */ ++#define __csrrd_w(/*ui14*/ _1) \ ++ ((unsigned int) __builtin_loongarch_csrrd_w ((_1))) + +-/* Assembly instruction format: rd, ui14 */ +-/* Data types in instruction templates: USI, USI, USI */ +-#define __csrwr(/*unsigned int*/ _1, /*ui14*/ _2) ((unsigned int)__builtin_loongarch_csrwr((unsigned int)(_1), (_2))) ++/* Assembly instruction format: rd, ui14. */ ++/* Data types in instruction templates: USI, USI, USI. */ ++#define __csrwr_w(/*unsigned int*/ _1, /*ui14*/ _2) \ ++ ((unsigned int) __builtin_loongarch_csrwr_w ((unsigned int) (_1), (_2))) + +-/* Assembly instruction format: rd, rj, ui14 */ +-/* Data types in instruction templates: USI, USI, USI, USI */ +-#define __csrxchg(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) ((unsigned int)__builtin_loongarch_csrxchg((unsigned int)(_1), (unsigned int)(_2), (_3))) ++/* Assembly instruction format: rd, rj, ui14. */ ++/* Data types in instruction templates: USI, USI, USI, USI. 
*/ ++#define __csrxchg_w(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) \ ++ ((unsigned int) __builtin_loongarch_csrxchg_w ((unsigned int) (_1), \ ++ (unsigned int) (_2), (_3))) + + #ifdef __loongarch64 +-/* Assembly instruction format: rd, ui14 */ +-/* Data types in instruction templates: UDI, USI */ +-#define __dcsrrd(/*ui14*/ _1) ((unsigned long int)__builtin_loongarch_dcsrrd((_1))) +- +-/* Assembly instruction format: rd, ui14 */ +-/* Data types in instruction templates: UDI, UDI, USI */ +-#define __dcsrwr(/*unsigned long int*/ _1, /*ui14*/ _2) ((unsigned long int)__builtin_loongarch_dcsrwr((unsigned long int)(_1), (_2))) +- +-/* Assembly instruction format: rd, rj, ui14 */ +-/* Data types in instruction templates: UDI, UDI, UDI, USI */ +-#define __dcsrxchg(/*unsigned long int*/ _1, /*unsigned long int*/ _2, /*ui14*/ _3) ((unsigned long int)__builtin_loongarch_dcsrxchg((unsigned long int)(_1), (unsigned long int)(_2), (_3))) ++/* Assembly instruction format: rd, ui14. */ ++/* Data types in instruction templates: UDI, USI. */ ++#define __csrrd_d(/*ui14*/ _1) \ ++ ((unsigned long int) __builtin_loongarch_csrrd_d ((_1))) ++ ++/* Assembly instruction format: rd, ui14. */ ++/* Data types in instruction templates: UDI, UDI, USI. */ ++#define __csrwr_d(/*unsigned long int*/ _1, /*ui14*/ _2) \ ++ ((unsigned long int) __builtin_loongarch_csrwr_d ((unsigned long int) (_1), \ ++ (_2))) ++ ++/* Assembly instruction format: rd, rj, ui14. */ ++/* Data types in instruction templates: UDI, UDI, UDI, USI. */ ++#define __csrxchg_d(/*unsigned long int*/ _1, /*unsigned long int*/ _2, \ ++ /*ui14*/ _3) \ ++ ((unsigned long int) __builtin_loongarch_csrxchg_d ( \ ++ (unsigned long int) (_1), (unsigned long int) (_2), (_3))) + #endif + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: UQI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-unsigned char __iocsrrd_b(unsigned int _1) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: UQI, USI. */ ++extern __inline unsigned char ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrrd_b (unsigned int _1) + { +- return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1); ++ return (unsigned char) __builtin_loongarch_iocsrrd_b ((unsigned int) _1); + } + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: UHI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-unsigned short __iocsrrd_h(unsigned int _1) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: UHI, USI. */ ++extern __inline unsigned char ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrrd_h (unsigned int _1) + { +- return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1); ++ return (unsigned short) __builtin_loongarch_iocsrrd_h ((unsigned int) _1); + } + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: USI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-unsigned int __iocsrrd_w(unsigned int _1) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: USI, USI. 
*/ ++extern __inline unsigned int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrrd_w (unsigned int _1) + { +- return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1); ++ return (unsigned int) __builtin_loongarch_iocsrrd_w ((unsigned int) _1); + } + + #ifdef __loongarch64 +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: UDI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-unsigned long int __iocsrrd_d(unsigned int _1) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: UDI, USI. */ ++extern __inline unsigned long int ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrrd_d (unsigned int _1) + { +- return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1); ++ return (unsigned long int) __builtin_loongarch_iocsrrd_d ((unsigned int) _1); + } + #endif + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: VOID, UQI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-void __iocsrwr_b(unsigned char _1, unsigned int _2) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: VOID, UQI, USI. */ ++extern __inline void ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrwr_b (unsigned char _1, unsigned int _2) + { +- return (void)__builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2); ++ __builtin_loongarch_iocsrwr_b ((unsigned char) _1, (unsigned int) _2); + } + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: VOID, UHI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-void __iocsrwr_h(unsigned short _1, unsigned int _2) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: VOID, UHI, USI. */ ++extern __inline void ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrwr_h (unsigned short _1, unsigned int _2) + { +- return (void)__builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2); ++ __builtin_loongarch_iocsrwr_h ((unsigned short) _1, (unsigned int) _2); + } + +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: VOID, USI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-void __iocsrwr_w(unsigned int _1, unsigned int _2) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: VOID, USI, USI. */ ++extern __inline void ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrwr_w (unsigned int _1, unsigned int _2) + { +- return (void)__builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2); ++ __builtin_loongarch_iocsrwr_w ((unsigned int) _1, (unsigned int) _2); + } + + #ifdef __loongarch64 +-/* Assembly instruction format: rd, rj */ +-/* Data types in instruction templates: VOID, UDI, USI */ +-extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-void __iocsrwr_d(unsigned long int _1, unsigned int _2) ++/* Assembly instruction format: rd, rj. */ ++/* Data types in instruction templates: VOID, UDI, USI. 
*/ ++extern __inline void ++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) ++__iocsrwr_d (unsigned long int _1, unsigned int _2) + { +- return (void)__builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2); ++ __builtin_loongarch_iocsrwr_d ((unsigned long int) _1, (unsigned int) _2); + } + #endif + +-/* Assembly instruction format: ui15 */ +-/* Data types in instruction templates: UQI */ +-#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1)) +- +-/* Assembly instruction format: ui15 */ +-/* Data types in instruction templates: UQI */ +-#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1)) +- +-#define __builtin_loongarch_syscall(a) \ +-{ \ +- __asm__ volatile ("syscall %0\n\t" \ +- ::"I"(a)); \ +-} +-#define __syscall __builtin_loongarch_syscall +- +-#define __builtin_loongarch_break(a) \ +-{ \ +- __asm__ volatile ("break %0\n\t" \ +- ::"I"(a)); \ +-} +-#define __break __builtin_loongarch_break +- +- +-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_tlbsrch (void) +-{ +- __asm__ volatile ("tlbsrch\n\t"); +-} +-#define __tlbsrch __builtin_loongarch_tlbsrch +- +-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_tlbrd (void) +-{ +- __asm__ volatile ("tlbrd\n\t"); +-} +-#define __tlbrd __builtin_loongarch_tlbrd ++/* Assembly instruction format: ui15. */ ++/* Data types in instruction templates: USI. */ ++#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar ((_1)) + +-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_tlbwr (void) +-{ +- __asm__ volatile ("tlbwr\n\t"); +-} +-#define __tlbwr __builtin_loongarch_tlbwr ++/* Assembly instruction format: ui15. */ ++/* Data types in instruction templates: USI. */ ++#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar ((_1)) + +-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_tlbfill (void) +-{ +- __asm__ volatile ("tlbfill\n\t"); +-} +-#define __tlbfill __builtin_loongarch_tlbfill +- +-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_tlbclr (void) +-{ +- __asm__ volatile ("tlbclr\n\t"); +-} +-#define __tlbclr __builtin_loongarch_tlbclr +- +-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__builtin_loongarch_tlbflush (void) +-{ +- __asm__ volatile ("tlbflush\n\t"); +-} +-#define __tlbflush __builtin_loongarch_tlbflush ++/* Assembly instruction format: ui15. */ ++/* Data types in instruction templates: USI. */ ++#define __syscall(/*ui15*/ _1) __builtin_loongarch_syscall ((_1)) + ++/* Assembly instruction format: ui15. */ ++/* Data types in instruction templates: USI. */ ++#define __break(/*ui15*/ _1) __builtin_loongarch_break ((_1)) + + #ifdef __cplusplus + } +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 24757aaa1..515336e05 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -212,6 +212,9 @@ + ;; As ILASX but excludes V32QI. + (define_mode_iterator ILASX_DWH [V4DI V8SI V16HI]) + ++;; As LASX but excludes V32QI. ++(define_mode_iterator LASX_DWH [V4DF V8SF V4DI V8SI V16HI]) ++ + ;; As ILASX but excludes V4DI. + (define_mode_iterator ILASX_WHB [V8SI V16HI V32QI]) + +@@ -227,7 +230,7 @@ + ;; Only used for immediate set shuffle elements instruction. 
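The larchintrin.h hunks above rename the CSR accessors so the access width is explicit in the name (__csrrd/__dcsrrd become __csrrd_w/__csrrd_d, and likewise for __csrwr and __csrxchg); the semantics are unchanged. A minimal sketch of the renamed interface, assuming privileged (kernel-mode) execution; the CSR number 0x0 (CRMD) and the 0x4 mask are illustrative values, not something this patch defines:

/* Read-modify-write a CSR through the renamed intrinsics.  CSR 0x0
   (CRMD) and mask 0x4 are assumed example values; __csrxchg_w writes
   its first argument under the mask in its second argument and
   returns the previous CSR value.  */
#include <larchintrin.h>

unsigned int
toggle_crmd_bits (void)
{
  unsigned int old = __csrrd_w (0x0);         /* formerly __csrrd */
  return __csrxchg_w (old ^ 0x4, 0x4, 0x0);   /* formerly __csrxchg */
}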
+ (define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF]) + +-;; The atribute gives the integer vector mode with same size in Loongson ASX. ++;; The attribute gives the integer vector mode with same size in Loongson ASX. + (define_mode_attr VIMODE256 + [(V4DF "V4DI") + (V8SF "V8SI") +@@ -476,6 +479,37 @@ + (V16HI "w") + (V32QI "w")]) + ++(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S ++ UNSPEC_LASX_XVFRINTRZ_S ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRINTRM_S]) ++ ++(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D ++ UNSPEC_LASX_XVFRINTRZ_D ++ UNSPEC_LASX_XVFRINT ++ UNSPEC_LASX_XVFRINTRM_D]) ++ ++(define_int_attr frint256_pattern_s ++ [(UNSPEC_LASX_XVFRINTRP_S "ceil") ++ (UNSPEC_LASX_XVFRINTRZ_S "btrunc") ++ (UNSPEC_LASX_XVFRINT "rint") ++ (UNSPEC_LASX_XVFRINTRM_S "floor")]) ++ ++(define_int_attr frint256_pattern_d ++ [(UNSPEC_LASX_XVFRINTRP_D "ceil") ++ (UNSPEC_LASX_XVFRINTRZ_D "btrunc") ++ (UNSPEC_LASX_XVFRINT "rint") ++ (UNSPEC_LASX_XVFRINTRM_D "floor")]) ++ ++(define_int_attr frint256_suffix ++ [(UNSPEC_LASX_XVFRINTRP_S "rp") ++ (UNSPEC_LASX_XVFRINTRP_D "rp") ++ (UNSPEC_LASX_XVFRINTRZ_S "rz") ++ (UNSPEC_LASX_XVFRINTRZ_D "rz") ++ (UNSPEC_LASX_XVFRINT "") ++ (UNSPEC_LASX_XVFRINTRM_S "rm") ++ (UNSPEC_LASX_XVFRINTRM_D "rm")]) ++ + (define_expand "vec_init" + [(match_operand:LASX 0 "register_operand") + (match_operand:LASX 1 "")] +@@ -497,7 +531,6 @@ + "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" + [(set_attr "type" "simd_permute") + (set_attr "mode" "") +- (set_attr "can_delay" "no") + (set_attr "length" "8")]) + + (define_expand "vec_unpacks_hi_v8sf" +@@ -522,7 +555,6 @@ + operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, false/*high_p*/); + }) + +- + (define_expand "vec_unpacks_hi_" + [(match_operand: 0 "register_operand") + (match_operand:ILASX_WHB 1 "register_operand")] +@@ -560,11 +592,11 @@ + }) + + (define_insn "lasx_xvinsgr2vr_" +- [(set (match_operand:LASX_WD 0 "register_operand" "=f") +- (vec_merge:LASX_WD +- (vec_duplicate:LASX_WD ++ [(set (match_operand:ILASX_DW 0 "register_operand" "=f") ++ (vec_merge:ILASX_DW ++ (vec_duplicate:ILASX_DW + (match_operand: 1 "reg_or_0_operand" "rJ")) +- (match_operand:LASX_WD 2 "register_operand" "0") ++ (match_operand:ILASX_DW 2 "register_operand" "0") + (match_operand 3 "const__operand" "")))] + "ISA_HAS_LASX" + { +@@ -651,28 +683,49 @@ + (set_attr "mode" "V4DI")]) + + ;; xshuf.w +-(define_insn "lasx_xvperm_w" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (unspec:V8SI +- [(match_operand:V8SI 1 "register_operand" "f") +- (match_operand:V8SI 2 "register_operand" "f")] +- UNSPEC_LASX_XVPERM_W))] ++(define_insn "lasx_xvperm_" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (unspec:LASX_W ++ [(match_operand:LASX_W 1 "nonimmediate_operand" "f") ++ (match_operand:V8SI 2 "register_operand" "f")] ++ UNSPEC_LASX_XVPERM_W))] + "ISA_HAS_LASX" + "xvperm.w\t%u0,%u1,%u2" + [(set_attr "type" "simd_splat") +- (set_attr "mode" "V8SI")]) ++ (set_attr "mode" "")]) + + ;; xvpermi.d +-(define_insn "lasx_xvpermi_d" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (unspec:V4DI +- [(match_operand:V4DI 1 "register_operand" "f") +- (match_operand 2 "const_uimm8_operand")] +- UNSPEC_LASX_XVPERMI_D))] ++(define_insn "lasx_xvpermi_d_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "f") ++ (match_operand:SI 2 "const_uimm8_operand")] ++ UNSPEC_LASX_XVPERMI_D))] + "ISA_HAS_LASX" + "xvpermi.d\t%u0,%u1,%2" + [(set_attr "type" 
"simd_splat") +- (set_attr "mode" "V4DI")]) ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi_d__1" ++ [(set (match_operand:LASX_D 0 "register_operand" "=f") ++ (vec_select:LASX_D ++ (match_operand:LASX_D 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand") ++ (match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand") ++ (match_operand 5 "const_0_to_3_operand")])))] ++ "ISA_HAS_LASX" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[2]) << 0; ++ mask |= INTVAL (operands[3]) << 2; ++ mask |= INTVAL (operands[4]) << 4; ++ mask |= INTVAL (operands[5]) << 6; ++ operands[2] = GEN_INT (mask); ++ return "xvpermi.d\t%u0,%u1,%2"; ++} ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) + + ;; xvpermi.q + (define_insn "lasx_xvpermi_q_" +@@ -698,82 +751,51 @@ + [(set_attr "type" "simd_copy") + (set_attr "mode" "V4DI")]) + +-(define_expand "vec_extract" +- [(match_operand: 0 "register_operand") +- (match_operand:ILASX 1 "register_operand") ++(define_expand "vec_set" ++ [(match_operand:ILASX_DW 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") + (match_operand 2 "const__operand")] + "ISA_HAS_LASX" + { +- if (mode == SImode || mode == DImode) +- { +- emit_insn(gen_lasx_xvpickve2gr_ (operands[0], operands[1], operands[2])); +- } +- else +- { +- HOST_WIDE_INT size_0 = GET_MODE_SIZE (GET_MODE (operands[0])); +- HOST_WIDE_INT size_1 = GET_MODE_SIZE (GET_MODE (operands[1])); +- HOST_WIDE_INT val = INTVAL (operands[2]); ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsgr2vr_ (operands[0], operands[1], ++ operands[0], index)); ++ DONE; ++}) + +- /* High part */ +- if (val >= size_1/size_0/2 ) +- { +- rtx dest1 = gen_reg_rtx (GET_MODE (operands[1])); +- rtx pos = GEN_INT( val - size_1/size_0/2); +- emit_insn (gen_lasx_xvpermi_q_ (dest1, dest1, operands[1], GEN_INT(1))); +- rtx dest2 = gen_reg_rtx (SImode); +- emit_insn (gen_lsx_vpickve2gr_ (dest2, +- gen_lowpart(mode, dest1), +- pos)); +- emit_move_insn (operands[0], +- gen_lowpart (mode, dest2)); +- } +- else +- { +- rtx dest1 = gen_reg_rtx (SImode); +- emit_insn (gen_lsx_vpickve2gr_ (dest1, +- gen_lowpart(mode, operands[1]), +- operands[2])); +- emit_move_insn (operands[0], +- gen_lowpart (mode, dest1)); +- } +- } ++(define_expand "vec_set" ++ [(match_operand:FLASX 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsve0__scalar (operands[0], operands[1], ++ operands[0], index)); + DONE; + }) + + (define_expand "vec_extract" + [(match_operand: 0 "register_operand") +- (match_operand:FLASX 1 "register_operand") ++ (match_operand:LASX 1 "register_operand") + (match_operand 2 "const__operand")] + "ISA_HAS_LASX" + { +- rtx temp; +- HOST_WIDE_INT val = INTVAL (operands[2]); +- +- if (val == 0) +- temp = operands[1]; +- else +- { +- temp = gen_reg_rtx (mode); +- emit_insn (gen_lasx_xvpickve_ (temp, operands[1], operands[2])); +- } +- emit_insn (gen_lasx_vec_extract_ (operands[0], temp)); ++ loongarch_expand_vector_extract (operands[0], operands[1], ++ INTVAL (operands[2])); + DONE; + }) + +-(define_insn_and_split "lasx_vec_extract_" +- [(set (match_operand: 0 "register_operand" "=f") +- (vec_select: +- (match_operand:FLASX 1 "register_operand" "f") +- (parallel [(const_int 0)])))] ++(define_expand "vec_perm" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "register_operand") ++ 
(match_operand:LASX 2 "register_operand") ++ (match_operand: 3 "register_operand")] + "ISA_HAS_LASX" +- "#" +- "&& reload_completed" +- [(set (match_dup 0) (match_dup 1))] + { +- operands[1] = gen_rtx_REG (mode, REGNO (operands[1])); +-} +- [(set_attr "move_type" "fmove") +- (set_attr "mode" "")]) ++ loongarch_expand_vec_perm_1(operands); ++ DONE; ++}) + + ;; FIXME: 256?? + (define_expand "vcondu" +@@ -860,7 +882,6 @@ + { return loongarch_output_move (operands[0], operands[1]); } + [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert") + (set_attr "mode" "") +- (set_attr "can_delay" "no,yes,yes,yes,yes") + (set_attr "length" "8,4,4,4,4")]) + + +@@ -868,7 +889,7 @@ + [(set (match_operand:LASX 0 "nonimmediate_operand") + (match_operand:LASX 1 "move_operand"))] + "reload_completed && ISA_HAS_LASX +- && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ && loongarch_split_move_insn_p (operands[0], operands[1])" + [(const_int 0)] + { + loongarch_split_move_insn (operands[0], operands[1], curr_insn); +@@ -1143,7 +1164,25 @@ + [(set_attr "type" "simd_fmul") + (set_attr "mode" "")]) + +-(define_insn "div3" ++(define_expand "div3" ++ [(set (match_operand:FLASX 0 "register_operand") ++ (div:FLASX (match_operand:FLASX 1 "register_operand") ++ (match_operand:FLASX 2 "register_operand")))] ++ "ISA_HAS_LASX" ++{ ++ if (mode == V8SFmode ++ && TARGET_RECIP_VEC_DIV ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math ++ && flag_unsafe_math_optimizations) ++ { ++ loongarch_emit_swdivsf (operands[0], operands[1], ++ operands[2], V8SFmode); ++ DONE; ++ } ++}) ++ ++(define_insn "*div3" + [(set (match_operand:FLASX 0 "register_operand" "=f") + (div:FLASX (match_operand:FLASX 1 "register_operand" "f") + (match_operand:FLASX 2 "register_operand" "f")))] +@@ -1172,7 +1211,23 @@ + [(set_attr "type" "simd_fmadd") + (set_attr "mode" "")]) + +-(define_insn "sqrt2" ++(define_expand "sqrt2" ++ [(set (match_operand:FLASX 0 "register_operand") ++ (sqrt:FLASX (match_operand:FLASX 1 "register_operand")))] ++ "ISA_HAS_LASX" ++{ ++ if (mode == V8SFmode ++ && TARGET_RECIP_VEC_SQRT ++ && flag_unsafe_math_optimizations ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math) ++ { ++ loongarch_emit_swrsqrtsf (operands[0], operands[1], V8SFmode, 0); ++ DONE; ++ } ++}) ++ ++(define_insn "*sqrt2" + [(set (match_operand:FLASX 0 "register_operand" "=f") + (sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))] + "ISA_HAS_LASX" +@@ -1307,13 +1362,13 @@ + [(set_attr "type" "simd_bit") + (set_attr "mode" "")]) + +-(define_insn "lasx_xvbitsel_" +- [(set (match_operand:ILASX 0 "register_operand" "=f") +- (ior:ILASX (and:ILASX (not:ILASX +- (match_operand:ILASX 3 "register_operand" "f")) +- (match_operand:ILASX 1 "register_operand" "f")) +- (and:ILASX (match_dup 3) +- (match_operand:ILASX 2 "register_operand" "f"))))] ++(define_insn "lasx_xvbitsel_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (ior:LASX (and:LASX (not:LASX ++ (match_operand:LASX 3 "register_operand" "0")) ++ (match_operand:LASX 1 "register_operand" "f")) ++ (and:LASX (match_dup 3) ++ (match_operand:LASX 2 "register_operand" "f"))))] + "ISA_HAS_LASX" + "xvbitsel.v\t%u0,%u1,%u2,%u3" + [(set_attr "type" "simd_bitmov") +@@ -1363,11 +1418,11 @@ + [(set_attr "type" "simd_int_arith") + (set_attr "mode" "")]) + +-(define_expand "vec_cmp" +- [(set (match_operand:ILASX 0 "register_operand") +- (match_operator:ILASX 1 "" +- [(match_operand:ILASX 2 "register_operand") +- 
(match_operand:ILASX 3 "register_operand")]))] ++(define_expand "vec_cmp" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:LASX 2 "register_operand") ++ (match_operand:LASX 3 "register_operand")]))] + "ISA_HAS_LASX" + { + bool ok = loongarch_expand_int_vec_cmp (operands); +@@ -1375,11 +1430,11 @@ + DONE; + }) + +-(define_expand "vec_cmp" +- [(set (match_operand:FLASX 0 "register_operand") +- (match_operator:FLASX 1 "" +- [(match_operand:FLASX 2 "register_operand") +- (match_operand:FLASX 3 "register_operand")]))] ++(define_expand "vec_cmpu" ++ [(set (match_operand: 0 "register_operand") ++ (match_operator 1 "" ++ [(match_operand:ILASX 2 "register_operand") ++ (match_operand:ILASX 3 "register_operand")]))] + "ISA_HAS_LASX" + { + bool ok = loongarch_expand_fp_vec_cmp (operands); +@@ -1493,8 +1548,8 @@ + (V2DF "V8SI")]) + + (define_insn "lasx_xvreplgr2vr_" +- [(set (match_operand:LASX 0 "register_operand" "=f,f") +- (vec_duplicate:LASX ++ [(set (match_operand:ILASX 0 "register_operand" "=f,f") ++ (vec_duplicate:ILASX + (match_operand: 1 "reg_or_0_operand" "r,J")))] + "ISA_HAS_LASX" + { +@@ -1508,10 +1563,9 @@ + } + [(set_attr "type" "simd_fill") + (set_attr "mode" "") +- (set_attr "can_delay" "no") + (set_attr "length" "8")]) + +-(define_insn "lasx_xvflogb_" ++(define_insn "logb2" + [(set (match_operand:FLASX 0 "register_operand" "=f") + (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] + UNSPEC_LASX_XVFLOGB))] +@@ -1572,6 +1626,15 @@ + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) + ++(define_insn "lasx_xvfrecipe_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_RECIPE))] ++ "ISA_HAS_LASX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_DIV" ++ "xvfrecipe.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ + (define_insn "lasx_xvfrint_" + [(set (match_operand:FLASX 0 "register_operand" "=f") + (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] +@@ -1590,6 +1653,42 @@ + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) + ++ ++(define_insn "lasx_xvfrsqrte_" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_RSQRTE))] ++ "ISA_HAS_LASX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_RSQRT" ++ "xvfrsqrte.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_expand "rsqrt2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSQRT))] ++ "ISA_HAS_LASX" ++{ ++ if (mode == V8SFmode ++ && TARGET_RECIP_VEC_RSQRT ++ && flag_unsafe_math_optimizations ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math) ++ { ++ loongarch_emit_swrsqrtsf (operands[0], operands[1], V8SFmode, 1); ++ DONE; ++ } ++}) ++ ++(define_insn "*rsqrt2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRSQRT))] ++ "ISA_HAS_LASX" ++ "xvfrsqrt.\t%u0,%u1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ + (define_insn "lasx_xvftint_s__" + [(set (match_operand: 0 "register_operand" "=f") + (unspec: [(match_operand:FLASX 1 "register_operand" "f")] +@@ -2325,6 +2424,35 @@ + [(set_attr "type" "simd_shf") + (set_attr "mode" "")]) + ++(define_insn "lasx_xvshuf4i__1" ++ [(set (match_operand:LASX_W 0 
"register_operand" "=f") ++ (vec_select:LASX_W ++ (match_operand:LASX_W 1 "nonimmediate_operand" "f") ++ (parallel [(match_operand 2 "const_0_to_3_operand") ++ (match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand") ++ (match_operand 5 "const_0_to_3_operand") ++ (match_operand 6 "const_4_to_7_operand") ++ (match_operand 7 "const_4_to_7_operand") ++ (match_operand 8 "const_4_to_7_operand") ++ (match_operand 9 "const_4_to_7_operand")])))] ++ "ISA_HAS_LASX ++ && INTVAL (operands[2]) + 4 == INTVAL (operands[6]) ++ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) ++ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) ++ && INTVAL (operands[5]) + 4 == INTVAL (operands[9])" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[2]) << 0; ++ mask |= INTVAL (operands[3]) << 2; ++ mask |= INTVAL (operands[4]) << 4; ++ mask |= INTVAL (operands[5]) << 6; ++ operands[2] = GEN_INT (mask); ++ ++ return "xvshuf4i.w\t%u0,%u1,%2"; ++} ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) + + (define_insn "lasx_xvsrar_" + [(set (match_operand:ILASX 0 "register_operand" "=f") +@@ -2386,11 +2514,11 @@ + [(set_attr "type" "simd_int_arith") + (set_attr "mode" "")]) + +-(define_insn "lasx_xvshuf_" +- [(set (match_operand:ILASX_DWH 0 "register_operand" "=f") +- (unspec:ILASX_DWH [(match_operand: 1 "register_operand" "0") +- (match_operand:ILASX_DWH 2 "register_operand" "f") +- (match_operand:ILASX_DWH 3 "register_operand" "f")] ++(define_insn "lasx_xvshuf_" ++ [(set (match_operand:LASX_DWH 0 "register_operand" "=f") ++ (unspec:LASX_DWH [(match_operand:LASX_DWH 1 "register_operand" "0") ++ (match_operand:LASX_DWH 2 "register_operand" "f") ++ (match_operand:LASX_DWH 3 "register_operand" "f")] + UNSPEC_LASX_XVSHUF))] + "ISA_HAS_LASX" + "xvshuf.\t%u0,%u2,%u3" +@@ -2497,14 +2625,14 @@ + [(set_attr "type" "simd_splat") + (set_attr "mode" "")]) + +- (define_insn "lasx_xvreplve0__scalar" +- [(set (match_operand:FLASX 0 "register_operand" "=f") +- (unspec:FLASX [(match_operand: 1 "register_operand" "f")] +- UNSPEC_LASX_XVREPLVE0))] +- "ISA_HAS_LASX" +- "xvreplve0.\t%u0,%u1" +- [(set_attr "type" "simd_splat") +- (set_attr "mode" "")]) ++(define_insn "lasx_xvreplve0__scalar" ++[(set (match_operand:FLASX 0 "register_operand" "=f") ++ (vec_duplicate:FLASX ++ (match_operand: 1 "register_operand" "f")))] ++ "ISA_HAS_LASX" ++ "xvreplve0.\t%u0,%u1" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) + + (define_insn "lasx_xvreplve0_q" + [(set (match_operand:V32QI 0 "register_operand" "=f") +@@ -2544,7 +2672,6 @@ + "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8" + [(set_attr "type" "simd_fcvt") + (set_attr "mode" "V8SF") +- (set_attr "can_delay" "no") + (set_attr "length" "8")]) + + ;; Define for builtin function. +@@ -2579,7 +2706,6 @@ + "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0" + [(set_attr "type" "simd_fcvt") + (set_attr "mode" "V4DF") +- (set_attr "can_delay" "no") + (set_attr "length" "12")]) + + ;; Define for builtin function. 
+@@ -2614,7 +2740,6 @@ + "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0" + [(set_attr "type" "simd_fcvt") + (set_attr "mode" "V4DF") +- (set_attr "can_delay" "no") + (set_attr "length" "8")]) + + (define_code_attr lasxbr +@@ -2653,8 +2778,7 @@ + "xvset.\t%z3%u1\n\tbcnez\t%Z3%0"); + } + [(set_attr "type" "simd_branch") +- (set_attr "mode" "") +- (set_attr "compact_form" "never")]) ++ (set_attr "mode" "")]) + + (define_insn "lasx__v_" + [(set (pc) (if_then_else +@@ -2672,12 +2796,8 @@ + "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0"); + } + [(set_attr "type" "simd_branch") +- (set_attr "mode" "") +- (set_attr "compact_form" "never")]) +- +- ++ (set_attr "mode" "")]) + +- + ;; loongson-asx. + (define_insn "lasx_vext2xv_h_b" + [(set (match_operand:V16HI 0 "register_operand" "=f") +@@ -3339,8 +3459,8 @@ + (set_attr "mode" "V8SF")]) + + (define_insn "lasx_xvfrintrne_s" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRNE_S))] + "ISA_HAS_LASX" + "xvfrintrne.s\t%u0,%u1" +@@ -3348,8 +3468,8 @@ + (set_attr "mode" "V8SF")]) + + (define_insn "lasx_xvfrintrne_d" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRNE_D))] + "ISA_HAS_LASX" + "xvfrintrne.d\t%u0,%u1" +@@ -3357,8 +3477,8 @@ + (set_attr "mode" "V4DF")]) + + (define_insn "lasx_xvfrintrz_s" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRZ_S))] + "ISA_HAS_LASX" + "xvfrintrz.s\t%u0,%u1" +@@ -3366,8 +3486,8 @@ + (set_attr "mode" "V8SF")]) + + (define_insn "lasx_xvfrintrz_d" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRZ_D))] + "ISA_HAS_LASX" + "xvfrintrz.d\t%u0,%u1" +@@ -3375,8 +3495,8 @@ + (set_attr "mode" "V4DF")]) + + (define_insn "lasx_xvfrintrp_s" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRP_S))] + "ISA_HAS_LASX" + "xvfrintrp.s\t%u0,%u1" +@@ -3384,8 +3504,8 @@ + (set_attr "mode" "V8SF")]) + + (define_insn "lasx_xvfrintrp_d" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRP_D))] + "ISA_HAS_LASX" + "xvfrintrp.d\t%u0,%u1" +@@ -3393,8 +3513,8 @@ + (set_attr "mode" "V4DF")]) + + (define_insn "lasx_xvfrintrm_s" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")] ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRM_S))] + "ISA_HAS_LASX" + 
"xvfrintrm.s\t%u0,%u1" +@@ -3402,14 +3522,44 @@ + (set_attr "mode" "V8SF")]) + + (define_insn "lasx_xvfrintrm_d" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")] ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] + UNSPEC_LASX_XVFRINTRM_D))] + "ISA_HAS_LASX" + "xvfrintrm.d\t%u0,%u1" + [(set_attr "type" "simd_shift") + (set_attr "mode" "V4DF")]) + ++;; Vector versions of the floating-point frint patterns. ++;; Expands to btrunc, ceil, floor, rint. ++(define_insn "v8sf2" ++ [(set (match_operand:V8SF 0 "register_operand" "=f") ++ (unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")] ++ FRINT256_S))] ++ "ISA_HAS_LASX" ++ "xvfrint.s\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V8SF")]) ++ ++(define_insn "v4df2" ++ [(set (match_operand:V4DF 0 "register_operand" "=f") ++ (unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")] ++ FRINT256_D))] ++ "ISA_HAS_LASX" ++ "xvfrint.d\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4DF")]) ++ ++;; Expands to round. ++(define_insn "round2" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] ++ UNSPEC_LASX_XVFRINT))] ++ "ISA_HAS_LASX" ++ "xvfrint.\t%u0,%u1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ + ;; Offset load and broadcast + (define_expand "lasx_xvldrepl_" + [(match_operand:LASX 0 "register_operand") +@@ -3435,6 +3585,19 @@ + (set_attr "mode" "") + (set_attr "length" "4")]) + ++;; Offset is "0" ++(define_insn "lasx_xvldrepl__insn_0" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_duplicate:LASX ++ (mem: (match_operand:DI 1 "register_operand" "r"))))] ++ "ISA_HAS_LASX" ++{ ++ return "xvldrepl.\t%u0,%1,0"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ + ;;XVADDWEV.H.B XVSUBWEV.H.B XVMULWEV.H.B + ;;XVADDWEV.H.BU XVSUBWEV.H.BU XVMULWEV.H.BU + (define_insn "lasx_xvwev_h_b" +@@ -4666,16 +4829,52 @@ + [(set_attr "type" "simd_shift") + (set_attr "mode" "")]) + +-(define_insn "lasx_xvpermi_w" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (unspec:V8SI [(match_operand:V8SI 1 "register_operand" "0") +- (match_operand:V8SI 2 "register_operand" "f") +- (match_operand 3 "const_uimm8_operand" "")] +- UNSPEC_LASX_XVPERMI))] ++(define_mode_attr VDOUBLEMODEW256 ++ [(V8SI "V16SI") ++ (V8SF "V16SF")]) ++ ++(define_insn "lasx_xvpermi_" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (unspec:LASX_W [(match_operand:LASX_W 1 "register_operand" "0") ++ (match_operand:LASX_W 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand" "")] ++ UNSPEC_LASX_XVPERMI))] + "ISA_HAS_LASX" + "xvpermi.w\t%u0,%u2,%3" + [(set_attr "type" "simd_bit") +- (set_attr "mode" "V8SI")]) ++ (set_attr "mode" "")]) ++ ++(define_insn "lasx_xvpermi__1" ++ [(set (match_operand:LASX_W 0 "register_operand" "=f") ++ (vec_select:LASX_W ++ (vec_concat: ++ (match_operand:LASX_W 1 "register_operand" "f") ++ (match_operand:LASX_W 2 "register_operand" "0")) ++ (parallel [(match_operand 3 "const_0_to_3_operand") ++ (match_operand 4 "const_0_to_3_operand" ) ++ (match_operand 5 "const_8_to_11_operand" ) ++ (match_operand 6 "const_8_to_11_operand" ) ++ (match_operand 7 "const_4_to_7_operand" ) ++ (match_operand 8 "const_4_to_7_operand" ) ++ (match_operand 9 "const_12_to_15_operand") ++ (match_operand 10 "const_12_to_15_operand")])))] ++ 
"ISA_HAS_LASX ++ && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) ++ && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) ++ && INTVAL (operands[5]) + 4 == INTVAL (operands[9]) ++ && INTVAL (operands[6]) + 4 == INTVAL (operands[10])" ++{ ++ int mask = 0; ++ mask |= INTVAL (operands[3]) << 0; ++ mask |= INTVAL (operands[4]) << 2; ++ mask |= (INTVAL (operands[5]) - 8) << 4; ++ mask |= (INTVAL (operands[6]) - 8) << 6; ++ operands[3] = GEN_INT (mask); ++ ++ return "xvpermi.w\t%u0,%u1,%3"; ++} ++ [(set_attr "type" "simd_bit") ++ (set_attr "mode" "")]) + + (define_expand "lasx_xvld" + [(match_operand:V32QI 0 "register_operand") +@@ -4728,10 +4927,24 @@ + (set_attr "mode" "") + (set_attr "length" "4")]) + +-(define_insn "lasx_xvinsve0_" +- [(set (match_operand:ILASX_DW 0 "register_operand" "=f") +- (unspec:ILASX_DW [(match_operand:ILASX_DW 1 "register_operand" "0") +- (match_operand:ILASX_DW 2 "register_operand" "f") ++;; Offset is "0" ++(define_insn "lasx_xvstelm__insn_0" ++ [(set (mem: (match_operand:DI 0 "register_operand" "r")) ++ (vec_select: ++ (match_operand:LASX_WD 1 "register_operand" "f") ++ (parallel [(match_operand:SI 2 "const__operand")])))] ++ "ISA_HAS_LASX" ++{ ++ return "xvstelm.\t%u1,%0,0,%2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ ++(define_insn "lasx_xvinsve0_" ++ [(set (match_operand:LASX_WD 0 "register_operand" "=f") ++ (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "0") ++ (match_operand:LASX_WD 2 "register_operand" "f") + (match_operand 3 "const__operand" "")] + UNSPEC_LASX_XVINSVE0))] + "ISA_HAS_LASX" +@@ -4739,6 +4952,18 @@ + [(set_attr "type" "simd_shf") + (set_attr "mode" "")]) + ++(define_insn "lasx_xvinsve0__scalar" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (vec_merge:FLASX ++ (vec_duplicate:FLASX ++ (match_operand: 1 "register_operand" "f")) ++ (match_operand:FLASX 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")))] ++ "ISA_HAS_LASX" ++ "xvinsve0.\t%u0,%u1,%y3" ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ + (define_insn "lasx_xvpickve_" + [(set (match_operand:LASX_WD 0 "register_operand" "=f") + (unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f") +@@ -4749,6 +4974,16 @@ + [(set_attr "type" "simd_shf") + (set_attr "mode" "")]) + ++(define_insn "lasx_xvpickve__scalar" ++ [(set (match_operand: 0 "register_operand" "=f") ++ (vec_select: ++ (match_operand:FLASX 1 "register_operand" "f") ++ (parallel [(match_operand 2 "const__operand" "")])))] ++ "ISA_HAS_LASX" ++ "xvpickve.\t%u0,%u1,%2" ++ [(set_attr "type" "simd_shf") ++ (set_attr "mode" "")]) ++ + (define_insn "lasx_xvssrlrn__" + [(set (match_operand: 0 "register_operand" "=f") + (unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f") +@@ -4823,3 +5058,142 @@ + [(set_attr "type" "simd_store") + (set_attr "mode" "DI")]) + ++(define_insn "vec_widen_mult_even_v8si" ++ [(set (match_operand:V4DI 0 "register_operand" "=f") ++ (mult:V4DI ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 1 "register_operand" "%f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)]))) ++ (any_extend:V4DI ++ (vec_select:V4SI ++ (match_operand:V8SI 2 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 2) ++ (const_int 4) (const_int 6)])))))] ++ "ISA_HAS_LASX" ++ "xvmulwev.d.w\t%u0,%u1,%u2" ++ [(set_attr "type" "simd_int_arith") ++ (set_attr "mode" "V4DI")]) ++ ++;; Vector reduction operation ++(define_expand "reduc_plus_scal_v4di" ++ [(match_operand:DI 0 
"register_operand") ++ (match_operand:V4DI 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (V4DImode); ++ rtx tmp1 = gen_reg_rtx (V4DImode); ++ rtx vec_res = gen_reg_rtx (V4DImode); ++ emit_insn (gen_lasx_xvhaddw_q_d (tmp, operands[1], operands[1])); ++ emit_insn (gen_lasx_xvpermi_d_v4di (tmp1, tmp, GEN_INT (2))); ++ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); ++ emit_insn (gen_vec_extractv4didi (operands[0], vec_res, const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_v8si" ++ [(match_operand:SI 0 "register_operand") ++ (match_operand:V8SI 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (V4DImode); ++ rtx tmp1 = gen_reg_rtx (V4DImode); ++ rtx vec_res = gen_reg_rtx (V4DImode); ++ emit_insn (gen_lasx_xvhaddw_d_w (tmp, operands[1], operands[1])); ++ emit_insn (gen_lasx_xvhaddw_q_d (tmp1, tmp, tmp)); ++ emit_insn (gen_lasx_xvpermi_d_v4di (tmp, tmp1, GEN_INT (2))); ++ emit_insn (gen_addv4di3 (vec_res, tmp, tmp1)); ++ emit_insn (gen_vec_extractv8sisi (operands[0], gen_lowpart(V8SImode,vec_res), const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc__scal_" ++ [(any_bitwise: ++ (match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILASX 1 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++;; merge vec_unpacks_hi_v8sf/vec_unpacks_lo_v8sf ++(define_peephole ++ [(set (match_operand:V4DF 0 "register_operand") ++ (float_extend:V4DF (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)])))) ++ (set (match_operand:V4DF 2 "register_operand") ++ (float_extend:V4DF (vec_select:V4SF ++ (match_operand:V8SF 3 "register_operand") ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 
7)]))))] ++ "ISA_HAS_LASX && rtx_equal_p (operands[1], operands[3])" ++{ ++ return "xvpermi.d\t%u2,%u1,0xd8\n\txvfcvtl.d.s\t%u0,%u2\n\txvfcvth.d.s\t%u2,%u2"; ++}) +diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h +index 185eee869..58f3047ac 100644 +--- a/gcc/config/loongarch/lasxintrin.h ++++ b/gcc/config/loongarch/lasxintrin.h +@@ -3262,70 +3262,70 @@ __m256i __lasx_xvftintrnel_l_s(__m256 _1) + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V8SI, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrne_s(__m256 _1) ++__m256 __lasx_xvfrintrne_s(__m256 _1) + { +- return (__m256i)__builtin_lasx_xvfrintrne_s((v8f32)_1); ++ return (__m256)__builtin_lasx_xvfrintrne_s((v8f32)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V4DI, V4DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrne_d(__m256d _1) ++__m256d __lasx_xvfrintrne_d(__m256d _1) + { +- return (__m256i)__builtin_lasx_xvfrintrne_d((v4f64)_1); ++ return (__m256d)__builtin_lasx_xvfrintrne_d((v4f64)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V8SI, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrz_s(__m256 _1) ++__m256 __lasx_xvfrintrz_s(__m256 _1) + { +- return (__m256i)__builtin_lasx_xvfrintrz_s((v8f32)_1); ++ return (__m256)__builtin_lasx_xvfrintrz_s((v8f32)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V4DI, V4DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrz_d(__m256d _1) ++__m256d __lasx_xvfrintrz_d(__m256d _1) + { +- return (__m256i)__builtin_lasx_xvfrintrz_d((v4f64)_1); ++ return (__m256d)__builtin_lasx_xvfrintrz_d((v4f64)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V8SI, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrp_s(__m256 _1) ++__m256 __lasx_xvfrintrp_s(__m256 _1) + { +- return (__m256i)__builtin_lasx_xvfrintrp_s((v8f32)_1); ++ return (__m256)__builtin_lasx_xvfrintrp_s((v8f32)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V4DI, V4DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrp_d(__m256d _1) ++__m256d __lasx_xvfrintrp_d(__m256d _1) + { +- return (__m256i)__builtin_lasx_xvfrintrp_d((v4f64)_1); ++ return (__m256d)__builtin_lasx_xvfrintrp_d((v4f64)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V8SI, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrm_s(__m256 _1) ++__m256 __lasx_xvfrintrm_s(__m256 _1) + { +- return (__m256i)__builtin_lasx_xvfrintrm_s((v8f32)_1); ++ return (__m256)__builtin_lasx_xvfrintrm_s((v8f32)_1); + } + + /* Assembly instruction format: xd, xj. */ + /* Data types in instruction templates: V4DI, V4DF. 
*/ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvfrintrm_d(__m256d _1) ++__m256d __lasx_xvfrintrm_d(__m256d _1) + { +- return (__m256i)__builtin_lasx_xvfrintrm_d((v4f64)_1); ++ return (__m256d)__builtin_lasx_xvfrintrm_d((v4f64)_1); + } + + /* Assembly instruction format: xd, rj, si12. */ + /* Data types in instruction templates: V32QI, CVPOINTER, SI. */ +-#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvld((void *)(_1), (_2))) ++#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvld((void const *)(_1), (_2))) + + /* Assembly instruction format: xd, rj, si12. */ + /* Data types in instruction templates: VOID, V32QI, CVPOINTER, SI. */ +@@ -3426,9 +3426,9 @@ __m256i __lasx_xvorn_v(__m256i _1, __m256i _2) + /* Assembly instruction format: xd, rj, rk. */ + /* Data types in instruction templates: V32QI, CVPOINTER, DI. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m256i __lasx_xvldx(void * _1, long int _2) ++__m256i __lasx_xvldx(void const * _1, long int _2) + { +- return (__m256i)__builtin_lasx_xvldx((void *)_1, (long int)_2); ++ return (__m256i)__builtin_lasx_xvldx((void const *)_1, (long int)_2); + } + + /* Assembly instruction format: xd, rj, rk. */ +@@ -3609,19 +3609,19 @@ __m256i __lasx_xvperm_w(__m256i _1, __m256i _2) + + /* Assembly instruction format: xd, rj, si12. */ + /* Data types in instruction templates: V32QI, CVPOINTER, SI. */ +-#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvldrepl_b((void *)(_1), (_2))) ++#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m256i)__builtin_lasx_xvldrepl_b((void const *)(_1), (_2))) + + /* Assembly instruction format: xd, rj, si11. */ + /* Data types in instruction templates: V16HI, CVPOINTER, SI. */ +-#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m256i)__builtin_lasx_xvldrepl_h((void *)(_1), (_2))) ++#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m256i)__builtin_lasx_xvldrepl_h((void const *)(_1), (_2))) + + /* Assembly instruction format: xd, rj, si10. */ + /* Data types in instruction templates: V8SI, CVPOINTER, SI. */ +-#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m256i)__builtin_lasx_xvldrepl_w((void *)(_1), (_2))) ++#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m256i)__builtin_lasx_xvldrepl_w((void const *)(_1), (_2))) + + /* Assembly instruction format: xd, rj, si9. */ + /* Data types in instruction templates: V4DI, CVPOINTER, SI. */ +-#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m256i)__builtin_lasx_xvldrepl_d((void *)(_1), (_2))) ++#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m256i)__builtin_lasx_xvldrepl_d((void const *)(_1), (_2))) + + /* Assembly instruction format: rd, xj, ui3. */ + /* Data types in instruction templates: SI, V8SI, UQI. */ +diff --git a/gcc/config/loongarch/linux-common.h b/gcc/config/loongarch/linux-common.h +deleted file mode 100644 +index 9e1a1b50f..000000000 +--- a/gcc/config/loongarch/linux-common.h ++++ /dev/null +@@ -1,68 +0,0 @@ +-/* Definitions for LARCH running Linux-based GNU systems with ELF format. +- Copyright (C) 2012-2018 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify +-it under the terms of the GNU General Public License as published by +-the Free Software Foundation; either version 3, or (at your option) +-any later version. 
+- +-GCC is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-GNU General Public License for more details. +- +-You should have received a copy of the GNU General Public License +-along with GCC; see the file COPYING3. If not see +-. */ +- +-#undef TARGET_OS_CPP_BUILTINS +-#define TARGET_OS_CPP_BUILTINS() \ +- do { \ +- GNU_USER_TARGET_OS_CPP_BUILTINS(); \ +- /* The GNU C++ standard library requires this. */ \ +- if (c_dialect_cxx ()) \ +- builtin_define ("_GNU_SOURCE"); \ +- ANDROID_TARGET_OS_CPP_BUILTINS(); \ +- } while (0) +- +-#define EXTRA_TARGET_D_OS_VERSIONS() \ +- ANDROID_TARGET_D_OS_VERSIONS(); +- +-#undef LINK_SPEC +-#define LINK_SPEC \ +- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LINK_SPEC, \ +- GNU_USER_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) +- +-#undef SUBTARGET_CC1_SPEC +-#define SUBTARGET_CC1_SPEC \ +- LINUX_OR_ANDROID_CC (GNU_USER_TARGET_CC1_SPEC, \ +- GNU_USER_TARGET_CC1_SPEC " " ANDROID_CC1_SPEC) +- +-#undef CC1PLUS_SPEC +-#define CC1PLUS_SPEC \ +- LINUX_OR_ANDROID_CC ("", ANDROID_CC1PLUS_SPEC) +- +-#undef LIB_SPEC +-#define LIB_SPEC \ +- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LIB_SPEC, \ +- GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC " " ANDROID_LIB_SPEC) +- +-#undef STARTFILE_SPEC +-#define STARTFILE_SPEC \ +- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_STARTFILE_SPEC, ANDROID_STARTFILE_SPEC) +- +-#undef ENDFILE_SPEC +-#define ENDFILE_SPEC \ +- LINUX_OR_ANDROID_LD (GNU_USER_TARGET_MATHFILE_SPEC " " \ +- GNU_USER_TARGET_ENDFILE_SPEC, \ +- GNU_USER_TARGET_MATHFILE_SPEC " " \ +- ANDROID_ENDFILE_SPEC) +- +-/* Define this to be nonzero if static stack checking is supported. */ +-#define STACK_CHECK_STATIC_BUILTIN 1 +- +-/* FIXME*/ +-/* The default value isn't sufficient in 64-bit mode. */ +-#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) +diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h +index 520a8ef32..59854251f 100644 +--- a/gcc/config/loongarch/linux.h ++++ b/gcc/config/loongarch/linux.h +@@ -1,4 +1,4 @@ +-/* Definitions for LARCH running Linux-based GNU systems with ELF format. ++/* Definitions for Linux-based systems with libraries in ELF format. + Copyright (C) 1998-2018 Free Software Foundation, Inc. + + This file is part of GCC. +@@ -17,17 +17,34 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +-#define GNU_USER_LINK_EMULATION32 "elf32loongarch" +-#define GNU_USER_LINK_EMULATION64 "elf64loongarch" ++/* Default system library search paths. ++ * This ensures that a compiler configured with --disable-multilib ++ * can work in a multilib environment. 
*/ + +-#define GLIBC_DYNAMIC_LINKERLP32 \ +- "/lib32/ld.so.1" +-#define GLIBC_DYNAMIC_LINKERLP64 \ +- "/lib64/ld.so.1" ++#if defined(LA_DISABLE_MULTILIB) && defined(LA_DISABLE_MULTIARCH) + +-#define GNU_USER_DYNAMIC_LINKERLP32 GLIBC_DYNAMIC_LINKERLP32 +-#define GNU_USER_DYNAMIC_LINKERLP64 GLIBC_DYNAMIC_LINKERLP64 ++ #if DEFAULT_ABI_BASE == ABI_BASE_LP64D ++ #define ABI_LIBDIR "lib64" ++ #elif DEFAULT_ABI_BASE == ABI_BASE_LP64F ++ #define ABI_LIBDIR "lib64/f32" ++ #elif DEFAULT_ABI_BASE == ABI_BASE_LP64S ++ #define ABI_LIBDIR "lib64/sf" ++ #endif + ++#endif ++ ++#ifndef ABI_LIBDIR ++#define ABI_LIBDIR "lib" ++#endif ++ ++#define STANDARD_STARTFILE_PREFIX_1 "/" ABI_LIBDIR "/" ++#define STANDARD_STARTFILE_PREFIX_2 "/usr/" ABI_LIBDIR "/" ++ ++ ++/* Define this to be nonzero if static stack checking is supported. */ ++#define STACK_CHECK_STATIC_BUILTIN 1 ++ ++/* The default value isn't sufficient in 64-bit mode. */ ++#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024) + +-#undef TARGET_ASM_FILE_END + #define TARGET_ASM_FILE_END file_end_indicate_exec_stack +diff --git a/gcc/config/loongarch/loongarch-builtins.c b/gcc/config/loongarch/loongarch-builtins.c +index 9fa68b11f..b326ec46c 100644 +--- a/gcc/config/loongarch/loongarch-builtins.c ++++ b/gcc/config/loongarch/loongarch-builtins.c +@@ -1,7 +1,6 @@ +- +-/* Subroutines used for expanding LOONGARCH builtins. +- Copyright (C) 2011-2018 Free Software Foundation, Inc. +- Contributed by Andrew Waterman (andrew@sifive.com). ++/* Subroutines used for expanding LoongArch builtins. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. + + This file is part of GCC. + +@@ -30,50 +29,29 @@ along with GCC; see the file COPYING3. If not see + #include "tree.h" + #include "memmodel.h" + #include "gimple.h" +-#include "cfghooks.h" +-#include "df.h" + #include "tm_p.h" +-#include "stringpool.h" +-#include "attribs.h" + #include "optabs.h" +-#include "regs.h" +-#include "emit-rtl.h" + #include "recog.h" +-#include "cgraph.h" + #include "diagnostic.h" +-#include "insn-attr.h" +-#include "output.h" +-#include "alias.h" + #include "fold-const.h" +-#include "varasm.h" +-#include "stor-layout.h" +-#include "calls.h" +-#include "explow.h" + #include "expr.h" +-#include "libfuncs.h" +-#include "reload.h" +-#include "common/common-target.h" + #include "langhooks.h" +-#include "cfgrtl.h" +-#include "cfganal.h" +-#include "sched-int.h" +-#include "gimplify.h" +-#include "target-globals.h" +-#include "tree-pass.h" +-#include "context.h" ++#include "emit-rtl.h" ++#include "explow.h" + #include "builtins.h" +-#include "rtl-iter.h" ++#include "stringpool.h" ++#include "case-cfn-macros.h" + +-/* This file should be included last. */ +-#include "target-def.h" + /* Macros to create an enumeration identifier for a function prototype. */ + #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B + #define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C + #define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D +-#define LARCH_FTYPE_NAME4(A, B, C, D, E) LARCH_##A##_FTYPE_##B##_##C##_##D##_##E ++#define LARCH_FTYPE_NAME4(A, B, C, D, E) \ ++ LARCH_##A##_FTYPE_##B##_##C##_##D##_##E + + /* Classifies the prototype of a built-in function. 
*/ +-enum loongarch_function_type { ++enum loongarch_function_type ++{ + #define DEF_LARCH_FTYPE(NARGS, LIST) LARCH_FTYPE_NAME##NARGS LIST, + #include "config/loongarch/loongarch-ftypes.def" + #undef DEF_LARCH_FTYPE +@@ -81,7 +59,8 @@ enum loongarch_function_type { + }; + + /* Specifies how a built-in function should be converted into rtl. */ +-enum loongarch_builtin_type { ++enum loongarch_builtin_type ++{ + /* The function corresponds directly to an .md pattern. The return + value is mapped to operand 0 and the arguments are mapped to + operands 1 and above. */ +@@ -91,23 +70,23 @@ enum loongarch_builtin_type { + value and the arguments are mapped to operands 0 and above. */ + LARCH_BUILTIN_DIRECT_NO_TARGET, + ++ /* For generating LoongArch LSX. */ ++ LARCH_BUILTIN_LSX, ++ + /* The function corresponds to an LSX conditional branch instruction + combined with a compare instruction. */ + LARCH_BUILTIN_LSX_TEST_BRANCH, + +- /* For generating LoongArch LSX. */ +- LARCH_BUILTIN_LSX, +- + /* For generating LoongArch LASX. */ + LARCH_BUILTIN_LASX, + + /* The function corresponds to an LASX conditional branch instruction + combined with a compare instruction. */ +- LARCH_BUILTIN_LASX_TEST_BRANCH, ++ LARCH_BUILTIN_LASX_TEST_BRANCH + + }; + +-/* Invoke MACRO (COND) for each C.cond.fmt condition. */ ++/* Invoke MACRO (COND) for each fcmp.cond.{s/d} condition. */ + #define LARCH_FP_CONDITIONS(MACRO) \ + MACRO (f), \ + MACRO (un), \ +@@ -127,26 +106,27 @@ enum loongarch_builtin_type { + MACRO (ngt) + + /* Enumerates the codes above as LARCH_FP_COND_. */ +-#define DECLARE_LARCH_COND(X) LARCH_FP_COND_ ## X +-enum loongarch_fp_condition { ++#define DECLARE_LARCH_COND(X) LARCH_FP_COND_##X ++enum loongarch_fp_condition ++{ + LARCH_FP_CONDITIONS (DECLARE_LARCH_COND) + }; + #undef DECLARE_LARCH_COND + + /* Index X provides the string representation of LARCH_FP_COND_. */ + #define STRINGIFY(X) #X +-const char *const loongarch_fp_conditions[16] = { +- LARCH_FP_CONDITIONS (STRINGIFY) +-}; ++const char *const ++loongarch_fp_conditions[16] = {LARCH_FP_CONDITIONS (STRINGIFY)}; + #undef STRINGIFY +-/* Declare an availability predicate for built-in functions that require ++ ++/* Declare an availability predicate for built-in functions that require + * COND to be true. NAME is the main part of the predicate's name. */ +-#define AVAIL_ALL(NAME, COND) \ +- static unsigned int \ +- loongarch_builtin_avail_##NAME (void) \ +- { \ +- return (COND) ? 1 : 0; \ +- } ++#define AVAIL_ALL(NAME, COND) \ ++ static unsigned int \ ++ loongarch_builtin_avail_##NAME (void) \ ++ { \ ++ return (COND) ? 1 : 0; \ ++ } + + static unsigned int + loongarch_builtin_avail_default (void) +@@ -154,14 +134,12 @@ loongarch_builtin_avail_default (void) + return 1; + } + /* This structure describes a single built-in function. */ +-struct loongarch_builtin_description { ++struct loongarch_builtin_description ++{ + /* The code of the main .md file instruction. See loongarch_builtin_type + for more information. */ + enum insn_code icode; + +- /* The floating-point comparison code to use with ICODE, if any. */ +- enum loongarch_fp_condition cond; +- + /* The name of the built-in function. */ + const char *name; + +@@ -176,8 +154,8 @@ struct loongarch_builtin_description { + }; + + AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI) +-AVAIL_ALL (lsx, TARGET_LSX) +-AVAIL_ALL (lasx, TARGET_LASX) ++AVAIL_ALL (lsx, ISA_HAS_LSX) ++AVAIL_ALL (lasx, ISA_HAS_LASX) + + /* Construct a loongarch_builtin_description from the given arguments. 
+ +@@ -194,31 +172,32 @@ AVAIL_ALL (lasx, TARGET_LASX) + + AVAIL is the name of the availability predicate, without the leading + loongarch_builtin_avail_. */ +-#define LARCH_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \ +- FUNCTION_TYPE, AVAIL) \ +- { CODE_FOR_loongarch_ ## INSN, LARCH_FP_COND_ ## COND, \ +- "__builtin_loongarch_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \ +- loongarch_builtin_avail_ ## AVAIL } ++#define LARCH_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \ ++ { \ ++ CODE_FOR_loongarch_##INSN, "__builtin_loongarch_" NAME, \ ++ BUILTIN_TYPE, FUNCTION_TYPE, \ ++ loongarch_builtin_avail_##AVAIL \ ++ } + + /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT function + mapped to instruction CODE_FOR_loongarch_, FUNCTION_TYPE and AVAIL + are as for LARCH_BUILTIN. */ +-#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ +- LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) ++#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ ++ LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) + + /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT_NO_TARGET + function mapped to instruction CODE_FOR_loongarch_, FUNCTION_TYPE + and AVAIL are as for LARCH_BUILTIN. */ +-#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ +- LARCH_BUILTIN (INSN, f, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ +- FUNCTION_TYPE, AVAIL) ++#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \ ++ LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ ++ FUNCTION_TYPE, AVAIL) + + /* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_ + for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description + field. */ + #define LSX_BUILTIN(INSN, FUNCTION_TYPE) \ +- { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ +- "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ ++ { CODE_FOR_lsx_ ## INSN, \ ++ "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT, \ + FUNCTION_TYPE, loongarch_builtin_avail_lsx } + + +@@ -226,7 +205,7 @@ AVAIL_ALL (lasx, TARGET_LASX) + for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description + field. */ + #define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ +- { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ ++ { CODE_FOR_lsx_ ## INSN, \ + "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH, \ + FUNCTION_TYPE, loongarch_builtin_avail_lsx } + +@@ -234,7 +213,7 @@ AVAIL_ALL (lasx, TARGET_LASX) + for instruction CODE_FOR_lsx_. FUNCTION_TYPE is a builtin_description + field. */ + #define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ +- { CODE_FOR_lsx_ ## INSN, LARCH_FP_COND_f, \ ++ { CODE_FOR_lsx_ ## INSN, \ + "__builtin_lsx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ + FUNCTION_TYPE, loongarch_builtin_avail_lsx } + +@@ -242,7 +221,7 @@ AVAIL_ALL (lasx, TARGET_LASX) + for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description + field. */ + #define LASX_BUILTIN(INSN, FUNCTION_TYPE) \ +- { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ ++ { CODE_FOR_lasx_ ## INSN, \ + "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX, \ + FUNCTION_TYPE, loongarch_builtin_avail_lasx } + +@@ -250,7 +229,7 @@ AVAIL_ALL (lasx, TARGET_LASX) + for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description + field. 
*/ + #define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE) \ +- { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ ++ { CODE_FOR_lasx_ ## INSN, \ + "__builtin_lasx_" #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \ + FUNCTION_TYPE, loongarch_builtin_avail_lasx } + +@@ -258,65 +237,10 @@ AVAIL_ALL (lasx, TARGET_LASX) + for instruction CODE_FOR_lasx_. FUNCTION_TYPE is a builtin_description + field. */ + #define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE) \ +- { CODE_FOR_lasx_ ## INSN, LARCH_FP_COND_f, \ ++ { CODE_FOR_lasx_ ## INSN, \ + "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH, \ + FUNCTION_TYPE, loongarch_builtin_avail_lasx } + +-/* LoongArch BASE instructions define CODE_FOR_loongarch_xxx */ +-#define CODE_FOR_loongarch_fmax_sf CODE_FOR_smaxsf3 +-#define CODE_FOR_loongarch_fmax_df CODE_FOR_smaxdf3 +-#define CODE_FOR_loongarch_fmin_sf CODE_FOR_sminsf3 +-#define CODE_FOR_loongarch_fmin_df CODE_FOR_smindf3 +-#define CODE_FOR_loongarch_fmaxa_sf CODE_FOR_smaxasf3 +-#define CODE_FOR_loongarch_fmaxa_df CODE_FOR_smaxadf3 +-#define CODE_FOR_loongarch_fmina_sf CODE_FOR_sminasf3 +-#define CODE_FOR_loongarch_fmina_df CODE_FOR_sminadf3 +-#define CODE_FOR_loongarch_fclass_s CODE_FOR_fclass_s +-#define CODE_FOR_loongarch_fclass_d CODE_FOR_fclass_d +-#define CODE_FOR_loongarch_frint_s CODE_FOR_frint_s +-#define CODE_FOR_loongarch_frint_d CODE_FOR_frint_d +-#define CODE_FOR_loongarch_bytepick_w CODE_FOR_bytepick_w +-#define CODE_FOR_loongarch_bytepick_d CODE_FOR_bytepick_d +-#define CODE_FOR_loongarch_bitrev_4b CODE_FOR_bitrev_4b +-#define CODE_FOR_loongarch_bitrev_8b CODE_FOR_bitrev_8b +- +-/* LoongArch support crc */ +-#define CODE_FOR_loongarch_crc_w_b_w CODE_FOR_crc_w_b_w +-#define CODE_FOR_loongarch_crc_w_h_w CODE_FOR_crc_w_h_w +-#define CODE_FOR_loongarch_crc_w_w_w CODE_FOR_crc_w_w_w +-#define CODE_FOR_loongarch_crc_w_d_w CODE_FOR_crc_w_d_w +-#define CODE_FOR_loongarch_crcc_w_b_w CODE_FOR_crcc_w_b_w +-#define CODE_FOR_loongarch_crcc_w_h_w CODE_FOR_crcc_w_h_w +-#define CODE_FOR_loongarch_crcc_w_w_w CODE_FOR_crcc_w_w_w +-#define CODE_FOR_loongarch_crcc_w_d_w CODE_FOR_crcc_w_d_w +- +-/* Privileged state instruction */ +-#define CODE_FOR_loongarch_cpucfg CODE_FOR_cpucfg +-#define CODE_FOR_loongarch_asrtle_d CODE_FOR_asrtle_d +-#define CODE_FOR_loongarch_asrtgt_d CODE_FOR_asrtgt_d +-#define CODE_FOR_loongarch_csrrd CODE_FOR_csrrd +-#define CODE_FOR_loongarch_dcsrrd CODE_FOR_dcsrrd +-#define CODE_FOR_loongarch_csrwr CODE_FOR_csrwr +-#define CODE_FOR_loongarch_dcsrwr CODE_FOR_dcsrwr +-#define CODE_FOR_loongarch_csrxchg CODE_FOR_csrxchg +-#define CODE_FOR_loongarch_dcsrxchg CODE_FOR_dcsrxchg +-#define CODE_FOR_loongarch_iocsrrd_b CODE_FOR_iocsrrd_b +-#define CODE_FOR_loongarch_iocsrrd_h CODE_FOR_iocsrrd_h +-#define CODE_FOR_loongarch_iocsrrd_w CODE_FOR_iocsrrd_w +-#define CODE_FOR_loongarch_iocsrrd_d CODE_FOR_iocsrrd_d +-#define CODE_FOR_loongarch_iocsrwr_b CODE_FOR_iocsrwr_b +-#define CODE_FOR_loongarch_iocsrwr_h CODE_FOR_iocsrwr_h +-#define CODE_FOR_loongarch_iocsrwr_w CODE_FOR_iocsrwr_w +-#define CODE_FOR_loongarch_iocsrwr_d CODE_FOR_iocsrwr_d +-#define CODE_FOR_loongarch_lddir CODE_FOR_lddir +-#define CODE_FOR_loongarch_dlddir CODE_FOR_dlddir +-#define CODE_FOR_loongarch_ldpte CODE_FOR_ldpte +-#define CODE_FOR_loongarch_dldpte CODE_FOR_dldpte +-#define CODE_FOR_loongarch_cacop CODE_FOR_cacop +-#define CODE_FOR_loongarch_dcacop CODE_FOR_dcacop +-#define CODE_FOR_loongarch_dbar CODE_FOR_dbar +-#define CODE_FOR_loongarch_ibar CODE_FOR_ibar +- + /* LoongArch SX define CODE_FOR_lsx_xxx */ + #define 
CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3 + #define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3 +@@ -389,6 +313,8 @@ AVAIL_ALL (lasx, TARGET_LASX) + #define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3 + #define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2 + #define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2 ++#define CODE_FOR_lsx_vflogb_s CODE_FOR_logbv4sf2 ++#define CODE_FOR_lsx_vflogb_d CODE_FOR_logbv2df2 + #define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3 + #define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3 + #define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3 +@@ -654,6 +580,8 @@ AVAIL_ALL (lasx, TARGET_LASX) + #define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3 + #define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2 + #define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2 ++#define CODE_FOR_lasx_xvflogb_s CODE_FOR_logbv8sf2 ++#define CODE_FOR_lasx_xvflogb_d CODE_FOR_logbv4df2 + #define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3 + #define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3 + #define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3 +@@ -771,6 +699,7 @@ AVAIL_ALL (lasx, TARGET_LASX) + #define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4 + + #define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi ++#define CODE_FOR_lasx_xvpermi_d CODE_FOR_lasx_xvpermi_d_v4di + #define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b + #define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b + +@@ -857,36 +786,17 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + #define LARCH_MOVGR2FCSR 1 + DIRECT_NO_TARGET_BUILTIN (movgr2fcsr, LARCH_VOID_FTYPE_UQI_USI, hard_float), + +- DIRECT_NO_TARGET_BUILTIN (cacop, LARCH_VOID_FTYPE_USI_USI_SI, default), +- DIRECT_NO_TARGET_BUILTIN (dcacop, LARCH_VOID_FTYPE_USI_UDI_SI, default), ++ DIRECT_NO_TARGET_BUILTIN (cacop_w, LARCH_VOID_FTYPE_USI_USI_SI, default), ++ DIRECT_NO_TARGET_BUILTIN (cacop_d, LARCH_VOID_FTYPE_USI_UDI_SI, default), + DIRECT_NO_TARGET_BUILTIN (dbar, LARCH_VOID_FTYPE_USI, default), + DIRECT_NO_TARGET_BUILTIN (ibar, LARCH_VOID_FTYPE_USI, default), + +- DIRECT_BUILTIN (fmax_sf, LARCH_SF_FTYPE_SF_SF, hard_float), +- DIRECT_BUILTIN (fmax_df, LARCH_DF_FTYPE_DF_DF, hard_float), +- DIRECT_BUILTIN (fmin_sf, LARCH_SF_FTYPE_SF_SF, hard_float), +- DIRECT_BUILTIN (fmin_df, LARCH_DF_FTYPE_DF_DF, hard_float), +- DIRECT_BUILTIN (fmaxa_sf, LARCH_SF_FTYPE_SF_SF, hard_float), +- DIRECT_BUILTIN (fmaxa_df, LARCH_DF_FTYPE_DF_DF, hard_float), +- DIRECT_BUILTIN (fmina_sf, LARCH_SF_FTYPE_SF_SF, hard_float), +- DIRECT_BUILTIN (fmina_df, LARCH_DF_FTYPE_DF_DF, hard_float), +- DIRECT_BUILTIN (fclass_s, LARCH_SF_FTYPE_SF, hard_float), +- DIRECT_BUILTIN (fclass_d, LARCH_DF_FTYPE_DF, hard_float), +- DIRECT_BUILTIN (frint_s, LARCH_SF_FTYPE_SF, hard_float), +- DIRECT_BUILTIN (frint_d, LARCH_DF_FTYPE_DF, hard_float), +- DIRECT_BUILTIN (bytepick_w, LARCH_SI_FTYPE_SI_SI_QI, default), +- DIRECT_BUILTIN (bytepick_d, LARCH_DI_FTYPE_DI_DI_QI, default), +- DIRECT_BUILTIN (bitrev_4b, LARCH_SI_FTYPE_SI, default), +- DIRECT_BUILTIN (bitrev_8b, LARCH_DI_FTYPE_DI, default), +- DIRECT_BUILTIN (cpucfg, LARCH_USI_FTYPE_USI, default), +- DIRECT_BUILTIN (asrtle_d, LARCH_VOID_FTYPE_DI_DI, default), +- DIRECT_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default), +- DIRECT_BUILTIN (dlddir, LARCH_DI_FTYPE_DI_UQI, default), +- DIRECT_BUILTIN (lddir, LARCH_SI_FTYPE_SI_UQI, default), +- DIRECT_NO_TARGET_BUILTIN (dldpte, LARCH_VOID_FTYPE_DI_UQI, default), +- DIRECT_NO_TARGET_BUILTIN (ldpte, LARCH_VOID_FTYPE_SI_UQI, default), ++ DIRECT_BUILTIN (lddir_d, LARCH_DI_FTYPE_DI_UQI, default), ++ DIRECT_BUILTIN (lddir_w, 
LARCH_SI_FTYPE_SI_UQI, default),
++  DIRECT_NO_TARGET_BUILTIN (ldpte_d, LARCH_VOID_FTYPE_DI_UQI, default),
++  DIRECT_NO_TARGET_BUILTIN (ldpte_w, LARCH_VOID_FTYPE_SI_UQI, default),
+
+-  /* CRC Instrinsic */
++  /* CRC Intrinsic */
+
+  DIRECT_BUILTIN (crc_w_b_w, LARCH_SI_FTYPE_QI_SI, default),
+  DIRECT_BUILTIN (crc_w_h_w, LARCH_SI_FTYPE_HI_SI, default),
+@@ -897,12 +807,12 @@ static const struct loongarch_builtin_description loongarch_builtins[] = {
+  DIRECT_BUILTIN (crcc_w_w_w, LARCH_SI_FTYPE_SI_SI, default),
+  DIRECT_BUILTIN (crcc_w_d_w, LARCH_SI_FTYPE_DI_SI, default),
+
+-  DIRECT_BUILTIN (csrrd, LARCH_USI_FTYPE_USI, default),
+-  DIRECT_BUILTIN (dcsrrd, LARCH_UDI_FTYPE_USI, default),
+-  DIRECT_BUILTIN (csrwr, LARCH_USI_FTYPE_USI_USI, default),
+-  DIRECT_BUILTIN (dcsrwr, LARCH_UDI_FTYPE_UDI_USI, default),
+-  DIRECT_BUILTIN (csrxchg, LARCH_USI_FTYPE_USI_USI_USI, default),
+-  DIRECT_BUILTIN (dcsrxchg, LARCH_UDI_FTYPE_UDI_UDI_USI, default),
++  DIRECT_BUILTIN (csrrd_w, LARCH_USI_FTYPE_USI, default),
++  DIRECT_BUILTIN (csrrd_d, LARCH_UDI_FTYPE_USI, default),
++  DIRECT_BUILTIN (csrwr_w, LARCH_USI_FTYPE_USI_USI, default),
++  DIRECT_BUILTIN (csrwr_d, LARCH_UDI_FTYPE_UDI_USI, default),
++  DIRECT_BUILTIN (csrxchg_w, LARCH_USI_FTYPE_USI_USI_USI, default),
++  DIRECT_BUILTIN (csrxchg_d, LARCH_UDI_FTYPE_UDI_UDI_USI, default),
+  DIRECT_BUILTIN (iocsrrd_b, LARCH_UQI_FTYPE_USI, default),
+  DIRECT_BUILTIN (iocsrrd_h, LARCH_UHI_FTYPE_USI, default),
+  DIRECT_BUILTIN (iocsrrd_w, LARCH_USI_FTYPE_USI, default),
+@@ -912,6 +822,12 @@ static const struct loongarch_builtin_description loongarch_builtins[] = {
+  DIRECT_NO_TARGET_BUILTIN (iocsrwr_w, LARCH_VOID_FTYPE_USI_USI, default),
+  DIRECT_NO_TARGET_BUILTIN (iocsrwr_d, LARCH_VOID_FTYPE_UDI_USI, default),
+
++  DIRECT_BUILTIN (cpucfg, LARCH_USI_FTYPE_USI, default),
++  DIRECT_NO_TARGET_BUILTIN (asrtle_d, LARCH_VOID_FTYPE_DI_DI, default),
++  DIRECT_NO_TARGET_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default),
++  DIRECT_NO_TARGET_BUILTIN (syscall, LARCH_VOID_FTYPE_USI, default),
++  DIRECT_NO_TARGET_BUILTIN (break, LARCH_VOID_FTYPE_USI, default),
++
+  /* Built-in functions for LSX.
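A hypothetical user-level sketch (illustrative only, not part of the patch) of the CRC entries above; per LARCH_SI_FTYPE_QI_SI, the byte variant takes a char plus the running int state and returns the updated state:

/* Accumulate a CRC over a buffer one byte at a time.  */
static int
crc32_update (const char *buf, int len)
{
  int crc = -1;
  for (int i = 0; i < len; i++)
    crc = __builtin_loongarch_crc_w_b_w (buf[i], crc);
  return crc;
}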
*/ + LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI), + LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI), +@@ -1439,14 +1355,14 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF), + LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF), + LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF), +- LSX_BUILTIN (vfrintrne_s, LARCH_V4SI_FTYPE_V4SF), +- LSX_BUILTIN (vfrintrne_d, LARCH_V2DI_FTYPE_V2DF), +- LSX_BUILTIN (vfrintrz_s, LARCH_V4SI_FTYPE_V4SF), +- LSX_BUILTIN (vfrintrz_d, LARCH_V2DI_FTYPE_V2DF), +- LSX_BUILTIN (vfrintrp_s, LARCH_V4SI_FTYPE_V4SF), +- LSX_BUILTIN (vfrintrp_d, LARCH_V2DI_FTYPE_V2DF), +- LSX_BUILTIN (vfrintrm_s, LARCH_V4SI_FTYPE_V4SF), +- LSX_BUILTIN (vfrintrm_d, LARCH_V2DI_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrne_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrne_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrz_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrz_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrp_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrp_d, LARCH_V2DF_FTYPE_V2DF), ++ LSX_BUILTIN (vfrintrm_s, LARCH_V4SF_FTYPE_V4SF), ++ LSX_BUILTIN (vfrintrm_d, LARCH_V2DF_FTYPE_V2DF), + LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI), + LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI), + LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI), +@@ -2152,14 +2068,14 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF), + LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF), + LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF), +- LASX_BUILTIN (xvfrintrne_s, LARCH_V8SI_FTYPE_V8SF), +- LASX_BUILTIN (xvfrintrne_d, LARCH_V4DI_FTYPE_V4DF), +- LASX_BUILTIN (xvfrintrz_s, LARCH_V8SI_FTYPE_V8SF), +- LASX_BUILTIN (xvfrintrz_d, LARCH_V4DI_FTYPE_V4DF), +- LASX_BUILTIN (xvfrintrp_s, LARCH_V8SI_FTYPE_V8SF), +- LASX_BUILTIN (xvfrintrp_d, LARCH_V4DI_FTYPE_V4DF), +- LASX_BUILTIN (xvfrintrm_s, LARCH_V8SI_FTYPE_V8SF), +- LASX_BUILTIN (xvfrintrm_d, LARCH_V4DI_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrne_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrne_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrz_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrz_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrp_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrp_d, LARCH_V4DF_FTYPE_V4DF), ++ LASX_BUILTIN (xvfrintrm_s, LARCH_V8SF_FTYPE_V8SF), ++ LASX_BUILTIN (xvfrintrm_d, LARCH_V4DF_FTYPE_V4DF), + LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI), + LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI), + LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI), +@@ -2391,6 +2307,27 @@ static const struct loongarch_builtin_description loongarch_builtins[] = { + LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI), + }; + ++/* Index I is the function declaration for loongarch_builtins[I], or null if ++ the function isn't defined on this target. */ ++static GTY (()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; ++/* Get the index I of the function declaration for loongarch_builtin_decls[I] ++ using the instruction code or return null if not defined for the target. */ ++static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; ++ ++/* Return a type for 'const volatile void*'. 
*/ ++ ++static tree ++loongarch_build_cvpointer_type (void) ++{ ++ static tree cache; ++ ++ if (cache == NULL_TREE) ++ cache = build_pointer_type (build_qualified_type (void_type_node, ++ TYPE_QUAL_CONST ++ | TYPE_QUAL_VOLATILE)); ++ return cache; ++} ++ + + /* MODE is a vector mode whose elements have type TYPE. Return the type + of the vector itself. */ +@@ -2411,26 +2348,12 @@ loongarch_builtin_vector_type (tree type, machine_mode mode) + return types[mode_index]; + } + +-/* Return a type for 'const volatile void *'. */ +- +-static tree +-loongarch_build_cvpointer_type (void) +-{ +- static tree cache; +- +- if (cache == NULL_TREE) +- cache = build_pointer_type (build_qualified_type +- (void_type_node, +- TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)); +- return cache; +-} +- + /* Source-level argument types. */ + #define LARCH_ATYPE_VOID void_type_node + #define LARCH_ATYPE_INT integer_type_node + #define LARCH_ATYPE_POINTER ptr_type_node + #define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type () +-#define LARCH_ATYPE_BOOLEAN boolean_type_node ++#define LARCH_ATYPE_BOOLEAN boolean_type_node + /* Standard mode-based argument types. */ + #define LARCH_ATYPE_QI intQI_type_node + #define LARCH_ATYPE_UQI unsigned_intQI_type_node +@@ -2495,8 +2418,7 @@ loongarch_build_cvpointer_type (void) + + /* LARCH_FTYPE_ATYPESN takes N LARCH_FTYPES-like type codes and lists + their associated LARCH_ATYPEs. */ +-#define LARCH_FTYPE_ATYPES1(A, B) \ +- LARCH_ATYPE_##A, LARCH_ATYPE_##B ++#define LARCH_FTYPE_ATYPES1(A, B) LARCH_ATYPE_##A, LARCH_ATYPE_##B + + #define LARCH_FTYPE_ATYPES2(A, B, C) \ + LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C +@@ -2508,13 +2430,6 @@ loongarch_build_cvpointer_type (void) + LARCH_ATYPE_##A, LARCH_ATYPE_##B, LARCH_ATYPE_##C, LARCH_ATYPE_##D, \ + LARCH_ATYPE_##E + +-/* Index I is the function declaration for loongarch_builtins[I], or null if the +- function isn't defined on this target. */ +-static GTY(()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)]; +-/* Get the index I of the function declaration for loongarch_builtin_decls[I] +- using the instruction code or return null if not defined for the target. */ +-static GTY(()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES]; +- + /* Return the function type associated with function prototype TYPE. */ + + static tree +@@ -2525,11 +2440,10 @@ loongarch_build_function_type (enum loongarch_function_type type) + if (types[(int) type] == NULL_TREE) + switch (type) + { +-#define DEF_LARCH_FTYPE(NUM, ARGS) \ +- case LARCH_FTYPE_NAME##NUM ARGS: \ +- types[(int) type] \ +- = build_function_type_list (LARCH_FTYPE_ATYPES##NUM ARGS, \ +- NULL_TREE); \ ++#define DEF_LARCH_FTYPE(NUM, ARGS) \ ++ case LARCH_FTYPE_NAME##NUM ARGS: \ ++ types[(int) type] \ ++ = build_function_type_list (LARCH_FTYPE_ATYPES##NUM ARGS, NULL_TREE); \ + break; + #include "config/loongarch/loongarch-ftypes.def" + #undef DEF_LARCH_FTYPE +@@ -2547,6 +2461,7 @@ loongarch_init_builtins (void) + { + const struct loongarch_builtin_description *d; + unsigned int i; ++ tree type; + + /* Iterate through all of the bdesc arrays, initializing all of the + builtin functions. 
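As a worked example of the switch in loongarch_build_function_type above, the prototype DEF_LARCH_FTYPE (2, (SI, QI, SI)) used by crc_w_b_w generates a case equivalent to the following sketch, assuming LARCH_ATYPE_SI is intSI_type_node by analogy with the QI entries above:

case LARCH_SI_FTYPE_QI_SI:
  types[(int) type]
    = build_function_type_list (intSI_type_node, intQI_type_node,
				intSI_type_node, NULL_TREE);
  break;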
*/ +@@ -2555,10 +2470,10 @@ loongarch_init_builtins (void) + d = &loongarch_builtins[i]; + if (d->avail ()) + { ++ type = loongarch_build_function_type (d->function_type); + loongarch_builtin_decls[i] +- = add_builtin_function (d->name, +- loongarch_build_function_type (d->function_type), +- i, BUILT_IN_MD, NULL, NULL); ++ = add_builtin_function (d->name, type, i, BUILT_IN_MD, NULL, ++ NULL); + loongarch_get_builtin_decl_index[d->icode] = i; + } + } +@@ -2574,6 +2489,104 @@ loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED) + return loongarch_builtin_decls[code]; + } + ++/* Handler for an SLEEF-style interface to ++ a library with vectorized intrinsics. */ ++static tree ++loongarch_builtin_vectorized_libsleef (combined_fn fn, tree type_out, tree type_in) ++{ ++ char name[20]; ++ tree fntype, new_fndecl; ++ unsigned args = 1; ++ const char *bname; ++ machine_mode el_mode, in_mode; ++ int n, in_n; ++ ++ /* The SLEEF is suitable for unsafe math only. */ ++ if (!flag_unsafe_math_optimizations || !ISA_HAS_LSX) ++ return NULL_TREE; ++ ++ el_mode = TYPE_MODE (TREE_TYPE (type_out)); ++ n = TYPE_VECTOR_SUBPARTS (type_out); ++ in_mode = TYPE_MODE (TREE_TYPE (type_in)); ++ in_n = TYPE_VECTOR_SUBPARTS (type_in); ++ if (el_mode != in_mode ++ || n != in_n) ++ return NULL_TREE; ++ ++ switch (fn) ++ { ++ CASE_CFN_ATAN2: ++ CASE_CFN_POW: ++ args = 2; ++ gcc_fallthrough (); ++ ++ CASE_CFN_EXP: ++ CASE_CFN_LOG: ++ CASE_CFN_LOG1P: ++ CASE_CFN_LOG2: ++ CASE_CFN_LOG10: ++ CASE_CFN_TANH: ++ CASE_CFN_TAN: ++ CASE_CFN_ATAN: ++ CASE_CFN_ATANH: ++ CASE_CFN_CBRT: ++ CASE_CFN_SINH: ++ CASE_CFN_SIN: ++ CASE_CFN_ASINH: ++ CASE_CFN_ASIN: ++ CASE_CFN_COSH: ++ CASE_CFN_COS: ++ CASE_CFN_ACOSH: ++ CASE_CFN_ACOS: ++ break; ++ ++ default: ++ return NULL_TREE; ++ } ++ ++ tree fndecl = mathfn_built_in (TREE_TYPE (type_in), fn); ++ bname = IDENTIFIER_POINTER (DECL_NAME (fndecl)); ++ ++ if (args == 1) ++ { ++ if (n == 8 && el_mode == SFmode) ++ sprintf (name, "_ZGVdN8v_%s", bname+10); ++ else if (n == 4 && el_mode == DFmode) ++ sprintf (name, "_ZGVdN4v_%s", bname+10); ++ else if (n == 4 && el_mode == SFmode) ++ sprintf (name, "_ZGVbN4v_%s", bname+10); ++ else ++ sprintf (name, "_ZGVbN2v_%s", bname+10); ++ ++ fntype = build_function_type_list (type_out, type_in, NULL); ++ } ++ else if (args == 2) ++ { ++ if (n == 8 && el_mode == SFmode) ++ sprintf (name, "_ZGVdN8vv_%s", bname+10); ++ else if (n == 4 && el_mode == DFmode) ++ sprintf (name, "_ZGVdN4vv_%s", bname+10); ++ else if (n == 4 && el_mode == SFmode) ++ sprintf (name, "_ZGVbN4vv_%s", bname+10); ++ else ++ sprintf (name, "_ZGVbN2vv_%s", bname+10); ++ ++ fntype = build_function_type_list (type_out, type_in, type_in, NULL); ++ } ++ else ++ gcc_unreachable (); ++ ++ /* Build a function declaration for the vectorized function. */ ++ new_fndecl = build_decl (BUILTINS_LOCATION, ++ FUNCTION_DECL, get_identifier (name), fntype); ++ TREE_PUBLIC (new_fndecl) = 1; ++ DECL_EXTERNAL (new_fndecl) = 1; ++ DECL_IS_NOVOPS (new_fndecl) = 1; ++ TREE_READONLY (new_fndecl) = 1; ++ ++ return new_fndecl; ++} ++ + /* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION. 
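Before the hook itself, a stand-alone check of the name construction used by the SLEEF handler above: bname holds the builtin's identifier, so bname + 10 skips the ten-character "__builtin_" prefix, and a 4 x double sin resolves to _ZGVdN4v_sin (the vector-ABI style symbol the handler assumes the library exports). Hypothetical demonstration code, not part of the patch:

#include <stdio.h>

int
main (void)
{
  char name[20];
  const char *bname = "__builtin_sin";	/* IDENTIFIER_POINTER of the decl */

  sprintf (name, "_ZGVdN4v_%s", bname + 10);	/* skip "__builtin_" */
  puts (name);					/* prints "_ZGVdN4v_sin" */
  return 0;
}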
*/ + + tree +@@ -2599,20 +2612,82 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, tree type + + switch (fn) + { +- case BUILT_IN_SQRT: +- if (out_mode == DFmode && out_n == 2 +- && in_mode == DFmode && in_n == 2) +- return LARCH_GET_BUILTIN (lsx_vfsqrt_d); ++ CASE_CFN_CEIL: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrp_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrp_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrp_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrp_s); ++ } + break; +- case BUILT_IN_SQRTF: +- if (out_mode == SFmode && out_n == 4 +- && in_mode == SFmode && in_n == 4) +- return LARCH_GET_BUILTIN (lsx_vfsqrt_s); ++ ++ CASE_CFN_TRUNC: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrz_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrz_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrz_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrz_s); ++ } + break; ++ ++ CASE_CFN_RINT: ++ CASE_CFN_ROUND: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrint_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrint_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrint_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrint_s); ++ } ++ break; ++ ++ CASE_CFN_FLOOR: ++ if (out_mode == DFmode && in_mode == DFmode) ++ { ++ if (out_n == 2 && in_n == 2) ++ return LARCH_GET_BUILTIN (lsx_vfrintrm_d); ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrm_d); ++ } ++ if (out_mode == SFmode && in_mode == SFmode) ++ { ++ if (out_n == 4 && in_n == 4) ++ return LARCH_GET_BUILTIN (lsx_vfrintrm_s); ++ if (out_n == 8 && in_n == 8) ++ return LARCH_GET_BUILTIN (lasx_xvfrintrm_s); ++ } ++ break; ++ + default: + break; + } + ++ /* Dispatch to a handler for a vectorization library. 
*/ ++ if (loongarch_veclibabi_name && strcmp (loongarch_veclibabi_name, "sleef") == 0) ++ return loongarch_builtin_vectorized_libsleef (combined_fn (fn), type_out, type_in); + return NULL_TREE; + } + +@@ -2621,7 +2696,7 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out, tree type + + static void + loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp, +- unsigned int argno) ++ unsigned int argno) + { + tree arg; + rtx value; +@@ -2649,11 +2724,10 @@ loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val) + + static rtx + loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, +- struct expand_operand *ops, bool has_target_p) ++ struct expand_operand *ops, bool has_target_p) + { + machine_mode imode; + int rangelo = 0, rangehi = 0, error_opno = 0; +- rtx sireg; + + switch (icode) + { +@@ -3002,7 +3076,7 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops, + + static rtx + loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp, +- bool has_target_p) ++ bool has_target_p) + { + struct expand_operand ops[MAX_RECOG_OPERANDS]; + int opno, argno; +@@ -3069,7 +3143,8 @@ loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp) + + rtx + loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, +- machine_mode mode, int ignore) ++ machine_mode mode ATTRIBUTE_UNUSED, ++ int ignore ATTRIBUTE_UNUSED) + { + tree fndecl; + unsigned int fcode, avail; +@@ -3097,6 +3172,7 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, + } + gcc_unreachable (); + } ++ + /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */ + + void +@@ -3112,32 +3188,32 @@ loongarch_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update) + tree set_fcsr = loongarch_builtin_decls[LARCH_MOVGR2FCSR]; + tree get_fcsr_hold_call = build_call_expr (get_fcsr, 1, const0); + tree hold_assign_orig = build4 (TARGET_EXPR, LARCH_ATYPE_USI, +- fcsr_orig_var, get_fcsr_hold_call, +- NULL, NULL); ++ fcsr_orig_var, get_fcsr_hold_call, ++ NULL, NULL); + tree hold_mod_val = build2 (BIT_AND_EXPR, LARCH_ATYPE_USI, fcsr_orig_var, + build_int_cst (LARCH_ATYPE_USI, 0xffe0ffe0)); + tree hold_assign_mod = build4 (TARGET_EXPR, LARCH_ATYPE_USI, +- fcsr_mod_var, hold_mod_val, NULL, NULL); +- tree set_fcsr_hold_call = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var); +- tree hold_all = build2 (COMPOUND_EXPR, LARCH_ATYPE_USI, +- hold_assign_orig, hold_assign_mod); +- *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all, +- set_fcsr_hold_call); ++ fcsr_mod_var, hold_mod_val, NULL, NULL); ++ tree set_fcsr_hold_call = build_call_expr (set_fcsr, 2, const0, ++ fcsr_mod_var); ++ tree hold_all = build2 (COMPOUND_EXPR, LARCH_ATYPE_USI, hold_assign_orig, ++ hold_assign_mod); ++ *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all, set_fcsr_hold_call); + + *clear = build_call_expr (set_fcsr, 2, const0, fcsr_mod_var); + + tree get_fcsr_update_call = build_call_expr (get_fcsr, 1, const0); + *update = build4 (TARGET_EXPR, LARCH_ATYPE_USI, exceptions_var, +- get_fcsr_update_call, NULL, NULL); +- tree set_fcsr_update_call = build_call_expr (set_fcsr, 2, const0, fcsr_orig_var); ++ get_fcsr_update_call, NULL, NULL); ++ tree set_fcsr_update_call = build_call_expr (set_fcsr, 2, const0, ++ fcsr_orig_var); + *update = build2 (COMPOUND_EXPR, void_type_node, *update, + set_fcsr_update_call); + tree atomic_feraiseexcept + = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT); +- tree 
int_exceptions_var = fold_convert (integer_type_node, +- exceptions_var); +- tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept, +- 1, int_exceptions_var); ++ tree int_exceptions_var = fold_convert (integer_type_node, exceptions_var); ++ tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept, 1, ++ int_exceptions_var); + *update = build2 (COMPOUND_EXPR, void_type_node, *update, + atomic_feraiseexcept_call); + } +@@ -3149,4 +3225,3 @@ loongarch_build_builtin_va_list (void) + { + return ptr_type_node; + } +- +diff --git a/gcc/config/loongarch/loongarch-c.c b/gcc/config/loongarch/loongarch-c.c +index 6eac43bdf..f8583f7aa 100644 +--- a/gcc/config/loongarch/loongarch-c.c ++++ b/gcc/config/loongarch/loongarch-c.c +@@ -1,22 +1,22 @@ + /* LoongArch-specific code for C family languages. +- Copyright (C) 2020-2021 Free Software Foundation, Inc. +- Contributed by Andrew Waterman (zhouyingkun@mail.loongson.cn). ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. + +- This file is part of GCC. ++This file is part of GCC. + +- GCC is free software; you can redistribute it and/or modify +- it under the terms of the GNU General Public License as published by +- the Free Software Foundation; either version 3, or (at your option) +- any later version. ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. + +- GCC is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- GNU General Public License for more details. ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. + +- You should have received a copy of the GNU General Public License +- along with GCC; see the file COPYING3. If not see +- . */ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ + + #define IN_TARGET_CODE 1 + +@@ -31,7 +31,28 @@ + #define builtin_define(TXT) cpp_define (pfile, TXT) + #define builtin_assert(TXT) cpp_assert (pfile, TXT) + +-/* TODO: what is the pfile technique ??? !!! */ ++/* Define preprocessor macros for the -march and -mtune options. ++ PREFIX is either _LOONGARCH_ARCH or _LOONGARCH_TUNE, INFO is ++ the selected processor. If INFO's canonical name is "foo", ++ define PREFIX to be "foo", and define an additional macro ++ PREFIX_FOO. 
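Concretely, for -march=la464 the macro below defines the flag form _LOONGARCH_ARCH_LA464 and sets _LOONGARCH_ARCH to the quoted string "la464" (assuming "la464" is the canonical entry in loongarch_cpu_strings). A hypothetical user-side view:

#ifdef _LOONGARCH_ARCH_LA464
static const char *arch_name = _LOONGARCH_ARCH;	/* "la464" */
#endif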
*/ ++#define LARCH_CPP_SET_PROCESSOR(PREFIX, CPU_TYPE) \ ++ do \ ++ { \ ++ char *macro, *p; \ ++ int cpu_type = (CPU_TYPE); \ ++ \ ++ macro = concat ((PREFIX), "_", \ ++ loongarch_cpu_strings[cpu_type], NULL); \ ++ for (p = macro; *p != 0; p++) \ ++ *p = TOUPPER (*p); \ ++ \ ++ builtin_define (macro); \ ++ builtin_define_with_value ((PREFIX), \ ++ loongarch_cpu_strings[cpu_type], 1); \ ++ free (macro); \ ++ } \ ++ while (0) + + void + loongarch_cpu_cpp_builtins (cpp_reader *pfile) +@@ -40,10 +61,43 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + builtin_assert ("cpu=loongarch"); + builtin_define ("__loongarch__"); + +- if (TARGET_FLOAT64) +- builtin_define ("__loongarch_fpr=64"); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch); ++ LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune); ++ ++ /* Base architecture / ABI. */ ++ if (TARGET_64BIT) ++ { ++ builtin_define ("__loongarch_grlen=64"); ++ builtin_define ("__loongarch64"); ++ } ++ ++ if (TARGET_ABI_LP64) ++ { ++ builtin_define ("_ABILP64=3"); ++ builtin_define ("_LOONGARCH_SIM=_ABILP64"); ++ builtin_define ("__loongarch_lp64"); ++ } ++ ++ /* These defines reflect the ABI in use, not whether the ++ FPU is directly accessible. */ ++ if (TARGET_DOUBLE_FLOAT_ABI) ++ builtin_define ("__loongarch_double_float=1"); ++ else if (TARGET_SINGLE_FLOAT_ABI) ++ builtin_define ("__loongarch_single_float=1"); ++ ++ if (TARGET_DOUBLE_FLOAT_ABI || TARGET_SINGLE_FLOAT_ABI) ++ builtin_define ("__loongarch_hard_float=1"); + else +- builtin_define ("__loongarch_fpr=32"); ++ builtin_define ("__loongarch_soft_float=1"); ++ ++ ++ /* ISA Extensions. */ ++ if (TARGET_DOUBLE_FLOAT) ++ builtin_define ("__loongarch_frlen=64"); ++ else if (TARGET_SINGLE_FLOAT) ++ builtin_define ("__loongarch_frlen=32"); ++ else ++ builtin_define ("__loongarch_frlen=0"); + + if (ISA_HAS_LSX) + { +@@ -62,74 +116,12 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + builtin_define ("__loongarch_simd_width=256"); + } + +- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", loongarch_arch_info); +- LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", loongarch_tune_info); +- +- +- switch (loongarch_abi) +- { +- case ABILP32: +- builtin_define ("_ABILP32=1"); +- builtin_define ("_LOONGARCH_SIM=_ABILP32"); +- builtin_define ("__loongarch32"); +- break; +- +- case ABILPX32: +- builtin_define ("_ABILPX32=2"); +- builtin_define ("_LOONGARCH_SIM=_ABILPX32"); +- break; +- +- case ABILP64: +- builtin_define ("_ABILP64=3"); +- builtin_define ("_LOONGARCH_SIM=_ABILP64"); +- builtin_define ("__loongarch64"); +- break; +- } + ++ /* Native Data Sizes. */ + builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); + builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE); + builtin_define_with_int_value ("_LOONGARCH_SZPTR", POINTER_SIZE); +- builtin_define_with_int_value ("_LOONGARCH_FPSET", +- 32 / MAX_FPRS_PER_FMT); +- builtin_define_with_int_value ("_LOONGARCH_SPFPSET", +- 32); +- +- /* These defines reflect the ABI in use, not whether the +- FPU is directly accessible. */ +- if (TARGET_NO_FLOAT) +- builtin_define ("__loongarch_no_float"); +- else if (TARGET_HARD_FLOAT_ABI) +- builtin_define ("__loongarch_hard_float"); +- else +- builtin_define ("__loongarch_soft_float"); ++ builtin_define_with_int_value ("_LOONGARCH_FPSET", 32); ++ builtin_define_with_int_value ("_LOONGARCH_SPFPSET", 32); + +- if (TARGET_SINGLE_FLOAT) +- builtin_define ("__loongarch_single_float"); +- +- /* Macros dependent on the C dialect. 
*/ +- if (preprocessing_asm_p ()) +- { +- builtin_define_std ("LANGUAGE_ASSEMBLY"); +- builtin_define ("_LANGUAGE_ASSEMBLY"); +- } +- else if (c_dialect_cxx ()) +- { +- builtin_define ("_LANGUAGE_C_PLUS_PLUS"); +- builtin_define ("__LANGUAGE_C_PLUS_PLUS"); +- builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); +- } +- else +- { +- builtin_define_std ("LANGUAGE_C"); +- builtin_define ("_LANGUAGE_C"); +- } +- +- if (c_dialect_objc ()) +- { +- builtin_define ("_LANGUAGE_OBJECTIVE_C"); +- builtin_define ("__LANGUAGE_OBJECTIVE_C"); +- /* Bizarre, but retained for backwards compatibility. */ +- builtin_define_std ("LANGUAGE_C"); +- builtin_define ("_LANGUAGE_C"); +- } + } +diff --git a/gcc/config/loongarch/loongarch-cpu.c b/gcc/config/loongarch/loongarch-cpu.c +new file mode 100644 +index 000000000..ce2e649c8 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-cpu.c +@@ -0,0 +1,291 @@ ++/* Definitions for LoongArch CPU properties. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++#include "diagnostic-core.h" ++ ++#include "loongarch-def.h" ++#include "loongarch-opts.h" ++#include "loongarch-cpu.h" ++#include "loongarch-str.h" ++ ++/* Native CPU detection with "cpucfg" */ ++#define N_CPUCFG_WORDS 0x15 ++static uint32_t cpucfg_cache[N_CPUCFG_WORDS] = { 0 }; ++static const int cpucfg_useful_idx[] = {0, 1, 2, 16, 17, 18, 19}; ++ ++static uint32_t ++read_cpucfg_word (int wordno) ++{ ++ /* To make cross-compiler shut up. */ ++ (void) wordno; ++ uint32_t ret = 0; ++ ++ #ifdef __loongarch__ ++ __asm__ ("cpucfg %0,%1\n\t" :"=r"(ret) :"r"(wordno)); ++ #endif ++ ++ return ret; ++} ++ ++void ++cache_cpucfg (void) ++{ ++ for (unsigned int i = 0; i < sizeof (cpucfg_useful_idx) / sizeof (int); i++) ++ { ++ cpucfg_cache[cpucfg_useful_idx[i]] ++ = read_cpucfg_word (cpucfg_useful_idx[i]); ++ } ++} ++ ++uint32_t ++get_native_prid (void) ++{ ++ /* Fill loongarch_cpu_default_config[CPU_NATIVE] with cpucfg data, ++ see "Loongson Architecture Reference Manual" ++ (Volume 1, Section 2.2.10.5) */ ++ return cpucfg_cache[0]; ++} ++ ++const char* ++get_native_prid_str (void) ++{ ++ static char prid_str[9]; ++ sprintf (prid_str, "%08x", cpucfg_cache[0]); ++ return (const char*) prid_str; ++} ++ ++ ++/* Fill property tables for CPU_NATIVE. */ ++void ++fill_native_cpu_config (struct loongarch_target *tgt) ++{ ++ int arch_native_p = tgt->cpu_arch == CPU_NATIVE; ++ int tune_native_p = tgt->cpu_tune == CPU_NATIVE; ++ int native_cpu_type = CPU_NATIVE; ++ ++ /* Nothing needs to be done unless "-march/tune=native" ++ is given or implied. */ ++ if (!arch_native_p && !tune_native_p) ++ return; ++ ++ /* Fill cpucfg_cache with the "cpucfg" instruction. 
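A stand-alone sketch of the same detection primitive, meaningful only on LoongArch hosts; word 0 holds the PRID, and the switch further below keys on its bits via the 0x00ffff00 mask. Hypothetical demonstration, reusing the asm from read_cpucfg_word above:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t prid = 0;
#ifdef __loongarch__
  int wordno = 0;
  __asm__ ("cpucfg %0,%1\n\t" : "=r" (prid) : "r" (wordno));
#endif
  printf ("PRID word: 0x%08x, model bits: 0x%08x\n",
	  prid, prid & 0x00ffff00);
  return 0;
}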
*/ ++ cache_cpucfg (); ++ ++ /* Fill: tgt->cpu_arch | tgt->cpu_tune ++ With: processor ID (PRID) ++ At: cpucfg_words[0][31:0] */ ++ ++ switch (cpucfg_cache[0] & 0x00ffff00) ++ { ++ case 0x0014d000: /* LA664 */ ++ native_cpu_type = CPU_LA664; ++ break; ++ ++ case 0x0014c000: /* LA464 */ ++ native_cpu_type = CPU_LA464; ++ break; ++ ++ case 0x0014b000: /* LA364 */ ++ native_cpu_type = CPU_LA364; ++ break; ++ ++ case 0x0014a000: /* LA264 */ ++ native_cpu_type = CPU_LA264; ++ break; ++ ++ default: ++ /* Unknown PRID. */ ++ if (tune_native_p) ++ inform (UNKNOWN_LOCATION, "unknown processor ID %<0x%x%>, " ++ "some tuning parameters will fall back to default", ++ cpucfg_cache[0]); ++ break; ++ } ++ ++ /* if -march=native */ ++ if (arch_native_p) ++ { ++ int tmp; ++ tgt->cpu_arch = native_cpu_type; ++ ++ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].base ++ With: base architecture (ARCH) ++ At: cpucfg_words[1][1:0] */ ++ ++ #define PRESET_ARCH (loongarch_cpu_default_isa[tgt->cpu_arch].base) ++ switch (cpucfg_cache[1] & 0x3) ++ { ++ case 0x02: ++ tmp = ISA_BASE_LA64V100; ++ break; ++ ++ default: ++ fatal_error (UNKNOWN_LOCATION, ++ "unknown native base architecture %<0x%x%>, %qs failed", ++ (unsigned int) (cpucfg_cache[1] & 0x3), ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ } ++ ++ /* Check consistency with PRID presets. */ ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_ARCH) ++ warning (0, "base architecture %qs differs from PRID preset %qs", ++ loongarch_isa_base_strings[tmp], ++ loongarch_isa_base_strings[PRESET_ARCH]); ++ ++ /* Use the native value anyways. */ ++ PRESET_ARCH = tmp; ++ ++ /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].fpu ++ With: FPU type (FP, FP_SP, FP_DP) ++ At: cpucfg_words[2][2:0] */ ++ ++ #define PRESET_FPU (loongarch_cpu_default_isa[tgt->cpu_arch].fpu) ++ switch (cpucfg_cache[2] & 0x7) ++ { ++ case 0x07: ++ tmp = ISA_EXT_FPU64; ++ break; ++ ++ case 0x03: ++ tmp = ISA_EXT_FPU32; ++ break; ++ ++ case 0x00: ++ tmp = ISA_EXT_NONE; ++ break; ++ ++ default: ++ fatal_error (UNKNOWN_LOCATION, ++ "unknown native FPU type %<0x%x%>, %qs failed", ++ (unsigned int) (cpucfg_cache[2] & 0x7), ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ } ++ ++ /* Check consistency with PRID presets. */ ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_FPU) ++ warning (0, "floating-point unit %qs differs from PRID preset %qs", ++ loongarch_isa_ext_strings[tmp], ++ loongarch_isa_ext_strings[PRESET_FPU]); ++ ++ /* Use the native value anyways. */ ++ PRESET_FPU = tmp; ++ ++ ++ /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].simd ++ With: SIMD extension type (LSX, LASX) ++ At: cpucfg_words[2][7:6] */ ++ ++ #define PRESET_SIMD (loongarch_cpu_default_isa[tgt->cpu_arch].simd) ++ switch (cpucfg_cache[2] & 0xc0) ++ { ++ case 0xc0: ++ tmp = ISA_EXT_SIMD_LASX; ++ break; ++ ++ case 0x40: ++ tmp = ISA_EXT_SIMD_LSX; ++ break; ++ ++ case 0x80: ++ warning (0, "unknown SIMD extension " ++ "(%qs disabled while %qs is enabled), disabling SIMD", ++ loongarch_isa_ext_strings[ISA_EXT_SIMD_LSX], ++ loongarch_isa_ext_strings[ISA_EXT_SIMD_LASX]); ++ ++ case 0x00: ++ tmp = 0; ++ break; ++ } ++ ++ /* Check consistency with PRID presets. */ ++ /* ++ if (native_cpu_type != CPU_NATIVE && tmp != PRESET_SIMD) ++ warning (0, "SIMD extension %qs differs from PRID preset %qs", ++ loongarch_isa_ext_strings[tmp], ++ loongarch_isa_ext_strings[PRESET_SIMD]); ++ */ ++ ++ /* Use the native value anyways. 
*/ ++ PRESET_SIMD = tmp; ++ } ++ ++ if (tune_native_p) ++ { ++ tgt->cpu_tune = native_cpu_type; ++ ++ /* Fill: loongarch_cpu_cache[tgt->cpu_tune] ++ With: cache size info ++ At: cpucfg_words[16:20][31:0] */ ++ ++ #define PRESET_CACHE (loongarch_cpu_cache[tgt->cpu_tune]) ++ struct loongarch_cache native_cache; ++ int l1d_present = 0, l1u_present = 0; ++ int l2d_present = 0; ++ uint32_t l1_szword, l2_szword; ++ ++ l1u_present |= cpucfg_cache[16] & 3; /* bit[1:0]: unified l1 */ ++ l1d_present |= cpucfg_cache[16] & 4; /* bit[2:2]: l1d */ ++ l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0); ++ l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0; ++ ++ l2d_present |= cpucfg_cache[16] & 24; /* bit[4:3]: unified l2 */ ++ l2d_present |= cpucfg_cache[16] & 128; /* bit[7:7]: l2d */ ++ l2_szword = l2d_present ? cpucfg_cache[19]: 0; ++ ++ native_cache.l1d_line_size ++ = 1 << ((l1_szword & 0x7f000000) >> 24); /* bit[30:24]: log2(line) */ ++ ++ native_cache.l1d_size ++ = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ ++ * ((l1_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ ++ * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(line) */ ++ >> 10; /* in kibibytes */ ++ ++ native_cache.l2d_size ++ = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */ ++ * ((l2_szword & 0x0000ffff) + 1) /* bit[15:0]: sets - 1 */ ++ * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesz) */ ++ >> 10; /* in kibibytes */ ++ ++ /* ++ if (native_cpu_type != CPU_NATIVE && ( ++ native_cache.l1d_line_size != PRESET_CACHE.l1d_line_size || ++ native_cache.l1d_size != PRESET_CACHE.l1d_size || ++ native_cache.l2d_size != PRESET_CACHE.l2d_size)) ++ warning (0, "native cache info (%) " ++ "differs from PRID preset (%)", ++ native_cache.l1d_size, native_cache.l2d_size, ++ native_cache.l1d_line_size, ++ PRESET_CACHE.l1d_size, PRESET_CACHE.l2d_size, ++ PRESET_CACHE.l1d_line_size); ++ */ ++ ++ /* Use the native value anyways. */ ++ PRESET_CACHE.l1d_line_size = native_cache.l1d_line_size; ++ PRESET_CACHE.l1d_size = native_cache.l1d_size; ++ PRESET_CACHE.l2d_size = native_cache.l2d_size; ++ } ++} +diff --git a/gcc/config/loongarch/loongarch-d.c b/gcc/config/loongarch/loongarch-cpu.h +similarity index 59% +rename from gcc/config/loongarch/loongarch-d.c +rename to gcc/config/loongarch/loongarch-cpu.h +index 971e5d33e..08d018372 100644 +--- a/gcc/config/loongarch/loongarch-d.c ++++ b/gcc/config/loongarch/loongarch-cpu.h +@@ -1,5 +1,7 @@ +-/* Subroutines for the D front end on the LARCH architecture. +- Copyright (C) 2017 Free Software Foundation, Inc. ++/* Definitions for loongarch native cpu property detection routines. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by +@@ -15,17 +17,15 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +-#include "config.h" ++#ifndef LOONGARCH_CPU_H ++#define LOONGARCH_CPU_H ++ + #include "system.h" +-#include "coretypes.h" +-#include "tm.h" +-#include "d/d-target.h" +-#include "d/d-target-def.h" +- +-/* Implement TARGET_D_CPU_VERSIONS for LARCH targets. */ +- +-void +-loongarch_d_target_versions (void) +-{ +- // need to be improved !! 
+-} ++#include "loongarch-def.h" ++ ++void cache_cpucfg (void); ++void fill_native_cpu_config (struct loongarch_target *tgt); ++uint32_t get_native_prid (void); ++const char* get_native_prid_str (void); ++ ++#endif /* LOONGARCH_CPU_H */ +diff --git a/gcc/config/loongarch/loongarch-cpus.def b/gcc/config/loongarch/loongarch-cpus.def +deleted file mode 100644 +index 7ce2508e3..000000000 +--- a/gcc/config/loongarch/loongarch-cpus.def ++++ /dev/null +@@ -1,38 +0,0 @@ +-/* LARCH CPU names. +- Copyright (C) 1989-2018 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify +-it under the terms of the GNU General Public License as published by +-the Free Software Foundation; either version 3, or (at your option) +-any later version. +- +-GCC is distributed in the hope that it will be useful, +-but WITHOUT ANY WARRANTY; without even the implied warranty of +-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-GNU General Public License for more details. +- +-You should have received a copy of the GNU General Public License +-along with GCC; see the file COPYING3. If not see +-. */ +- +-/* A table describing all the processors GCC knows about. The first +- mention of an ISA level is taken as the canonical name for that +- ISA. +- +- To ease comparison, please keep this table in the same order +- as GAS's loongarch_cpu_info_table. Please also make sure that +- LARCH_ISA_LEVEL_SPEC and LARCH_ARCH_FLOAT_SPEC handle all -march +- options correctly. +- +- Before including this file, define a macro: +- +- LARCH_CPU (NAME, CPU, ISA, FLAGS) +- +- where the arguments are the fields of struct loongarch_cpu_info. */ +- +-/* Entries for generic ISAs. */ +-LARCH_CPU ("loongarch64", PROCESSOR_LOONGARCH64, 0, 0) +-LARCH_CPU ("la464", PROCESSOR_LA464, 0, 0) +- +diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c +new file mode 100644 +index 000000000..dde7a5dba +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-def.c +@@ -0,0 +1,232 @@ ++/* LoongArch static properties. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#include "loongarch-def.h" ++#include "loongarch-str.h" ++ ++/* CPU property tables. 
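A hypothetical caller of the interface declared in loongarch-cpu.h above, mirroring what option handling does when both -march=native and -mtune=native are given (CPU_NATIVE comes from loongarch-def.h):

static void
detect_host (struct loongarch_target *tgt)
{
  tgt->cpu_arch = CPU_NATIVE;	 /* as if -march=native */
  tgt->cpu_tune = CPU_NATIVE;	 /* as if -mtune=native */
  fill_native_cpu_config (tgt);	 /* runs cpucfg, rewrites the presets */
}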
*/ ++const char* ++loongarch_cpu_strings[N_TUNE_TYPES] = { ++ [CPU_NATIVE] = STR_CPU_NATIVE, ++ [CPU_ABI_DEFAULT] = STR_CPU_ABI_DEFAULT, ++ [CPU_LOONGARCH64] = STR_CPU_LOONGARCH64, ++ [CPU_LA464] = STR_CPU_LA464, ++ [CPU_LA364] = STR_CPU_LA364, ++ [CPU_LA264] = STR_CPU_LA264, ++ [CPU_LA664] = STR_CPU_LA664, ++}; ++ ++struct loongarch_isa ++loongarch_cpu_default_isa[N_ARCH_TYPES] = { ++ [CPU_LOONGARCH64] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = 0, ++ }, ++ [CPU_LA464] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = ISA_EXT_SIMD_LASX, ++ }, ++ [CPU_LA364] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = ISA_EXT_SIMD_LSX, ++ }, ++ [CPU_LA264] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = ISA_EXT_SIMD_LSX, ++ }, ++ [CPU_LA664] = { ++ .base = ISA_BASE_LA64V100, ++ .fpu = ISA_EXT_FPU64, ++ .simd = ISA_EXT_SIMD_LASX, ++ }, ++}; ++ ++struct loongarch_cache ++loongarch_cpu_cache[N_TUNE_TYPES] = { ++ [CPU_LOONGARCH64] = { ++ .l1d_line_size = 64, ++ .l1d_size = 64, ++ .l2d_size = 256, ++ .simultaneous_prefetches = 4, ++ }, ++ [CPU_LA464] = { ++ .l1d_line_size = 64, ++ .l1d_size = 64, ++ .l2d_size = 256, ++ .simultaneous_prefetches = 4, ++ }, ++ [CPU_LA364] = { ++ .l1d_line_size = 64, ++ .l1d_size = 64, ++ .l2d_size = 0, ++ .simultaneous_prefetches = 4, ++ }, ++ [CPU_LA264] = { ++ .l1d_line_size = 64, ++ .l1d_size = 32, ++ .l2d_size = 0, ++ .simultaneous_prefetches = 4, ++ }, ++ [CPU_LA664] = { ++ .l1d_line_size = 64, ++ .l1d_size = 64, ++ .l2d_size = 256, ++ .simultaneous_prefetches = 4, ++ }, ++}; ++ ++/* RTX costs */ ++/* Default RTX cost initializer. */ ++#define COSTS_N_INSNS(N) ((N) * 4) ++#define DEFAULT_COSTS \ ++ .fp_add = COSTS_N_INSNS (1), \ ++ .fp_mult_sf = COSTS_N_INSNS (2), \ ++ .fp_mult_df = COSTS_N_INSNS (4), \ ++ .fp_div_sf = COSTS_N_INSNS (6), \ ++ .fp_div_df = COSTS_N_INSNS (8), \ ++ .int_mult_si = COSTS_N_INSNS (1), \ ++ .int_mult_di = COSTS_N_INSNS (1), \ ++ .int_div_si = COSTS_N_INSNS (4), \ ++ .int_div_di = COSTS_N_INSNS (6), \ ++ .branch_cost = 6, \ ++ .memory_latency = 4 ++ ++/* The following properties cannot be looked up directly using "cpucfg". ++ So it is necessary to provide a default value for "unknown native" ++ tune targets (i.e. -mtune=native while PRID does not correspond to ++ any known "-mtune" type). */ ++ ++struct loongarch_rtx_cost_data ++loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = { ++ [CPU_NATIVE] = { ++ DEFAULT_COSTS ++ }, ++ [CPU_LOONGARCH64] = { ++ DEFAULT_COSTS ++ }, ++ [CPU_LA464] = { ++ DEFAULT_COSTS ++ }, ++ [CPU_LA364] = { ++ DEFAULT_COSTS ++ }, ++ [CPU_LA264] = { ++ DEFAULT_COSTS ++ }, ++ [CPU_LA664] = { ++ DEFAULT_COSTS ++ }, ++}; ++ ++/* RTX costs to use when optimizing for size. 
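A quick sanity check of the scale used above: with this COSTS_N_INSNS a single-insn operation costs 4 units, so DEFAULT_COSTS charges a double-precision divide eight instruction equivalents. Illustrative compile-time check only:

_Static_assert (COSTS_N_INSNS (1) == 4, "fp_add, one insn equivalent");
_Static_assert (COSTS_N_INSNS (8) == 32, "fp_div_df, eight insn equivalents");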
*/ ++const struct loongarch_rtx_cost_data ++loongarch_rtx_cost_optimize_size = { ++ .fp_add = 4, ++ .fp_mult_sf = 4, ++ .fp_mult_df = 4, ++ .fp_div_sf = 4, ++ .fp_div_df = 4, ++ .int_mult_si = 4, ++ .int_mult_di = 4, ++ .int_div_si = 4, ++ .int_div_di = 4, ++ .branch_cost = 2, ++ .memory_latency = 4, ++}; ++ ++int ++loongarch_cpu_issue_rate[N_TUNE_TYPES] = { ++ [CPU_NATIVE] = 4, ++ [CPU_LOONGARCH64] = 4, ++ [CPU_LA464] = 4, ++ [CPU_LA364] = 3, ++ [CPU_LA264] = 2, ++ [CPU_LA664] = 6, ++}; ++ ++int ++loongarch_cpu_multipass_dfa_lookahead[N_TUNE_TYPES] = { ++ [CPU_NATIVE] = 4, ++ [CPU_LOONGARCH64] = 4, ++ [CPU_LA464] = 4, ++ [CPU_LA364] = 4, ++ [CPU_LA264] = 4, ++ [CPU_LA664] = 4, ++}; ++ ++/* Wiring string definitions from loongarch-str.h to global arrays ++ with standard index values from loongarch-opts.h, so we can ++ print config-related messages and do ABI self-spec filtering ++ from the driver in a self-consistent manner. */ ++ ++const char* ++loongarch_isa_base_strings[N_ISA_BASE_TYPES] = { ++ [ISA_BASE_LA64V100] = STR_ISA_BASE_LA64V100, ++}; ++ ++const char* ++loongarch_isa_ext_strings[N_ISA_EXT_TYPES] = { ++ [ISA_EXT_NONE] = STR_NONE, ++ [ISA_EXT_FPU32] = STR_ISA_EXT_FPU32, ++ [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64, ++ [ISA_EXT_SIMD_LSX] = STR_ISA_EXT_LSX, ++ [ISA_EXT_SIMD_LASX] = STR_ISA_EXT_LASX, ++}; ++ ++const char* ++loongarch_abi_base_strings[N_ABI_BASE_OPTS] = { ++ [ABI_BASE_LP64D] = STR_ABI_BASE_LP64D, ++ [ABI_BASE_LP64F] = STR_ABI_BASE_LP64F, ++ [ABI_BASE_LP64S] = STR_ABI_BASE_LP64S, ++ [ABI_BASE_LP64] = STR_ABI_BASE_LP64, ++}; ++ ++const char* ++loongarch_abi_ext_strings[N_ABI_EXT_TYPES] = { ++ [ABI_EXT_BASE] = STR_ABI_EXT_BASE, ++}; ++ ++const char* ++loongarch_cmodel_strings[] = { ++ [CMODEL_NORMAL] = STR_CMODEL_NORMAL, ++ [CMODEL_TINY] = STR_CMODEL_TINY, ++ [CMODEL_TINY_STATIC] = STR_CMODEL_TS, ++ [CMODEL_LARGE] = STR_CMODEL_LARGE, ++ [CMODEL_EXTREME] = STR_CMODEL_EXTREME, ++}; ++ ++ ++/* ABI-related definitions. */ ++const struct loongarch_isa ++abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { ++ [ABI_BASE_LP64D] = { ++ [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU64, .simd = 0}, ++ }, ++ [ABI_BASE_LP64F] = { ++ [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU32, .simd = 0}, ++ }, ++ [ABI_BASE_LP64S] = { ++ [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_NONE, .simd = 0}, ++ }, ++}; +diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h +new file mode 100644 +index 000000000..45d9ac16c +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-def.h +@@ -0,0 +1,161 @@ ++/* LoongArch definitions. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Definition of standard codes for: ++ - base architecture types (isa_base), ++ - ISA extensions (isa_ext), ++ - base ABI types (abi_base), ++ - ABI extension types (abi_ext). 
++ ++ - code models (cmodel) ++ - other command-line switches (switch) ++ ++ These values are primarily used for implementing option handling ++ logic in "loongarch.opt", "loongarch-driver.c" and "loongarch-opt.c". ++ ++ As for the result of this option handling process, the following ++ scheme is adopted to represent the final configuration: ++ ++ - The target ABI is encoded with a tuple (abi_base, abi_ext) ++ using the code defined below. ++ ++ - The target ISA is encoded with a "struct loongarch_isa" defined ++ in loongarch-cpu.h. ++ ++ - The target microarchitecture is represented with a cpu model ++ index defined in loongarch-cpu.h. ++*/ ++ ++#ifndef LOONGARCH_DEF_H ++#define LOONGARCH_DEF_H ++ ++#include "loongarch-tune.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/* enum isa_base */ ++extern const char* loongarch_isa_base_strings[]; ++#define ISA_BASE_LA64V100 0 ++#define N_ISA_BASE_TYPES 1 ++ ++/* enum isa_ext_* */ ++extern const char* loongarch_isa_ext_strings[]; ++#define ISA_EXT_NONE 0 ++#define ISA_EXT_FPU32 1 ++#define ISA_EXT_FPU64 2 ++#define N_ISA_EXT_FPU_TYPES 3 ++#define ISA_EXT_SIMD_LSX 3 ++#define ISA_EXT_SIMD_LASX 4 ++#define N_ISA_EXT_TYPES 5 ++ ++/* enum abi_base */ ++extern const char* loongarch_abi_base_strings[]; ++#define ABI_BASE_LP64D 0 ++#define ABI_BASE_LP64F 1 ++#define ABI_BASE_LP64S 2 ++#define N_ABI_BASE_TYPES 3 ++#define ABI_BASE_LP64 3 ++#define N_ABI_BASE_OPTS 4 ++ ++#define IS_LP64_ABI_BASE(C) \ ++ (C == ABI_BASE_LP64D || C == ABI_BASE_LP64F || C == ABI_BASE_LP64S) ++ ++#define TO_LP64_ABI_BASE(C) (C) ++ ++#define ABI_FPU_64(abi_base) \ ++ (abi_base == ABI_BASE_LP64D) ++#define ABI_FPU_32(abi_base) \ ++ (abi_base == ABI_BASE_LP64F) ++#define ABI_FPU_NONE(abi_base) \ ++ (abi_base == ABI_BASE_LP64S) ++ ++ ++/* enum abi_ext */ ++extern const char* loongarch_abi_ext_strings[]; ++#define ABI_EXT_BASE 0 ++#define N_ABI_EXT_TYPES 1 ++ ++/* enum cmodel */ ++extern const char* loongarch_cmodel_strings[]; ++#define CMODEL_NORMAL 0 ++#define CMODEL_TINY 1 ++#define CMODEL_TINY_STATIC 2 ++#define CMODEL_LARGE 3 ++#define CMODEL_EXTREME 4 ++#define N_CMODEL_TYPES 5 ++ ++/* The common default value for variables whose assignments ++ are triggered by command-line options. */ ++ ++#define M_OPT_UNSET -1 ++#define M_OPT_ABSENT(opt_enum) ((opt_enum) == M_OPT_UNSET) ++ ++ ++/* Internal representation of the target. */ ++struct loongarch_isa ++{ ++ int base; /* ISA_BASE_ */ ++ int fpu; /* ISA_EXT_FPU_ */ ++ int simd; /* ISA_EXT_SIMD_ */ ++}; ++ ++struct loongarch_abi ++{ ++ int base; /* ABI_BASE_ */ ++ int ext; /* ABI_EXT_ */ ++}; ++ ++struct loongarch_target ++{ ++ struct loongarch_isa isa; ++ struct loongarch_abi abi; ++ int cpu_arch; /* CPU_ */ ++ int cpu_tune; /* same */ ++ int cmodel; /* CMODEL_ */ ++}; ++ ++/* CPU properties. 
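Putting the encodings together, the configuration selected by, say, "-march=la464 -mabi=lp64d -mcmodel=normal" would be represented roughly as below; CPU_LA464 is defined just after this struct, and the ISA values match the la464 row of loongarch_cpu_default_isa. Illustrative initializer only:

static struct loongarch_target la464_lp64d = {
  .isa = { .base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU64,
	   .simd = ISA_EXT_SIMD_LASX },
  .abi = { .base = ABI_BASE_LP64D, .ext = ABI_EXT_BASE },
  .cpu_arch = CPU_LA464,
  .cpu_tune = CPU_LA464,
  .cmodel = CMODEL_NORMAL,
};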
*/ ++/* index */ ++#define CPU_NATIVE 0 ++#define CPU_ABI_DEFAULT 1 ++#define CPU_LOONGARCH64 2 ++#define CPU_LA464 3 ++#define CPU_LA364 4 ++#define CPU_LA264 5 ++#define CPU_LA664 6 ++#define N_ARCH_TYPES 7 ++#define N_TUNE_TYPES 7 ++#define CPU_NONE 8 ++ ++/* parallel tables */ ++extern const char* loongarch_cpu_strings[]; ++extern struct loongarch_isa loongarch_cpu_default_isa[]; ++extern int loongarch_cpu_issue_rate[]; ++extern int loongarch_cpu_multipass_dfa_lookahead[]; ++ ++extern struct loongarch_cache loongarch_cpu_cache[]; ++extern struct loongarch_rtx_cost_data loongarch_cpu_rtx_cost_data[]; ++ ++#ifdef __cplusplus ++} ++#endif ++#endif /* LOONGARCH_DEF_H */ +diff --git a/gcc/config/loongarch/loongarch-driver.c b/gcc/config/loongarch/loongarch-driver.c +new file mode 100644 +index 000000000..1f56df84f +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-driver.c +@@ -0,0 +1,206 @@ ++/* Subroutines for the gcc driver. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++#include "obstack.h" ++#include "diagnostic-core.h" ++#include "opts.h" ++ ++#include "loongarch-opts.h" ++#include "loongarch-driver.h" ++ ++/* This flag is set to 1 if we believe that the user might be avoiding ++ linking (implicitly) against something from the startfile search paths. */ ++static int no_link = 0; ++ ++/* Use the public obstack from the gcc driver (defined in gcc.c). ++ This is for allocating space for the returned string. 
*/ ++extern struct obstack opts_obstack; ++ ++const char* ++la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED) ++{ ++ /* Initialize all fields of la_target to -1 */ ++ loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, ++ M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET); ++ return ""; ++} ++ ++const char* ++driver_set_no_link (int argc, const char **argv) ++{ ++ no_link = 1; ++ return ""; ++} ++ ++const char* ++driver_set_m_parm (int argc, const char **argv) ++{ ++ gcc_assert (argc == 2); ++ ++#define LARCH_DRIVER_PARSE_PARM(OPT_IDX, NAME, OPTSTR_LIST, \ ++ OPT_IDX_LO, OPT_IDX_HI) \ ++ if (strcmp (argv[0], OPTSTR_##NAME) == 0) \ ++ for (int i = (OPT_IDX_LO); i < (OPT_IDX_HI); i++) \ ++ { \ ++ if ((OPTSTR_LIST)[i] != 0) \ ++ if (strcmp (argv[1], (OPTSTR_LIST)[i]) == 0) \ ++ { \ ++ (OPT_IDX) = i; \ ++ return 0; \ ++ } \ ++ } ++ ++ LARCH_DRIVER_PARSE_PARM (la_target.abi.base, ABI_BASE, \ ++ loongarch_abi_base_strings, 0, N_ABI_BASE_OPTS) ++ ++ LARCH_DRIVER_PARSE_PARM (la_target.isa.fpu, ISA_EXT_FPU, \ ++ loongarch_isa_ext_strings, 0, N_ISA_EXT_FPU_TYPES) ++ ++ LARCH_DRIVER_PARSE_PARM (la_target.isa.simd, ISA_EXT_SIMD, \ ++ loongarch_isa_ext_strings, 0, N_ISA_EXT_TYPES) ++ ++ LARCH_DRIVER_PARSE_PARM (la_target.cpu_arch, ARCH, \ ++ loongarch_cpu_strings, 0, N_ARCH_TYPES) ++ ++ LARCH_DRIVER_PARSE_PARM (la_target.cpu_tune, TUNE, \ ++ loongarch_cpu_strings, 0, N_TUNE_TYPES) ++ ++ LARCH_DRIVER_PARSE_PARM (la_target.cmodel, CMODEL, \ ++ loongarch_cmodel_strings, 0, N_CMODEL_TYPES) ++ ++ gcc_unreachable (); ++} ++ ++static void ++driver_record_deferred_opts (struct loongarch_flags *flags) ++{ ++ unsigned int i; ++ cl_deferred_option *opt; ++ vec *v = (vec *) la_deferred_options; ++ ++ gcc_assert (flags); ++ ++ /* Initialize flags */ ++ flags->flt = M_OPT_UNSET; ++ flags->flt_str = NULL; ++ flags->sx[0] = flags->sx[1] = 0; ++ ++ int sx_flag_idx = 0; ++ ++ if (v) ++ FOR_EACH_VEC_ELT (*v, i, opt) ++ { ++ switch (opt->opt_index) ++ { ++ case OPT_mlsx: ++ flags->sx[sx_flag_idx++] = ISA_EXT_SIMD_LSX * (opt->value ? 1 : -1); ++ break; ++ ++ case OPT_mlasx: ++ flags->sx[sx_flag_idx++] = ISA_EXT_SIMD_LASX * (opt->value ? 1 : -1); ++ break; ++ ++ case OPT_msoft_float: ++ flags->flt = ISA_EXT_NONE; ++ flags->flt_str = OPTSTR_SOFT_FLOAT; ++ break; ++ ++ case OPT_msingle_float: ++ flags->flt = ISA_EXT_FPU32; ++ flags->flt_str = OPTSTR_SINGLE_FLOAT; ++ break; ++ ++ case OPT_mdouble_float: ++ flags->flt = ISA_EXT_FPU64; ++ flags->flt_str = OPTSTR_DOUBLE_FLOAT; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ gcc_assert (sx_flag_idx <= 2); ++ } ++} ++ ++const char* ++driver_get_normalized_m_opts (int argc, const char **argv ATTRIBUTE_UNUSED) ++{ ++ if (argc != 0) ++ return " %eget_normalized_m_opts requires no argument.\n"; ++ ++ struct loongarch_flags flags; ++ driver_record_deferred_opts (&flags); ++ loongarch_config_target (&la_target, &flags, !no_link /* follow_multilib_list */); ++ ++ /* Output normalized option strings. */ ++ obstack_blank (&opts_obstack, 0); ++ ++#undef APPEND_LTR ++#define APPEND_LTR(S) \ ++ obstack_grow (&opts_obstack, (const void*) (S), \ ++ sizeof ((S)) / sizeof (char) -1) ++ ++#undef APPEND_VAL ++#define APPEND_VAL(S) \ ++ obstack_grow (&opts_obstack, (const void*) (S), strlen ((S))) ++ ++#undef APPEND_OPT ++#define APPEND_OPT(NAME) \ ++ APPEND_LTR (" %. 
*/ ++ ++#ifndef LOONGARCH_DRIVER_H ++#define LOONGARCH_DRIVER_H ++ ++#include "loongarch-str.h" ++ ++extern const char* ++la_driver_init (int argc, const char **argv); ++ ++extern const char* ++driver_set_m_parm (int argc, const char **argv); ++ ++extern const char* ++driver_set_no_link (int argc, const char **argv); ++ ++extern const char* ++driver_get_normalized_m_opts (int argc, const char **argv); ++ ++#define EXTRA_SPEC_FUNCTIONS \ ++ { "driver_init", la_driver_init }, \ ++ { "set_m_parm", driver_set_m_parm }, \ ++ { "set_no_link", driver_set_no_link }, \ ++ { "get_normalized_m_opts", driver_get_normalized_m_opts }, ++ ++/* Pre-process ABI-related options. */ ++#define LA_SET_PARM_SPEC(NAME) \ ++ " %{m" OPTSTR_##NAME "=*: %:set_m_parm(" OPTSTR_##NAME " %*)}" \ ++ ++#define DRIVER_HANDLE_MACHINE_OPTIONS \ ++ " %:driver_init()" \ ++ " %{c|S|E|nostdlib: %:set_no_link()}" \ ++ " %{nostartfiles: %{nodefaultlibs: %:set_no_link()}}" \ ++ LA_SET_PARM_SPEC (ABI_BASE) \ ++ LA_SET_PARM_SPEC (ARCH) \ ++ LA_SET_PARM_SPEC (TUNE) \ ++ LA_SET_PARM_SPEC (ISA_EXT_FPU) \ ++ LA_SET_PARM_SPEC (ISA_EXT_SIMD) \ ++ LA_SET_PARM_SPEC (CMODEL) \ ++ " %:get_normalized_m_opts()" ++ ++#define DRIVER_SELF_SPECS \ ++ DRIVER_HANDLE_MACHINE_OPTIONS ++ ++/* ABI spec strings. */ ++#define ABI_GRLEN_SPEC \ ++ "%{mabi=lp64*:64}" \ ++ ++#define ABI_SPEC \ ++ "%{mabi=lp64d:lp64d}" \ ++ "%{mabi=lp64f:lp64f}" \ ++ "%{mabi=lp64s:lp64s}" \ ++ ++#endif /* LOONGARCH_DRIVER_H */ +diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def +index a10a025ba..1ef4e2dc8 100644 +--- a/gcc/config/loongarch/loongarch-ftypes.def ++++ b/gcc/config/loongarch/loongarch-ftypes.def +@@ -1,5 +1,7 @@ +-/* Definitions of prototypes for LARCH built-in functions. -*- C -*- +- Copyright (C) 2007-2018 Free Software Foundation, Inc. ++/* Definitions of prototypes for LoongArch built-in functions. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. ++ Based on MIPS target for GNU compiler. + + This file is part of GCC. + +@@ -18,11 +20,11 @@ along with GCC; see the file COPYING3. If not see + . */ + + /* Invoke DEF_LARCH_FTYPE (NARGS, LIST) for each prototype used by +- LARCH built-in functions, where: ++ LoongArch built-in functions, where: + + NARGS is the number of arguments. + LIST contains the return-type code followed by the codes for each +- argument type. ++ argument type. + + Argument- and return-type codes are either modes or one of the following: + +@@ -30,65 +32,55 @@ along with GCC; see the file COPYING3. If not see + INT for integer_type_node + POINTER for ptr_type_node + +- (we don't use PTR because that's a ANSI-compatibillity macro). ++ (we don't use PTR because that's a ANSI-compatibility macro). + + Please keep this list lexicographically sorted by the LIST argument. */ +-DEF_LARCH_FTYPE (1, (DF, DF)) +-DEF_LARCH_FTYPE (2, (DF, DF, DF)) +-DEF_LARCH_FTYPE (1, (DF, V2DF)) +-DEF_LARCH_FTYPE (1, (DF, V4DF)) + +-DEF_LARCH_FTYPE (1, (DI, DI)) +-DEF_LARCH_FTYPE (1, (DI, SI)) +-DEF_LARCH_FTYPE (1, (DI, UQI)) +-DEF_LARCH_FTYPE (1, (UDI, USI)) ++/* Non-vector builtin types. 
*/ ++ + DEF_LARCH_FTYPE (1, (UQI, USI)) +-DEF_LARCH_FTYPE (1, (USI, UQI)) + DEF_LARCH_FTYPE (1, (UHI, USI)) +-DEF_LARCH_FTYPE (2, (DI, DI, DI)) +-DEF_LARCH_FTYPE (2, (DI, DI, SI)) +-DEF_LARCH_FTYPE (2, (DI, DI, UQI)) ++DEF_LARCH_FTYPE (1, (USI, USI)) ++DEF_LARCH_FTYPE (1, (UDI, USI)) ++DEF_LARCH_FTYPE (1, (USI, UQI)) ++DEF_LARCH_FTYPE (1, (VOID, USI)) ++ ++DEF_LARCH_FTYPE (2, (VOID, UQI, USI)) ++DEF_LARCH_FTYPE (2, (VOID, UHI, USI)) ++DEF_LARCH_FTYPE (2, (VOID, USI, USI)) ++DEF_LARCH_FTYPE (2, (VOID, UDI, USI)) + DEF_LARCH_FTYPE (2, (VOID, DI, UQI)) + DEF_LARCH_FTYPE (2, (VOID, SI, UQI)) ++DEF_LARCH_FTYPE (2, (VOID, DI, DI)) ++DEF_LARCH_FTYPE (2, (SI, SI, UQI)) ++DEF_LARCH_FTYPE (2, (DI, DI, UQI)) ++DEF_LARCH_FTYPE (2, (SI, QI, SI)) ++DEF_LARCH_FTYPE (2, (SI, HI, SI)) ++DEF_LARCH_FTYPE (2, (SI, SI, SI)) ++DEF_LARCH_FTYPE (2, (SI, DI, SI)) ++DEF_LARCH_FTYPE (2, (USI, USI, USI)) + DEF_LARCH_FTYPE (2, (UDI, UDI, USI)) +-DEF_LARCH_FTYPE (3, (DI, DI, SI, SI)) +-DEF_LARCH_FTYPE (3, (DI, DI, USI, USI)) +-DEF_LARCH_FTYPE (3, (DI, DI, DI, QI)) ++ ++DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) ++DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) ++DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) + DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI)) ++ ++/* Vector builtin types. */ ++ ++DEF_LARCH_FTYPE (1, (DF, V2DF)) ++DEF_LARCH_FTYPE (1, (DF, V4DF)) + DEF_LARCH_FTYPE (3, (DI, DI, V2HI, V2HI)) + DEF_LARCH_FTYPE (3, (DI, DI, V4QI, V4QI)) +-DEF_LARCH_FTYPE (2, (DI, POINTER, SI)) +-DEF_LARCH_FTYPE (2, (DI, SI, SI)) +-DEF_LARCH_FTYPE (2, (DI, USI, USI)) + DEF_LARCH_FTYPE (2, (DI, V2DI, UQI)) + DEF_LARCH_FTYPE (2, (DI, V4DI, UQI)) + +-DEF_LARCH_FTYPE (2, (INT, DF, DF)) +-DEF_LARCH_FTYPE (2, (INT, SF, SF)) + DEF_LARCH_FTYPE (2, (INT, V2SF, V2SF)) + DEF_LARCH_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF)) + +-DEF_LARCH_FTYPE (1, (SF, SF)) +-DEF_LARCH_FTYPE (2, (SF, SF, SF)) + DEF_LARCH_FTYPE (1, (SF, V2SF)) + DEF_LARCH_FTYPE (1, (SF, V4SF)) + +-DEF_LARCH_FTYPE (2, (SI, DI, SI)) +-DEF_LARCH_FTYPE (2, (SI, POINTER, SI)) +-DEF_LARCH_FTYPE (1, (SI, SI)) +-DEF_LARCH_FTYPE (1, (USI, USI)) +-DEF_LARCH_FTYPE (1, (SI, UDI)) +-DEF_LARCH_FTYPE (2, (QI, QI, QI)) +-DEF_LARCH_FTYPE (2, (HI, HI, HI)) +-DEF_LARCH_FTYPE (2, (SI, QI, SI)) +-DEF_LARCH_FTYPE (2, (SI, HI, SI)) +-DEF_LARCH_FTYPE (2, (SI, SI, SI)) +-DEF_LARCH_FTYPE (2, (SI, SI, UQI)) +-DEF_LARCH_FTYPE (2, (USI, USI, USI)) +-DEF_LARCH_FTYPE (3, (SI, SI, SI, SI)) +-DEF_LARCH_FTYPE (3, (SI, SI, SI, QI)) +-DEF_LARCH_FTYPE (3, (USI, USI, USI, USI)) +-DEF_LARCH_FTYPE (1, (SI, UQI)) + DEF_LARCH_FTYPE (1, (SI, UV16QI)) + DEF_LARCH_FTYPE (1, (SI, UV32QI)) + DEF_LARCH_FTYPE (1, (SI, UV2DI)) +@@ -106,9 +98,7 @@ DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI)) + DEF_LARCH_FTYPE (2, (SI, V4SI, UQI)) + DEF_LARCH_FTYPE (2, (SI, V8SI, UQI)) + DEF_LARCH_FTYPE (2, (SI, V8HI, UQI)) +-DEF_LARCH_FTYPE (1, (SI, VOID)) + +-DEF_LARCH_FTYPE (2, (UDI, UDI, UDI)) + DEF_LARCH_FTYPE (2, (USI, V32QI, UQI)) + DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI)) + DEF_LARCH_FTYPE (2, (USI, V8SI, UQI)) +@@ -119,8 +109,6 @@ DEF_LARCH_FTYPE (2, (UDI, V4DI, UQI)) + DEF_LARCH_FTYPE (2, (USI, V16QI, UQI)) + DEF_LARCH_FTYPE (2, (USI, V4SI, UQI)) + DEF_LARCH_FTYPE (2, (USI, V8HI, UQI)) +-DEF_LARCH_FTYPE (1, (USI, VOID)) +- + DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UQI)) + DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, USI)) + DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UV16QI)) +@@ -476,19 +464,6 @@ DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI)) + DEF_LARCH_FTYPE (1, (V8QI, V8QI)) + DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI)) + +-DEF_LARCH_FTYPE (2, (VOID, SI, CVPOINTER)) 
+-DEF_LARCH_FTYPE (2, (VOID, SI, SI)) +-DEF_LARCH_FTYPE (2, (VOID, DI, DI)) +-DEF_LARCH_FTYPE (2, (VOID, UQI, SI)) +-DEF_LARCH_FTYPE (1, (VOID, USI)) +-DEF_LARCH_FTYPE (2, (VOID, USI, UQI)) +-DEF_LARCH_FTYPE (1, (VOID, UHI)) +-DEF_LARCH_FTYPE (2, (VOID, UQI, USI)) +-DEF_LARCH_FTYPE (2, (VOID, UHI, USI)) +-DEF_LARCH_FTYPE (2, (VOID, USI, USI)) +-DEF_LARCH_FTYPE (2, (VOID, UDI, USI)) +-DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI)) +-DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI)) + DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI)) + DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI)) + DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, SI)) +@@ -648,36 +623,36 @@ DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI)) + DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI)) + + +-DEF_LARCH_FTYPE(2,(V4DI,V16HI,V16HI)) +-DEF_LARCH_FTYPE(2,(V4DI,UV4SI,V4SI)) +-DEF_LARCH_FTYPE(2,(V8SI,UV16HI,V16HI)) +-DEF_LARCH_FTYPE(2,(V16HI,UV32QI,V32QI)) +-DEF_LARCH_FTYPE(2,(V4DI,UV8SI,V8SI)) +-DEF_LARCH_FTYPE(3,(V4DI,V4DI,V16HI,V16HI)) +-DEF_LARCH_FTYPE(2,(UV32QI,V32QI,UV32QI)) +-DEF_LARCH_FTYPE(2,(UV16HI,V16HI,UV16HI)) +-DEF_LARCH_FTYPE(2,(UV8SI,V8SI,UV8SI)) +-DEF_LARCH_FTYPE(2,(UV4DI,V4DI,UV4DI)) +-DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV4DI,V4DI)) +-DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV8SI,V8SI)) +-DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV16HI,V16HI)) +-DEF_LARCH_FTYPE(3,(V16HI,V16HI,UV32QI,V32QI)) +-DEF_LARCH_FTYPE(2,(V4DI,UV4DI,V4DI)) +-DEF_LARCH_FTYPE(2,(V8SI,V32QI,V32QI)) +-DEF_LARCH_FTYPE(2,(UV4DI,UV16HI,UV16HI)) +-DEF_LARCH_FTYPE(2,(V4DI,UV16HI,V16HI)) +-DEF_LARCH_FTYPE(3,(V8SI,V8SI,V32QI,V32QI)) +-DEF_LARCH_FTYPE(3,(UV8SI,UV8SI,UV32QI,UV32QI)) +-DEF_LARCH_FTYPE(3,(UV4DI,UV4DI,UV16HI,UV16HI)) +-DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV32QI,V32QI)) +-DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV16HI,V16HI)) +-DEF_LARCH_FTYPE(2,(UV8SI,UV32QI,UV32QI)) +-DEF_LARCH_FTYPE(2,(V8SI,UV32QI,V32QI)) +- +-DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI)) +-DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI)) +-DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI)) +-DEF_LARCH_FTYPE(4,(VOID,V2DI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE (2, (V4DI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV4SI, V4SI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (V16HI, UV32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV8SI, V8SI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (UV32QI, V32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (UV16HI, V16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (UV8SI, V8SI, UV8SI)) ++DEF_LARCH_FTYPE (2, (UV4DI, V4DI, UV4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV4DI, V4DI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV8SI, V8SI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, V16HI)) ++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, V4DI)) ++DEF_LARCH_FTYPE (2, (V8SI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (2, (UV4DI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (2, (V4DI, UV16HI, V16HI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V32QI, V32QI)) ++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV16HI, UV16HI)) ++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV32QI, V32QI)) ++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV16HI, V16HI)) ++DEF_LARCH_FTYPE (2, (UV8SI, UV32QI, UV32QI)) ++DEF_LARCH_FTYPE (2, (V8SI, UV32QI, V32QI)) ++ ++DEF_LARCH_FTYPE (4, (VOID, V16QI, CVPOINTER, SI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V8HI, CVPOINTER, SI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V4SI, CVPOINTER, SI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V2DI, CVPOINTER, SI, UQI)) + + DEF_LARCH_FTYPE (2, (DI, V16QI, UQI)) + DEF_LARCH_FTYPE (2, (DI, V8HI, UQI)) +@@ -699,16 +674,16 
@@ DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, V16HI, USI)) + DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, V8SI, USI)) + DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, V4DI, USI)) + +-DEF_LARCH_FTYPE(4,(VOID,V32QI,CVPOINTER,SI,UQI)) +-DEF_LARCH_FTYPE(4,(VOID,V16HI,CVPOINTER,SI,UQI)) +-DEF_LARCH_FTYPE(4,(VOID,V8SI,CVPOINTER,SI,UQI)) +-DEF_LARCH_FTYPE(4,(VOID,V4DI,CVPOINTER,SI,UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V32QI, CVPOINTER, SI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V16HI, CVPOINTER, SI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V8SI, CVPOINTER, SI, UQI)) ++DEF_LARCH_FTYPE (4, (VOID, V4DI, CVPOINTER, SI, UQI)) + +-DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI)) +-DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER)) +-DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER)) +-DEF_LARCH_FTYPE(2,(V32QI,CVPOINTER,CVPOINTER)) +-DEF_LARCH_FTYPE(3,(VOID,V32QI,CVPOINTER,CVPOINTER)) ++DEF_LARCH_FTYPE (1, (BOOLEAN, V16QI)) ++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, CVPOINTER)) ++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, CVPOINTER)) ++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, CVPOINTER)) ++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, CVPOINTER)) + + DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI)) + DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI)) +diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def +index fe5bc38d9..53392b484 100644 +--- a/gcc/config/loongarch/loongarch-modes.def ++++ b/gcc/config/loongarch/loongarch-modes.def +@@ -1,5 +1,7 @@ +-/* LARCH extra machine modes. +- Copyright (C) 2003-2018 Free Software Foundation, Inc. ++/* LoongArch extra machine modes. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. ++ Based on MIPS target for GNU compiler. + + This file is part of GCC. + +diff --git a/gcc/config/loongarch/loongarch-opts.c b/gcc/config/loongarch/loongarch-opts.c +new file mode 100644 +index 000000000..cf11f67d1 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-opts.c +@@ -0,0 +1,725 @@ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++#include "obstack.h" ++#include "diagnostic-core.h" ++ ++#include "loongarch-cpu.h" ++#include "loongarch-opts.h" ++#include "loongarch-str.h" ++#include "loongarch-def.h" ++ ++struct loongarch_target la_target; ++ ++/* ABI-related configuration. */ ++#define ABI_COUNT (sizeof(abi_priority_list)/sizeof(struct loongarch_abi)) ++static const struct loongarch_abi ++abi_priority_list[] = { ++ {ABI_BASE_LP64D, ABI_EXT_BASE}, ++ {ABI_BASE_LP64F, ABI_EXT_BASE}, ++ {ABI_BASE_LP64S, ABI_EXT_BASE}, ++}; ++ ++/* Initialize enabled_abi_types from TM_MULTILIB_LIST. 
*/ ++#ifdef LA_DISABLE_MULTILIB ++#define MULTILIB_LIST_LEN 1 ++#else ++#define MULTILIB_LIST_LEN (sizeof (tm_multilib_list) / sizeof (int) / 2) ++static const int tm_multilib_list[] = { TM_MULTILIB_LIST }; ++#endif ++static int enabled_abi_types[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { 0 }; ++ ++#define isa_required(ABI) (abi_minimal_isa[(ABI).base][(ABI).ext]) ++extern "C" const struct loongarch_isa ++abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES]; ++ ++static inline int ++is_multilib_enabled (struct loongarch_abi abi) ++{ ++ return enabled_abi_types[abi.base][abi.ext]; ++} ++ ++static void ++init_enabled_abi_types () ++{ ++#ifdef LA_DISABLE_MULTILIB ++ enabled_abi_types[DEFAULT_ABI_BASE][DEFAULT_ABI_EXT] = 1; ++#else ++ int abi_base, abi_ext; ++ for (unsigned int i = 0; i < MULTILIB_LIST_LEN; i++) ++ { ++ abi_base = tm_multilib_list[i << 1]; ++ abi_ext = tm_multilib_list[(i << 1) + 1]; ++ enabled_abi_types[abi_base][abi_ext] = 1; ++ } ++#endif ++} ++ ++/* String processing. */ ++static struct obstack msg_obstack; ++#define APPEND_STRING(STR) obstack_grow (&msg_obstack, STR, strlen(STR)); ++#define APPEND1(CH) obstack_1grow(&msg_obstack, CH); ++ ++static const char* abi_str (struct loongarch_abi abi); ++static const char* isa_str (const struct loongarch_isa *isa, char separator); ++static const char* arch_str (const struct loongarch_target *target); ++static const char* multilib_enabled_abi_list (); /* Misc */ ++static struct loongarch_abi isa_default_abi (const struct loongarch_isa *isa); ++static int isa_base_compat_p (const struct loongarch_isa *set1, ++ const struct loongarch_isa *set2); ++static int isa_fpu_compat_p (const struct loongarch_isa *set1, ++ const struct loongarch_isa *set2); ++static int abi_compat_p (const struct loongarch_isa *isa, ++ struct loongarch_abi abi); ++static int abi_default_cpu_arch (struct loongarch_abi abi, struct loongarch_isa *isa); ++ ++/* Mandatory configure-time defaults. */ ++#ifndef DEFAULT_ABI_BASE ++#error missing definition of DEFAULT_ABI_BASE in ${tm_defines}. ++#endif ++ ++#ifndef DEFAULT_ABI_EXT ++#error missing definition of DEFAULT_ABI_EXT in ${tm_defines}. ++#endif ++ ++#ifndef DEFAULT_CPU_ARCH ++#error missing definition of DEFAULT_CPU_ARCH in ${tm_defines}. ++#endif ++ ++/* Optional configure-time defaults. */ ++#ifdef DEFAULT_CPU_TUNE ++static int with_default_tune = 1; ++#else ++#define DEFAULT_CPU_TUNE -1 ++static int with_default_tune = 0; ++#endif ++ ++#ifdef DEFAULT_ISA_EXT_FPU ++static int with_default_fpu = 1; ++#else ++#define DEFAULT_ISA_EXT_FPU -1 ++static int with_default_fpu = 0; ++#endif ++ ++#ifdef DEFAULT_ISA_EXT_SIMD ++static int with_default_simd = 1; ++#else ++#define DEFAULT_ISA_EXT_SIMD -1 ++static int with_default_simd = 0; ++#endif ++ ++ ++/* Initialize loongarch_target from separate option variables. */ ++ ++void ++loongarch_init_target (struct loongarch_target *target, ++ int cpu_arch, int cpu_tune, int fpu, int simd, ++ int abi_base, int abi_ext, int cmodel) ++{ ++ if (!target) ++ return; ++ target->cpu_arch = cpu_arch; ++ target->cpu_tune = cpu_tune; ++ target->isa.fpu = fpu; ++ target->isa.simd = simd; ++ target->abi.base = abi_base; ++ target->abi.ext = abi_ext; ++ target->cmodel = cmodel; ++} ++ ++ ++/* Handle combinations of -m parameters ++ (see loongarch.opt and loongarch-opts.h). 
*/ ++ ++void ++loongarch_config_target (struct loongarch_target *target, ++ struct loongarch_flags *flags, ++ int follow_multilib_list_p) ++{ ++ struct loongarch_target t; ++ if (!target) ++ return; ++ ++ /* Initialization */ ++ init_enabled_abi_types (); ++ obstack_init (&msg_obstack); ++ ++ struct { ++ int arch, tune, fpu, simd, abi_base, abi_ext, cmodel, abi_flt; ++ } constrained = { ++ M_OPT_ABSENT (target->cpu_arch) ? 0 : 1, ++ M_OPT_ABSENT (target->cpu_tune) ? 0 : 1, ++ M_OPT_ABSENT (target->isa.fpu) ? 0 : 1, ++ M_OPT_ABSENT (target->isa.simd) ? 0 : 1, ++ M_OPT_ABSENT (target->abi.base) ? 0 : 1, ++ M_OPT_ABSENT (target->abi.ext) ? 0 : 1, ++ M_OPT_ABSENT (target->cmodel) ? 0 : 1, ++ M_OPT_ABSENT (target->abi.base) ? 0 : 1, ++ }; ++ ++ /* 1. Target ABI */ ++ if (constrained.abi_base && target->abi.base >= N_ABI_BASE_TYPES) ++ /* Special treatments for legacy options ("-mabi=lp64") ++ in GCC driver. */ ++ switch (target->abi.base) ++ { ++ case ABI_BASE_LP64: ++ t.abi.base = TO_LP64_ABI_BASE (DEFAULT_ABI_BASE); ++ constrained.abi_flt = 0; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ else if (constrained.abi_base) ++ t.abi.base = target->abi.base; ++ else ++ t.abi.base = DEFAULT_ABI_BASE; ++ ++ t.abi.ext = constrained.abi_ext ? target->abi.ext : DEFAULT_ABI_EXT; ++ ++ /* Process -m*-float flags */ ++ if (flags && !M_OPT_ABSENT (flags->flt)) ++ { ++ /* Modifying the original "target" here makes it easier to write the ++ t.isa.fpu assignment below, because otherwise there would be three ++ levels of precedence (-m*-float / -mfpu / -march) to be handled ++ (now the first two are merged). */ ++ ++ target->isa.fpu = flags->flt; ++ constrained.fpu = 1; ++ ++ /* The target ISA is not ready yet, but (isa_required (t.abi) ++ + forced fpu) is enough for computing the forced base ABI. */ ++ ++ struct loongarch_isa force_isa = isa_required (t.abi); ++ force_isa.fpu = flags->flt; ++ ++ struct loongarch_abi force_abi; ++ force_abi.base = isa_default_abi (&force_isa).base; ++ ++ if (constrained.abi_base && constrained.abi_flt ++ && (t.abi.base != force_abi.base)) ++ { ++ force_abi.ext = t.abi.ext; ++ inform (UNKNOWN_LOCATION, ++ "%<-m%s%> overrides %<-m%s=%s%>, adjusting ABI to %qs", ++ flags->flt_str, OPTSTR_ABI_BASE, ++ loongarch_abi_base_strings[t.abi.base], ++ abi_str (force_abi)); ++ } ++ ++ t.abi.base = force_abi.base; ++ constrained.abi_flt = 1; ++ } ++ ++#ifdef LA_DISABLE_MULTILIB ++ if (follow_multilib_list_p) ++ if (t.abi.base != DEFAULT_ABI_BASE || t.abi.ext != DEFAULT_ABI_EXT) ++ { ++ static const struct loongarch_abi default_abi ++ = {DEFAULT_ABI_BASE, DEFAULT_ABI_EXT}; ++ ++ warning (0, "ABI changed (%qs to %qs) while multilib is disabled", ++ abi_str (default_abi), abi_str (t.abi)); ++ } ++#endif ++ ++ /* 2. Target CPU */ ++ t.cpu_arch = constrained.arch ? target->cpu_arch : DEFAULT_CPU_ARCH; ++ ++ /* If cpu_tune is not set with either -mtune or --with-tune, ++ the current cpu_arch is used as its default. */ ++ t.cpu_tune = constrained.tune ? target->cpu_tune ++ : (constrained.arch ? target->cpu_arch : ++ (with_default_tune ? DEFAULT_CPU_TUNE : DEFAULT_CPU_ARCH)); ++ ++ ++ /* Handle -march/tune=native */ ++#ifdef __loongarch__ ++ /* For native compilers, gather local CPU information ++ and fill the "CPU_NATIVE" index of arrays defined in ++ loongarch-cpu.c. 
*/ ++ ++ fill_native_cpu_config (&t); ++ ++#else ++ if (t.cpu_arch == CPU_NATIVE) ++ fatal_error (UNKNOWN_LOCATION, ++ "%qs does not work on a cross compiler", ++ "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE); ++ ++ else if (t.cpu_tune == CPU_NATIVE) ++ fatal_error (UNKNOWN_LOCATION, ++ "%qs does not work on a cross compiler", ++ "-m" OPTSTR_TUNE "=" STR_CPU_NATIVE); ++#endif ++ ++ /* Handle -march/tune=abi-default */ ++ if (t.cpu_tune == CPU_ABI_DEFAULT) ++ t.cpu_tune = abi_default_cpu_arch (t.abi, NULL); ++ ++ if (t.cpu_arch == CPU_ABI_DEFAULT) ++ { ++ t.cpu_arch = abi_default_cpu_arch (t.abi, &(t.isa)); ++ loongarch_cpu_default_isa[t.cpu_arch] = t.isa; ++ } ++ ++ /* 3. Target base ISA */ ++config_target_isa: ++ ++ /* Get default ISA from "-march" or its default value. */ ++ t.isa = loongarch_cpu_default_isa[t.cpu_arch]; ++ ++ /* Apply incremental changes. */ ++ /* "-march=native" overrides the default FPU type. */ ++ ++ t.isa.fpu = constrained.fpu ? target->isa.fpu : ++ (constrained.arch ? t.isa.fpu : ++ (with_default_fpu ? DEFAULT_ISA_EXT_FPU : t.isa.fpu)); ++ ++ t.isa.simd = constrained.simd ? target->isa.simd : ++ (constrained.arch ? t.isa.simd : ++ (with_default_simd ? DEFAULT_ISA_EXT_SIMD : t.isa.simd)); ++ ++ /* apply -m[no-]lsx and -m[no-]lasx flags */ ++ if (flags) ++ for (int i = 0; i < 2; i++) ++ { ++ switch (SX_FLAG_TYPE (flags->sx[i])) ++ { ++ case ISA_EXT_SIMD_LSX: ++ constrained.simd = 1; ++ if (flags->sx[i] > 0 && t.isa.simd != ISA_EXT_SIMD_LASX) ++ t.isa.simd = ISA_EXT_SIMD_LSX; ++ else if (flags->sx[i] < 0) ++ t.isa.simd = ISA_EXT_NONE; ++ break; ++ ++ case ISA_EXT_SIMD_LASX: ++ constrained.simd = 1; ++ if (flags->sx[i] < 0 && t.isa.simd == ISA_EXT_SIMD_LASX) ++ t.isa.simd = ISA_EXT_SIMD_LSX; ++ else if (flags->sx[i] > 0) ++ t.isa.simd = ISA_EXT_SIMD_LASX; ++ break; ++ ++ case 0: ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ /* All SIMD extensions imply a 64-bit FPU: ++ - silently adjust t.isa.fpu to "fpu64" if it is unconstrained. ++ - warn if -msingle-float / -msoft-float is on, ++ then disable SIMD extensions (done in driver) ++ - abort if -mfpu=0 / -mfpu=32 is forced. */ ++ ++ if (t.isa.simd != ISA_EXT_NONE && t.isa.fpu != ISA_EXT_FPU64) ++ { ++ if (!constrained.fpu) ++ { ++ /* As long as the arch-default "t.isa.simd" is set to non-zero ++ for an element "t" in loongarch_cpu_default_isa, "t.isa.fpu" ++ should be set to "ISA_EXT_FPU64" accordingly. Thus reaching ++ here must be the result of forcing -mlsx/-mlasx explicitly. */ ++ gcc_assert (constrained.simd); ++ ++ inform (UNKNOWN_LOCATION, ++ "enabling %qs promotes %<%s%s%> to %<%s%s%>", ++ loongarch_isa_ext_strings[t.isa.simd], ++ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[t.isa.fpu], ++ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]); ++ ++ t.isa.fpu = ISA_EXT_FPU64; ++ } ++ else if (flags && (flags->flt == ISA_EXT_NONE || flags->flt == ISA_EXT_FPU32)) ++ { ++ if (constrained.simd) ++ inform (UNKNOWN_LOCATION, ++ "%qs is disabled by %<-m%s%>, because it requires %<%s%s%>", ++ loongarch_isa_ext_strings[t.isa.simd], flags->flt_str, ++ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]); ++ ++ t.isa.simd = ISA_EXT_NONE; ++ } ++ else ++ { ++ /* -mfpu=0 / -mfpu=32 is set. */ ++ if (constrained.simd) ++ fatal_error (UNKNOWN_LOCATION, ++ "%<-m%s=%s%> conflicts with %qs, which requires %<%s%s%>", ++ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[t.isa.fpu], ++ loongarch_isa_ext_strings[t.isa.simd], ++ OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]); ++ ++ /* Same as above. 
*/ ++ t.isa.simd = ISA_EXT_NONE; ++ } ++ } ++ ++ ++ /* 4. ABI-ISA compatibility */ ++ /* Note: ++ - There IS a unique default -march value for each ABI type ++ (config.gcc: triplet -> abi -> default arch). ++ ++ - If the base ABI is incompatible with the default arch, ++ try using the default -march it implies (and mark it ++ as "constrained" this time), then re-apply step 3. */ ++ ++ struct loongarch_abi abi_tmp; ++ const struct loongarch_isa* isa_min; ++ ++ abi_tmp = t.abi; ++ isa_min = &isa_required (abi_tmp); ++ ++ if (isa_base_compat_p (&t.isa, isa_min)); /* OK */ ++ else if (!constrained.arch) ++ { ++ /* Base architecture can only be implied by -march, ++ so we adjust that first if it is not constrained. */ ++ int fallback_arch = abi_default_cpu_arch (t.abi, NULL); ++ ++ if (t.cpu_arch == CPU_NATIVE) ++ warning (0, "your native CPU architecture (%qs) " ++ "does not support %qs ABI, falling back to %<-m%s=%s%>", ++ arch_str (&t), abi_str (t.abi), OPTSTR_ARCH, ++ loongarch_cpu_strings[fallback_arch]); ++ else ++ warning (0, "default CPU architecture (%qs) " ++ "does not support %qs ABI, falling back to %<-m%s=%s%>", ++ arch_str (&t), abi_str (t.abi), OPTSTR_ARCH, ++ loongarch_cpu_strings[fallback_arch]); ++ ++ t.cpu_arch = fallback_arch; ++ constrained.arch = 1; ++ goto config_target_isa; ++ } ++ else if (!constrained.abi_base) ++ { ++ /* If -march is given while -mabi is not, ++ try selecting another base ABI type. */ ++ abi_tmp.base = isa_default_abi (&t.isa).base; ++ } ++ else ++ goto fatal; ++ ++ if (isa_fpu_compat_p (&t.isa, isa_min)); /* OK */ ++ else if (!constrained.fpu) ++ t.isa.fpu = isa_min->fpu; ++ else if (!constrained.abi_base) ++ /* If -march is compatible with the default ABI ++ while -mfpu is not. */ ++ abi_tmp.base = isa_default_abi (&t.isa).base; ++ else ++ goto fatal; ++ ++ if (0) ++fatal: ++ fatal_error (UNKNOWN_LOCATION, ++ "unable to implement ABI %qs with instruction set %qs", ++ abi_str (t.abi), isa_str (&t.isa, '/')); ++ ++ ++ /* Using the fallback ABI. */ ++ if (abi_tmp.base != t.abi.base || abi_tmp.ext != t.abi.ext) ++ { ++ /* This flag is only set in the GCC driver. */ ++ if (follow_multilib_list_p) ++ { ++ ++ /* Continue falling back until we find a feasible ABI type ++ enabled by TM_MULTILIB_LIST. */ ++ if (!is_multilib_enabled (abi_tmp)) ++ { ++ for (unsigned int i = 0; i < ABI_COUNT; i++) ++ { ++ if (is_multilib_enabled (abi_priority_list[i]) ++ && abi_compat_p (&t.isa, abi_priority_list[i])) ++ { ++ abi_tmp = abi_priority_list[i]; ++ ++ warning (0, "ABI %qs cannot be implemented due to " ++ "limited instruction set %qs, " ++ "falling back to %qs", abi_str (t.abi), ++ isa_str (&t.isa, '/'), abi_str (abi_tmp)); ++ ++ goto fallback; ++ } ++ } ++ ++ /* Otherwise, keep using abi_tmp with a warning. 
*/ ++#ifdef LA_DISABLE_MULTILIB ++ warning (0, "instruction set %qs cannot implement " ++ "default ABI %qs, falling back to %qs", ++ isa_str (&t.isa, '/'), abi_str (t.abi), ++ abi_str (abi_tmp)); ++#else ++ warning (0, "no multilib-enabled ABI (%qs) can be implemented " ++ "with instruction set %qs, falling back to %qs", ++ multilib_enabled_abi_list (), ++ isa_str (&t.isa, '/'), abi_str (abi_tmp)); ++#endif ++ } ++ } ++ ++fallback: ++ t.abi = abi_tmp; ++ } ++ else if (follow_multilib_list_p) ++ { ++ if (!is_multilib_enabled (t.abi)) ++ { ++ inform (UNKNOWN_LOCATION, ++ "ABI %qs is not enabled at configure-time, " ++ "the linker might report an error", abi_str (t.abi)); ++ ++ inform (UNKNOWN_LOCATION, "ABI with startfiles: %s", ++ multilib_enabled_abi_list ()); ++ } ++ } ++ ++ ++ /* 5. Target code model */ ++ t.cmodel = constrained.cmodel ? target->cmodel : CMODEL_NORMAL; ++ ++ /* Cleanup and return. */ ++ obstack_free (&msg_obstack, NULL); ++ *target = t; ++} ++ ++/* Returns the default ABI for the given instruction set. */ ++static inline struct loongarch_abi ++isa_default_abi (const struct loongarch_isa *isa) ++{ ++ struct loongarch_abi abi; ++ ++ switch (isa->fpu) ++ { ++ case ISA_EXT_FPU64: ++ if (isa->base == ISA_BASE_LA64V100) ++ abi.base = ABI_BASE_LP64D; ++ break; ++ ++ case ISA_EXT_FPU32: ++ if (isa->base == ISA_BASE_LA64V100) ++ abi.base = ABI_BASE_LP64F; ++ break; ++ ++ case ISA_EXT_NONE: ++ if (isa->base == ISA_BASE_LA64V100) ++ abi.base = ABI_BASE_LP64S; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ abi.ext = ABI_EXT_BASE; ++ return abi; ++} ++ ++/* Check if set2 is a subset of set1. */ ++static inline int ++isa_base_compat_p (const struct loongarch_isa *set1, ++ const struct loongarch_isa *set2) ++{ ++ switch (set2->base) ++ { ++ case ISA_BASE_LA64V100: ++ return (set1->base == ISA_BASE_LA64V100); ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++static inline int ++isa_fpu_compat_p (const struct loongarch_isa *set1, ++ const struct loongarch_isa *set2) ++{ ++ switch (set2->fpu) ++ { ++ case ISA_EXT_FPU64: ++ return set1->fpu == ISA_EXT_FPU64; ++ ++ case ISA_EXT_FPU32: ++ return set1->fpu == ISA_EXT_FPU32 || set1->fpu == ISA_EXT_FPU64; ++ ++ case ISA_EXT_NONE: ++ return 1; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++} ++ ++static inline int ++abi_compat_p (const struct loongarch_isa *isa, struct loongarch_abi abi) ++{ ++ int compatible = 1; ++ const struct loongarch_isa *isa2 = &isa_required (abi); ++ ++ /* Append conditionals for new ISA components below. */ ++ compatible = compatible && isa_base_compat_p (isa, isa2); ++ compatible = compatible && isa_fpu_compat_p (isa, isa2); ++ return compatible; ++} ++ ++/* The behavior of this function should be consistent ++ with config.gcc. */ ++static int ++abi_default_cpu_arch (struct loongarch_abi abi, ++ struct loongarch_isa *isa) ++{ ++ static struct loongarch_isa tmp; ++ if (!isa) ++ isa = &tmp; ++ ++ if (abi.ext == ABI_EXT_BASE) ++ switch (abi.base) ++ { ++ case ABI_BASE_LP64D: ++ case ABI_BASE_LP64F: ++ case ABI_BASE_LP64S: ++ *isa = isa_required (abi); ++ return CPU_LOONGARCH64; ++ } ++ gcc_unreachable (); ++} ++ ++static const char* ++abi_str (struct loongarch_abi abi) ++{ ++ /* "/base" can be omitted. 
*/ ++ if (abi.ext == ABI_EXT_BASE) ++ return (const char*) ++ obstack_copy0 (&msg_obstack, loongarch_abi_base_strings[abi.base], ++ strlen (loongarch_abi_base_strings[abi.base])); ++ else ++ { ++ APPEND_STRING (loongarch_abi_base_strings[abi.base]) ++ APPEND1 ('/') ++ APPEND_STRING (loongarch_abi_ext_strings[abi.ext]) ++ APPEND1 ('\0') ++ ++ return XOBFINISH (&msg_obstack, const char *); ++ } ++} ++ ++static const char* ++isa_str (const struct loongarch_isa *isa, char separator) ++{ ++ APPEND_STRING (loongarch_isa_base_strings[isa->base]) ++ APPEND1 (separator) ++ ++ if (isa->fpu == ISA_EXT_NONE) ++ { ++ APPEND_STRING ("no" OPTSTR_ISA_EXT_FPU) ++ } ++ else ++ { ++ APPEND_STRING (OPTSTR_ISA_EXT_FPU) ++ APPEND_STRING (loongarch_isa_ext_strings[isa->fpu]) ++ } ++ ++ switch (isa->simd) ++ { ++ case ISA_EXT_SIMD_LSX: ++ case ISA_EXT_SIMD_LASX: ++ APPEND1 (separator); ++ APPEND_STRING (loongarch_isa_ext_strings[isa->simd]); ++ break; ++ ++ default: ++ gcc_assert (isa->simd == 0); ++ } ++ APPEND1 ('\0') ++ ++ /* Add more here. */ ++ ++ return XOBFINISH (&msg_obstack, const char *); ++} ++ ++static const char* ++arch_str (const struct loongarch_target *target) ++{ ++ if (target->cpu_arch == CPU_NATIVE) ++ { ++ /* Describe a native CPU with unknown PRID. */ ++ const char* isa_string = isa_str (&target->isa, ','); ++ APPEND_STRING ("PRID: 0x") ++ APPEND_STRING (get_native_prid_str ()) ++ APPEND_STRING (", ISA features: ") ++ APPEND_STRING (isa_string) ++ } ++ else ++ APPEND_STRING (loongarch_cpu_strings[target->cpu_arch]); ++ ++ APPEND1 ('\0') ++ return XOBFINISH (&msg_obstack, const char *); ++} ++ ++static const char* ++multilib_enabled_abi_list () ++{ ++ int enabled_abi_idx[MULTILIB_LIST_LEN] = { 0 }; ++ const char* enabled_abi_str[MULTILIB_LIST_LEN] = { NULL }; ++ unsigned int j = 0; ++ ++ for (unsigned int i = 0; i < ABI_COUNT && j < MULTILIB_LIST_LEN; i++) ++ { ++ if (enabled_abi_types[abi_priority_list[i].base] ++ [abi_priority_list[i].ext]) ++ { ++ enabled_abi_idx[j++] = i; ++ } ++ } ++ ++ for (unsigned int k = 0; k < j; k++) ++ { ++ enabled_abi_str[k] = abi_str (abi_priority_list[enabled_abi_idx[k]]); ++ } ++ ++ for (unsigned int k = 0; k < j - 1; k++) ++ { ++ APPEND_STRING (enabled_abi_str[k]) ++ APPEND1 (',') ++ APPEND1 (' ') ++ } ++ APPEND_STRING (enabled_abi_str[j - 1]) ++ APPEND1 ('\0') ++ ++ return XOBFINISH (&msg_obstack, const char *); ++} ++ ++/* option status feedback for "gcc --help=target -Q" */ ++void ++loongarch_update_gcc_opt_status (struct loongarch_target *target, ++ struct gcc_options *opts, ++ struct gcc_options *opts_set) ++{ ++ (void) opts_set; ++ ++ /* status of -mabi */ ++ opts->x_la_opt_abi_base = target->abi.base; ++ ++ opts->x_target_flags |= ++ IS_LP64_ABI_BASE (target->abi.base) ? MASK_LP64 : 0; ++ ++ /* status of -march and -mtune */ ++ opts->x_la_opt_cpu_arch = target->cpu_arch; ++ opts->x_la_opt_cpu_tune = target->cpu_tune; ++ ++ /* status of -mfpu and -msimd */ ++ opts->x_la_opt_fpu = target->isa.fpu; ++ opts->x_la_opt_simd = target->isa.simd; ++} +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index 21639fa74..33eb8b2da 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -1,5 +1,6 @@ +-/* Definitions for option handling for LARCH. +- Copyright (C) 1989-2018 Free Software Foundation, Inc. ++/* Definitions for loongarch-specific option handling. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. + + This file is part of GCC. 
+ +@@ -17,18 +18,81 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +-#ifndef LARCH_OPTS_H +-#define LARCH_OPTS_H ++#ifndef LOONGARCH_OPTS_H ++#define LOONGARCH_OPTS_H + +-#define LARCH_ARCH_OPTION_NATIVE -1 ++#include "loongarch-def.h" + ++/* Target configuration */ ++extern struct loongarch_target la_target; + +-enum loongarch_code_model { +- LARCH_CMODEL_NORMAL, +- LARCH_CMODEL_TINY, +- LARCH_CMODEL_TINY_STATIC, +- LARCH_CMODEL_LARGE, +- LARCH_CMODEL_EXTREME ++/* Flag status */ ++struct loongarch_flags { ++ int flt; const char* flt_str; ++#define SX_FLAG_TYPE(x) ((x) < 0 ? -(x) : (x)) ++ int sx[2]; + }; + ++#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS) ++ ++/* Initialize loongarch_target from separate option variables. */ ++void ++loongarch_init_target (struct loongarch_target *target, ++ int cpu_arch, int cpu_tune, int fpu, int simd, ++ int abi_base, int abi_ext, int cmodel); ++ ++ ++/* Handler for "-m" option combinations, ++ shared by the driver and the compiler proper. */ ++void ++loongarch_config_target (struct loongarch_target *target, ++ struct loongarch_flags *flags, ++ int follow_multilib_list_p); ++ ++/* option status feedback for "gcc --help=target -Q" */ ++void ++loongarch_update_gcc_opt_status (struct loongarch_target *target, ++ struct gcc_options *opts, ++ struct gcc_options *opts_set); + #endif ++ ++ ++/* Macros for common conditional expressions used in loongarch.{c,h,md} */ ++#define TARGET_CMODEL_NORMAL (la_target.cmodel == CMODEL_NORMAL) ++#define TARGET_CMODEL_TINY (la_target.cmodel == CMODEL_TINY) ++#define TARGET_CMODEL_TINY_STATIC (la_target.cmodel == CMODEL_TINY_STATIC) ++#define TARGET_CMODEL_LARGE (la_target.cmodel == CMODEL_LARGE) ++#define TARGET_CMODEL_EXTREME (la_target.cmodel == CMODEL_EXTREME) ++ ++#define TARGET_HARD_FLOAT (la_target.isa.fpu != ISA_EXT_NONE) ++#define TARGET_HARD_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64D \ ++ || la_target.abi.base == ABI_BASE_LP64F) ++ ++#define TARGET_SOFT_FLOAT (la_target.isa.fpu == ISA_EXT_NONE) ++#define TARGET_SOFT_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64S) ++#define TARGET_SINGLE_FLOAT (la_target.isa.fpu == ISA_EXT_FPU32) ++#define TARGET_SINGLE_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64F) ++#define TARGET_DOUBLE_FLOAT (la_target.isa.fpu == ISA_EXT_FPU64) ++#define TARGET_DOUBLE_FLOAT_ABI (la_target.abi.base == ABI_BASE_LP64D) ++ ++#define TARGET_64BIT (la_target.isa.base == ISA_BASE_LA64V100) ++#define TARGET_ABI_LP64 (la_target.abi.base == ABI_BASE_LP64D \ ++ || la_target.abi.base == ABI_BASE_LP64F \ ++ || la_target.abi.base == ABI_BASE_LP64S) ++ ++#define ISA_HAS_LSX (la_target.isa.simd == ISA_EXT_SIMD_LSX \ ++ || la_target.isa.simd == ISA_EXT_SIMD_LASX) ++#define ISA_HAS_LASX (la_target.isa.simd == ISA_EXT_SIMD_LASX) ++ ++ ++/* TARGET_ macros for use in *.md template conditionals */ ++#define TARGET_uARCH_LA464 (la_target.cpu_tune == CPU_LA464) ++#define TARGET_uARCH_LA364 (la_target.cpu_tune == CPU_LA364) ++#define TARGET_uARCH_LA264 (la_target.cpu_tune == CPU_LA264) ++#define TARGET_uARCH_LA664 (la_target.cpu_tune == CPU_LA664) ++ ++/* Note: optimize_size may vary across functions, ++ while -m[no]-memcpy imposes a global constraint. 
*/ ++#define TARGET_DO_OPTIMIZE_BLOCK_MOVE_P loongarch_do_optimize_block_move_p() ++ ++#endif /* LOONGARCH_OPTS_H */ +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index c36fdd37d..498d80514 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -1,9 +1,7 @@ +-/* Prototypes of target machine for GNU compiler. LARCH version. ++/* Prototypes of target machine for GNU compiler. LoongArch version. + Copyright (C) 1989-2018 Free Software Foundation, Inc. +- Contributed by A. Lichnewsky (lich@inria.inria.fr). +- Changed by Michael Meissner (meissner@osf.org). +- 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and +- Brendan Eich (brendan@microunity.com). ++ Contributed by Loongson Ltd. ++ Based on MIPS target for GNU compiler. + + This file is part of GCC. + +@@ -21,24 +19,8 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +-#ifndef GCC_LARCH_PROTOS_H +-#define GCC_LARCH_PROTOS_H +- +-/* Describes how a symbol is used. +- +- SYMBOL_CONTEXT_CALL +- The symbol is used as the target of a call instruction. +- +- SYMBOL_CONTEXT_LEA +- The symbol is used in a load-address operation. +- +- SYMBOL_CONTEXT_MEM +- The symbol is used as the address in a MEM. */ +-enum loongarch_symbol_context { +- SYMBOL_CONTEXT_CALL, +- SYMBOL_CONTEXT_LEA, +- SYMBOL_CONTEXT_MEM +-}; ++#ifndef GCC_LOONGARCH_PROTOS_H ++#define GCC_LOONGARCH_PROTOS_H + + /* Classifies a SYMBOL_REF, LABEL_REF or UNSPEC address. + +@@ -57,67 +39,30 @@ enum loongarch_symbol_type { + SYMBOL_GOT_DISP, + SYMBOL_TLS, + SYMBOL_TLSGD, +- SYMBOL_TLSLDM, ++ SYMBOL_TLSLDM + }; + #define NUM_SYMBOL_TYPES (SYMBOL_TLSLDM + 1) + +-/* Classifies a type of call. +- +- LARCH_CALL_NORMAL +- A normal call or call_value pattern. +- +- LARCH_CALL_SIBCALL +- A sibcall or sibcall_value pattern. +- +- LARCH_CALL_EPILOGUE +- A call inserted in the epilogue. */ +-enum loongarch_call_type { +- LARCH_CALL_NORMAL, +- LARCH_CALL_SIBCALL, +- LARCH_CALL_EPILOGUE +-}; +- +-/* Controls the conditions under which certain instructions are split. +- +- SPLIT_IF_NECESSARY +- Only perform splits that are necessary for correctness +- (because no unsplit version exists). +- +- SPLIT_FOR_SPEED +- Perform splits that are necessary for correctness or +- beneficial for code speed. +- +- SPLIT_FOR_SIZE +- Perform splits that are necessary for correctness or +- beneficial for code size. */ +-enum loongarch_split_type { +- SPLIT_IF_NECESSARY, +- SPLIT_FOR_SPEED, +- SPLIT_FOR_SIZE +-}; + extern const char *const loongarch_fp_conditions[16]; + +-extern const char *loongarch_output_gpr_save (unsigned); ++/* Routines implemented in loongarch.c. 
*/ ++extern rtx loongarch_emit_move (rtx, rtx); + extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int); + extern void loongarch_expand_prologue (void); + extern void loongarch_expand_epilogue (bool); + extern bool loongarch_can_use_return_insn (void); +-extern rtx loongarch_function_value (const_tree, const_tree, enum machine_mode); +-extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_context, +- enum loongarch_symbol_type *); ++ ++extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_type *); + extern int loongarch_regno_mode_ok_for_base_p (int, machine_mode, bool); +-extern bool loongarch_stack_address_p (rtx, machine_mode); + extern int loongarch_address_insns (rtx, machine_mode, bool); + extern int loongarch_const_insns (rtx); + extern int loongarch_split_const_insns (rtx); + extern int loongarch_split_128bit_const_insns (rtx); + extern int loongarch_load_store_insns (rtx, rtx_insn *); + extern int loongarch_idiv_insns (machine_mode); +-extern rtx loongarch_emit_move (rtx, rtx); + #ifdef RTX_CODE + extern void loongarch_emit_binary (enum rtx_code, rtx, rtx, rtx); + #endif +-extern rtx loongarch_pic_base_register (rtx); + extern bool loongarch_split_symbol (rtx, rtx, machine_mode, rtx *); + extern rtx loongarch_unspec_address (rtx, enum loongarch_symbol_type); + extern rtx loongarch_strip_unspec_address (rtx); +@@ -126,9 +71,9 @@ extern bool loongarch_legitimize_move (machine_mode, rtx, rtx); + extern rtx loongarch_legitimize_call_address (rtx); + + extern rtx loongarch_subword (rtx, bool); +-extern bool loongarch_split_move_p (rtx, rtx, enum loongarch_split_type); +-extern void loongarch_split_move (rtx, rtx, enum loongarch_split_type, rtx); +-extern bool loongarch_split_move_insn_p (rtx, rtx, rtx); ++extern bool loongarch_split_move_p (rtx, rtx); ++extern void loongarch_split_move (rtx, rtx, rtx); ++extern bool loongarch_split_move_insn_p (rtx, rtx); + extern void loongarch_split_move_insn (rtx, rtx, rtx); + extern void loongarch_split_128bit_move (rtx, rtx); + extern bool loongarch_split_128bit_move_p (rtx, rtx); +@@ -139,50 +84,29 @@ extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx); + extern void loongarch_split_lsx_fill_d (rtx, rtx); + extern const char *loongarch_output_move (rtx, rtx); + extern bool loongarch_cfun_has_cprestore_slot_p (void); +-extern bool loongarch_cprestore_address_p (rtx, bool); + #ifdef RTX_CODE + extern void loongarch_expand_scc (rtx *); + extern bool loongarch_expand_int_vec_cmp (rtx *); + extern bool loongarch_expand_fp_vec_cmp (rtx *); + extern void loongarch_expand_conditional_branch (rtx *); +-extern void loongarch_expand_conditional_move (rtx *); ++extern bool loongarch_expand_conditional_move_la464 (rtx *); + extern void loongarch_expand_conditional_trap (rtx); + #endif +-extern bool loongarch_get_pic_call_symbol (rtx *, int); + extern void loongarch_set_return_address (rtx, rtx); + extern bool loongarch_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); +-extern bool loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int); + extern bool loongarch_expand_block_move (rtx, rtx, rtx); + +-extern void loongarch_init_cumulative_args (CUMULATIVE_ARGS *, tree); + extern bool loongarch_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT, +- HOST_WIDE_INT, bool); ++ HOST_WIDE_INT, bool); + extern bool loongarch_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT, +- HOST_WIDE_INT); +-extern bool loongarch_mem_fits_mode_p (machine_mode mode, rtx x); ++ HOST_WIDE_INT); + extern 
HOST_WIDE_INT loongarch_debugger_offset (rtx, HOST_WIDE_INT); + +-extern void loongarch_push_asm_switch (struct loongarch_asm_switch *); +-extern void loongarch_pop_asm_switch (struct loongarch_asm_switch *); + extern void loongarch_output_external (FILE *, tree, const char *); + extern void loongarch_output_ascii (FILE *, const char *, size_t); +-extern void loongarch_output_aligned_decl_common (FILE *, tree, const char *, +- unsigned HOST_WIDE_INT, +- unsigned int); +-extern void loongarch_declare_common_object (FILE *, const char *, +- const char *, unsigned HOST_WIDE_INT, +- unsigned int, bool); +-extern void loongarch_declare_object (FILE *, const char *, const char *, +- const char *, ...) ATTRIBUTE_PRINTF_4; +-extern void loongarch_declare_object_name (FILE *, const char *, tree); +-extern void loongarch_finish_declare_object (FILE *, tree, int, int); +-extern void loongarch_set_text_contents_type (FILE *, const char *, +- unsigned long, bool); +- + extern bool loongarch_small_data_pattern_p (rtx); + extern rtx loongarch_rewrite_small_data (rtx); + extern rtx loongarch_return_addr (int, rtx); +-extern bool loongarch_must_initialize_gp_p (void); + + extern bool loongarch_const_vector_same_val_p (rtx, machine_mode); + extern bool loongarch_const_vector_same_bytes_p (rtx, machine_mode); +@@ -194,26 +118,27 @@ extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode); + extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool); + extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT); + extern enum reg_class loongarch_secondary_reload_class (enum reg_class, +- machine_mode, +- rtx, bool); ++ machine_mode, ++ rtx, bool); + extern int loongarch_class_max_nregs (enum reg_class, machine_mode); + + extern machine_mode loongarch_hard_regno_caller_save_mode (unsigned int, +- unsigned int, +- machine_mode); ++ unsigned int, ++ machine_mode); + extern int loongarch_adjust_insn_length (rtx_insn *, int); + extern const char *loongarch_output_conditional_branch (rtx_insn *, rtx *, +- const char *, const char *); +-extern const char *loongarch_output_order_conditional_branch (rtx_insn *, rtx *, +- bool); +-extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, rtx *, +- bool); ++ const char *, ++ const char *); ++extern const char *loongarch_output_order_conditional_branch (rtx_insn *, ++ rtx *, ++ bool); ++extern const char *loongarch_output_equal_conditional_branch (rtx_insn *, ++ rtx *, ++ bool); + extern const char *loongarch_output_division (const char *, rtx *); + extern const char *loongarch_lsx_output_division (const char *, rtx *); + extern const char *loongarch_output_probe_stack_range (rtx, rtx, rtx); + extern bool loongarch_hard_regno_rename_ok (unsigned int, unsigned int); +-extern bool loongarch_linked_madd_p (rtx_insn *, rtx_insn *); +-extern bool loongarch_store_data_bypass_p (rtx_insn *, rtx_insn *); + extern int loongarch_dspalu_bypass_p (rtx, rtx); + extern rtx loongarch_prefetch_cookie (rtx, rtx); + +@@ -226,9 +151,6 @@ extern const char *current_section_name (void); + extern unsigned int current_section_flags (void); + extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT); + +-extern bool and_operands_ok (machine_mode, rtx, rtx); +-extern bool loongarch_fmadd_bypass (rtx_insn *, rtx_insn *); +- + union loongarch_gen_fn_ptrs + { + rtx (*fn_8) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx); +@@ -239,25 +161,26 @@ union loongarch_gen_fn_ptrs + }; + + extern void loongarch_expand_atomic_qihi (union 
loongarch_gen_fn_ptrs, +- rtx, rtx, rtx, rtx, rtx); ++ rtx, rtx, rtx, rtx, rtx); + + extern void loongarch_expand_vector_init (rtx, rtx); + extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); ++extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx); ++extern void loongarch_expand_vec_perm_1 (rtx[]); ++extern void loongarch_expand_vector_extract (rtx, rtx, int); ++extern void loongarch_expand_vector_reduc (rtx (*)(rtx, rtx, rtx), rtx, rtx); + + extern int loongarch_ldst_scaled_shift (machine_mode); + extern bool loongarch_signed_immediate_p (unsigned HOST_WIDE_INT, int, int); + extern bool loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int); +-extern bool loongarch_load_store_pair_p (bool, rtx *); +-extern bool loongarch_movep_target_p (rtx, rtx); + extern bool loongarch_12bit_offset_address_p (rtx, machine_mode); + extern bool loongarch_14bit_shifted_offset_address_p (rtx, machine_mode); ++extern bool loongarch_base_index_address_p (rtx, machine_mode); + extern bool loongarch_9bit_offset_address_p (rtx, machine_mode); +-extern bool lwsp_swsp_address_p (rtx, machine_mode); + extern rtx loongarch_expand_thread_pointer (rtx); + + extern bool loongarch_eh_uses (unsigned int); + extern bool loongarch_epilogue_uses (unsigned int); +-extern int loongarch_trampoline_code_size (void); + extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool); + extern bool loongarch_la464_128_store_p (rtx[]); + extern bool loongarch_la464_128_load_p (rtx[]); +@@ -270,10 +193,6 @@ typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx); + extern void loongarch_register_frame_header_opt (void); + extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); + +-extern void loongarch_declare_function_name(FILE *, const char *, tree); +-/* Routines implemented in loongarch-d.c */ +-extern void loongarch_d_target_versions (void); +- + /* Routines implemented in loongarch-c.c. */ + void loongarch_cpu_cpp_builtins (cpp_reader *); + +@@ -281,10 +200,12 @@ extern void loongarch_init_builtins (void); + extern void loongarch_atomic_assign_expand_fenv (tree *, tree *, tree *); + extern tree loongarch_builtin_decl (unsigned int, bool); + extern rtx loongarch_expand_builtin (tree, rtx, rtx subtarget ATTRIBUTE_UNUSED, +- machine_mode, int); ++ machine_mode, int); + extern tree loongarch_builtin_vectorized_function (unsigned int, tree, tree); + extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int); + extern tree loongarch_build_builtin_va_list (void); +- + extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool); ++extern void loongarch_emit_swrsqrtsf (rtx, rtx, machine_mode, bool); ++extern void loongarch_emit_swdivsf (rtx, rtx, rtx, machine_mode); ++extern rtx loongarch_prefetch_cookie (rtx, rtx); + #endif /* ! GCC_LARCH_PROTOS_H */ +diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h +new file mode 100644 +index 000000000..aca3d667b +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-str.h +@@ -0,0 +1,68 @@ ++/* Generated automatically by "genstr" from "loongarch-strings". ++ Please do not edit this file directly. ++ ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. 
++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#ifndef LOONGARCH_STR_H ++#define LOONGARCH_STR_H ++ ++#define OPTSTR_ARCH "arch" ++#define OPTSTR_TUNE "tune" ++ ++#define STR_CPU_NATIVE "native" ++#define STR_CPU_ABI_DEFAULT "abi-default" ++#define STR_CPU_LOONGARCH64 "loongarch64" ++#define STR_CPU_LA464 "la464" ++#define STR_CPU_LA364 "la364" ++#define STR_CPU_LA264 "la264" ++#define STR_CPU_LA664 "la664" ++ ++#define STR_ISA_BASE_LA64V100 "la64" ++ ++#define OPTSTR_ISA_EXT_FPU "fpu" ++#define STR_NONE "none" ++#define STR_ISA_EXT_FPU0 "0" ++#define STR_ISA_EXT_FPU32 "32" ++#define STR_ISA_EXT_FPU64 "64" ++ ++#define OPTSTR_SOFT_FLOAT "soft-float" ++#define OPTSTR_SINGLE_FLOAT "single-float" ++#define OPTSTR_DOUBLE_FLOAT "double-float" ++ ++#define OPTSTR_ISA_EXT_SIMD "simd" ++#define STR_ISA_EXT_LSX "lsx" ++#define STR_ISA_EXT_LASX "lasx" ++ ++#define OPTSTR_ABI_BASE "abi" ++#define STR_ABI_BASE_LP64D "lp64d" ++#define STR_ABI_BASE_LP64F "lp64f" ++#define STR_ABI_BASE_LP64S "lp64s" ++#define STR_ABI_BASE_LP64 "lp64" ++ ++#define STR_ABI_EXT_BASE "base" ++ ++#define OPTSTR_CMODEL "cmodel" ++#define STR_CMODEL_NORMAL "normal" ++#define STR_CMODEL_TINY "tiny" ++#define STR_CMODEL_TS "tiny-static" ++#define STR_CMODEL_LARGE "large" ++#define STR_CMODEL_EXTREME "extreme" ++ ++#endif /* LOONGARCH_STR_H */ +diff --git a/gcc/config/loongarch/loongarch-tables.opt b/gcc/config/loongarch/loongarch-tables.opt +deleted file mode 100644 +index 80794b564..000000000 +--- a/gcc/config/loongarch/loongarch-tables.opt ++++ /dev/null +@@ -1,34 +0,0 @@ +-; -*- buffer-read-only: t -*- +-; Generated automatically by genopt.sh from loongarch-cpus.def. +- +-; Copyright (C) 2011-2018 Free Software Foundation, Inc. +-; +-; This file is part of GCC. +-; +-; GCC is free software; you can redistribute it and/or modify it under +-; the terms of the GNU General Public License as published by the Free +-; Software Foundation; either version 3, or (at your option) any later +-; version. +-; +-; GCC is distributed in the hope that it will be useful, but WITHOUT ANY +-; WARRANTY; without even the implied warranty of MERCHANTABILITY or +-; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-; for more details. +-; +-; You should have received a copy of the GNU General Public License +-; along with GCC; see the file COPYING3. If not see +-; . +- +-Enum +-Name(loongarch_arch_opt_value) Type(int) +-Known LARCH CPUs (for use with the -march= and -mtune= options): +- +-EnumValue +-Enum(loongarch_arch_opt_value) String(native) Value(LARCH_ARCH_OPTION_NATIVE) DriverOnly +- +-EnumValue +-Enum(loongarch_arch_opt_value) String(loongarch64) Value(0) Canonical +- +-EnumValue +-Enum(loongarch_arch_opt_value) String(la464) Value(1) Canonical +- +diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h +new file mode 100644 +index 000000000..bb01f2d98 +--- /dev/null ++++ b/gcc/config/loongarch/loongarch-tune.h +@@ -0,0 +1,51 @@ ++/* Definitions for microarchitecture-related data structures. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#ifndef LOONGARCH_TUNE_H ++#define LOONGARCH_TUNE_H ++ ++/* RTX costs of various operations on the different architectures. */ ++struct loongarch_rtx_cost_data ++{ ++ unsigned short fp_add; ++ unsigned short fp_mult_sf; ++ unsigned short fp_mult_df; ++ unsigned short fp_div_sf; ++ unsigned short fp_div_df; ++ unsigned short int_mult_si; ++ unsigned short int_mult_di; ++ unsigned short int_div_si; ++ unsigned short int_div_di; ++ unsigned short branch_cost; ++ unsigned short memory_latency; ++}; ++ ++/* Costs to use when optimizing for size. */ ++extern const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size; ++ ++/* Cache size record of known processor models. */ ++struct loongarch_cache { ++ int l1d_line_size; /* bytes */ ++ int l1d_size; /* KiB */ ++ int l2d_size; /* kiB */ ++ int simultaneous_prefetches; /* number of parallel prefetch */ ++}; ++ ++#endif /* LOONGARCH_TUNE_H */ +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index e556f81e4..a1dde5a0f 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -1,9 +1,7 @@ +-/* Subroutines used for LARCH code generation. +- Copyright (C) 1989-2018 Free Software Foundation, Inc. +- Contributed by A. Lichnewsky, lich@inria.inria.fr. +- Changes by Michael Meissner, meissner@osf.org. +- 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and +- Brendan Eich, brendan@microunity.com. ++/* Subroutines used for LoongArch code generation. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Technology Co. Ltd.. ++ Based on MIPS and RISC-V target for GNU compiler. + + This file is part of GCC. + +@@ -63,8 +61,14 @@ along with GCC; see the file COPYING3. If not see + #include "target-globals.h" + #include "tree-pass.h" + #include "context.h" ++#include "shrink-wrap.h" + #include "builtins.h" + #include "rtl-iter.h" ++#include "cfgloop.h" ++#include "gimple-iterator.h" ++#include "tree-vectorizer.h" ++#include "params.h" ++#include "opts.h" + + /* This file should be included last. */ + #include "target-def.h" +@@ -76,48 +80,20 @@ along with GCC; see the file COPYING3. If not see + && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES) + + /* Extract the symbol or label from UNSPEC wrapper X. */ +-#define UNSPEC_ADDRESS(X) \ +- XVECEXP (X, 0, 0) ++#define UNSPEC_ADDRESS(X) XVECEXP (X, 0, 0) + + /* Extract the symbol type from UNSPEC wrapper X. */ + #define UNSPEC_ADDRESS_TYPE(X) \ + ((enum loongarch_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST)) + +-/* The maximum distance between the top of the stack frame and the +- value $sp has when we save and restore registers. +-*/ +-#define LARCH_MAX_FIRST_STACK_STEP 0x7f0 +- + /* True if INSN is a loongarch.md pattern or asm statement. */ + /* ??? This test exists through the compiler, perhaps it should be +- moved to rtl.h. */ ++ moved to rtl.h. 
*/ + #define USEFUL_INSN_P(INSN) \ + (NONDEBUG_INSN_P (INSN) \ + && GET_CODE (PATTERN (INSN)) != USE \ + && GET_CODE (PATTERN (INSN)) != CLOBBER) + +-/* If INSN is a delayed branch sequence, return the first instruction +- in the sequence, otherwise return INSN itself. */ +-#define SEQ_BEGIN(INSN) \ +- (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ +- ? as_a (XVECEXP (PATTERN (INSN), 0, 0)) \ +- : (INSN)) +- +-/* Likewise for the last instruction in a delayed branch sequence. */ +-#define SEQ_END(INSN) \ +- (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \ +- ? as_a (XVECEXP (PATTERN (INSN), \ +- 0, \ +- XVECLEN (PATTERN (INSN), 0) - 1)) \ +- : (INSN)) +- +-/* Execute the following loop body with SUBINSN set to each instruction +- between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */ +-#define FOR_EACH_SUBINSN(SUBINSN, INSN) \ +- for ((SUBINSN) = SEQ_BEGIN (INSN); \ +- (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \ +- (SUBINSN) = NEXT_INSN (SUBINSN)) +- + /* True if bit BIT is set in VALUE. */ + #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0) + +@@ -127,54 +103,25 @@ along with GCC; see the file COPYING3. If not see + A natural register + offset address. The register satisfies + loongarch_valid_base_register_p and the offset is a const_arith_operand. + ++ ADDRESS_REG_REG ++ A base register indexed by (optionally scaled) register. ++ + ADDRESS_CONST_INT + A signed 16-bit constant address. + + ADDRESS_SYMBOLIC: + A constant symbolic address. */ +-enum loongarch_address_type { ++enum loongarch_address_type ++{ + ADDRESS_REG, ++ ADDRESS_REG_REG, + ADDRESS_CONST_INT, + ADDRESS_SYMBOLIC + }; + + +-/* A class used to control a comdat-style stub that we output in each +- translation unit that needs it. */ +-class loongarch_one_only_stub { +-public: +- virtual ~loongarch_one_only_stub () {} +- +- /* Return the name of the stub. */ +- virtual const char *get_name () = 0; +- +- /* Output the body of the function to asm_out_file. */ +- virtual void output_body () = 0; +-}; +- +-/* Tuning information that is automatically derived from other sources +- (such as the scheduler). */ +-static struct { +- /* The architecture and tuning settings that this structure describes. */ +- enum processor arch; +- enum processor tune; +- +- /* True if the structure has been initialized. */ +- bool initialized_p; +- +-} loongarch_tuning_info; +- +-/* Information about an address described by loongarch_address_type. +- +- ADDRESS_CONST_INT +- No fields are used. +- +- ADDRESS_REG +- REG is the base register and OFFSET is the constant offset. +- +- ADDRESS_SYMBOLIC +- SYMBOL_TYPE is the type of symbol that the address references. */ +-struct loongarch_address_info { ++struct loongarch_address_info ++{ + enum loongarch_address_type type; + rtx reg; + rtx offset; +@@ -184,224 +131,82 @@ struct loongarch_address_info { + /* Method to load immediate number fields. + + METHOD_NORMAL: +- load immediate number 0-31 bit ++ Load bit 0-31 of the immediate number. + + METHOD_LU32I: +- load imm 32-51 bit ++ Load bit 32-51 of the immediate number. + + METHOD_LU52I: +- load imm 52-63 bit ++ load bit 52-63 of the immediate number. + + METHOD_INSV: +- imm 0xfff00000fffffxxx ++ immediates like 0xfff00000fffffxxx + */ +-enum loongarch_load_imm_method { ++enum loongarch_load_imm_method ++{ + METHOD_NORMAL, + METHOD_LU32I, + METHOD_LU52I, + METHOD_INSV + }; + +-/* One stage in a constant building sequence. 
These sequences have +- the form: +- +- A = VALUE[0] +- A = A CODE[1] VALUE[1] +- A = A CODE[2] VALUE[2] +- ... +- +- where A is an accumulator, each CODE[i] is a binary rtl operation +- and each VALUE[i] is a constant integer. CODE[0] is undefined. */ +-struct loongarch_integer_op { ++struct loongarch_integer_op ++{ + enum rtx_code code; +- unsigned HOST_WIDE_INT value; ++ HOST_WIDE_INT value; + enum loongarch_load_imm_method method; + }; + + /* The largest number of operations needed to load an integer constant. +- The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI. +- When the lowest bit is clear, we can try, but reject a sequence with +- an extra SLL at the end. */ +-#define LARCH_MAX_INTEGER_OPS 9 +- +-/* Costs of various operations on the different architectures. */ +- +-struct loongarch_rtx_cost_data +-{ +- unsigned short fp_add; +- unsigned short fp_mult_sf; +- unsigned short fp_mult_df; +- unsigned short fp_div_sf; +- unsigned short fp_div_df; +- unsigned short int_mult_si; +- unsigned short int_mult_di; +- unsigned short int_div_si; +- unsigned short int_div_di; +- unsigned short branch_cost; +- unsigned short memory_latency; +-}; +- +-/* Global variables for machine-dependent things. */ +- +-/* The -G setting, or the configuration's default small-data limit if +- no -G option is given. */ +-static unsigned int loongarch_small_data_threshold; +- +-/* The number of file directives written by loongarch_output_filename. */ +-int num_source_filenames; +- +-/* The name that appeared in the last .file directive written by +- loongarch_output_filename, or "" if loongarch_output_filename hasn't +- written anything yet. */ +-const char *current_function_file = ""; ++ The worst accepted case for 64-bit constants is LU12I.W,LU32I.D,LU52I.D,ORI ++ or LU12I.W,LU32I.D,LU52I.D,ADDI.D DECL_ASSEMBLER_NAME. */ ++#define LARCH_MAX_INTEGER_OPS 4 + + /* Arrays that map GCC register numbers to debugger register numbers. */ +-int loongarch_dbx_regno[FIRST_PSEUDO_REGISTER]; + int loongarch_dwarf_regno[FIRST_PSEUDO_REGISTER]; + +-/* The current instruction-set architecture. */ +-enum processor loongarch_arch; +-const struct loongarch_cpu_info *loongarch_arch_info; +- +-/* The processor that we should tune the code for. */ +-enum processor loongarch_tune; +-const struct loongarch_cpu_info *loongarch_tune_info; +- +-/* The ISA level associated with loongarch_arch. */ +-int loongarch_isa; +- +-/* The ISA revision level. */ +-int loongarch_isa_rev; +- +-/* Which cost information to use. */ +-static const struct loongarch_rtx_cost_data *loongarch_cost; +- + /* Index [M][R] is true if register R is allowed to hold a value of mode M. */ +-static bool loongarch_hard_regno_mode_ok_p[MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER]; ++static bool loongarch_hard_regno_mode_ok_p[MAX_MACHINE_MODE] ++ [FIRST_PSEUDO_REGISTER]; + + /* Index C is true if character C is a valid PRINT_OPERAND punctation + character. */ + static bool loongarch_print_operand_punct[256]; + +-static GTY (()) int loongarch_output_filename_first_time = 1; +- +-/* loongarch_use_pcrel_pool_p[X] is true if symbols of type X should be +- forced into a PC-relative constant pool. */ +-bool loongarch_use_pcrel_pool_p[NUM_SYMBOL_TYPES]; +- +-/* Cached value of can_issue_more. This is cached in loongarch_variable_issue hook +- and returned from loongarch_sched_reorder2. */ ++/* Cached value of can_issue_more. This is cached in loongarch_variable_issue ++ hook and returned from loongarch_sched_reorder2. 
*/ + static int cached_can_issue_more; + + /* Index R is the smallest register class that contains register R. */ + const enum reg_class loongarch_regno_to_class[FIRST_PSEUDO_REGISTER] = { +- GR_REGS, GR_REGS, GR_REGS, GR_REGS, +- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, +- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, +- SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, +- SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, +- SIBCALL_REGS, GR_REGS, GR_REGS, JALR_REGS, +- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, +- JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS, +- +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- FP_REGS, FP_REGS, FP_REGS, FP_REGS, +- ST_REGS, ST_REGS, ST_REGS, ST_REGS, +- ST_REGS, ST_REGS, ST_REGS, ST_REGS, +- FRAME_REGS, FRAME_REGS +-}; +- +-static tree loongarch_handle_interrupt_attr (tree *, tree, tree, int, bool *); +-static tree loongarch_handle_use_shadow_register_set_attr (tree *, tree, tree, int, +- bool *); +- +-/* The value of TARGET_ATTRIBUTE_TABLE. */ +-static const struct attribute_spec loongarch_attribute_table[] = { +- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, +- affects_type_identity, handler, exclude } */ +- { "long_call", 0, 0, false, true, true, false, NULL, NULL }, +- { "short_call", 0, 0, false, true, true, false, NULL, NULL }, +- { "far", 0, 0, false, true, true, false, NULL, NULL }, +- { "near", 0, 0, false, true, true, false, NULL, NULL }, +- { "nocompression", 0, 0, true, false, false, false, NULL, NULL }, +- /* Allow functions to be specified as interrupt handlers */ +- { "interrupt", 0, 1, false, true, true, false, loongarch_handle_interrupt_attr, +- NULL }, +- { "use_shadow_register_set", 0, 1, false, true, true, false, +- loongarch_handle_use_shadow_register_set_attr, NULL }, +- { "keep_interrupts_masked", 0, 0, false, true, true, false, NULL, NULL }, +- { "use_debug_exception_return", 0, 0, false, true, true, false, NULL, NULL }, +- { NULL, 0, 0, false, false, false, false, NULL, NULL } +-}; +- +-/* A table describing all the processors GCC knows about; see +- loongarch-cpus.def for details. */ +-static const struct loongarch_cpu_info loongarch_cpu_info_table[] = { +-#define LARCH_CPU(NAME, CPU, ISA, FLAGS) \ +- { NAME, CPU, ISA, FLAGS }, +-#include "loongarch-cpus.def" +-#undef LARCH_CPU +-}; +- +-/* Default costs. If these are used for a processor we should look +- up the actual costs. */ +-#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \ +- COSTS_N_INSNS (7), /* fp_mult_sf */ \ +- COSTS_N_INSNS (8), /* fp_mult_df */ \ +- COSTS_N_INSNS (23), /* fp_div_sf */ \ +- COSTS_N_INSNS (36), /* fp_div_df */ \ +- COSTS_N_INSNS (10), /* int_mult_si */ \ +- COSTS_N_INSNS (10), /* int_mult_di */ \ +- COSTS_N_INSNS (69), /* int_div_si */ \ +- COSTS_N_INSNS (69), /* int_div_di */ \ +- 2, /* branch_cost */ \ +- 4 /* memory_latency */ +- +-/* Floating-point costs for processors without an FPU. Just assume that +- all floating-point libcalls are very expensive. */ +-#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \ +- COSTS_N_INSNS (256), /* fp_mult_sf */ \ +- COSTS_N_INSNS (256), /* fp_mult_df */ \ +- COSTS_N_INSNS (256), /* fp_div_sf */ \ +- COSTS_N_INSNS (256) /* fp_div_df */ +- +-/* Costs to use when optimizing for size. 
*/ +-static const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size = { +- COSTS_N_INSNS (1), /* fp_add */ +- COSTS_N_INSNS (1), /* fp_mult_sf */ +- COSTS_N_INSNS (1), /* fp_mult_df */ +- COSTS_N_INSNS (1), /* fp_div_sf */ +- COSTS_N_INSNS (1), /* fp_div_df */ +- COSTS_N_INSNS (1), /* int_mult_si */ +- COSTS_N_INSNS (1), /* int_mult_di */ +- COSTS_N_INSNS (1), /* int_div_si */ +- COSTS_N_INSNS (1), /* int_div_di */ +- 2, /* branch_cost */ +- 4 /* memory_latency */ ++ GR_REGS, GR_REGS, GR_REGS, GR_REGS, ++ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, ++ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, ++ SIBCALL_REGS, JIRL_REGS, SIBCALL_REGS, SIBCALL_REGS, ++ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, ++ SIBCALL_REGS, GR_REGS, GR_REGS, JIRL_REGS, ++ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, ++ JIRL_REGS, JIRL_REGS, JIRL_REGS, JIRL_REGS, ++ ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FP_REGS, FP_REGS, FP_REGS, FP_REGS, ++ FCC_REGS, FCC_REGS, FCC_REGS, FCC_REGS, ++ FCC_REGS, FCC_REGS, FCC_REGS, FCC_REGS, ++ FRAME_REGS, FRAME_REGS + }; + +-/* Costs to use when optimizing for speed, indexed by processor. */ +-static const struct loongarch_rtx_cost_data +- loongarch_rtx_cost_data[NUM_PROCESSOR_VALUES] = { +- { /* loongarch */ +- DEFAULT_COSTS +- }, +- { /* loongarch64 */ +- DEFAULT_COSTS +- }, +- { /* la464 */ +- DEFAULT_COSTS +- } +-}; ++/* Which cost information to use. */ ++static const struct loongarch_rtx_cost_data *loongarch_cost; + + /* Information about a single argument. */ +-struct loongarch_arg_info { ++struct loongarch_arg_info ++{ + /* True if the argument is at least partially passed on the stack. */ + bool stack_p; + +@@ -419,21 +224,6 @@ struct loongarch_arg_info { + unsigned int fpr_offset; + }; + +- +-/* Emit a move from SRC to DEST. Assume that the move expanders can +- handle all moves if !can_create_pseudo_p (). The distinction is +- important because, unlike emit_move_insn, the move expanders know +- how to force Pmode objects into the constant pool even when the +- constant pool address is not itself legitimate. */ +- +-rtx +-loongarch_emit_move (rtx dest, rtx src) +-{ +- return (can_create_pseudo_p () +- ? emit_move_insn (dest, src) +- : emit_move_insn_1 (dest, src)); +-} +- + /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at + least PARM_BOUNDARY bits of alignment, but will be given anything up + to PREFERRED_STACK_BOUNDARY bits if the type requires it. */ +@@ -470,7 +260,8 @@ loongarch_pass_mode_in_fpr_p (machine_mode mode) + return 0; + } + +-typedef struct { ++typedef struct ++{ + const_tree type; + HOST_WIDE_INT offset; + } loongarch_aggregate_field; +@@ -480,18 +271,18 @@ typedef struct { + + static int + loongarch_flatten_aggregate_field (const_tree type, +- loongarch_aggregate_field fields[2], +- int n, HOST_WIDE_INT offset, +- const int use_vecarg_p) ++ loongarch_aggregate_field fields[2], int n, ++ HOST_WIDE_INT offset, ++ const int use_vecarg_p) + { + switch (TREE_CODE (type)) + { + case RECORD_TYPE: +- /* Can't handle incomplete types nor sizes that are not fixed. */ +- if (!COMPLETE_TYPE_P (type) +- || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST +- || !tree_fits_uhwi_p (TYPE_SIZE (type))) +- return -1; ++ /* Can't handle incomplete types nor sizes that are not fixed. 
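/* A source-level view of the flattening these functions implement
   (standard LP64 convention assumed): an aggregate is decomposed into
   at most two scalar fields that can travel in registers.  The type
   names are illustrative.  */

struct two_floats { float x; float y; };  /* two SFmode fields: both
					     halves ride in FPRs      */
struct fp_and_int { double d; long l; };  /* one FPR plus one GPR     */
struct three_dbls { double a, b, c; };	  /* more than two fields:
					     falls back to the normal
					     GPR/stack convention     */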
*/ ++ if (!COMPLETE_TYPE_P (type) ++ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST ++ || !tree_fits_uhwi_p (TYPE_SIZE (type))) ++ return -1; + + for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f)) + if (TREE_CODE (f) == FIELD_DECL) +@@ -500,7 +291,8 @@ loongarch_flatten_aggregate_field (const_tree type, + return -1; + + HOST_WIDE_INT pos = offset + int_byte_position (f); +- n = loongarch_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos, 0); ++ n = loongarch_flatten_aggregate_field (TREE_TYPE (f), fields, n, ++ pos, 0); + if (n < 0) + return -1; + } +@@ -513,7 +305,8 @@ loongarch_flatten_aggregate_field (const_tree type, + tree index = TYPE_DOMAIN (type); + tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type)); + int n_subfields = loongarch_flatten_aggregate_field (TREE_TYPE (type), +- subfields, 0, offset, 0); ++ subfields, 0, ++ offset, 0); + + /* Can't handle incomplete types nor sizes that are not fixed. */ + if (n_subfields <= 0 +@@ -528,7 +321,7 @@ loongarch_flatten_aggregate_field (const_tree type, + return -1; + + n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) +- - tree_to_uhwi (TYPE_MIN_VALUE (index)); ++ - tree_to_uhwi (TYPE_MIN_VALUE (index)); + gcc_assert (n_elts >= 0); + + for (HOST_WIDE_INT i = 0; i < n_elts; i++) +@@ -566,11 +359,11 @@ loongarch_flatten_aggregate_field (const_tree type, + } + + default: +- if (n < 2 ++ if ((n < 2 + && ((SCALAR_FLOAT_TYPE_P (type) + && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG) + || (INTEGRAL_TYPE_P (type) +- && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)) ++ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD))) + || (use_vecarg_p && VECTOR_TYPE_P (type) + && ((ISA_HAS_LSX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LSX_REG) + || (ISA_HAS_LASX && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_LASX_REG)))) +@@ -589,8 +382,8 @@ loongarch_flatten_aggregate_field (const_tree type, + + static int + loongarch_flatten_aggregate_argument (const_tree type, +- loongarch_aggregate_field fields[2], +- const int use_vecarg_p) ++ loongarch_aggregate_field fields[2], ++ const int use_vecarg_p) + { + if (!type || !((TREE_CODE (type) == RECORD_TYPE) + || (use_vecarg_p && TREE_CODE (type) == VECTOR_TYPE))) +@@ -603,9 +396,9 @@ loongarch_flatten_aggregate_argument (const_tree type, + two floating-point registers. If so, populate FIELDS accordingly. */ + + static unsigned +-loongarch_pass_aggregate_in_fpr_pair_p (const_tree type, +- loongarch_aggregate_field fields[2], +- const int use_vecarg_p) ++loongarch_pass_aggregate_num_fpr (const_tree type, ++ loongarch_aggregate_field fields[2], ++ const int use_vecarg_p) + { + int n = loongarch_flatten_aggregate_argument (type, fields, use_vecarg_p); + +@@ -616,13 +409,13 @@ loongarch_pass_aggregate_in_fpr_pair_p (const_tree type, + return n > 0 ? n : 0; + } + +-/* See whether TYPE is a record whose fields should be returned in one or ++/* See whether TYPE is a record whose fields should be returned in one + floating-point register and one integer register. If so, populate + FIELDS accordingly. */ + + static bool + loongarch_pass_aggregate_in_fpr_and_gpr_p (const_tree type, +- loongarch_aggregate_field fields[2]) ++ loongarch_aggregate_field fields[2]) + { + unsigned num_int = 0, num_float = 0; + int n = loongarch_flatten_aggregate_argument (type, fields, 0); +@@ -640,20 +433,21 @@ loongarch_pass_aggregate_in_fpr_and_gpr_p (const_tree type, + when the value has mode VALUE_MODE and the type has TYPE_MODE. 
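/* The use_vecarg_p leg of the default case above also admits vector
   types sized to an LSX register (assumed 16 bytes) or a LASX
   register (assumed 32 bytes).  GCC's generic vector extension is
   enough to exercise it: */

typedef int v4i32 __attribute__ ((vector_size (16)));	/* one LSX reg  */
typedef int v8i32 __attribute__ ((vector_size (32)));	/* one LASX reg */

extern v4i32 vadd (v4i32 a, v4i32 b);	/* with LSX enabled, a, b and
					   the return value can all be
					   passed in vector registers  */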
The + two modes may be different for structures like: + +- struct __attribute__((packed)) foo { float f; } ++ struct __attribute__((packed)) foo { float f; } + +- where the SFmode value "f" is passed in REGNO but the struct itself +- has mode BLKmode. */ ++ where the SFmode value "f" is passed in REGNO but the struct itself ++ has mode BLKmode. */ + + static rtx + loongarch_pass_fpr_single (machine_mode type_mode, unsigned regno, +- machine_mode value_mode) ++ machine_mode value_mode, ++ HOST_WIDE_INT offset) + { + rtx x = gen_rtx_REG (value_mode, regno); + + if (type_mode != value_mode) + { +- x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx); ++ x = gen_rtx_EXPR_LIST (VOIDmode, x, GEN_INT (offset)); + x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x)); + } + return x; +@@ -666,19 +460,16 @@ loongarch_pass_fpr_single (machine_mode type_mode, unsigned regno, + + static rtx + loongarch_pass_fpr_pair (machine_mode mode, unsigned regno1, +- machine_mode mode1, HOST_WIDE_INT offset1, +- unsigned regno2, machine_mode mode2, +- HOST_WIDE_INT offset2) ++ machine_mode mode1, HOST_WIDE_INT offset1, ++ unsigned regno2, machine_mode mode2, ++ HOST_WIDE_INT offset2) + { +- return gen_rtx_PARALLEL +- (mode, +- gen_rtvec (2, +- gen_rtx_EXPR_LIST (VOIDmode, +- gen_rtx_REG (mode1, regno1), +- GEN_INT (offset1)), +- gen_rtx_EXPR_LIST (VOIDmode, +- gen_rtx_REG (mode2, regno2), +- GEN_INT (offset2)))); ++ return gen_rtx_PARALLEL ( ++ mode, gen_rtvec (2, ++ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode1, regno1), ++ GEN_INT (offset1)), ++ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode2, regno2), ++ GEN_INT (offset2)))); + } + + /* Fill INFO with information about a single argument, and return an +@@ -689,9 +480,9 @@ loongarch_pass_fpr_pair (machine_mode mode, unsigned regno1, + returning the argument, or false if passing the argument. */ + + static rtx +-loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS *cum, +- machine_mode mode, const_tree type, bool named, +- bool return_p) ++loongarch_get_arg_info (struct loongarch_arg_info *info, ++ const CUMULATIVE_ARGS *cum, machine_mode mode, ++ const_tree type, bool named, bool return_p) + { + unsigned num_bytes, num_words; + unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST; +@@ -713,21 +504,23 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * + unsigned gregno = gpr_base + info->gpr_offset; + + /* Pass one- or two-element floating-point aggregates in FPRs. 
*/ +- if ((info->num_fprs = loongarch_pass_aggregate_in_fpr_pair_p (type, fields, use_vecarg_p)) ++ if ((info->num_fprs ++ = loongarch_pass_aggregate_num_fpr (type, fields, use_vecarg_p)) + && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS) + switch (info->num_fprs) + { + case 1: + return loongarch_pass_fpr_single (mode, fregno, +- TYPE_MODE (fields[0].type)); ++ TYPE_MODE (fields[0].type), ++ fields[0].offset); + + case 2: + return loongarch_pass_fpr_pair (mode, fregno, +- TYPE_MODE (fields[0].type), +- fields[0].offset, +- fregno + 1, +- TYPE_MODE (fields[1].type), +- fields[1].offset); ++ TYPE_MODE (fields[0].type), ++ fields[0].offset, ++ fregno + 1, ++ TYPE_MODE (fields[1].type), ++ fields[1].offset); + + default: + gcc_unreachable (); +@@ -742,9 +535,10 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * + return gen_rtx_REG (mode, fregno); + + case MODE_COMPLEX_FLOAT: +- return loongarch_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0, +- fregno + 1, GET_MODE_INNER (mode), +- GET_MODE_UNIT_SIZE (mode)); ++ return loongarch_pass_fpr_pair (mode, fregno, ++ GET_MODE_INNER (mode), 0, ++ fregno + 1, GET_MODE_INNER (mode), ++ GET_MODE_UNIT_SIZE (mode)); + + default: + gcc_unreachable (); +@@ -761,10 +555,11 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * + if (!SCALAR_FLOAT_TYPE_P (fields[0].type)) + std::swap (fregno, gregno); + +- return loongarch_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type), +- fields[0].offset, +- gregno, TYPE_MODE (fields[1].type), +- fields[1].offset); ++ return loongarch_pass_fpr_pair (mode, fregno, ++ TYPE_MODE (fields[0].type), ++ fields[0].offset, gregno, ++ TYPE_MODE (fields[1].type), ++ fields[1].offset); + } + } + +@@ -791,7 +586,7 @@ loongarch_get_arg_info (struct loongarch_arg_info *info, const CUMULATIVE_ARGS * + + static rtx + loongarch_function_arg (cumulative_args_t cum_v, machine_mode mode, +- const_tree type, bool named) ++ const_tree type, bool named) + { + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + struct loongarch_arg_info info; +@@ -806,7 +601,7 @@ loongarch_function_arg (cumulative_args_t cum_v, machine_mode mode, + + static void + loongarch_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, +- const_tree type, bool named) ++ const_tree type, bool named) + { + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + struct loongarch_arg_info info; +@@ -825,11 +620,12 @@ loongarch_function_arg_advance (cumulative_args_t cum_v, machine_mode mode, + + static int + loongarch_arg_partial_bytes (cumulative_args_t cum, +- machine_mode mode, tree type, bool named) ++ machine_mode mode, tree type, bool named) + { + struct loongarch_arg_info arg; + +- loongarch_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false); ++ loongarch_get_arg_info (&arg, get_cumulative_args (cum), ++ mode, type, named, false); + return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0; + } + +@@ -837,8 +633,9 @@ loongarch_arg_partial_bytes (cumulative_args_t cum, + VALTYPE is the return type and MODE is VOIDmode. For libcalls, + VALTYPE is null and MODE is the mode of the return value. 
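/* A worked example for loongarch_arg_partial_bytes above, assuming
   the usual eight argument GPRs ($a0-$a7) and 8-byte words: */

struct pair { long lo, hi; };

extern void f (long a0, long a1, long a2, long a3,
	       long a4, long a5, long a6, struct pair p);

/* Seven longs occupy $a0..$a6, leaving one free GPR, so p is split:
   p.lo rides in $a7 (info.num_gprs == 1) while p.hi goes to the
   stack, and arg_partial_bytes returns 1 * UNITS_PER_WORD.  */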
*/ + +-rtx +-loongarch_function_value (const_tree type, const_tree func, machine_mode mode) ++static rtx ++loongarch_function_value_1 (const_tree type, const_tree func, ++ machine_mode mode) + { + struct loongarch_arg_info info; + CUMULATIVE_ARGS args; +@@ -854,15 +651,34 @@ loongarch_function_value (const_tree type, const_tree func, machine_mode mode) + mode = promote_function_mode (type, mode, &unsigned_p, func, 1); + } + +- memset (&args, 0, sizeof args); ++ memset (&args, 0, sizeof (args)); + return loongarch_get_arg_info (&info, &args, mode, type, true, true); + } + +-/* Implement TARGET_PASS_BY_REFERENCE. */ ++ ++/* Implement TARGET_FUNCTION_VALUE. */ ++ ++static rtx ++loongarch_function_value (const_tree valtype, const_tree fn_decl_or_type, ++ bool outgoing ATTRIBUTE_UNUSED) ++{ ++ return loongarch_function_value_1 (valtype, fn_decl_or_type, VOIDmode); ++} ++ ++/* Implement TARGET_LIBCALL_VALUE. */ ++ ++static rtx ++loongarch_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED) ++{ ++ return loongarch_function_value_1 (NULL_TREE, NULL_TREE, mode); ++} ++ ++ ++/* Implement TARGET_PASS_BY_REFERENCE. */ + + static bool + loongarch_pass_by_reference (cumulative_args_t cum_v, machine_mode mode, +- const_tree type, bool named) ++ const_tree type, bool named) + { + HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode); + struct loongarch_arg_info info; +@@ -886,23 +702,25 @@ loongarch_pass_by_reference (cumulative_args_t cum_v, machine_mode mode, + /* Implement TARGET_RETURN_IN_MEMORY. */ + + static bool +-loongarch_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) ++loongarch_return_in_memory (const_tree type, ++ const_tree fndecl ATTRIBUTE_UNUSED) + { + CUMULATIVE_ARGS args; + cumulative_args_t cum = pack_cumulative_args (&args); + + /* The rules for returning in memory are the same as for passing the + first named argument by reference. */ +- memset (&args, 0, sizeof args); ++ memset (&args, 0, sizeof (args)); + return loongarch_pass_by_reference (cum, TYPE_MODE (type), type, true); + } + + /* Implement TARGET_SETUP_INCOMING_VARARGS. */ + + static void +-loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode, +- tree type, int *pretend_size ATTRIBUTE_UNUSED, +- int no_rtl) ++loongarch_setup_incoming_varargs (cumulative_args_t cum, ++ machine_mode mode, tree type, ++ int *pretend_size ATTRIBUTE_UNUSED, ++ int no_rtl) + { + CUMULATIVE_ARGS local_cum; + int gp_saved; +@@ -911,7 +729,8 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode, + argument. Advance a local copy of CUM past the last "real" named + argument, to find out how many registers are left over. */ + local_cum = *get_cumulative_args (cum); +- loongarch_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1); ++ loongarch_function_arg_advance (pack_cumulative_args (&local_cum), ++ mode, type, 1); + + /* Found out how many registers we need to save. 
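/* What the varargs setup below arranges, seen from the source level
   (eight-GPR argument convention assumed): the unnamed-argument
   registers are dumped next to the incoming stack so va_arg can walk
   every argument with plain loads.  */

#include <stdarg.h>

int
sum (int n, ...)	/* n arrives in $a0; the prologue spills the  */
{			/* remaining $a1..$a7 into the register save  */
  va_list ap;		/* area that this hook reserves.              */
  int s = 0;
  va_start (ap, n);
  while (n-- > 0)
    s += va_arg (ap, int);
  va_end (ap);
  return s;
}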
*/ + gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs; +@@ -920,12 +739,11 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode, + { + rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx, + REG_PARM_STACK_SPACE (cfun->decl) +- - gp_saved * UNITS_PER_WORD); ++ - gp_saved * UNITS_PER_WORD); + rtx mem = gen_frame_mem (BLKmode, ptr); + set_mem_alias_set (mem, get_varargs_alias_set ()); + +- move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST, +- mem, gp_saved); ++ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST, mem, gp_saved); + } + if (REG_PARM_STACK_SPACE (cfun->decl) == 0) + cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD; +@@ -941,8 +759,7 @@ loongarch_set_frame_expr (rtx frame_pattern) + + insn = get_last_insn (); + RTX_FRAME_RELATED_P (insn) = 1; +- REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, +- frame_pattern, ++ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, frame_pattern, + REG_NOTES (insn)); + } + +@@ -963,8 +780,8 @@ static bool + loongarch_save_reg_p (unsigned int regno) + { + bool call_saved = !global_regs[regno] && !call_used_regs[regno]; +- bool might_clobber = crtl->saves_all_registers +- || df_regs_ever_live_p (regno); ++ bool might_clobber ++ = crtl->saves_all_registers || df_regs_ever_live_p (regno); + + if (call_saved && might_clobber) + return true; +@@ -978,15 +795,6 @@ loongarch_save_reg_p (unsigned int regno) + return false; + } + +-/* Determine whether to call GPR save/restore routines. */ +-static bool +-loongarch_use_save_libcall (const struct loongarch_frame_info *frame) +-{ +- // FIXME: if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed) +- return false; +- +-} +- + /* Determine which GPR save/restore routine to call. */ + + static unsigned +@@ -998,43 +806,114 @@ loongarch_save_libcall_count (unsigned mask) + abort (); + } + ++/* Find an available register to be used as dynamic realign argument ++ pointer regsiter. Such a register will be written in prologue and ++ used in begin of body, so it must not be ++ 1. parameter passing register. ++ 2. GOT pointer. ++ We reuse static-chain register if it is available. Otherwise, we ++ use r15 for loongarch64(There may be a better choice. TODO). ++ ++ Return: the regno of chosen register. */ ++ ++static unsigned int ++find_drap_reg (void) ++{ ++ tree decl = cfun->decl; ++ /* Always use callee-saved register if there are no caller-saved ++ registers. */ ++ /* Use r15 for nested function or function need static chain. ++ Since function with tail call may use any caller-saved ++ registers in epilogue, DRAP must not use caller-saved ++ register in such case. */ ++ if (DECL_STATIC_CHAIN (decl) ++ || crtl->tail_call_emit) ++ return DRAP_REGNUM; ++ ++ return STATIC_CHAIN_REGNUM; ++} ++ ++ ++/* Return Dynamic Realign Argument Pointer RTX. Now there isn't any. 
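/* A source-level trigger for the DRAP machinery below: a local whose
   alignment exceeds the ABI stack boundary (16 bytes on LP64,
   assumed) forces the prologue to realign sp, and the DRAP register
   preserves a pointer to the incoming arguments, which no longer sit
   at a fixed offset from the realigned sp.  The consumer function is
   hypothetical.  */

extern void consume (char *, int);

void
g (int n)
{
  char buf[128] __attribute__ ((aligned (64)));	/* > 16-byte boundary */
  consume (buf, n);
}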
*/ ++ ++static rtx ++loongarch_get_drap_rtx (void) ++{ ++ if (crtl->stack_alignment_needed <= STACK_BOUNDARY ++ || (get_frame_size () == 0 && crtl->args.size == 0)) ++ { ++ crtl->stack_realign_needed = false; ++ return NULL; ++ } ++ ++ if (loongarch_force_drap) ++ crtl->need_drap = true; ++ ++ if (stack_realign_drap) ++ { ++ /* Assign DRAP to vDRAP and returns vDRAP */ ++ unsigned int regno = find_drap_reg (); ++ rtx drap_vreg; ++ rtx arg_ptr; ++ rtx_insn *seq, *insn; ++ ++ arg_ptr = gen_rtx_REG (Pmode, regno); ++ crtl->drap_reg = arg_ptr; ++ ++ start_sequence (); ++ drap_vreg = copy_to_reg (arg_ptr); ++ seq = get_insns (); ++ end_sequence (); ++ ++ insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ())); ++ if (!optimize) ++ { ++ add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ } ++ return drap_vreg; ++ } ++ else ++ return NULL; ++} ++ + /* Populate the current function's loongarch_frame_info structure. + +- LARCH stack frames grown downward. High addresses are at the top. +- +- +-------------------------------+ +- | | +- | incoming stack arguments | +- | | +- +-------------------------------+ <-- incoming stack pointer +- | | +- | callee-allocated save area | +- | for arguments that are | +- | split between registers and | +- | the stack | +- | | +- +-------------------------------+ <-- arg_pointer_rtx +- | | +- | callee-allocated save area | +- | for register varargs | +- | | +- +-------------------------------+ <-- hard_frame_pointer_rtx; +- | | stack_pointer_rtx + gp_sp_offset +- | GPR save area | + UNITS_PER_WORD +- | | +- +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset +- | | + UNITS_PER_HWVALUE +- | FPR save area | +- | | +- +-------------------------------+ <-- frame_pointer_rtx (virtual) +- | | +- | local variables | +- | | +- P +-------------------------------+ +- | | +- | outgoing stack arguments | +- | | +- +-------------------------------+ <-- stack_pointer_rtx ++ LoongArch stack frames grown downward. High addresses are at the top. ++ ++ +-------------------------------+ ++ | | ++ | incoming stack arguments | ++ | | ++ +-------------------------------+ <-- incoming stack pointer ++ | | ++ | callee-allocated save area | ++ | for arguments that are | ++ | split between registers and | ++ | the stack | ++ | | ++ +-------------------------------+ <-- arg_pointer_rtx (virtual) ++ | | ++ | callee-allocated save area | ++ | for register varargs | ++ | | ++ +-------------------------------+ <-- hard_frame_pointer_rtx; ++ | | stack_pointer_rtx + gp_sp_offset ++ | GPR save area | + UNITS_PER_WORD ++ | | ++ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset ++ | | + UNITS_PER_HWVALUE ++ | FPR save area | ++ | | ++ +-------------------------------+ <-- frame_pointer_rtx (virtual) ++ | | ++ | local variables | ++ | | ++ P +-------------------------------+ ++ | | ++ | outgoing stack arguments | ++ | | ++ +-------------------------------+ <-- stack_pointer_rtx + + Dynamic stack allocations such as alloca insert data at point P. + They decrease stack_pointer_rtx but leave frame_pointer_rtx and +@@ -1050,58 +929,93 @@ loongarch_compute_frame_info (void) + frame = &cfun->machine->frame; + memset (frame, 0, sizeof (*frame)); + +- /* Find out which GPRs we need to save. */ +- for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ /* Find out which GPRs we need to save. 
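/* A worked instance of the layout above (LP64, 8-byte words and FP
   registers, 16-byte stack alignment assumed; the numbers are purely
   illustrative):

     outgoing args: 32 bytes  -> offset 32
     locals:        24 bytes  -> rounds to 32; offset 64
				 = frame_pointer_offset
     2 saved FPRs:  16 bytes  -> offset 80;  fp_sp_offset = 72
     3 saved GPRs:  24 bytes  -> rounds to 32; offset 112;
				 gp_sp_offset = 104
     no varargs/pretend area  -> total_size = 112
				 = hard_frame_pointer_offset  */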
*/ ++ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) ++ if (loongarch_save_reg_p (regno)) ++ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; ++ ++ /* If this function calls eh_return, we must also save and restore the ++ EH data registers. */ ++ if (crtl->calls_eh_return) ++ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++) ++ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; ++ ++ /* Find out which FPRs we need to save. This loop must iterate over ++ the same space as its companion in loongarch_for_each_saved_reg. */ ++ if (TARGET_HARD_FLOAT) ++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) + if (loongarch_save_reg_p (regno)) +- frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; +- +- /* If this function calls eh_return, we must also save and restore the +- EH data registers. */ +- if (crtl->calls_eh_return) +- for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++) +- frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++; +- +- /* Find out which FPRs we need to save. This loop must iterate over +- the same space as its companion in loongarch_for_each_saved_reg. */ +- if (TARGET_HARD_FLOAT) +- for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) +- if (loongarch_save_reg_p (regno)) +- frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++; +- +- /* At the bottom of the frame are any outgoing stack arguments. */ +- offset = LARCH_STACK_ALIGN (crtl->outgoing_args_size); +- /* Next are local stack variables. */ +- offset += LARCH_STACK_ALIGN (get_frame_size ()); +- /* The virtual frame pointer points above the local variables. */ ++ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++; ++ ++ /* Treat drap reg as a callee-saved reg. */ ++ if (stack_realign_drap) ++ frame->mask |= 1 << (find_drap_reg ()), num_x_saved++; ++ ++ /* At the bottom of the frame are any outgoing stack arguments. */ ++ offset = LARCH_STACK_ALIGN2 (crtl->outgoing_args_size); ++ /* Next are local stack variables. */ ++ offset += LARCH_STACK_ALIGN2 (get_frame_size ()); ++ /* The virtual frame pointer points above the local variables. */ + frame->frame_pointer_offset = offset; +- /* Next are the callee-saved FPRs. */ ++ /* Next are the callee-saved FPRs. */ + if (frame->fmask) +- offset += LARCH_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG); +- frame->fp_sp_offset = offset - UNITS_PER_FP_REG; +- /* Next are the callee-saved GPRs. */ ++ { ++ if (crtl->stack_realign_needed) ++ offset += num_f_saved * UNITS_PER_FP_REG; ++ else ++ offset += LARCH_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG); ++ frame->fp_sp_offset = offset - UNITS_PER_FP_REG; ++ } ++ else ++ frame->fp_sp_offset = offset; ++ /* Next are the callee-saved GPRs. */ + if (frame->mask) + { +- unsigned x_save_size = LARCH_STACK_ALIGN (num_x_saved * UNITS_PER_WORD); +- unsigned num_save_restore = 1 + loongarch_save_libcall_count (frame->mask); ++ unsigned x_save_size; ++ if (crtl->stack_realign_needed) ++ x_save_size = num_x_saved * UNITS_PER_WORD; ++ else ++ x_save_size = LARCH_STACK_ALIGN (num_x_saved * UNITS_PER_WORD); ++ unsigned num_save_restore ++ = 1 + loongarch_save_libcall_count (frame->mask); + + /* Only use save/restore routines if they don't alter the stack size. 
*/ + if (LARCH_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size) + frame->save_libcall_adjustment = x_save_size; + + offset += x_save_size; ++ frame->gp_sp_offset = offset - UNITS_PER_WORD; + } +- frame->gp_sp_offset = offset - UNITS_PER_WORD; +- /* The hard frame pointer points above the callee-saved GPRs. */ +- frame->hard_frame_pointer_offset = offset; +- /* Above the hard frame pointer is the callee-allocated varags save area. */ +- offset += LARCH_STACK_ALIGN (cfun->machine->varargs_size); ++ else ++ frame->gp_sp_offset = offset; ++ ++ /* The hard frame pointer points above the callee-saved GPRs. */ ++ if (crtl->stack_realign_needed) ++ frame->hard_frame_pointer_offset = frame->gp_sp_offset; /* For dwarf. */ ++ else ++ frame->hard_frame_pointer_offset = offset; ++ ++ /* Realign here for saving space if crtl->stack_realign_needed is true. */ ++ if (stack_realign_drap) ++ offset = LARCH_STACK_ALIGN2 (offset); ++ else if (stack_realign_fp) ++ offset = LARCH_STACK_ALIGN (offset); ++ ++ /* Above the hard frame pointer is the callee-allocated varags save area. */ ++ if (stack_realign_fp) ++ offset += LARCH_STACK_ALIGN (cfun->machine->varargs_size); ++ else ++ offset += LARCH_STACK_ALIGN2 (cfun->machine->varargs_size); + /* Next is the callee-allocated area for pretend stack arguments. */ +- offset += LARCH_STACK_ALIGN (crtl->args.pretend_args_size); ++ if (stack_realign_fp) ++ offset += LARCH_STACK_ALIGN (crtl->args.pretend_args_size); ++ else ++ offset += LARCH_STACK_ALIGN2 (crtl->args.pretend_args_size); + /* Arg pointer must be below pretend args, but must be above alignment + padding. */ + frame->arg_pointer_offset = offset - crtl->args.pretend_args_size; + frame->total_size = offset; +- /* Next points the incoming stack pointer and any incoming arguments. */ ++ /* Next points the incoming stack pointer and any incoming arguments. */ + + /* Only use save/restore routines when the GPRs are atop the frame. */ + if (frame->hard_frame_pointer_offset != frame->total_size) +@@ -1117,8 +1031,6 @@ loongarch_initial_elimination_offset (int from, int to) + { + HOST_WIDE_INT src, dest; + +- loongarch_compute_frame_info (); +- + if (to == HARD_FRAME_POINTER_REGNUM) + dest = cfun->machine->frame.hard_frame_pointer_offset; + else if (to == STACK_POINTER_REGNUM) +@@ -1145,8 +1057,8 @@ typedef void (*loongarch_save_restore_fn) (rtx, rtx); + stack pointer. */ + + static void +-loongarch_save_restore_reg (machine_mode mode, int regno, +- HOST_WIDE_INT offset, loongarch_save_restore_fn fn) ++loongarch_save_restore_reg (machine_mode mode, int regno, HOST_WIDE_INT offset, ++ loongarch_save_restore_fn fn) + { + rtx mem; + +@@ -1159,12 +1071,29 @@ loongarch_save_restore_reg (machine_mode mode, int regno, + of the frame. */ + + static void +-loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn fn) ++loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, ++ loongarch_save_restore_fn fn) + { + HOST_WIDE_INT offset; + +- /* Save the link register and s-registers. */ ++ /* Save the link register and s-registers. */ + offset = cfun->machine->frame.gp_sp_offset - sp_offset; ++ ++ /* The drap reg and fp reg have been saved in loongarch_expand_prologue ++ * when stack_realign_drap is true. */ ++ if (stack_realign_drap) ++ offset -= UNITS_PER_WORD * cfun->machine->frame.gpr_saved_num; ++ ++ /* Save fp reg first for access incoming-args in stack easily ++ * when stack_realign_fp is true. 
*/ ++ if (stack_realign_fp) ++ { ++ loongarch_save_restore_reg (word_mode, HARD_FRAME_POINTER_REGNUM, ++ offset, fn); ++ cfun->machine->frame.mask &= (~(1LL << HARD_FRAME_POINTER_REGNUM)); ++ offset -= UNITS_PER_WORD; ++ } ++ + for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) + if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) + { +@@ -1172,6 +1101,10 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn + offset -= UNITS_PER_WORD; + } + ++ /* Undo. */ ++ if (stack_realign_fp) ++ cfun->machine->frame.mask |= (1LL << HARD_FRAME_POINTER_REGNUM); ++ + /* This loop must iterate over the same space as its companion in + loongarch_compute_frame_info. */ + offset = cfun->machine->frame.fp_sp_offset - sp_offset; +@@ -1185,6 +1118,19 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset, loongarch_save_restore_fn + } + } + ++/* Emit a move from SRC to DEST. Assume that the move expanders can ++ handle all moves if !can_create_pseudo_p (). The distinction is ++ important because, unlike emit_move_insn, the move expanders know ++ how to force Pmode objects into the constant pool even when the ++ constant pool address is not itself legitimate. */ ++ ++rtx ++loongarch_emit_move (rtx dest, rtx src) ++{ ++ return (can_create_pseudo_p () ? emit_move_insn (dest, src) ++ : emit_move_insn_1 (dest, src)); ++} ++ + /* Save register REG to MEM. Make the instruction frame-related. */ + + static void +@@ -1207,575 +1153,690 @@ loongarch_restore_reg (rtx reg, rtx mem) + RTX_FRAME_RELATED_P (insn) = 1; + } + +-/* Return the code to invoke the GPR save routine. */ +- +-const char * +-loongarch_output_gpr_save (unsigned mask) +-{ +- static char s[32]; +- unsigned n = loongarch_save_libcall_count (mask); +- +- ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__loongarch_save_%u", n); +- gcc_assert ((size_t) bytes < sizeof (s)); +- +- return s; +-} +- +-#define IMM_BITS 12 +- +-#define IMM_REACH (1LL << IMM_BITS) +- + /* For stack frames that can't be allocated with a single ADDI instruction, + compute the best value to initially allocate. It must at a minimum +- allocate enough space to spill the callee-saved registers. If TARGET_RVC, +- try to pick a value that will allow compression of the register saves +- without adding extra instructions. */ ++ allocate enough space to spill the callee-saved registers. */ + + static HOST_WIDE_INT + loongarch_first_stack_step (struct loongarch_frame_info *frame) + { +- if (SMALL_OPERAND (frame->total_size)) ++ ++ /* Only for fpr/gpr saved regs first when stack_realign_fp is true. */ ++ if (stack_realign_fp) ++ return frame->total_size - frame->frame_pointer_offset; ++ ++ HOST_WIDE_INT realign_size = crtl->stack_alignment_needed / BITS_PER_UNIT; ++ ++ if (IMM12_OPERAND (frame->total_size)) + return frame->total_size; + +- HOST_WIDE_INT min_first_step = +- LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset); ++ HOST_WIDE_INT min_first_step ++ = LARCH_STACK_ALIGN2 (frame->total_size - frame->fp_sp_offset); + HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8; + HOST_WIDE_INT min_second_step = frame->total_size - max_first_step; +- gcc_assert (min_first_step <= max_first_step); + + /* As an optimization, use the least-significant bits of the total frame +- size, so that the second adjustment step is just LUI + ADD. */ +- if (!SMALL_OPERAND (min_second_step) ++ size, so that the second adjustment step is just LU12I + ADD. 
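/* A worked example of the split below, assuming IMM_BITS == 12 (so
   IMM_REACH == 4096 and IMM12_OPERAND covers -2048..2047), a 16-byte
   preferred stack boundary, and a save area small enough to fit in
   the first step.  For total_size == 5000:

     max_first_step  = 4096/2 - 16  = 2032
     min_second_step = 5000 - 2032  = 2968   (not an IMM12 operand)
     5000 % 4096     = 904          (< 2048)

   so the first adjustment is 904 (one ADDI) and the remaining 4096
   is exactly LU12I.W of 1 plus an ADD.  */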
*/ ++ if (!IMM12_OPERAND (min_second_step) + && frame->total_size % IMM_REACH < IMM_REACH / 2 + && frame->total_size % IMM_REACH >= min_first_step) + return frame->total_size % IMM_REACH; + +- return max_first_step; +-} +- +-static rtx +-loongarch_adjust_libcall_cfi_prologue () +-{ +- rtx dwarf = NULL_RTX; +- rtx adjust_sp_rtx, reg, mem, insn; +- int saved_size = cfun->machine->frame.save_libcall_adjustment; +- int offset; +- +- for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) +- if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) +- { +- /* The save order is ra, s0 to s8. */ +- if (regno == RETURN_ADDR_REGNUM) +- offset = saved_size - UNITS_PER_WORD; +- else +- offset = saved_size - ((regno - S0_REGNUM + 2) * UNITS_PER_WORD); +- +- reg = gen_rtx_REG (SImode, regno); +- mem = gen_frame_mem (SImode, plus_constant (Pmode, +- stack_pointer_rtx, +- offset)); +- +- insn = gen_rtx_SET (mem, reg); +- dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf); +- } +- +- /* Debug info for adjust sp. */ +- adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, +- stack_pointer_rtx, GEN_INT (-saved_size)); +- dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, +- dwarf); +- return dwarf; ++ return crtl->stack_realign_needed ? (max_first_step < realign_size ++ ? realign_size ++ : ROUND_DOWN (max_first_step, ++ realign_size)) ++ : max_first_step; + } + + static void + loongarch_emit_stack_tie (void) + { +- if (Pmode == SImode) +- emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx)); +- else +- emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx)); +-} +- +-/* Return nonzero if this function is known to have a null epilogue. +- This allows the optimizer to omit jumps to jumps if no stack +- was created. */ +- +-bool +-loongarch_can_use_return_insn (void) +-{ +- return reload_completed && cfun->machine->frame.total_size == 0; ++ emit_insn (PMODE_INSN (gen_stack_tie, ++ (stack_pointer_rtx, hard_frame_pointer_rtx))); + } + +-static rtx +-loongarch_adjust_libcall_cfi_epilogue () +-{ +- rtx dwarf = NULL_RTX; +- rtx adjust_sp_rtx, reg; +- int saved_size = cfun->machine->frame.save_libcall_adjustment; +- +- /* Debug info for adjust sp. */ +- adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx, +- stack_pointer_rtx, GEN_INT (saved_size)); +- dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx, +- dwarf); +- +- for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) +- if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) +- { +- reg = gen_rtx_REG (SImode, regno); +- dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf); +- } ++#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) + +- return dwarf; +-} ++#if PROBE_INTERVAL > 16384 ++#error Cannot use indexed addressing mode for stack probing ++#endif + +-/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P +- says which. */ ++/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE, ++ inclusive. These are offsets from the current stack pointer. */ + +-void +-loongarch_expand_epilogue (bool sibcall_p) ++static void ++loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size) + { +- /* Split the frame into two. STEP1 is the amount of stack we should +- deallocate before restoring the registers. STEP2 is the amount we +- should deallocate afterwards. ++ HOST_WIDE_INT rounded_size; ++ rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); ++ rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); + +- Start off by assuming that no registers need to be restored. 
*/ +- struct loongarch_frame_info *frame = &cfun->machine->frame; +- unsigned mask = frame->mask; +- HOST_WIDE_INT step1 = frame->total_size; +- HOST_WIDE_INT step2 = 0; +- bool use_restore_libcall = !sibcall_p && loongarch_use_save_libcall (frame); +- rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); +- rtx insn; ++ size = size + first; ++ /* Sanity check for the addressing mode we're going to use. */ ++ gcc_assert (first <= 16384); + +- /* We need to add memory barrier to prevent read from deallocated stack. */ +- bool need_barrier_p = (get_frame_size () +- + cfun->machine->frame.arg_pointer_offset) != 0; ++ /* Step 1: round SIZE to the previous multiple of the interval. */ + +- if (!sibcall_p && loongarch_can_use_return_insn ()) +- { +- emit_jump_insn (gen_return ()); +- return; +- } ++ rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); + +- /* Move past any dynamic stack allocations. */ +- if (cfun->calls_alloca) ++ /* Step 2: compute initial and final value of the loop counter. */ ++ ++ emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); ++ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ ++ if (rounded_size != 0) + { +- /* Emit a barrier to prevent loads from a deallocated stack. */ +- loongarch_emit_stack_tie (); +- need_barrier_p = false; ++ emit_move_insn (r12, GEN_INT (rounded_size)); ++ emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, ++ stack_pointer_rtx, r12))); + +- rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); +- if (!SMALL_OPERAND (INTVAL (adjust))) +- { +- loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); +- adjust = N_LARCH_PROLOGUE_TEMP (Pmode); +- } ++ /* Step 3: the loop + +- insn = emit_insn ( +- gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, +- adjust)); ++ do ++ { ++ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL ++ probe at TEST_ADDR ++ } ++ while (TEST_ADDR != LAST_ADDR) + +- rtx dwarf = NULL_RTX; +- rtx cfa_adjust_value = gen_rtx_PLUS ( +- Pmode, hard_frame_pointer_rtx, +- GEN_INT (-frame->hard_frame_pointer_offset)); +- rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value); +- dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf); +- RTX_FRAME_RELATED_P (insn) = 1; ++ probes at FIRST + N * PROBE_INTERVAL for values of N from 1 ++ until it is equal to ROUNDED_SIZE. */ + +- REG_NOTES (insn) = dwarf; ++ emit_insn (PMODE_INSN (gen_probe_stack_range, (stack_pointer_rtx, ++ stack_pointer_rtx, r12, r14))); + } + +- /* If we need to restore registers, deallocate as much stack as +- possible in the second step without going out of range. */ +- if ((frame->mask | frame->fmask) != 0) +- { +- step2 = loongarch_first_stack_step (frame); +- step1 -= step2; +- } ++ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time ++ that SIZE is equal to ROUNDED_SIZE. */ + +- /* Set TARGET to BASE + STEP1. */ +- if (step1 > 0) ++ if (size != rounded_size) + { +- /* Emit a barrier to prevent loads from a deallocated stack. */ +- loongarch_emit_stack_tie (); +- need_barrier_p = false; +- +- /* Get an rtx for STEP1 that we can add to BASE. 
*/ +- rtx adjust = GEN_INT (step1); +- if (!SMALL_OPERAND (step1)) ++ if (size - rounded_size >= PROBE_INTERVAL/2) + { +- loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), adjust); +- adjust = N_LARCH_PROLOGUE_TEMP (Pmode); ++ emit_move_insn (r14, GEN_INT (size - rounded_size)); ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, gen_rtx_MINUS (Pmode, ++ stack_pointer_rtx, ++ r14))); + } ++ else ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, gen_rtx_PLUS (Pmode, ++ stack_pointer_rtx, ++ GEN_INT (rounded_size - size)))); + +- insn = emit_insn ( +- gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust)); +- +- rtx dwarf = NULL_RTX; +- rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, +- GEN_INT (step2)); +- +- dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); +- RTX_FRAME_RELATED_P (insn) = 1; +- +- REG_NOTES (insn) = dwarf; + } + +- if (use_restore_libcall) +- frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ +- +- /* Restore the registers. */ +- loongarch_for_each_saved_reg (frame->total_size - step2, loongarch_restore_reg); +- +- if (use_restore_libcall) ++ if (first) + { +- frame->mask = mask; /* Undo the above fib. */ +- gcc_assert (step2 >= frame->save_libcall_adjustment); +- step2 -= frame->save_libcall_adjustment; ++ emit_move_insn (r12, GEN_INT (first)); ++ emit_insn (gen_rtx_SET (stack_pointer_rtx, gen_rtx_PLUS (Pmode, ++ stack_pointer_rtx, r12))); + } + +- if (need_barrier_p) +- loongarch_emit_stack_tie (); ++ /* Make sure nothing is scheduled before we are done. */ ++ emit_insn (gen_blockage ()); ++} + +- /* Deallocate the final bit of the frame. */ +- if (step2 > 0) +- { +- insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, +- GEN_INT (step2))); ++/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are ++ absolute addresses. */ ++const char * ++loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) ++{ ++ static int labelno = 0; ++ char loop_lab[32], tmp[64]; ++ rtx xops[3]; + +- rtx dwarf = NULL_RTX; +- rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, +- const0_rtx); +- dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); +- RTX_FRAME_RELATED_P (insn) = 1; ++ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++); + +- REG_NOTES (insn) = dwarf; ++ /* Loop. */ ++ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab); ++ ++ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ ++ xops[0] = reg1; ++ xops[1] = GEN_INT (-PROBE_INTERVAL); ++ xops[2] = reg3; ++ if (TARGET_64BIT) ++ output_asm_insn ("sub.d\t%0,%0,%2", xops); ++ else ++ output_asm_insn ("sub.w\t%0,%0,%2", xops); ++ ++ /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */ ++ xops[1] = reg2; ++ strcpy (tmp, "bne\t%0,%1,"); ++ if (TARGET_64BIT) ++ output_asm_insn ("st.d\t$r0,%0,0", xops); ++ else ++ output_asm_insn ("st.w\t$r0,%0,0", xops); ++ output_asm_insn (strcat (tmp, &loop_lab[1]), xops); ++ ++ return ""; ++} ++ ++/* Expand the "prologue" pattern. */ ++ ++void ++loongarch_expand_prologue (void) ++{ ++ struct loongarch_frame_info *frame; ++ HOST_WIDE_INT size; ++ rtx insn; ++ HOST_WIDE_INT realign_size; ++ HOST_WIDE_INT offset; ++ unsigned mask; ++ HOST_WIDE_INT saved_gpr_num = 0; ++ ++ /* Finalize crtl->stack_realign_needed and frame_pointer_needed flags. */ ++ if((crtl->stack_realign_needed || (!flag_omit_frame_pointer && optimize)) && loongarch_stack_realign) ++ { ++ unsigned int incoming_stack_boundary ++ = (crtl->parm_stack_boundary > PREFERRED_STACK_BOUNDARY ++ ? 
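/* The C-level shape of the probe loop emitted by the asm above;
   PROBE_INTERVAL is 1 << STACK_CHECK_PROBE_INTERVAL_EXP (4096 by
   convention, assumed here).  Sketch only: the real pattern walks the
   stack pointer itself through probe_stack_range.  */

static void
probe_sketch (char *test_addr, const char *last_addr, long interval)
{
  do
    {
      test_addr -= interval;		/* sub.d %0,%0,%2       */
      *(volatile long *) test_addr = 0;	/* probe: st.d $r0,%0,0 */
    }
  while (test_addr != last_addr);	/* bne   %0,%1,loop     */
}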
crtl->parm_stack_boundary : PREFERRED_STACK_BOUNDARY); ++ unsigned int stack_alignment ++ = (crtl->is_leaf ++ ? crtl->max_used_stack_slot_alignment ++ : crtl->stack_alignment_needed); ++ unsigned int stack_realign ++ = (incoming_stack_boundary < stack_alignment); ++ ++ if ((get_frame_size () + crtl->outgoing_args_size) == 0 ++ && (crtl->args.size == 0) ++ && frame_pointer_needed ++ && crtl->is_leaf ++ && crtl->sp_is_unchanging ++ && !cfun->calls_alloca ++ && !crtl->calls_eh_return ++ && !(STACK_CHECK_MOVING_SP ++ && flag_stack_check ++ && flag_exceptions ++ && cfun->can_throw_non_call_exceptions)) ++ { ++ /* If drap has been set, but it actually isn't live at the ++ start of the function, there is no reason to set it up. */ ++ if (crtl->drap_reg) ++ { ++ basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; ++ if (! REGNO_REG_SET_P (DF_LR_IN (bb), ++ REGNO (crtl->drap_reg))) ++ { ++ crtl->drap_reg = NULL_RTX; ++ crtl->need_drap = false; ++ } ++ } ++ frame_pointer_needed = false; ++ crtl->stack_realign_needed = false; ++ crtl->max_used_stack_slot_alignment = incoming_stack_boundary; ++ crtl->stack_alignment_needed = incoming_stack_boundary; ++ crtl->stack_alignment_estimated = incoming_stack_boundary; ++ if (crtl->preferred_stack_boundary > incoming_stack_boundary) ++ crtl->preferred_stack_boundary = incoming_stack_boundary; ++ ++ df_finish_pass (true); ++ df_scan_alloc (NULL); ++ df_scan_blocks (); ++ df_compute_regs_ever_live (true); ++ df_analyze (); ++ loongarch_compute_frame_info(); ++ } ++ } ++ ++ frame = &cfun->machine->frame; ++ size = frame->total_size; ++ ++ mask = frame->mask; ++ ++ realign_size = crtl->stack_alignment_needed / BITS_PER_UNIT; ++ ++ if (flag_stack_usage_info) ++ { ++ if (stack_realign_drap) ++ { ++ current_function_dynamic_stack_size += crtl->stack_alignment_needed / BITS_PER_UNIT; ++ } ++ current_function_static_stack_size = size; + } + +- if (use_restore_libcall) ++ /* When stack_realign_drap is true, save current sp in drap-reg then realign. */ ++ if (stack_realign_drap) + { +- rtx dwarf = loongarch_adjust_libcall_cfi_epilogue (); +- insn = emit_insn (gen_gpr_restore (GEN_INT (loongarch_save_libcall_count (mask)))); ++ rtx tmp_reg = plus_constant (Pmode, stack_pointer_rtx, 0); ++ insn = emit_insn (gen_rtx_SET (crtl->drap_reg, tmp_reg)); + RTX_FRAME_RELATED_P (insn) = 1; +- REG_NOTES (insn) = dwarf; + +- emit_jump_insn (gen_gpr_restore_return (ra)); +- return; ++ int log2_realigned_bytes = exact_log2 (realign_size); ++ tmp_reg = gen_rtx_REG (Pmode, GP_REG_FIRST); ++ insn = emit_insn (gen_insvdi (stack_pointer_rtx, ++ GEN_INT (log2_realigned_bytes), ++ const0_rtx, ++ tmp_reg)); ++ insn = gen_anddi3 (stack_pointer_rtx, ++ stack_pointer_rtx, ++ GEN_INT (-realign_size)); ++ loongarch_set_frame_expr (insn); + } + +- /* Add in the __builtin_eh_return stack adjustment. */ +- if (crtl->calls_eh_return) +- emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, +- EH_RETURN_STACKADJ_RTX)); ++ /* Save the registers. */ ++ if ((frame->mask | frame->fmask) != 0) ++ { ++ HOST_WIDE_INT step1 = MIN (size, loongarch_first_stack_step (frame)); + +- if (!sibcall_p) +- emit_jump_insn (gen_simple_return_internal (ra)); +-} ++ /* Save fp first for dwarf. 
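/* The realignment sequence just above is simply sp &= -realign_size:
   the INSV from $r0 clears the low log2(realign_size) bits of sp.  A
   standalone rendition (power-of-two alignment assumed): */

#include <stdint.h>

static inline uintptr_t
realign_down (uintptr_t sp, uintptr_t align)
{
  return sp & ~(align - 1);	/* e.g. 0x7fff0038 & -64 = 0x7fff0000 */
}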
*/ ++ if (stack_realign_drap) ++ { ++ gcc_assert (step1 % realign_size == 0); ++ if (frame->mask & (1LL << HARD_FRAME_POINTER_REGNUM)) ++ { ++ emit_insn (gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, ++ GEN_INT (-(frame->total_size ++ - frame->gp_sp_offset)))); ++ step1 -= (frame->total_size - frame->gp_sp_offset); ++ loongarch_save_restore_reg (word_mode, HARD_FRAME_POINTER_REGNUM, ++ 0, loongarch_save_reg); ++ cfun->machine->frame.mask ++ = frame->mask & ~(1LL << HARD_FRAME_POINTER_REGNUM); ++ saved_gpr_num ++; ++ } ++ /* Set up the frame pointer, if we're using one. */ ++ if (frame_pointer_needed) ++ { ++ insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx); ++ RTX_FRAME_RELATED_P (insn) = 1; + +- +-static rtx loongarch_find_pic_call_symbol (rtx_insn *, rtx, bool); +-static int loongarch_register_move_cost (machine_mode, reg_class_t, +- reg_class_t); +- +-/* Predicates to test for presence of "near"/"short_call" and "far"/"long_call" +- attributes on the given TYPE. */ ++ loongarch_emit_stack_tie (); ++ } ++ } + +-static bool +-loongarch_near_type_p (const_tree type) +-{ +- return (lookup_attribute ("short_call", TYPE_ATTRIBUTES (type)) != NULL +- || lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL); +-} ++ if (!IMM12_OPERAND (-step1) && stack_realign_drap) ++ { ++ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-step1)); ++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ LARCH_PROLOGUE_TEMP (Pmode))); + +-static bool +-loongarch_far_type_p (const_tree type) +-{ +- return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL +- || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL); +-} ++ /* Describe the effect of the previous instructions. */ ++ insn = plus_constant (Pmode, stack_pointer_rtx, -step1); ++ insn = gen_rtx_SET (stack_pointer_rtx, insn); ++ loongarch_set_frame_expr (insn); ++ } ++ else ++ { ++ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-step1)); ++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; ++ } + ++ if (saved_gpr_num && stack_realign_drap) ++ size -= (step1 + frame->total_size - frame->gp_sp_offset); ++ else ++ size -= step1; + +-/* Check if the interrupt attribute is set for a function. */ ++ if (stack_realign_drap && (frame->mask & (1LL << find_drap_reg ()))) ++ { ++ offset = cfun->machine->frame.gp_sp_offset - size ++ - UNITS_PER_WORD * saved_gpr_num; ++ loongarch_save_restore_reg (word_mode, find_drap_reg (), ++ offset, loongarch_save_reg); ++ cfun->machine->frame.mask ++ = frame->mask & ~(1LL << (find_drap_reg ())); ++ saved_gpr_num ++; ++ } + +-static bool +-loongarch_interrupt_type_p (tree type) +-{ +- return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL; +-} ++ cfun->machine->frame.gpr_saved_num = saved_gpr_num; ++ loongarch_for_each_saved_reg (size, loongarch_save_reg); ++ cfun->machine->frame.mask = mask; ++ } + +-/* Implement TARGET_COMP_TYPE_ATTRIBUTES. */ ++ /* Set up the frame pointer, if we're using one. */ ++ if (frame_pointer_needed && !stack_realign_drap) ++ { ++ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (frame->hard_frame_pointer_offset - size)); ++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; + +-static int +-loongarch_comp_type_attributes (const_tree type1, const_tree type2) +-{ +- /* Disallow mixed near/far attributes. 
*/ +- if (loongarch_far_type_p (type1) && loongarch_near_type_p (type2)) +- return 0; +- if (loongarch_near_type_p (type1) && loongarch_far_type_p (type2)) +- return 0; +- return 1; +-} ++ loongarch_emit_stack_tie (); ++ } + +-/* Implement TARGET_INSERT_ATTRIBUTES. */ ++ /* Stack realign when stack_realign_fp is true. */ ++ if (stack_realign_fp) ++ { ++ int log2_realigned_bytes = exact_log2 (realign_size); ++ rtx tmp_reg = gen_rtx_REG (Pmode, GP_REG_FIRST); ++ insn = emit_insn (gen_insvdi (stack_pointer_rtx, ++ GEN_INT (log2_realigned_bytes), ++ const0_rtx, ++ tmp_reg)); ++ insn = gen_anddi3 (stack_pointer_rtx, ++ stack_pointer_rtx, ++ GEN_INT (-realign_size)); ++ loongarch_set_frame_expr (insn); ++ } + +-static void +-loongarch_insert_attributes (tree decl, tree *attributes) +-{ +-} ++ /* Allocate the rest of the frame. */ ++ if ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK ++ || flag_stack_clash_protection) ++ && size > 0) ++ { ++ loongarch_emit_probe_stack_range (get_stack_check_protect (), size); + +-/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */ ++ /* Describe the effect of the previous instructions. */ ++ insn = plus_constant (Pmode, stack_pointer_rtx, -size); ++ insn = gen_rtx_SET (stack_pointer_rtx, insn); ++ loongarch_set_frame_expr (insn); ++ } ++ else ++ { ++ if (size > 0) ++ { ++ if (stack_realign_drap) ++ gcc_assert (size % realign_size == 0); + +-static tree +-loongarch_merge_decl_attributes (tree olddecl, tree newdecl) +-{ +- return merge_attributes (DECL_ATTRIBUTES (olddecl), +- DECL_ATTRIBUTES (newdecl)); ++ if (IMM12_OPERAND (-size)) ++ { ++ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-size)); ++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; ++ } ++ else ++ { ++ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); ++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ LARCH_PROLOGUE_TEMP (Pmode))); ++ ++ /* Describe the effect of the previous instructions. */ ++ insn = plus_constant (Pmode, stack_pointer_rtx, -size); ++ insn = gen_rtx_SET (stack_pointer_rtx, insn); ++ loongarch_set_frame_expr (insn); ++ } ++ } ++ } + } + +-/* Implement TARGET_CAN_INLINE_P. */ ++/* Return nonzero if this function is known to have a null epilogue. ++ This allows the optimizer to omit jumps to jumps if no stack ++ was created. */ + +-static bool +-loongarch_can_inline_p (tree caller, tree callee) ++bool ++loongarch_can_use_return_insn (void) + { +- return default_target_can_inline_p (caller, callee); ++ return reload_completed && cfun->machine->frame.total_size == 0; + } + +-/* Handle an "interrupt" attribute with an optional argument. */ ++/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P ++ says which. */ + +-static tree +-loongarch_handle_interrupt_attr (tree *node ATTRIBUTE_UNUSED, tree name, tree args, +- int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) ++void ++loongarch_expand_epilogue (bool sibcall_p) + { +- /* Check for an argument. */ +- if (is_attribute_p ("interrupt", name) && args != NULL) ++ /* Split the frame into two. STEP1 is the amount of stack we should ++ deallocate before restoring the registers. STEP2 is the amount we ++ should deallocate afterwards. ++ ++ Start off by assuming that no registers need to be restored. 
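/* A source-level trigger for the probed allocation above: with
   -fstack-clash-protection (or -fstack-check), a frame larger than
   one guard page is touched at PROBE_INTERVAL strides before sp
   finally drops, so a single adjustment cannot jump past the kernel's
   guard page.  The consumer function is hypothetical.  */

extern void use_buf (char *);

void
h (void)
{
  char big[1 << 20];	/* ~1 MiB frame, allocated with probing */
  use_buf (big);
}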
*/ ++ struct loongarch_frame_info *frame = &cfun->machine->frame; ++ unsigned mask = frame->mask; ++ HOST_WIDE_INT step1 = frame->total_size; ++ HOST_WIDE_INT step2 = 0; ++ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); ++ rtx insn; ++ HOST_WIDE_INT offset; ++ HOST_WIDE_INT saved_gpr_num = 0; ++ ++ /* We need to add memory barrier to prevent read from deallocated stack. */ ++ bool need_barrier_p ++ = (get_frame_size () + cfun->machine->frame.arg_pointer_offset) != 0; ++ ++ if (!sibcall_p && loongarch_can_use_return_insn ()) + { +- tree cst; ++ emit_jump_insn (gen_return ()); ++ return; ++ } + +- cst = TREE_VALUE (args); +- if (TREE_CODE (cst) != STRING_CST) +- { +- warning (OPT_Wattributes, +- "%qE attribute requires a string argument", +- name); +- *no_add_attrs = true; +- } +- else if (strcmp (TREE_STRING_POINTER (cst), "eic") != 0 +- && strncmp (TREE_STRING_POINTER (cst), "vector=", 7) != 0) +- { +- warning (OPT_Wattributes, +- "argument to %qE attribute is neither eic, nor " +- "vector=", name); +- *no_add_attrs = true; +- } +- else if (strncmp (TREE_STRING_POINTER (cst), "vector=", 7) == 0) +- { +- const char *arg = TREE_STRING_POINTER (cst) + 7; ++ if (!stack_realign_fp) ++ { ++ /* Move past any dynamic stack allocations. */ ++ if (cfun->calls_alloca) ++ { ++ /* Emit a barrier to prevent loads from a deallocated stack. */ ++ loongarch_emit_stack_tie (); ++ need_barrier_p = false; + +- /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5. */ +- if (strlen (arg) != 3 +- || (arg[0] != 's' && arg[0] != 'h') +- || arg[1] != 'w' +- || (arg[0] == 's' && arg[2] != '0' && arg[2] != '1') +- || (arg[0] == 'h' && (arg[2] < '0' || arg[2] > '5'))) ++ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); ++ if (!IMM12_OPERAND (INTVAL (adjust))) + { +- warning (OPT_Wattributes, +- "interrupt vector to %qE attribute is not " +- "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)", +- name); +- *no_add_attrs = true; ++ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), adjust); ++ adjust = LARCH_PROLOGUE_TEMP (Pmode); + } +- } + +- return NULL_TREE; +- } ++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, ++ hard_frame_pointer_rtx, ++ adjust)); + +- return NULL_TREE; +-} ++ if (!(stack_realign_drap)) ++ { ++ rtx dwarf = NULL_RTX; ++ rtx minus_offset = GEN_INT (-frame->hard_frame_pointer_offset); ++ rtx cfa_adjust_value = gen_rtx_PLUS (Pmode, ++ hard_frame_pointer_rtx, ++ minus_offset); + +-/* Handle a "use_shadow_register_set" attribute with an optional argument. */ ++ rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value); ++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf); ++ RTX_FRAME_RELATED_P (insn) = 1; + +-static tree +-loongarch_handle_use_shadow_register_set_attr (tree *node ATTRIBUTE_UNUSED, +- tree name, tree args, +- int flags ATTRIBUTE_UNUSED, +- bool *no_add_attrs) +-{ +- /* Check for an argument. */ +- if (is_attribute_p ("use_shadow_register_set", name) && args != NULL) +- { +- tree cst; ++ REG_NOTES (insn) = dwarf; ++ } ++ } + +- cst = TREE_VALUE (args); +- if (TREE_CODE (cst) != STRING_CST) +- { +- warning (OPT_Wattributes, +- "%qE attribute requires a string argument", +- name); +- *no_add_attrs = true; +- } +- else if (strcmp (TREE_STRING_POINTER (cst), "intstack") != 0) +- { +- warning (OPT_Wattributes, +- "argument to %qE attribute is not intstack", name); +- *no_add_attrs = true; +- } ++ /* If we need to restore registers, deallocate as much stack as ++ possible in the second step without going out of range. 
*/ ++ if ((frame->mask | frame->fmask) != 0) ++ { ++ step2 = loongarch_first_stack_step (frame); ++ step1 -= step2; ++ } + +- return NULL_TREE; +- } ++ /* Set TARGET to BASE + STEP1. */ ++ if (step1 > 0) ++ { ++ /* Emit a barrier to prevent loads from a deallocated stack. */ ++ loongarch_emit_stack_tie (); ++ need_barrier_p = false; + +- return NULL_TREE; +-} +- +-/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR +- and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */ ++ /* Get an rtx for STEP1 that we can add to BASE. */ ++ rtx adjust = GEN_INT (step1); ++ if (!IMM12_OPERAND (step1)) ++ { ++ loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), adjust); ++ adjust = LARCH_PROLOGUE_TEMP (Pmode); ++ } + +-static void +-loongarch_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr) +-{ +- if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))) ++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, ++ adjust)); ++ ++ rtx dwarf = NULL_RTX; ++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, ++ stack_pointer_rtx, ++ GEN_INT (step2)); ++ ++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ ++ REG_NOTES (insn) = dwarf; ++ } ++ ++ /* Restore drap reg and fp reg first when stack_realign_drap is true. */ ++ if (stack_realign_drap) ++ { ++ if (frame->mask & (1LL << HARD_FRAME_POINTER_REGNUM)) ++ { ++ offset = cfun->machine->frame.gp_sp_offset ++ - (frame->total_size - step2) ; ++ loongarch_save_restore_reg (word_mode, HARD_FRAME_POINTER_REGNUM, ++ offset, loongarch_restore_reg); ++ cfun->machine->frame.mask ++ = frame->mask & ~(1LL << HARD_FRAME_POINTER_REGNUM); ++ saved_gpr_num ++; ++ } ++ if (frame->mask & (1LL << find_drap_reg ())) ++ { ++ offset = cfun->machine->frame.gp_sp_offset ++ - (frame->total_size - step2) - UNITS_PER_WORD * saved_gpr_num; ++ loongarch_save_restore_reg (word_mode, find_drap_reg (), ++ offset, loongarch_restore_reg); ++ cfun->machine->frame.mask ++ = frame->mask & ~(1LL << (find_drap_reg ())); ++ saved_gpr_num ++; ++ } ++ cfun->machine->frame.gpr_saved_num = saved_gpr_num; ++ } ++ } ++ else /* stack_realign_fp. */ + { +- *base_ptr = XEXP (x, 0); +- *offset_ptr = INTVAL (XEXP (x, 1)); ++ /* If we need to restore registers, deallocate as much stack as ++ possible in the second step without going out of range. */ ++ if ((frame->mask | frame->fmask) != 0) ++ { ++ step2 = loongarch_first_stack_step (frame); ++ rtx tmp_reg = plus_constant (Pmode, ++ hard_frame_pointer_rtx, ++ -(frame->hard_frame_pointer_offset ++ - frame->frame_pointer_offset)); ++ insn = emit_insn (gen_rtx_SET (stack_pointer_rtx, tmp_reg)); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ } + } +- else ++ ++ /* Restore the registers. */ ++ loongarch_for_each_saved_reg (frame->total_size - step2, ++ loongarch_restore_reg); ++ ++ cfun->machine->frame.mask = mask; ++ ++ if (need_barrier_p) ++ loongarch_emit_stack_tie (); ++ ++ /* Deallocate the final bit of the frame. 
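
The two-step teardown here exists so that every register restore stays within the signed 12-bit displacement of ld.d: step2 is chosen so the save area remains reachable from the adjusted $sp, and step1 covers the remainder. A rough stand-alone sketch of that split follows; the 2047-byte reach and 16-byte alignment are assumptions standing in for whatever loongarch_first_stack_step actually computes (its body is not part of this hunk).

    #include <stdint.h>
    #include <assert.h>

    /* Largest aligned second-step adjustment that keeps all save-area
       offsets inside a signed 12-bit displacement (assumed limits).  */
    static int64_t first_stack_step (int64_t total_size, int64_t save_area)
    {
      if (total_size <= 2047)
        return total_size;            /* one adjustment is enough */
      int64_t step2 = 2047 & ~15;     /* 2032, keeps $sp 16-aligned */
      assert (save_area <= step2);    /* restores stay in range */
      return step2;
    }

    int main (void)
    {
      int64_t total = 70000, save_area = 128;
      int64_t step2 = first_stack_step (total, save_area);
      int64_t step1 = total - step2;  /* deallocated before the restores */
      assert (step1 + step2 == total && step2 <= 2047);
      return 0;
    }
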
*/ ++ if (step2 > 0) + { +- *base_ptr = x; +- *offset_ptr = 0; ++ if (stack_realign_drap) ++ { ++ rtx tmp_reg = gen_rtx_REG (Pmode, find_drap_reg ()); ++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, ++ tmp_reg, ++ const0_rtx)); ++ } ++ else ++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, ++ stack_pointer_rtx, ++ GEN_INT (step2))); ++ ++ rtx dwarf = NULL_RTX; ++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx, const0_rtx); ++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ ++ REG_NOTES (insn) = dwarf; + } ++ ++ /* Add in the __builtin_eh_return stack adjustment. */ ++ if (crtl->calls_eh_return) ++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ EH_RETURN_STACKADJ_RTX)); ++ ++ if (!sibcall_p) ++ emit_jump_insn (gen_simple_return_internal (ra)); + } +- +-static unsigned int loongarch_build_integer (struct loongarch_integer_op *, +- unsigned HOST_WIDE_INT); ++ ++#define LU32I_B (0xfffffULL << 32) ++#define LU52I_B (0xfffULL << 52) + + /* Fill CODES with a sequence of rtl operations to load VALUE. +- Return the number of operations needed. +- Split interger in loongarch_output_move. */ ++ Return the number of operations needed. */ + + static unsigned int + loongarch_build_integer (struct loongarch_integer_op *codes, +- unsigned HOST_WIDE_INT value) ++ HOST_WIDE_INT value) ++ + { +- uint32_t hi32, lo32; +- char all0_bit_vec, sign_bit_vec, allf_bit_vec, paritial_is_sext_of_prev; + unsigned int cost = 0; + +- lo32 = value & 0xffffffff; +- hi32 = value >> 32; +- +- all0_bit_vec = (((hi32 & 0xfff00000) == 0) << 3) +- | (((hi32 & 0x000fffff) == 0) << 2) +- | (((lo32 & 0xfffff000) == 0) << 1) +- | ((lo32 & 0x00000fff) == 0); +- sign_bit_vec = (((hi32 & 0x80000000) != 0) << 3) +- | (((hi32 & 0x00080000) != 0) << 2) +- | (((lo32 & 0x80000000) != 0) << 1) +- | ((lo32 & 0x00000800) != 0); +- allf_bit_vec = (((hi32 & 0xfff00000) == 0xfff00000) << 3) +- | (((hi32 & 0x000fffff) == 0x000fffff) << 2) +- | (((lo32 & 0xfffff000) == 0xfffff000) << 1) +- | ((lo32 & 0x00000fff) == 0x00000fff); +- paritial_is_sext_of_prev = (all0_bit_vec ^ allf_bit_vec) +- & (all0_bit_vec ^ (sign_bit_vec << 1)); +- +- do +- { +- if (paritial_is_sext_of_prev == 0x7) +- { +- codes[0].code = UNKNOWN; +- codes[0].method = METHOD_LU52I; +- codes[0].value = value & 0xfff0000000000000; +- cost++; +- break; +- } +- if ((all0_bit_vec & 0x3) == 0x2) +- { +- codes[cost].code = UNKNOWN; +- codes[cost].method = METHOD_NORMAL; +- codes[cost].value = value & 0xfff; +- cost++; +- } +- else +- { +- switch (paritial_is_sext_of_prev & 0x3) +- { +- case 0: +- codes[cost].code = UNKNOWN; +- codes[cost].method = METHOD_NORMAL; +- codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000; +- cost++; +- codes[cost].code = IOR; +- codes[cost].method = METHOD_NORMAL; +- codes[cost].value = value & 0xfff; +- cost++; +- break; +- case 1: +- codes[cost].code = UNKNOWN; +- codes[cost].method = METHOD_NORMAL; +- codes[cost].value = ((HOST_WIDE_INT)value << 32 >> 32) & 0xfffffffffffff000; +- cost++; +- break; +- case 2: +- codes[cost].code = UNKNOWN; +- codes[cost].method = METHOD_NORMAL; +- codes[cost].value = (HOST_WIDE_INT)value << 52 >> 52; +- cost++; +- break; +- case 3: +- codes[cost].code = UNKNOWN; +- codes[cost].method = METHOD_NORMAL; +- codes[cost].value = 0; ++ /* Get the lower 32 bits of the value. 
*/
++  HOST_WIDE_INT low_part = (int32_t)value;
++
++  if (IMM12_OPERAND (low_part) || IMM12_OPERAND_UNSIGNED (low_part))
++    {
++      /* The value of the lower 32 bits can be loaded with one instruction.
++	 lu12i.w.  */
++      codes[0].code = UNKNOWN;
++      codes[0].method = METHOD_NORMAL;
++      codes[0].value = low_part;
++      cost++;
++    }
++  else
++    {
++      /* lu12i.w + ior.  */
++      codes[0].code = UNKNOWN;
++      codes[0].method = METHOD_NORMAL;
++      codes[0].value = low_part & ~(IMM_REACH - 1);
++      cost++;
++      HOST_WIDE_INT iorv = low_part & (IMM_REACH - 1);
++      if (iorv != 0)
++	{
++	  codes[1].code = IOR;
++	  codes[1].method = METHOD_NORMAL;
++	  codes[1].value = iorv;
+	  cost++;
+-	  break;
+-	default:
+-	  gcc_unreachable ();
+	}
+-    }
++    }
+
+-  if (((value & 0xfffffffffffff800) ^ 0xfff00000fffff800) == 0)
++  if (TARGET_64BIT)
++    {
++      bool lu32i[2] = {(value & LU32I_B) == 0, (value & LU32I_B) == LU32I_B};
++      bool lu52i[2] = {(value & LU52I_B) == 0, (value & LU52I_B) == LU52I_B};
++
++      int sign31 = (value & (HOST_WIDE_INT_1U << 31)) >> 31;
++      int sign51 = (value & (HOST_WIDE_INT_1U << 51)) >> 51;
++      /* Determine whether the upper 32 bits are sign-extended from the lower
++	 32 bits.  If they are, the instructions to load the high part can be
++	 omitted.  */
++      if (lu32i[sign31] && lu52i[sign31])
++	return cost;
++      /* Determine whether bits 32-51 are sign-extended from the lower 32
++	 bits.  If so, directly load bits 52-63.  */
++      else if (lu32i[sign31])
+	{
+-	  codes[cost].method = METHOD_INSV;
+-	  cost++;
+-	  break;
++	  codes[cost].method = METHOD_LU52I;
++	  codes[cost].value = value & LU52I_B;
++	  return cost + 1;
+	}
+
+-      switch (paritial_is_sext_of_prev >> 2)
+-	{
+-	case 0:
+-	  codes[cost].method = METHOD_LU32I;
+-	  codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000;
+-	  cost++;
+-	case 1:
++      codes[cost].method = METHOD_LU32I;
++      codes[cost].value = (value & LU32I_B) | (sign51 ? LU52I_B : 0);
++      cost++;
++
++      /* Determine whether bits 52-63 are sign-extended from the lower bits,
++	 and if not, load bits 52-63.  */
++      if (!lu52i[(value & (HOST_WIDE_INT_1U << 51)) >> 51])
++	{
+	  codes[cost].method = METHOD_LU52I;
+-	  codes[cost].value = value & 0xfff0000000000000;
+-	  cost++;
+-	  break;
+-	case 2:
+-	  codes[cost].method = METHOD_LU32I;
+-	  codes[cost].value = ((HOST_WIDE_INT)value << 12 >> 12) & 0xffffffff00000000;
+-	  cost++;
+-	  break;
+-	case 3:
+-	  break;
+-	default:
+-	  gcc_unreachable ();
+-	}
++	  codes[cost].value = value & LU52I_B;
++	  cost++;
++	}
+    }
+-  while (0);
++
++  gcc_assert (cost <= LARCH_MAX_INTEGER_OPS);
+
+   return cost;
+ }
+-
++
+ /* Fill CODES with a sequence of rtl operations to load VALUE.
+    Return the number of operations needed.
+-   Split interger in loongarch_output_move. */
++   Split integer in loongarch_output_move.  */
+
+ static unsigned int
+ loongarch_integer_cost (HOST_WIDE_INT value)
+ {
+   struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS];
+-  return loongarch_build_integer(codes, value);
++  return loongarch_build_integer (codes, value);
+ }
+
+ /* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
+@@ -1785,14 +1846,13 @@ loongarch_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+ {
+   return loongarch_const_insns (x) > 0;
+ }
+-
+
+ /* Return true if X is a thread-local symbol.
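
The rewritten loongarch_build_integer above works from low bits to high: lu12i.w plus ori produce the sign-extended low 32 bits, and lu32i.d / lu52i.d are added only when the corresponding fields are not already the sign-extension of what has been built. The stand-alone sketch below models the instruction semantics and checks that the decomposition round-trips; the four helpers are a reconstruction of the architectural behaviour, not code from GCC, and for simplicity the low word always uses the two-instruction form that the patch shortcuts for 12-bit values.

    #include <stdint.h>
    #include <assert.h>

    static int64_t lu12i_w (int32_t si20)       /* rd = sext32 (si20 << 12) */
    { return (int64_t)(int32_t)((uint32_t) si20 << 12); }

    static int64_t ori (int64_t rj, uint32_t ui12)
    { return rj | (ui12 & 0xfff); }

    static int64_t lu32i_d (int64_t rd, int32_t si20)   /* keeps bits 31:0 */
    {
      int64_t sx = (int32_t)((uint32_t) si20 << 12) >> 12;  /* sext 20 bits */
      return (int64_t)((uint64_t)(uint32_t) rd | ((uint64_t) sx << 32));
    }

    static int64_t lu52i_d (int64_t rj, int32_t si12)   /* keeps bits 51:0 */
    {
      return (int64_t)(((uint64_t) rj & 0x000fffffffffffffULL)
                       | ((uint64_t)(si12 & 0xfff) << 52));
    }

    static int64_t build (int64_t value)
    {
      int32_t low = (int32_t) value;
      /* lu12i.w + ori give the sign-extended low 32 bits.  */
      int64_t r = ori (lu12i_w (low >> 12), (uint32_t) low & 0xfff);
      if (((value ^ r) >> 32) & 0xfffff)        /* bits 32..51 wrong?  */
        r = lu32i_d (r, (int32_t)((value >> 32) & 0xfffff));
      if ((value ^ r) >> 52)                    /* bits 52..63 wrong?  */
        r = lu52i_d (r, (int32_t)((uint64_t) value >> 52));
      return r;
    }

    int main (void)
    {
      int64_t tests[] = { 0, 1, -1, 0x7ff, -2048, 0x12345678,
                          (int64_t) 0x123456789abcdef0,
                          (int64_t) 0xfff0000000000fffULL };
      for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
        assert (build (tests[i]) == tests[i]);
      return 0;
    }
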
*/ + + static bool + loongarch_tls_symbol_p (rtx x) + { +- return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0; ++ return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0; + } + + /* Return true if SYMBOL_REF X is associated with a global symbol +@@ -1809,9 +1869,6 @@ loongarch_global_symbol_p (const_rtx x) + if (!decl) + return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); + +- /* Weakref symbols are not TREE_PUBLIC, but their targets are global +- or weak symbols. Relocations in the object file will be against +- the target symbol, so it's that symbol's binding that matters here. */ + return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl)); + } + +@@ -1826,9 +1883,6 @@ loongarch_global_symbol_noweak_p (const_rtx x) + if (!decl) + return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x); + +- /* Weakref symbols are not TREE_PUBLIC, but their targets are global +- or weak symbols. Relocations in the object file will be against +- the target symbol, so it's that symbol's binding that matters here. */ + return DECL_P (decl) && TREE_PUBLIC (decl); + } + +@@ -1841,7 +1895,6 @@ loongarch_weak_symbol_p (const_rtx x) + return DECL_P (decl) && DECL_WEAK (decl); + } + +- + /* Return true if SYMBOL_REF X binds locally. */ + + bool +@@ -1850,9 +1903,8 @@ loongarch_symbol_binds_local_p (const_rtx x) + if (GET_CODE (x) == LABEL_REF) + return false; + +- return (SYMBOL_REF_DECL (x) +- ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) +- : SYMBOL_REF_LOCAL_P (x)); ++ return (SYMBOL_REF_DECL (x) ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) ++ : SYMBOL_REF_LOCAL_P (x)); + } + + /* Return true if OP is a constant vector with the number of units in MODE, +@@ -1995,38 +2047,34 @@ loongarch_const_vector_shuffle_set_p (rtx op, machine_mode mode) + static bool + loongarch_rtx_constant_in_small_data_p (machine_mode mode) + { +- return (GET_MODE_SIZE (mode) <= loongarch_small_data_threshold); ++ return (GET_MODE_SIZE (mode) <= g_switch_value); + } + + /* Return the method that should be used to access SYMBOL_REF or +- LABEL_REF X in context CONTEXT. */ ++ LABEL_REF X. */ + + static enum loongarch_symbol_type +-loongarch_classify_symbol (const_rtx x, enum loongarch_symbol_context context) ++loongarch_classify_symbol (const_rtx x) + { +- if (TARGET_RTP_PIC) +- return SYMBOL_GOT_DISP; +- + if (GET_CODE (x) == LABEL_REF) +- { +- return SYMBOL_GOT_DISP; +- } ++ return SYMBOL_GOT_DISP; + +- gcc_assert (GET_CODE (x) == SYMBOL_REF); ++ gcc_assert (SYMBOL_REF_P (x)); + + if (SYMBOL_REF_TLS_MODEL (x)) + return SYMBOL_TLS; + +- if (GET_CODE (x) == SYMBOL_REF) ++ if (SYMBOL_REF_P (x)) + return SYMBOL_GOT_DISP; ++ ++ return SYMBOL_GOT_DISP; + } + +-/* Return true if X is a symbolic constant that can be used in context +- CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */ ++/* Return true if X is a symbolic constant. If it is, ++ store the type of the symbol in *SYMBOL_TYPE. 
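
The predicates above sort symbols by binding: thread-local names, globals that another object may preempt, weak definitions, and locally-bound data, with loongarch_classify_symbol funnelling every non-TLS reference to SYMBOL_GOT_DISP. For reference, the source-level constructs those predicates probe look like this (illustrative only; under -fPIC the preemptible globals are the ones reached through the GOT):

    int exported_var;                       /* TREE_PUBLIC: may be preempted */
    static int local_var;                   /* binds locally */
    int weak_var __attribute__ ((weak));    /* DECL_WEAK */
    __thread int tls_var;                   /* SYMBOL_REF_TLS_MODEL != 0 */

    int *addr_exported (void) { return &exported_var; }
    int *addr_local (void)    { return &local_var; }
    int *addr_weak (void)     { return &weak_var; }
    int *addr_tls (void)      { return &tls_var; }
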
*/ + + bool +-loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, +- enum loongarch_symbol_type *symbol_type) ++loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type) + { + rtx offset; + +@@ -2036,9 +2084,9 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, + *symbol_type = UNSPEC_ADDRESS_TYPE (x); + x = UNSPEC_ADDRESS (x); + } +- else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) ++ else if (SYMBOL_REF_P (x) || GET_CODE (x) == LABEL_REF) + { +- *symbol_type = loongarch_classify_symbol (x, context); ++ *symbol_type = loongarch_classify_symbol (x); + if (*symbol_type == SYMBOL_TLS) + return true; + } +@@ -2052,8 +2100,6 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, + relocations. */ + switch (*symbol_type) + { +- /* Fall through. */ +- + case SYMBOL_GOT_DISP: + case SYMBOL_TLSGD: + case SYMBOL_TLSLDM: +@@ -2062,17 +2108,25 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_context context, + } + gcc_unreachable (); + } +- +-/* Like loongarch_symbol_insns We rely on the fact that, in the worst case. */ ++ ++/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed ++ to load symbols of type TYPE into a register. Return 0 if the given ++ type of symbol cannot be used as an immediate operand. ++ ++ Otherwise, return the number of instructions needed to load or store ++ values of mode MODE to or from addresses of type TYPE. Return 0 if ++ the given type of symbol is not valid in addresses. */ + + static int +-loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) ++loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + { +- if (loongarch_use_pcrel_pool_p[(int) type]) +- { +- /* The constant must be loaded and then dereferenced. */ +- return 0; +- } ++ /* LSX LD.* and ST.* cannot support loading symbols via an immediate ++ operand. */ ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ return 0; ++ ++ if (LASX_SUPPORTED_MODE_P (mode)) ++ return 0; + + switch (type) + { +@@ -2082,8 +2136,6 @@ loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) + if (mode != MAX_MACHINE_MODE) + return 0; + +- /* Fall through. */ +- + return 3; + + case SYMBOL_TLSGD: +@@ -2097,30 +2149,6 @@ loongarch_symbol_insns_1 (enum loongarch_symbol_type type, machine_mode mode) + gcc_unreachable (); + } + +-/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed +- to load symbols of type TYPE into a register. Return 0 if the given +- type of symbol cannot be used as an immediate operand. +- +- Otherwise, return the number of instructions needed to load or store +- values of mode MODE to or from addresses of type TYPE. Return 0 if +- the given type of symbol is not valid in addresses. +- +- In both cases, instruction counts are based off BASE_INSN_LENGTH. */ +- +-static int +-loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) +-{ +- /* LSX LD.* and ST.* cannot support loading symbols via an immediate +- operand. */ +- if (LSX_SUPPORTED_MODE_P (mode)) +- return 0; +- +- if (LASX_SUPPORTED_MODE_P (mode)) +- return 0; +- +- return loongarch_symbol_insns_1 (type, mode) * (1); +-} +- + /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */ + + static bool +@@ -2129,11 +2157,6 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) + enum loongarch_symbol_type type; + rtx base, offset; + +- /* There is no assembler syntax for expressing an address-sized +- high part. 
*/ +- if (GET_CODE (x) == HIGH) +- return true; +- + /* As an optimization, reject constants that loongarch_legitimize_move + can expand inline. + +@@ -2147,16 +2170,12 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) + return true; + + split_const (x, &base, &offset); +- if (loongarch_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)) ++ if (loongarch_symbolic_constant_p (base, &type)) + { +- /* See whether we explicitly want these symbols in the pool. */ +- if (loongarch_use_pcrel_pool_p[(int) type]) +- return false; +- + /* The same optimization as for CONST_INT. */ +- if (SMALL_INT (offset) && loongarch_symbol_insns (type, MAX_MACHINE_MODE) > 0) ++ if (IMM12_INT (offset) ++ && loongarch_symbol_insns (type, MAX_MACHINE_MODE) > 0) + return true; +- + } + + /* TLS symbols must be computed by loongarch_legitimize_move. */ +@@ -2166,22 +2185,13 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x) + return false; + } + +-/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for +- constants when we're using a per-function constant pool. */ +- +-static bool +-loongarch_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED, +- const_rtx x ATTRIBUTE_UNUSED) +-{ +- return 1; +-} +- + /* Return true if register REGNO is a valid base register for mode MODE. + STRICT_P is true if REG_OK_STRICT is in effect. */ + + int +-loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, +- bool strict_p) ++loongarch_regno_mode_ok_for_base_p (int regno, ++ machine_mode mode ATTRIBUTE_UNUSED, ++ bool strict_p) + { + if (!HARD_REGISTER_NUM_P (regno)) + { +@@ -2196,7 +2206,6 @@ loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, + if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM) + return true; + +- + return GP_REG_P (regno); + } + +@@ -2206,7 +2215,7 @@ loongarch_regno_mode_ok_for_base_p (int regno, machine_mode mode, + static bool + loongarch_valid_base_register_p (rtx x, machine_mode mode, bool strict_p) + { +- if (!strict_p && GET_CODE (x) == SUBREG) ++ if (!strict_p && SUBREG_P (x)) + x = SUBREG_REG (x); + + return (REG_P (x) +@@ -2220,8 +2229,8 @@ static bool + loongarch_valid_offset_p (rtx x, machine_mode mode) + { + /* Check that X is a signed 12-bit number, +- * or check that X is a signed 16-bit number +- * and offset 4 byte aligned */ ++ or check that X is a signed 16-bit number ++ and offset 4 byte aligned. */ + if (!(const_arith_operand (x, Pmode) + || ((mode == E_SImode || mode == E_DImode) + && const_imm16_operand (x, Pmode) +@@ -2231,7 +2240,7 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) + /* We may need to split multiword moves, so make sure that every word + is accessible. */ + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD +- && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) ++ && !IMM12_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) + return false; + + /* LSX LD.* and ST.* supports 10-bit signed offsets. 
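
loongarch_valid_offset_p, just above, accepts a plain signed 12-bit offset (the ld.d/st.d form) or, for 32- and 64-bit accesses, a signed 16-bit offset that is 4-byte aligned (the ldptr/stptr form), and additionally requires the last word of a multiword value to stay reachable. Here is a simplified stand-alone mirror of that check for integer modes; the LSX/LASX 10-bit cases and the mode restriction on the 16-bit form are omitted.

    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    #define UNITS_PER_WORD 8            /* 64-bit target assumed */

    static bool imm12_ok (int64_t v) { return v >= -2048 && v <= 2047; }
    static bool imm16_ok (int64_t v) { return v >= -32768 && v <= 32767; }

    static bool valid_offset (int64_t off, int mode_size)
    {
      /* 12-bit form, or 4-aligned 16-bit form.  */
      if (!(imm12_ok (off) || (imm16_ok (off) && (off & 3) == 0)))
        return false;
      /* A multiword move may be split; every word must stay reachable.  */
      if (mode_size > UNITS_PER_WORD
          && !imm12_ok (off + mode_size - UNITS_PER_WORD))
        return false;
      return true;
    }

    int main (void)
    {
      assert (valid_offset (2047, 8));
      assert (valid_offset (-32768, 8));   /* 16-bit and 4-aligned */
      assert (!valid_offset (-32767, 8));  /* 16-bit but misaligned */
      assert (!valid_offset (32768, 8));
      assert (!valid_offset (2044, 16));   /* second word out of reach */
      return 0;
    }
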
*/ +@@ -2248,13 +2257,42 @@ loongarch_valid_offset_p (rtx x, machine_mode mode) + return true; + } + ++static bool ++loongarch_valid_index_p (struct loongarch_address_info *info, rtx x, ++ machine_mode mode, bool strict_p) ++{ ++ rtx index; ++ ++ if ((REG_P (x) || SUBREG_P (x)) ++ && GET_MODE (x) == Pmode) ++ { ++ index = x; ++ } ++ else ++ return false; ++ ++ if (!strict_p ++ && SUBREG_P (index) ++ && contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))]) ++ index = SUBREG_REG (index); ++ ++ if (loongarch_valid_base_register_p (index, mode, strict_p)) ++ { ++ info->type = ADDRESS_REG_REG; ++ info->offset = index; ++ return true; ++ } ++ ++ return false; ++} ++ + /* Return true if X is a valid address for machine mode MODE. If it is, + fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in + effect. */ + + static bool + loongarch_classify_address (struct loongarch_address_info *info, rtx x, +- machine_mode mode, bool strict_p) ++ machine_mode mode, bool strict_p) + { + switch (GET_CODE (x)) + { +@@ -2266,21 +2304,26 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x, + return loongarch_valid_base_register_p (info->reg, mode, strict_p); + + case PLUS: ++/* ++ if (loongarch_valid_base_register_p (XEXP (x, 0), mode, strict_p) ++ && loongarch_valid_index_p (info, XEXP (x, 1), mode, strict_p)) ++ { ++ info->reg = XEXP (x, 0); ++ return true; ++ } ++ ++ if (loongarch_valid_base_register_p (XEXP (x, 1), mode, strict_p) ++ && loongarch_valid_index_p (info, XEXP (x, 0), mode, strict_p)) ++ { ++ info->reg = XEXP (x, 1); ++ return true; ++ } ++*/ + info->type = ADDRESS_REG; + info->reg = XEXP (x, 0); + info->offset = XEXP (x, 1); + return (loongarch_valid_base_register_p (info->reg, mode, strict_p) + && loongarch_valid_offset_p (info->offset, mode)); +- #if 0 +- case LABEL_REF: +- case SYMBOL_REF: +- info->type = ADDRESS_SYMBOLIC; +- return (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM, +- &info->symbol_type) +- && loongarch_symbol_insns (info->symbol_type, mode) > 0 +- && !loongarch_split_p[info->symbol_type]); +- +- #endif + default: + return false; + } +@@ -2296,39 +2339,21 @@ loongarch_legitimate_address_p (machine_mode mode, rtx x, bool strict_p) + return loongarch_classify_address (&addr, x, mode, strict_p); + } + +-/* Return true if X is a legitimate $sp-based address for mode MODE. */ +- +-bool +-loongarch_stack_address_p (rtx x, machine_mode mode) +-{ +- struct loongarch_address_info addr; +- +- return (loongarch_classify_address (&addr, x, mode, false) +- && addr.type == ADDRESS_REG +- && addr.reg == stack_pointer_rtx); +-} +- +-/* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load +- indexed address instruction. Note that such addresses are +- not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P +- sense, because their use is so restricted. */ ++/* Return true if ADDR matches the pattern for the indexed address ++ instruction. */ + + static bool +-loongarch_lx_address_p (rtx addr, machine_mode mode) ++loongarch_index_address_p (rtx addr, machine_mode mode ATTRIBUTE_UNUSED) + { + if (GET_CODE (addr) != PLUS + || !REG_P (XEXP (addr, 0)) + || !REG_P (XEXP (addr, 1))) + return false; +- if (LSX_SUPPORTED_MODE_P (mode)) +- return true; +- return false; ++ return true; + } +- + + /* Return the number of instructions needed to load or store a value +- of mode MODE at address X, assuming that BASE_INSN_LENGTH is the +- length of one instruction. Return 0 if X isn't valid for MODE. ++ of mode MODE at address X. 
Return 0 if X isn't valid for MODE. + Assume that multiword moves may need to be split into word moves + if MIGHT_SPLIT_P, otherwise assume that a single load or store is + enough. */ +@@ -2338,7 +2363,8 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + { + struct loongarch_address_info addr; + int factor; +- bool lsx_p = (!might_split_p && (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))); ++ bool lsx_p = (!might_split_p && ++ (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))); + + if (!loongarch_classify_address (&addr, x, mode, false)) + return 0; +@@ -2367,6 +2393,9 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + } + return factor; + ++ case ADDRESS_REG_REG: ++ return lsx_p ? 0 : factor; ++ + case ADDRESS_CONST_INT: + return lsx_p ? 0 : factor; + +@@ -2380,7 +2409,8 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p) + shifted left SHIFT bits before being used. */ + + bool +-loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) ++loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, ++ int shift = 0) + { + return (x & ((1 << shift) - 1)) == 0 && x < ((unsigned) 1 << (shift + bits)); + } +@@ -2389,7 +2419,8 @@ loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = + shifted left SHIFT bits before being used. */ + + bool +-loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0) ++loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, ++ int shift = 0) + { + x += 1 << (bits + shift - 1); + return loongarch_unsigned_immediate_p (x, bits, shift); +@@ -2408,20 +2439,6 @@ loongarch_ldst_scaled_shift (machine_mode mode) + return shift; + } + +-/* Return true if X is a legitimate address that conforms to the requirements +- for a microLARCH LWSP or SWSP insn. */ +- +-bool +-lwsp_swsp_address_p (rtx x, machine_mode mode) +-{ +- struct loongarch_address_info addr; +- +- return (loongarch_classify_address (&addr, x, mode, false) +- && addr.type == ADDRESS_REG +- && REGNO (addr.reg) == STACK_POINTER_REGNUM +- && uw5_operand (addr.offset, mode)); +-} +- + /* Return true if X is a legitimate address with a 12-bit offset. + MODE is the mode of the value being accessed. */ + +@@ -2433,54 +2450,47 @@ loongarch_12bit_offset_address_p (rtx x, machine_mode mode) + return (loongarch_classify_address (&addr, x, mode, false) + && addr.type == ADDRESS_REG + && CONST_INT_P (addr.offset) +- && ULARCH_12BIT_OFFSET_P (INTVAL (addr.offset))); ++ && LARCH_U12BIT_OFFSET_P (INTVAL (addr.offset))); + } + +-/* Return true if X is a legitimate address with a 9-bit offset. ++/* Return true if X is a legitimate address with a 14-bit offset shifted 2. + MODE is the mode of the value being accessed. */ + + bool +-loongarch_9bit_offset_address_p (rtx x, machine_mode mode) ++loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) + { + struct loongarch_address_info addr; + + return (loongarch_classify_address (&addr, x, mode, false) + && addr.type == ADDRESS_REG + && CONST_INT_P (addr.offset) +- && LARCH_9BIT_OFFSET_P (INTVAL (addr.offset))); ++ && LARCH_16BIT_OFFSET_P (INTVAL (addr.offset)) ++ && LARCH_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); + } + +-/* Return true if X is a legitimate address with a 14-bit offset shifted 2. +- MODE is the mode of the value being accessed. 
*/ +- + bool +-loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode) ++loongarch_base_index_address_p (rtx x, machine_mode mode) + { + struct loongarch_address_info addr; + + return (loongarch_classify_address (&addr, x, mode, false) +- && addr.type == ADDRESS_REG +- && CONST_INT_P (addr.offset) +- && LISA_16BIT_OFFSET_P (INTVAL (addr.offset)) +- && LISA_SHIFT_2_OFFSET_P (INTVAL (addr.offset))); ++ && addr.type == ADDRESS_REG_REG ++ && REG_P (addr.offset)); + } + +- + /* Return the number of instructions needed to load constant X, +- assuming that BASE_INSN_LENGTH is the length of one instruction. + Return 0 if X isn't a valid constant. */ + + int + loongarch_const_insns (rtx x) + { +- struct loongarch_integer_op codes[LARCH_MAX_INTEGER_OPS]; + enum loongarch_symbol_type symbol_type; + rtx offset; + + switch (GET_CODE (x)) + { + case CONST_INT: +- return loongarch_build_integer (codes, INTVAL (x)); ++ return loongarch_integer_cost (INTVAL (x)); + + case CONST_VECTOR: + if ((ISA_HAS_LSX || ISA_HAS_LASX) +@@ -2488,19 +2498,18 @@ loongarch_const_insns (rtx x) + return 1; + /* Fall through. */ + case CONST_DOUBLE: +- /* Allow zeros for normal mode, where we can use $0. */ + return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0; + + case CONST: + /* See if we can refer to X directly. */ +- if (loongarch_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type)) ++ if (loongarch_symbolic_constant_p (x, &symbol_type)) + return loongarch_symbol_insns (symbol_type, MAX_MACHINE_MODE); + + /* Otherwise try splitting the constant into a base and offset. +- If the offset is a 16-bit value, we can load the base address +- into a register and then use (D)ADDIU to add in the offset. ++ If the offset is a 12-bit value, we can load the base address ++ into a register and then use ADDI.{W/D} to add in the offset. + If the offset is larger, we can load the base and offset +- into separate registers and add them together with (D)ADDU. ++ into separate registers and add them together with ADD.{W/D}. + However, the latter is only possible before reload; during + and after reload, we must have the option of forcing the + constant into the pool instead. */ +@@ -2510,18 +2519,18 @@ loongarch_const_insns (rtx x) + int n = loongarch_const_insns (x); + if (n != 0) + { +- if (SMALL_INT (offset)) ++ if (IMM12_INT (offset)) + return n + 1; + else if (!targetm.cannot_force_const_mem (GET_MODE (x), x)) +- return n + 1 + loongarch_build_integer (codes, INTVAL (offset)); ++ return n + 1 + loongarch_integer_cost (INTVAL (offset)); + } + } + return 0; + + case SYMBOL_REF: + case LABEL_REF: +- return loongarch_symbol_insns (loongarch_classify_symbol (x, SYMBOL_CONTEXT_LEA), +- MAX_MACHINE_MODE); ++ return loongarch_symbol_insns ( ++ loongarch_classify_symbol (x), MAX_MACHINE_MODE); + + default: + return 0; +@@ -2530,8 +2539,7 @@ loongarch_const_insns (rtx x) + + /* X is a doubleword constant that can be handled by splitting it into + two words and loading each word separately. Return the number of +- instructions required to do this, assuming that BASE_INSN_LENGTH +- is the length of one instruction. */ ++ instructions required to do this. */ + + int + loongarch_split_const_insns (rtx x) +@@ -2565,8 +2573,7 @@ loongarch_subword_at_byte (rtx op, unsigned int byte) + } + + /* Return the number of instructions needed to implement INSN, +- given that it loads from or stores to MEM. Assume that +- BASE_INSN_LENGTH is the length of one instruction. */ ++ given that it loads from or stores to MEM. 
*/ + + int + loongarch_load_store_insns (rtx mem, rtx_insn *insn) +@@ -2583,18 +2590,18 @@ loongarch_load_store_insns (rtx mem, rtx_insn *insn) + if (might_split_p) + { + set = single_set (insn); +- if (set && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn)) ++ if (set ++ && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set))) + might_split_p = false; + } + + return loongarch_address_insns (XEXP (mem, 0), mode, might_split_p); + } + +-/* Return the number of instructions needed for an integer division, +- assuming that BASE_INSN_LENGTH is the length of one instruction. */ ++/* Return the number of instructions needed for an integer division. */ + + int +-loongarch_idiv_insns (machine_mode mode) ++loongarch_idiv_insns (machine_mode mode ATTRIBUTE_UNUSED) + { + int count; + +@@ -2605,7 +2612,6 @@ loongarch_idiv_insns (machine_mode mode) + return count; + } + +- + /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */ + + void +@@ -2619,7 +2625,8 @@ loongarch_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1) + of mode MODE. Return that new register. */ + + static rtx +-loongarch_force_binary (machine_mode mode, enum rtx_code code, rtx op0, rtx op1) ++loongarch_force_binary (machine_mode mode, enum rtx_code code, rtx op0, ++ rtx op1) + { + rtx reg; + +@@ -2643,13 +2650,12 @@ loongarch_force_temporary (rtx dest, rtx value) + } + } + +- + /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE, + then add CONST_INT OFFSET to the result. */ + + static rtx + loongarch_unspec_address_offset (rtx base, rtx offset, +- enum loongarch_symbol_type symbol_type) ++ enum loongarch_symbol_type symbol_type) + { + base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), + UNSPEC_ADDRESS_FIRST + symbol_type); +@@ -2684,42 +2690,20 @@ loongarch_strip_unspec_address (rtx op) + return op; + } + +- +-/* Return a base register that holds pic_offset_table_rtx. +- TEMP, if nonnull, is a scratch Pmode base register. */ +- +-rtx +-loongarch_pic_base_register (rtx temp) +-{ +- return pic_offset_table_rtx; +- +-} +- +-/* If SRC is the RHS of a load_call insn, return the underlying symbol +- reference. Return NULL_RTX otherwise. */ +- +-static rtx +-loongarch_strip_unspec_call (rtx src) +-{ +- if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL) +- return loongarch_strip_unspec_address (XVECEXP (src, 0, 1)); +- return NULL_RTX; +-} +- + /* Return a legitimate address for REG + OFFSET. TEMP is as for + loongarch_force_temporary; it is only needed when OFFSET is not a +- SMALL_OPERAND. */ ++ IMM12_OPERAND. */ + + static rtx + loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset) + { +- if (!SMALL_OPERAND (offset)) ++ if (!IMM12_OPERAND (offset)) + { + rtx high; + +- /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. +- The addition inside the macro CONST_HIGH_PART may cause an +- overflow, so we need to force a sign-extension check. */ ++ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH. ++ The addition inside the macro CONST_HIGH_PART may cause an ++ overflow, so we need to force a sign-extension check. */ + high = gen_int_mode (CONST_HIGH_PART (offset), Pmode); + offset = CONST_LOW_PART (offset); + high = loongarch_force_temporary (temp, high); +@@ -2727,49 +2711,40 @@ loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset) + } + return plus_constant (Pmode, reg, offset); + } +- ++ + /* The __tls_get_attr symbol. 
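
loongarch_add_offset, just above, keeps only the low 12 bits in the addi-style immediate and moves the rounded remainder into a temporary; CONST_HIGH_PART rounds to the nearest multiple of 0x1000 so the low part is always signed. A small self-check of that split, with the two helpers written as a plausible reconstruction of the CONST_HIGH_PART/CONST_LOW_PART macros:

    #include <stdint.h>
    #include <assert.h>

    /* Round to the nearest 0x1000, and take the signed remainder.  */
    static int64_t high_part (int64_t x) { return (x + 0x800) & ~(int64_t) 0xfff; }
    static int64_t low_part (int64_t x)  { return x - high_part (x); }

    int main (void)
    {
      int64_t tests[] = { 0, 2047, 2048, -2048, -2049, 0x12345, -0x12345 };
      for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
        {
          int64_t x = tests[i];
          assert (high_part (x) + low_part (x) == x);   /* exact split */
          assert (low_part (x) >= -2048 && low_part (x) <= 2047);
          assert ((high_part (x) & 0xfff) == 0);        /* lu12i-loadable */
        }
      return 0;
    }
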
*/ +-static GTY(()) rtx loongarch_tls_symbol; ++static GTY (()) rtx loongarch_tls_symbol; + + /* Load an entry from the GOT for a TLS GD access. */ + +-static rtx loongarch_got_load_tls_gd (rtx dest, rtx sym) ++static rtx ++loongarch_got_load_tls_gd (rtx dest, rtx sym) + { +- if (Pmode == DImode) +- return gen_got_load_tls_gddi (dest, sym); +- else +- return gen_got_load_tls_gdsi (dest, sym); ++ return PMODE_INSN (gen_got_load_tls_gd, (dest, sym)); + } + + /* Load an entry from the GOT for a TLS LD access. */ + +-static rtx loongarch_got_load_tls_ld (rtx dest, rtx sym) ++static rtx ++loongarch_got_load_tls_ld (rtx dest, rtx sym) + { +- if (Pmode == DImode) +- return gen_got_load_tls_lddi (dest, sym); +- else +- return gen_got_load_tls_ldsi (dest, sym); ++ return PMODE_INSN (gen_got_load_tls_ld, (dest, sym)); + } + +- + /* Load an entry from the GOT for a TLS IE access. */ + +-static rtx loongarch_got_load_tls_ie (rtx dest, rtx sym) ++static rtx ++loongarch_got_load_tls_ie (rtx dest, rtx sym) + { +- if (Pmode == DImode) +- return gen_got_load_tls_iedi (dest, sym); +- else +- return gen_got_load_tls_iesi (dest, sym); ++ return PMODE_INSN (gen_got_load_tls_ie, (dest, sym)); + } + + /* Add in the thread pointer for a TLS LE access. */ + +-static rtx loongarch_got_load_tls_le (rtx dest, rtx sym) ++static rtx ++loongarch_got_load_tls_le (rtx dest, rtx sym) + { +- if (Pmode == DImode) +- return gen_got_load_tls_ledi (dest, sym); +- else +- return gen_got_load_tls_lesi (dest, sym); ++ return PMODE_INSN (gen_got_load_tls_le, (dest, sym)); + } + + /* Return an instruction sequence that calls __tls_get_addr. SYM is +@@ -2799,7 +2774,8 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + else + gcc_unreachable (); + +- insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, const0_rtx)); ++ insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol, ++ const0_rtx)); + RTL_CONST_CALL_P (insn) = 1; + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); + insn = get_insns (); +@@ -2820,12 +2796,6 @@ loongarch_legitimize_tls_address (rtx loc) + enum tls_model model = SYMBOL_REF_TLS_MODEL (loc); + rtx_insn *insn; + +- /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */ +- #if 0 +- if (!flag_pic) +- model = TLS_MODEL_LOCAL_EXEC; +- #endif +- + switch (model) + { + case TLS_MODEL_LOCAL_DYNAMIC: +@@ -2843,7 +2813,7 @@ loongarch_legitimize_tls_address (rtx loc) + break; + + case TLS_MODEL_INITIAL_EXEC: +- /* la.tls.ie; tp-relative add */ ++ /* la.tls.ie; tp-relative add */ + tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); + tmp = gen_reg_rtx (Pmode); + emit_insn (loongarch_got_load_tls_ie (tmp, loc)); +@@ -2852,7 +2822,7 @@ loongarch_legitimize_tls_address (rtx loc) + break; + + case TLS_MODEL_LOCAL_EXEC: +- /* la.tls.le; tp-relative add */ ++ /* la.tls.le; tp-relative add */ + tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); + tmp = gen_reg_rtx (Pmode); + emit_insn (loongarch_got_load_tls_le (tmp, loc)); +@@ -2865,7 +2835,7 @@ loongarch_legitimize_tls_address (rtx loc) + } + return dest; + } +- ++ + rtx + loongarch_legitimize_call_address (rtx addr) + { +@@ -2877,7 +2847,25 @@ loongarch_legitimize_call_address (rtx addr) + } + return addr; + } +- ++ ++/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR ++ and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. 
*/ ++ ++static void ++loongarch_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr) ++{ ++ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))) ++ { ++ *base_ptr = XEXP (x, 0); ++ *offset_ptr = INTVAL (XEXP (x, 1)); ++ } ++ else ++ { ++ *base_ptr = x; ++ *offset_ptr = 0; ++ } ++} ++ + /* If X is not a valid address for mode MODE, force it into a register. */ + + static rtx +@@ -2895,7 +2883,7 @@ loongarch_force_address (rtx x, machine_mode mode) + + static rtx + loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, +- machine_mode mode) ++ machine_mode mode) + { + rtx base, addr; + HOST_WIDE_INT offset; +@@ -2941,28 +2929,30 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value) + } + else + x = force_reg (mode, x); ++ + switch (codes[i].method) + { + case METHOD_NORMAL: +- x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value)); ++ x = gen_rtx_fmt_ee (codes[i].code, mode, x, ++ GEN_INT (codes[i].value)); + break; + case METHOD_LU32I: +- emit_insn (gen_rtx_SET (x, gen_rtx_IOR (DImode, +- gen_rtx_ZERO_EXTEND (DImode, +- gen_rtx_SUBREG (SImode, x, 0)), +- GEN_INT (codes[i].value)))); ++ emit_insn ( ++ gen_rtx_SET (x, ++ gen_rtx_IOR (DImode, ++ gen_rtx_ZERO_EXTEND ( ++ DImode, gen_rtx_SUBREG (SImode, x, 0)), ++ GEN_INT (codes[i].value)))); + break; + case METHOD_LU52I: +- emit_insn (gen_lu52i_d (x, x, +- GEN_INT (0xfffffffffffff), +- GEN_INT (codes[i].value))); ++ emit_insn (gen_lu52i_d (x, x, GEN_INT (0xfffffffffffff), ++ GEN_INT (codes[i].value))); + break; + case METHOD_INSV: +- emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, +- x, +- GEN_INT (20), +- GEN_INT (32)), +- gen_rtx_REG (DImode, 0))); ++ emit_insn ( ++ gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, x, GEN_INT (20), ++ GEN_INT (32)), ++ gen_rtx_REG (DImode, 0))); + break; + default: + gcc_unreachable (); +@@ -2997,7 +2987,7 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) + + /* If we have (const (plus symbol offset)), and that expression cannot + be forced into memory, load the symbol first and add in the offset. +- prefer to do this even if the constant _can_ be forced into memory, ++ prefer to do this even if the constant _can_ be forced into memory, + as it usually produces better code. */ + split_const (src, &base, &offset); + if (offset != const0_rtx +@@ -3005,7 +2995,8 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) + || (can_create_pseudo_p ()))) + { + base = loongarch_force_temporary (dest, base); +- loongarch_emit_move (dest, loongarch_add_offset (NULL, base, INTVAL (offset))); ++ loongarch_emit_move (dest, ++ loongarch_add_offset (NULL, base, INTVAL (offset))); + return; + } + +@@ -3020,7 +3011,6 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src) + bool + loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) + { +- + if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode)) + { + loongarch_emit_move (dest, force_reg (mode, src)); +@@ -3029,10 +3019,9 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) + + /* Both src and dest are non-registers; one special case is supported where + the source is (const_int 0) and the store can source the zero register. +- LSX and lasx are never able to source the zero register directly in ++ LSX and LASX are never able to source the zero register directly in + memory operations. 
*/ +- if (!register_operand (dest, mode) +- && !register_operand (src, mode) ++ if (!register_operand (dest, mode) && !register_operand (src, mode) + && (!const_0_operand (src, mode) + || LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))) + { +@@ -3049,40 +3038,26 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src) + return true; + } + +- if ((GET_CODE (src) == SYMBOL_REF || GET_CODE (src) == LABEL_REF) +- && symbolic_operand (src, VOIDmode) +- && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)) +- { +- rtx temp = gen_reg_rtx (GET_MODE (dest)); +- rtx x = gen_rtx_UNSPEC_VOLATILE (GET_MODE (dest), gen_rtvec (1, src), UNSPECV_MOVE_EXTREME); +- temp = gen_rtx_USE(VOIDmode, temp); +- temp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec(2, gen_rtx_SET (dest, x), temp)); +- emit_insn (temp); +- return true; +- } +- + return false; + } + +-/* Return true if OP refers to small data symbols directly, not through +- a LO_SUM. CONTEXT is the context in which X appears. */ ++/* Return true if OP refers to small data symbols directly. */ + + static int +-loongarch_small_data_pattern_1 (rtx x, enum loongarch_symbol_context context) ++loongarch_small_data_pattern_1 (rtx x) + { + subrtx_var_iterator::array_type array; + FOR_EACH_SUBRTX_VAR (iter, array, x, ALL) + { + rtx x = *iter; + +- /* Ignore things like "g" constraints in asms. We make no particular +- guarantee about which symbolic constants are acceptable as asm operands +- versus which must be forced into a GPR. */ ++ /* We make no particular guarantee about which symbolic constants are ++ acceptable as asm operands versus which must be forced into a GPR. */ + if (GET_CODE (x) == ASM_OPERANDS) + iter.skip_subrtxes (); + else if (MEM_P (x)) + { +- if (loongarch_small_data_pattern_1 (XEXP (x, 0), SYMBOL_CONTEXT_MEM)) ++ if (loongarch_small_data_pattern_1 (XEXP (x, 0))) + return true; + iter.skip_subrtxes (); + } +@@ -3090,20 +3065,19 @@ loongarch_small_data_pattern_1 (rtx x, enum loongarch_symbol_context context) + return false; + } + +-/* Return true if OP refers to small data symbols directly, not through +- a LO_SUM. */ ++/* Return true if OP refers to small data symbols directly. */ + + bool + loongarch_small_data_pattern_p (rtx op) + { +- return loongarch_small_data_pattern_1 (op, SYMBOL_CONTEXT_LEA); ++ return loongarch_small_data_pattern_1 (op); + } + + /* Rewrite *LOC so that it refers to small data using explicit +- relocations. CONTEXT is the context in which *LOC appears. */ ++ relocation. */ + + static void +-loongarch_rewrite_small_data_1 (rtx *loc, enum loongarch_symbol_context context) ++loongarch_rewrite_small_data_1 (rtx *loc) + { + subrtx_ptr_iterator::array_type array; + FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL) +@@ -3111,7 +3085,7 @@ loongarch_rewrite_small_data_1 (rtx *loc, enum loongarch_symbol_context context) + rtx *loc = *iter; + if (MEM_P (*loc)) + { +- loongarch_rewrite_small_data_1 (&XEXP (*loc, 0), SYMBOL_CONTEXT_MEM); ++ loongarch_rewrite_small_data_1 (&XEXP (*loc, 0)); + iter.skip_subrtxes (); + } + } +@@ -3124,15 +3098,15 @@ rtx + loongarch_rewrite_small_data (rtx pattern) + { + pattern = copy_insn (pattern); +- loongarch_rewrite_small_data_1 (&pattern, SYMBOL_CONTEXT_LEA); ++ loongarch_rewrite_small_data_1 (&pattern); + return pattern; + } +- ++ + /* The cost of loading values from the constant pool. It should be + larger than the cost of any constant we want to synthesize inline. 
*/
+ #define CONSTANT_POOL_COST COSTS_N_INSNS (8)
+
+-/* Return true if there is a instruction that implements CODE
++/* Return true if there is an instruction that implements CODE
+    and if that instruction accepts X as an immediate operand.  */
+
+ static int
+@@ -3148,20 +3122,19 @@
+
+     case ROTATE:
+     case ROTATERT:
+-      /* Likewise rotates, if the target supports rotates at all.  */
+       return true;
+
+     case AND:
+     case IOR:
+     case XOR:
+       /* These instructions take 12-bit unsigned immediates.  */
+-      return SMALL_OPERAND_UNSIGNED (x);
++      return IMM12_OPERAND_UNSIGNED (x);
+
+     case PLUS:
+     case LT:
+     case LTU:
+       /* These instructions take 12-bit signed immediates.  */
+-      return SMALL_OPERAND (x);
++      return IMM12_OPERAND (x);
+
+     case EQ:
+     case NE:
+@@ -3178,11 +3151,11 @@
+
+     case LE:
+       /* We add 1 to the immediate and use SLT.  */
+-      return SMALL_OPERAND (x + 1);
++      return IMM12_OPERAND (x + 1);
+
+     case LEU:
+       /* Likewise SLTU, but reject the always-true case.  */
+-      return SMALL_OPERAND (x + 1) && x + 1 != 0;
++      return IMM12_OPERAND (x + 1) && x + 1 != 0;
+
+     case SIGN_EXTRACT:
+     case ZERO_EXTRACT:
+@@ -3219,7 +3192,8 @@
+ static int
+ loongarch_fp_mult_cost (machine_mode mode)
+ {
+-  return mode == DFmode ? loongarch_cost->fp_mult_df : loongarch_cost->fp_mult_sf;
++  return mode == DFmode ? loongarch_cost->fp_mult_df
++			: loongarch_cost->fp_mult_sf;
+ }
+
+ /* Return the cost of floating-point divisions of mode MODE.  */
+@@ -3227,23 +3201,20 @@
+ static int
+ loongarch_fp_div_cost (machine_mode mode)
+ {
+-  return mode == DFmode ? loongarch_cost->fp_div_df : loongarch_cost->fp_div_sf;
++  return mode == DFmode ? loongarch_cost->fp_div_df
++			: loongarch_cost->fp_div_sf;
+ }
+
+ /* Return the cost of sign-extending OP to mode MODE, not including the
+    cost of OP itself.  */
+
+ static int
+-loongarch_sign_extend_cost (machine_mode mode, rtx op)
++loongarch_sign_extend_cost (rtx op)
+ {
+   if (MEM_P (op))
+     /* Extended loads are as cheap as unextended ones.  */
+     return 0;
+
+-  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
+-    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
+-    return 0;
+-
+   return COSTS_N_INSNS (1);
+ }
+
+@@ -3251,16 +3222,12 @@
+    cost of OP itself.  */
+
+ static int
+-loongarch_zero_extend_cost (machine_mode mode, rtx op)
++loongarch_zero_extend_cost (rtx op)
+ {
+   if (MEM_P (op))
+     /* Extended loads are as cheap as unextended ones.  */
+     return 0;
+
+-  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
+-    /* We need a shift left by 32 bits and a shift right by 32 bits.  */
+-    return COSTS_N_INSNS (2);
+-
+   /* We can use ANDI.  */
+   return COSTS_N_INSNS (1);
+ }
+@@ -3281,15 +3248,15 @@
+ {
+   switch (GET_MODE_CLASS (mode))
+     {
+-    case MODE_FCC:
+-      return loongarch_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (FCCmode));
++    case MODE_CC:
++      return loongarch_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (CCmode));
+
+     case MODE_FLOAT:
+     case MODE_COMPLEX_FLOAT:
+     case MODE_VECTOR_FLOAT:
+       if (TARGET_HARD_FLOAT)
+	 return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE);
+-      /* Fall through */
++      /* Fall through. 
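
Two details in loongarch_immediate_operand_p above deserve a worked example: "x <= C" is rewritten as "x < C + 1" so slti can be used, and the unsigned variant must reject C + 1 wrapping to zero, since "x <= UINT_MAX" is always true and cannot be expressed that way. A stand-alone check of both facts:

    #include <stdint.h>
    #include <assert.h>

    int main (void)
    {
      /* The signed rewrite is exact whenever C + 1 does not overflow.  */
      for (int64_t c = -2049; c <= 2047; c++)
        for (int64_t x = c - 2; x <= c + 2; x++)
          assert ((x <= c) == (x < c + 1));

      /* The unsigned rewrite breaks exactly when C + 1 wraps to 0,
         hence the "x + 1 != 0" guard in the LEU case.  */
      uint64_t c = UINT64_MAX;
      assert ((uint64_t) 5 <= c);          /* always true ... */
      assert (!((uint64_t) 5 < c + 1));    /* ... but the rewrite says no */
      return 0;
    }
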
*/ + + default: + return loongarch_set_reg_reg_piece_cost (mode, UNITS_PER_WORD); +@@ -3300,20 +3267,13 @@ loongarch_set_reg_reg_cost (machine_mode mode) + + static bool + loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, +- int opno ATTRIBUTE_UNUSED, int *total, bool speed) ++ int opno ATTRIBUTE_UNUSED, int *total, bool speed) + { + int code = GET_CODE (x); + bool float_mode_p = FLOAT_MODE_P (mode); + int cost; + rtx addr; + +- /* The cost of a COMPARE is hard to define for LARCH. COMPAREs don't +- appear in the instruction stream, and the cost of a comparison is +- really the cost of the branch or scc condition. At the time of +- writing, GCC only uses an explicit outer COMPARE code when optabs +- is testing whether a constant is expensive enough to force into a +- register. We want optabs to pass such constants through the LARCH +- expanders instead, so make all constants very cheap here. */ + if (outer_code == COMPARE) + { + gcc_assert (CONSTANT_P (x)); +@@ -3324,68 +3284,34 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + switch (code) + { + case CONST_INT: +- /* Treat *clear_upper32-style ANDs as having zero cost in the +- second operand. The cost is entirely in the first operand. +- +- ??? This is needed because we would otherwise try to CSE +- the constant operand. Although that's the right thing for +- instructions that continue to be a register operation throughout +- compilation, it is disastrous for instructions that could +- later be converted into a memory operation. */ +- if (TARGET_64BIT +- && outer_code == AND +- && UINTVAL (x) == 0xffffffff) ++ if (TARGET_64BIT && outer_code == AND && UINTVAL (x) == 0xffffffff) + { + *total = 0; + return true; + } + +- /* When not optimizing for size, we care more about the cost +- of hot code, and hot code is often in a loop. If a constant +- operand needs to be forced into a register, we will often be +- able to hoist the constant load out of the loop, so the load +- should not contribute to the cost. */ +- if (speed || loongarch_immediate_operand_p (outer_code, INTVAL (x))) +- { +- *total = 0; +- return true; +- } ++ /* When not optimizing for size, we care more about the cost ++ of hot code, and hot code is often in a loop. If a constant ++ operand needs to be forced into a register, we will often be ++ able to hoist the constant load out of the loop, so the load ++ should not contribute to the cost. */ ++ if (speed || loongarch_immediate_operand_p (outer_code, INTVAL (x))) ++ { ++ *total = 0; ++ return true; ++ } + /* Fall through. */ + + case CONST: + case SYMBOL_REF: + case LABEL_REF: + case CONST_DOUBLE: +- if (force_to_mem_operand (x, VOIDmode)) +- { +- *total = COSTS_N_INSNS (1); +- return true; +- } + cost = loongarch_const_insns (x); + if (cost > 0) + { +- /* If the constant is likely to be stored in a GPR, SETs of +- single-insn constants are as cheap as register sets; we +- never want to CSE them. +- +- Don't reduce the cost of storing a floating-point zero in +- FPRs. If we have a zero in an FPR for other reasons, we +- can get better cfg-cleanup and delayed-branch results by +- using it consistently, rather than using $0 sometimes and +- an FPR at other times. Also, moves between floating-point +- registers are sometimes cheaper than MOVGR2FR.W/MOVGR2FR.D $0. */ +- if (cost == 1 +- && outer_code == SET ++ if (cost == 1 && outer_code == SET + && !(float_mode_p && TARGET_HARD_FLOAT)) + cost = 0; +- /* When code loads a constant N>1 times, we rarely +- want to CSE the constant itself. 
It is usually better to +- have N copies of the last operation in the sequence and one +- shared copy of the other operations. +- +- Also, if we have a CONST_INT, we don't know whether it is +- for a word or doubleword operation, so we cannot rely on +- the result of loongarch_build_integer. */ + else if ((outer_code == SET || GET_MODE (x) == VOIDmode)) + cost = 1; + *total = COSTS_N_INSNS (cost); +@@ -3399,16 +3325,16 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + /* If the address is legitimate, return the number of + instructions it needs. */ + addr = XEXP (x, 0); +- cost = loongarch_address_insns (addr, mode, true); +- if (cost > 0) ++ /* Check for a scaled indexed address. */ ++ if (loongarch_index_address_p (addr, mode)) + { +- *total = COSTS_N_INSNS (cost + 1); ++ *total = COSTS_N_INSNS (2); + return true; + } +- /* Check for a scaled indexed address. */ +- if (loongarch_lx_address_p (addr, mode)) ++ cost = loongarch_address_insns (addr, mode, true); ++ if (cost > 0) + { +- *total = COSTS_N_INSNS (2); ++ *total = COSTS_N_INSNS (cost + 1); + return true; + } + /* Otherwise use the default handling. */ +@@ -3425,34 +3351,31 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + case AND: + /* Check for a *clear_upper32 pattern and treat it like a zero + extension. See the pattern's comment for details. */ +- if (TARGET_64BIT +- && mode == DImode +- && CONST_INT_P (XEXP (x, 1)) ++ if (TARGET_64BIT && mode == DImode && CONST_INT_P (XEXP (x, 1)) + && UINTVAL (XEXP (x, 1)) == 0xffffffff) + { +- *total = (loongarch_zero_extend_cost (mode, XEXP (x, 0)) ++ *total = (loongarch_zero_extend_cost (XEXP (x, 0)) + + set_src_cost (XEXP (x, 0), mode, speed)); + return true; + } + /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in + a single instruction. */ +- if (GET_CODE (XEXP (x, 0)) == NOT +- && GET_CODE (XEXP (x, 1)) == NOT) ++ if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT) + { + cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1; +- *total = (COSTS_N_INSNS (cost) ++ *total = (COSTS_N_INSNS (cost) + + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed) + + set_src_cost (XEXP (XEXP (x, 1), 0), mode, speed)); + return true; + } +- ++ + /* Fall through. */ + + case IOR: + case XOR: + /* Double-word operations use two single-word operations. 
*/
+       *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
+-				       speed);
++				      speed);
+       return true;
+
+     case ASHIFT:
+@@ -3461,18 +3384,18 @@
+     case ROTATE:
+     case ROTATERT:
+       if (CONSTANT_P (XEXP (x, 1)))
+-	*total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
+-					speed);
++	*total = loongarch_binary_cost (x, COSTS_N_INSNS (1),
++					COSTS_N_INSNS (4), speed);
+       else
+-	*total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
+-					speed);
++	*total = loongarch_binary_cost (x, COSTS_N_INSNS (1),
++					COSTS_N_INSNS (12), speed);
+       return true;
+
+     case ABS:
+       if (float_mode_p)
+-	 *total = loongarch_cost->fp_add;
++	*total = loongarch_cost->fp_add;
+       else
+-	 *total = COSTS_N_INSNS (4);
++	*total = COSTS_N_INSNS (4);
+       return false;
+
+     case LT:
+@@ -3500,7 +3423,7 @@
+	   return false;
+	 }
+       *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
+-				       speed);
++				      speed);
+       return true;
+
+     case MINUS:
+@@ -3512,13 +3435,12 @@
+	 }
+
+       /* If it's an add + mult (which is equivalent to shift left) and
+-	 it's immediate operand satisfies const_immlsa_operand predicate.  */
+-      if (((ISA_HAS_LSA && mode == SImode)
+-	   || (ISA_HAS_DLSA && mode == DImode))
++	 its immediate operand satisfies const_immalsl_operand predicate.  */
++      if ((mode == SImode || (TARGET_64BIT && mode == DImode))
+	  && GET_CODE (XEXP (x, 0)) == MULT)
+	 {
+	   rtx op2 = XEXP (XEXP (x, 0), 1);
+-	  if (const_immlsa_operand (op2, mode))
++	  if (const_immalsl_operand (op2, mode))
+	    {
+	      *total = (COSTS_N_INSNS (1)
+			+ set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed)
+@@ -3529,9 +3451,8 @@
+
+       /* Double-word operations require three single-word operations and
+	  an SLTU.  */
+-      *total = loongarch_binary_cost (x, COSTS_N_INSNS (1),
+-				      COSTS_N_INSNS (4),
+-				      speed);
++      *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
++				      speed);
+       return true;
+
+     case NEG:
+@@ -3549,9 +3470,6 @@
+       if (float_mode_p)
+	 *total = loongarch_fp_mult_cost (mode);
+       else if (mode == DImode && !TARGET_64BIT)
+-	/* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
+-	   where the mulsidi3 always includes an MFHI and an MFLO.  */
+-	// FIXED ME???
+	 *total = (speed
+		   ? loongarch_cost->int_mult_si * 3 + 6
+		   : COSTS_N_INSNS (7));
+@@ -3566,7 +3484,6 @@
+     case DIV:
+       /* Check for a reciprocal.  */
+       if (float_mode_p
+-	  && ISA_HAS_FP_RECIP_RSQRT (mode)
+	  && flag_unsafe_math_optimizations
+	  && XEXP (x, 0) == CONST1_RTX (mode))
+	 {
+@@ -3597,17 +3514,17 @@
+	   *total = COSTS_N_INSNS (loongarch_idiv_insns (mode));
+	 }
+       else if (mode == DImode)
+-	 *total = loongarch_cost->int_div_di;
++	*total = loongarch_cost->int_div_di;
+       else
+	 *total = loongarch_cost->int_div_si;
+       return false;
+
+     case SIGN_EXTEND:
+-      *total = loongarch_sign_extend_cost (mode, XEXP (x, 0));
++      *total = loongarch_sign_extend_cost (XEXP (x, 0));
+       return false;
+
+     case ZERO_EXTEND:
+-      *total = loongarch_zero_extend_cost (mode, XEXP (x, 0));
++      *total = loongarch_zero_extend_cost (XEXP (x, 0));
+       return false;
+     case TRUNCATE:
+       /* Costings for highpart multiplies. 
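
The TRUNCATE case that begins here prices the canonical high-part multiply, i.e. taking the upper word of a widening product. In C source that is the idiom below, which a LoongArch compiler can turn into a single mulh.w / mulh.wu; the functions are an illustration, not code from the patch.

    #include <stdint.h>
    #include <assert.h>

    static int32_t mulh_w (int32_t a, int32_t b)      /* signed high part */
    { return (int32_t) (((int64_t) a * b) >> 32); }

    static uint32_t mulh_wu (uint32_t a, uint32_t b)  /* unsigned high part */
    { return (uint32_t) (((uint64_t) a * b) >> 32); }

    int main (void)
    {
      assert (mulh_w (0x40000000, 6) == 1);       /* 0x180000000 >> 32 */
      assert (mulh_wu (0x80000000u, 4) == 2);     /* 0x200000000 >> 32 */
      return 0;
    }
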
Matching patterns of the form: +@@ -3617,11 +3534,11 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + (const_int 32) + */ + if ((GET_CODE (XEXP (x, 0)) == ASHIFTRT +- || GET_CODE (XEXP (x, 0)) == LSHIFTRT) ++ || GET_CODE (XEXP (x, 0)) == LSHIFTRT) + && CONST_INT_P (XEXP (XEXP (x, 0), 1)) + && ((INTVAL (XEXP (XEXP (x, 0), 1)) == 32 + && GET_MODE (XEXP (x, 0)) == DImode) +- || (ISA_HAS_DMUL ++ || (TARGET_64BIT + && INTVAL (XEXP (XEXP (x, 0), 1)) == 64 + && GET_MODE (XEXP (x, 0)) == TImode)) + && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT +@@ -3643,13 +3560,13 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + for (int i = 0; i < 2; ++i) + { + rtx op = XEXP (XEXP (XEXP (x, 0), 0), i); +- if (ISA_HAS_DMUL ++ if (TARGET_64BIT + && GET_CODE (op) == ZERO_EXTEND + && GET_MODE (op) == DImode) + *total += rtx_cost (op, DImode, MULT, i, speed); + else +- *total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op), +- 0, speed); ++ *total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op), 0, ++ speed); + } + + return true; +@@ -3684,58 +3601,168 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + + static int + loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, +- tree vectype, +- int misalign ATTRIBUTE_UNUSED) ++ tree vectype, ++ int misalign ATTRIBUTE_UNUSED) + { +- unsigned elements; +- ++ int elements; + switch (type_of_cost) + { +- case scalar_stmt: +- case scalar_load: +- case vector_stmt: +- case vector_load: +- case vec_to_scalar: +- case scalar_to_vec: +- case cond_branch_not_taken: +- case vec_perm: +- case vec_promote_demote: +- case scalar_store: +- case vector_store: +- return 1; +- +- case unaligned_load: +- case vector_gather_load: +- return 2; ++ case scalar_stmt: ++ case vector_stmt: ++ case vec_to_scalar: ++ case scalar_to_vec: ++ case vec_perm: ++ case vec_promote_demote: ++ return 1; + +- case unaligned_store: +- case vector_scatter_store: +- return 10; ++ case scalar_store: ++ case scalar_load: ++ return 3; + +- case cond_branch_taken: +- return 3; ++ case vector_store: ++ case vector_load: ++ return loongarch_vector_access_cost; + +- case vec_construct: +- elements = TYPE_VECTOR_SUBPARTS (vectype); +- return elements / 2 + 1; ++ case unaligned_load: ++ case unaligned_store: ++ case vector_gather_load: ++ case vector_scatter_store: ++ return 5; + +- default: +- gcc_unreachable (); +- } +-} ++ case cond_branch_taken: ++ return 4; ++ ++ case cond_branch_not_taken: ++ return 2; ++ ++ case vec_construct: ++ { ++ elements = TYPE_VECTOR_SUBPARTS (vectype); ++ if (ISA_HAS_LASX) ++ return elements + 1; ++ else ++ return elements; ++ } ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* Implement targetm.vectorize.add_stmt_cost. */ ++static unsigned ++loongarch_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, ++ struct _stmt_vec_info *stmt_info, int misalign, ++ enum vect_cost_model_location where) ++{ ++ unsigned *cost = (unsigned *) data; ++ unsigned retval = 0; ++ ++ tree vectype = stmt_info ? 
stmt_vectype (stmt_info) : NULL_TREE; ++ int stmt_cost = - 1; ++ ++ if ((kind == vector_stmt || kind == scalar_stmt) ++ && stmt_info ++ && stmt_info->stmt && gimple_code (stmt_info->stmt) == GIMPLE_ASSIGN) ++ { ++ tree_code subcode = gimple_assign_rhs_code (stmt_info->stmt); ++ bool fp = false; ++ machine_mode mode = TImode; ++ ++ if (vectype != NULL) ++ { ++ fp = FLOAT_TYPE_P (vectype); ++ mode = TYPE_MODE (vectype); ++ } ++ ++ switch (subcode) ++ { ++ case PLUS_EXPR: ++ case POINTER_PLUS_EXPR: ++ case MINUS_EXPR: ++ case MULT_EXPR: ++ case WIDEN_MULT_EXPR: ++ case MULT_HIGHPART_EXPR: ++ stmt_cost = fp ? 2 : 1; ++ break; ++ ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case RDIV_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ stmt_cost = fp ? 4 : 1; ++ break; ++ ++ case NOP_EXPR: ++ /* Only sign-conversions are free. */ ++ if (tree_nop_conversion_p ++ (TREE_TYPE (gimple_assign_lhs (stmt_info->stmt)), ++ TREE_TYPE (gimple_assign_rhs1 (stmt_info->stmt)))) ++ stmt_cost = 0; ++ break; ++ ++ default: ++ break; ++ } ++ } ++ if (kind == vec_construct ++ && stmt_info ++ && (STMT_VINFO_TYPE (stmt_info) == load_vec_info_type ++ || STMT_VINFO_TYPE (stmt_info) == store_vec_info_type) ++ && STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) == VMAT_ELEMENTWISE ++ && TREE_CODE (DR_STEP (STMT_VINFO_DATA_REF (stmt_info))) != INTEGER_CST) ++ { ++ stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype, misalign); ++ stmt_cost *= TYPE_VECTOR_SUBPARTS (vectype); ++ } ++ if (stmt_cost == -1) ++ stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype, misalign); ++ ++ /* Statements in an inner loop relative to the loop being ++ vectorized are weighted more heavily. The value here is ++ arbitrary and could potentially be improved with analysis. */ ++ if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) ++ count *= 50; /* FIXME. */ ++ ++ retval = (unsigned) (count * stmt_cost); ++ ++ cost[where] += retval; ++ ++ return retval; ++} ++ ++static bool ++loongarch_builtin_support_vector_misalignment(machine_mode mode, const_tree type, ++ int misalignment, bool is_packed) ++{ ++ if ((ISA_HAS_LSX || ISA_HAS_LASX) && STRICT_ALIGNMENT) ++ { ++ if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing) ++ return false; ++ if (misalignment == -1) ++ return false; ++ } ++ return default_builtin_support_vector_misalignment (mode, type, misalignment, ++ is_packed); ++} + + + /* Implement TARGET_ADDRESS_COST. */ + + static int + loongarch_address_cost (rtx addr, machine_mode mode, +- addr_space_t as ATTRIBUTE_UNUSED, +- bool speed ATTRIBUTE_UNUSED) ++ addr_space_t as ATTRIBUTE_UNUSED, ++ bool speed ATTRIBUTE_UNUSED) + { + return loongarch_address_insns (addr, mode, false); + } + +- + /* Return one word of double-word value OP, taking into account the fixed + endianness of certain registers. HIGH_P is true to select the high part, + false to select the low part. */ +@@ -3743,24 +3770,16 @@ loongarch_address_cost (rtx addr, machine_mode mode, + rtx + loongarch_subword (rtx op, bool high_p) + { +- unsigned int byte, offset; ++ unsigned int byte; + machine_mode mode; + ++ byte = high_p ? UNITS_PER_WORD : 0; + mode = GET_MODE (op); + if (mode == VOIDmode) + mode = TARGET_64BIT ? TImode : DImode; + +- if (high_p) +- byte = UNITS_PER_WORD; +- else +- byte = 0; +- + if (FP_REG_RTX_P (op)) +- { +- /* Paired FPRs are always ordered little-endian. 
*/ +- offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0); +- return gen_rtx_REG (word_mode, REGNO (op) + offset); +- } ++ return gen_rtx_REG (word_mode, REGNO (op) + high_p); + + if (MEM_P (op)) + return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte)); +@@ -3768,11 +3787,10 @@ loongarch_subword (rtx op, bool high_p) + return simplify_gen_subreg (word_mode, op, mode, byte); + } + +-/* Return true if a move from SRC to DEST should be split into two. +- SPLIT_TYPE describes the split condition. */ ++/* Return true if a move from SRC to DEST should be split into two. */ + + bool +-loongarch_split_move_p (rtx dest, rtx src, enum loongarch_split_type split_type) ++loongarch_split_move_p (rtx dest, rtx src) + { + /* FPR-to-FPR moves can be done in a single instruction, if they're + allowed at all. */ +@@ -3801,19 +3819,18 @@ loongarch_split_move_p (rtx dest, rtx src, enum loongarch_split_type split_type) + return size > UNITS_PER_WORD; + } + +-/* Split a move from SRC to DEST, given that loongarch_split_move_p holds. +- SPLIT_TYPE describes the split condition. */ ++/* Split a move from SRC to DEST, given that loongarch_split_move_p holds. */ + + void +-loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, rtx insn_) ++loongarch_split_move (rtx dest, rtx src, rtx insn_) + { + rtx low_dest; + +- gcc_checking_assert (loongarch_split_move_p (dest, src, split_type)); ++ gcc_checking_assert (loongarch_split_move_p (dest, src)); + if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))) + loongarch_split_128bit_move (dest, src); + else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest))) +- loongarch_split_256bit_move (dest, src); ++ loongarch_split_256bit_move (dest, src); + else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src)) + { + if (!TARGET_64BIT && GET_MODE (dest) == DImode) +@@ -3830,23 +3847,24 @@ loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, r + /* The operation can be split into two normal moves. Decide in + which order to do them. */ + low_dest = loongarch_subword (dest, false); +- if (REG_P (low_dest) +- && reg_overlap_mentioned_p (low_dest, src)) ++ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src)) + { +- loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); ++ loongarch_emit_move (loongarch_subword (dest, true), ++ loongarch_subword (src, true)); + loongarch_emit_move (low_dest, loongarch_subword (src, false)); + } + else + { + loongarch_emit_move (low_dest, loongarch_subword (src, false)); +- loongarch_emit_move (loongarch_subword (dest, true), loongarch_subword (src, true)); ++ loongarch_emit_move (loongarch_subword (dest, true), ++ loongarch_subword (src, true)); + } + } + + /* This is a hack. See if the next insn uses DEST and if so, see if we + can forward SRC for DEST. This is most useful if the next insn is a +- simple store. */ +- rtx_insn *insn = (rtx_insn *)insn_; ++ simple store. 
*/ ++ rtx_insn *insn = (rtx_insn *) insn_; + struct loongarch_address_info addr = {}; + if (insn) + { +@@ -3859,7 +3877,8 @@ loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, r + if (MEM_P (src)) + { + rtx tmp = XEXP (src, 0); +- loongarch_classify_address (&addr, tmp, GET_MODE (tmp), true); ++ loongarch_classify_address (&addr, tmp, GET_MODE (tmp), ++ true); + if (addr.reg && !reg_overlap_mentioned_p (dest, addr.reg)) + validate_change (next, &SET_SRC (set), src, false); + } +@@ -3870,24 +3889,6 @@ loongarch_split_move (rtx dest, rtx src, enum loongarch_split_type split_type, r + } + } + +-/* Return the split type for instruction INSN. */ +- +-static enum loongarch_split_type +-loongarch_insn_split_type (rtx insn) +-{ +- basic_block bb = BLOCK_FOR_INSN (insn); +- if (bb) +- { +- if (optimize_bb_for_speed_p (bb)) +- return SPLIT_FOR_SPEED; +- else +- return SPLIT_FOR_SIZE; +- } +- /* Once CFG information has been removed, we should trust the optimization +- decisions made by previous passes and only split where necessary. */ +- return SPLIT_IF_NECESSARY; +-} +- + /* Return true if a 128-bit move from SRC to DEST should be split. */ + + bool +@@ -3974,10 +3975,10 @@ loongarch_split_128bit_move (rtx dest, rtx src) + s = loongarch_subword_at_byte (src, byte); + if (!TARGET_64BIT) + emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest, +- GEN_INT (1 << index))); ++ GEN_INT (1 << index))); + else + emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest, +- GEN_INT (1 << index))); ++ GEN_INT (1 << index))); + } + } + else if (FP_REG_RTX_P (src)) +@@ -4200,28 +4201,93 @@ loongarch_split_lsx_fill_d (rtx dest, rtx src) + emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1))); + emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3))); + } +- ++ + /* Return true if a move from SRC to DEST in INSN should be split. */ + + bool +-loongarch_split_move_insn_p (rtx dest, rtx src, rtx insn) ++loongarch_split_move_insn_p (rtx dest, rtx src) + { +- return loongarch_split_move_p (dest, src, loongarch_insn_split_type (insn)); ++ return loongarch_split_move_p (dest, src); + } + +-/* Split a move from SRC to DEST in INSN, given that loongarch_split_move_insn_p +- holds. */ ++/* Split a move from SRC to DEST in INSN, given that ++ loongarch_split_move_insn_p holds. */ + + void + loongarch_split_move_insn (rtx dest, rtx src, rtx insn) + { +- loongarch_split_move (dest, src, loongarch_insn_split_type (insn), insn); ++ loongarch_split_move (dest, src, insn); + } +- + +-/* Forward declaration. Used below */ ++/* Implement TARGET_CONSTANT_ALIGNMENT. 
*/ ++ + static HOST_WIDE_INT +-loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align); ++loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align) ++{ ++ if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR) ++ return MAX (align, BITS_PER_WORD); ++ return align; ++} ++ ++const char * ++loongarch_output_move_index (rtx x, machine_mode mode, bool ldr) ++{ ++ int index = exact_log2 (GET_MODE_SIZE (mode)); ++ if (!IN_RANGE (index, 0, 3)) ++ return NULL; ++ ++ struct loongarch_address_info info; ++ if ((loongarch_classify_address (&info, x, mode, false) ++ && !(info.type == ADDRESS_REG_REG)) ++ || !loongarch_legitimate_address_p (mode, x, false)) ++ return NULL; ++ ++ const char *const insn[][4] = ++ { ++ { ++ "stx.b\t%z1,%0", ++ "stx.h\t%z1,%0", ++ "stx.w\t%z1,%0", ++ "stx.d\t%z1,%0", ++ }, ++ { ++ "ldx.bu\t%0,%1", ++ "ldx.hu\t%0,%1", ++ "ldx.w\t%0,%1", ++ "ldx.d\t%0,%1", ++ } ++ }; ++ ++ return insn[ldr][index]; ++} ++ ++const char * ++loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr) ++{ ++ int index = exact_log2 (GET_MODE_SIZE (mode)); ++ if (!IN_RANGE (index, 2, 3)) ++ return NULL; ++ ++ struct loongarch_address_info info; ++ if ((loongarch_classify_address (&info, x, mode, false) ++ && !(info.type == ADDRESS_REG_REG)) ++ || !loongarch_legitimate_address_p (mode, x, false)) ++ return NULL; ++ ++ const char *const insn[][2] = ++ { ++ { ++ "fstx.s\t%1,%0", ++ "fstx.d\t%1,%0" ++ }, ++ { ++ "fldx.s\t%0,%1", ++ "fldx.d\t%0,%1" ++ } ++ }; ++ ++ return insn[ldr][index-2]; ++} + + /* Return the appropriate instructions to move SRC into DEST. Assume + that SRC is operand 1 and DEST is operand 0. */ +@@ -4235,9 +4301,8 @@ loongarch_output_move (rtx dest, rtx src) + bool dbl_p = (GET_MODE_SIZE (mode) == 8); + bool lsx_p = LSX_SUPPORTED_MODE_P (mode); + bool lasx_p = LASX_SUPPORTED_MODE_P (mode); +- enum loongarch_symbol_type symbol_type; + +- if (loongarch_split_move_p (dest, src, SPLIT_IF_NECESSARY)) ++ if (loongarch_split_move_p (dest, src)) + return "#"; + + if ((lsx_p || lasx_p) +@@ -4246,7 +4311,7 @@ loongarch_output_move (rtx dest, rtx src) + && CONST_INT_P (CONST_VECTOR_ELT (src, 0))) + { + gcc_assert (loongarch_const_vector_same_int_p (src, mode, -512, 511)); +- if(lsx_p || lasx_p) ++ if (lsx_p || lasx_p) + { + switch (GET_MODE_SIZE (mode)) + { +@@ -4254,7 +4319,8 @@ loongarch_output_move (rtx dest, rtx src) + return "vrepli.%v0\t%w0,%E1"; + case 32: + return "xvrepli.%v0\t%u0,%E1"; +- default: gcc_unreachable (); ++ default: ++ gcc_unreachable (); + } + } + } +@@ -4278,77 +4344,98 @@ loongarch_output_move (rtx dest, rtx src) + return "vrepli.b\t%w0,0"; + case 32: + return "xvrepli.b\t%u0,0"; +- default: gcc_unreachable (); ++ default: ++ gcc_unreachable (); + } + } + + return dbl_p ? 
"movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1"; + } + } +- if (dest_code == MEM) ++ if (dest_code == MEM) + { ++ const char *insn = NULL; ++ insn = loongarch_output_move_index (XEXP (dest, 0), GET_MODE (dest), ++ false); ++ if (insn) ++ return insn; ++ + rtx offset = XEXP (dest, 0); +- if (GET_CODE(offset) == PLUS) +- offset = XEXP(offset, 1); ++ if (GET_CODE (offset) == PLUS) ++ offset = XEXP (offset, 1); ++ else ++ offset = const0_rtx; + switch (GET_MODE_SIZE (mode)) + { +- case 1: return "st.b\t%z1,%0"; +- case 2: return "st.h\t%z1,%0"; +- case 4: +- if (const_arith_operand (offset, Pmode)) +- return "st.w\t%z1,%0"; +- else +- return "stptr.w\t%z1,%0"; +- case 8: +- if (const_arith_operand (offset, Pmode)) +- return "st.d\t%z1,%0"; +- else +- return "stptr.d\t%z1,%0"; +- default: gcc_unreachable (); +- } ++ case 1: ++ return "st.b\t%z1,%0"; ++ case 2: ++ return "st.h\t%z1,%0"; ++ case 4: ++ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) ++ return "st.w\t%z1,%0"; ++ else ++ return "stptr.w\t%z1,%0"; ++ case 8: ++ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) ++ return "st.d\t%z1,%0"; ++ else ++ return "stptr.d\t%z1,%0"; ++ default: ++ gcc_unreachable (); ++ } + } + } + if (dest_code == REG && GP_REG_P (REGNO (dest))) + { + if (src_code == REG) +- { +- if (FP_REG_P (REGNO (src))) +- { +- gcc_assert (!lsx_p); +- return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; +- } +- } ++ if (FP_REG_P (REGNO (src))) ++ { ++ gcc_assert (!lsx_p && !lasx_p); ++ return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1"; ++ } + + if (src_code == MEM) + { ++ const char *insn = NULL; ++ insn = loongarch_output_move_index (XEXP (src, 0), GET_MODE (src), ++ true); ++ if (insn) ++ return insn; ++ + rtx offset = XEXP (src, 0); +- if (GET_CODE(offset) == PLUS) +- offset = XEXP(offset, 1); ++ if (GET_CODE (offset) == PLUS) ++ offset = XEXP (offset, 1); ++ else ++ offset = const0_rtx; + switch (GET_MODE_SIZE (mode)) + { +- case 1: return "ld.bu\t%0,%1"; +- case 2: return "ld.hu\t%0,%1"; +- case 4: +- if (const_arith_operand (offset, Pmode)) +- return "ld.w\t%0,%1"; +- else +- return "ldptr.w\t%0,%1"; +- case 8: +- if (const_arith_operand (offset, Pmode)) +- return "ld.d\t%0,%1"; +- else +- return "ldptr.d\t%0,%1"; +- default: gcc_unreachable (); ++ case 1: ++ return "ld.bu\t%0,%1"; ++ case 2: ++ return "ld.hu\t%0,%1"; ++ case 4: ++ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) ++ return "ld.w\t%0,%1"; ++ else ++ return "ldptr.w\t%0,%1"; ++ case 8: ++ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) ++ return "ld.d\t%0,%1"; ++ else ++ return "ldptr.d\t%0,%1"; ++ default: ++ gcc_unreachable (); + } + } +- ++ + if (src_code == CONST_INT) + { +- if (LUI_INT (src)) ++ if (LU12I_INT (src)) + return "lu12i.w\t%0,%1>>12\t\t\t# %X1"; +- else if (SMALL_INT (src)) ++ else if (IMM12_INT (src)) + return "addi.w\t%0,$r0,%1\t\t\t# %X1"; +- else if (SMALL_INT_UNSIGNED (src)) ++ else if (IMM12_INT_UNSIGNED (src)) + return "ori\t%0,$r0,%1\t\t\t# %X1"; + else if (LU52I_INT (src)) + return "lu52i.d\t%0,$r0,%X1>>52\t\t\t# %1"; +@@ -4358,56 +4445,51 @@ loongarch_output_move (rtx dest, rtx src) + + if (symbolic_operand (src, VOIDmode)) + { +- +- switch (loongarch_cmodel_var) ++ if ((TARGET_CMODEL_TINY && (!loongarch_global_symbol_p (src) ++ || loongarch_symbol_binds_local_p (src))) ++ || (TARGET_CMODEL_TINY_STATIC && !loongarch_weak_symbol_p (src))) + { +- case LARCH_CMODEL_TINY: +- do ++ /* The symbol must be aligned to 4 byte. 
*/ ++ unsigned int align; ++ ++ if (GET_CODE (src) == LABEL_REF) ++ align = 32 /* Whatever. */; ++ else if (CONSTANT_POOL_ADDRESS_P (src)) ++ align = GET_MODE_ALIGNMENT (get_pool_mode (src)); ++ else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) + { +- if (loongarch_global_symbol_p (src) +- && !loongarch_symbol_binds_local_p (src)) +- break; +- case LARCH_CMODEL_TINY_STATIC: +- if (loongarch_weak_symbol_p (src)) +- break; +- +- /* The symbol must be aligned to 4 byte. */ +- unsigned int align; +- +- if (GET_CODE (src) == LABEL_REF) +- align = 128 /* whatever */; +- /* copy from aarch64 */ +- else if (CONSTANT_POOL_ADDRESS_P (src)) +- align = GET_MODE_ALIGNMENT (get_pool_mode (src)); +- else if (TREE_CONSTANT_POOL_ADDRESS_P (src)) +- { +- tree exp = SYMBOL_REF_DECL (src); +- align = TYPE_ALIGN (TREE_TYPE (exp)); +- align = loongarch_constant_alignment (exp, align); +- } +- else if (SYMBOL_REF_DECL (src)) +- align = DECL_ALIGN (SYMBOL_REF_DECL (src)); +- else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) +- && SYMBOL_REF_BLOCK (src) != NULL) +- align = SYMBOL_REF_BLOCK (src)->alignment; +- else +- align = BITS_PER_UNIT; +- +- if (align % (4 * 8) == 0) +- return "pcaddi\t%0,%%pcrel(%1)>>2"; ++ tree exp = SYMBOL_REF_DECL (src); ++ align = TYPE_ALIGN (TREE_TYPE (exp)); ++ align = loongarch_constant_alignment (exp, align); + } +- while (0); +- case LARCH_CMODEL_NORMAL: +- case LARCH_CMODEL_LARGE: ++ else if (SYMBOL_REF_DECL (src)) ++ align = DECL_ALIGN (SYMBOL_REF_DECL (src)); ++ else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src) ++ && SYMBOL_REF_BLOCK (src) != NULL) ++ align = SYMBOL_REF_BLOCK (src)->alignment; ++ else ++ align = BITS_PER_UNIT; ++ ++ if (align % (4 * 8) == 0) ++ return "pcaddi\t%0,%%pcrel(%1)>>2"; ++ } ++ if (TARGET_CMODEL_TINY ++ || TARGET_CMODEL_TINY_STATIC ++ || TARGET_CMODEL_NORMAL ++ || TARGET_CMODEL_LARGE) ++ { + if (!loongarch_global_symbol_p (src) + || loongarch_symbol_binds_local_p (src)) + return "la.local\t%0,%1"; + else + return "la.global\t%0,%1"; +- case LARCH_CMODEL_EXTREME: +- default: ++ } ++ if (TARGET_CMODEL_EXTREME) ++ { ++ sorry ("Normal symbol loading not implemented in extreme mode."); + gcc_unreachable (); + } ++ + } + } + if (src_code == REG && FP_REG_P (REGNO (src))) +@@ -4416,14 +4498,14 @@ loongarch_output_move (rtx dest, rtx src) + { + if (lsx_p || lasx_p) + { +- + switch (GET_MODE_SIZE (mode)) + { + case 16: + return "vori.b\t%w0,%w1,0"; + case 32: + return "xvori.b\t%u0,%u1,0"; +- default: gcc_unreachable (); ++ default: ++ gcc_unreachable (); + } + } + else +@@ -4434,16 +4516,22 @@ loongarch_output_move (rtx dest, rtx src) + { + if (lsx_p || lasx_p) + { +- + switch (GET_MODE_SIZE (mode)) + { + case 16: + return "vst\t%w1,%0"; + case 32: + return "xvst\t%u1,%0"; +- default: gcc_unreachable (); ++ default: ++ gcc_unreachable (); + } + } ++ const char *insn = NULL; ++ insn = loongarch_output_move_index_float (XEXP (dest, 0), ++ GET_MODE (dest), ++ false); ++ if (insn) ++ return insn; + + return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0"; + } +@@ -4460,17 +4548,25 @@ loongarch_output_move (rtx dest, rtx src) + return "vld\t%w0,%1"; + case 32: + return "xvld\t%u0,%1"; +- default: gcc_unreachable (); ++ default: ++ gcc_unreachable (); + } + } ++ const char *insn = NULL; ++ insn = loongarch_output_move_index_float (XEXP (src, 0), ++ GET_MODE (src), ++ true); ++ if (insn) ++ return insn; ++ + return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1"; + } + } + gcc_unreachable (); + } +- ++ + /* Return true if CMP1 is a suitable second operand for integer ordering +- test CODE. 
See also the *sCC patterns in loongarch.md. */ ++ test CODE. */ + + static bool + loongarch_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) +@@ -4508,7 +4604,7 @@ loongarch_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) + + static bool + loongarch_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, +- machine_mode mode) ++ machine_mode mode) + { + HOST_WIDE_INT plus_one; + +@@ -4551,11 +4647,11 @@ loongarch_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, + + static void + loongarch_emit_int_order_test (enum rtx_code code, bool *invert_ptr, +- rtx target, rtx cmp0, rtx cmp1) ++ rtx target, rtx cmp0, rtx cmp1) + { + machine_mode mode; + +- /* First see if there is a LARCH instruction that can do this operation. ++ /* First see if there is a LoongArch instruction that can do this operation. + If not, try doing the same for the inverse operation. If that also + fails, force CMP1 into a register and try again. */ + mode = GET_MODE (cmp0); +@@ -4574,7 +4670,7 @@ loongarch_emit_int_order_test (enum rtx_code code, bool *invert_ptr, + rtx inv_target; + + inv_target = loongarch_force_binary (GET_MODE (target), +- inv_code, cmp0, cmp1); ++ inv_code, cmp0, cmp1); + loongarch_emit_binary (XOR, target, inv_target, const1_rtx); + } + else +@@ -4595,43 +4691,14 @@ loongarch_zero_if_equal (rtx cmp0, rtx cmp1) + return cmp0; + + if (uns_arith_operand (cmp1, VOIDmode)) +- return expand_binop (GET_MODE (cmp0), xor_optab, +- cmp0, cmp1, 0, 0, OPTAB_DIRECT); ++ return expand_binop (GET_MODE (cmp0), xor_optab, cmp0, cmp1, 0, 0, ++ OPTAB_DIRECT); + +- return expand_binop (GET_MODE (cmp0), sub_optab, +- cmp0, cmp1, 0, 0, OPTAB_DIRECT); ++ return expand_binop (GET_MODE (cmp0), sub_optab, cmp0, cmp1, 0, 0, ++ OPTAB_DIRECT); + } + +-/* Allocate a floating-point condition-code register of mode MODE. +- +- These condition code registers are used for certain kinds +- of compound operation, such as compare and branches, vconds, +- and built-in functions. At expand time, their use is entirely +- controlled by LARCH-specific code and is entirely internal +- to these compound operations. +- +- We could (and did in the past) expose condition-code values +- as pseudo registers and leave the register allocator to pick +- appropriate registers. The problem is that it is not practically +- possible for the rtl optimizers to guarantee that no spills will +- be needed, even when AVOID_CCMODE_COPIES is defined. We would +- therefore need spill and reload sequences to handle the worst case. +- +- Although such sequences do exist, they are very expensive and are +- not something we'd want to use. +- +- The main benefit of having more than one condition-code register +- is to allow the pipelining of operations, especially those involving +- comparisons and conditional moves. We don't really expect the +- registers to be live for long periods, and certainly never want +- them to be live across calls. +- +- Also, there should be no penalty attached to using all the available +- registers. They are simply bits in the same underlying FPU control +- register. +- +- We therefore expose the hardware registers from the outset and use +- a simple round-robin allocation scheme. */ ++/* Allocate a floating-point condition-code register of mode MODE. 
*/ + + static rtx + loongarch_allocate_fcc (machine_mode mode) +@@ -4646,15 +4713,14 @@ loongarch_allocate_fcc (machine_mode mode) + gcc_unreachable (); + + cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1); +- if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST) ++ if (cfun->machine->next_fcc > FCC_REG_LAST - FCC_REG_FIRST) + cfun->machine->next_fcc = 0; + +- regno = ST_REG_FIRST + cfun->machine->next_fcc; ++ regno = FCC_REG_FIRST + cfun->machine->next_fcc; + cfun->machine->next_fcc += count; + return gen_rtx_REG (mode, regno); + } + +- + /* Sign- or zero-extend OP0 and OP1 for integer comparisons. */ + + static void +@@ -4681,6 +4747,7 @@ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) + } + } + ++ + /* Convert a comparison into something that can be used in a branch. On + entry, *OP0 and *OP1 are the values being compared and *CODE is the code + used to compare them. Update them to describe the final comparison. */ +@@ -4688,6 +4755,9 @@ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1) + static void + loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) + { ++ static const enum rtx_code ++ mag_comparisons[][2] = {{LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}}; ++ + if (splittable_const_int_operand (*op1, VOIDmode)) + { + HOST_WIDE_INT rhs = INTVAL (*op1); +@@ -4695,7 +4765,7 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) + if (*code == EQ || *code == NE) + { + /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */ +- if (SMALL_OPERAND (-rhs)) ++ if (IMM12_OPERAND (-rhs)) + { + *op0 = loongarch_force_binary (GET_MODE (*op0), PLUS, *op0, + GEN_INT (-rhs)); +@@ -4704,10 +4774,6 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) + } + else + { +- static const enum rtx_code mag_comparisons[][2] = { +- {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE} +- }; +- + /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */ + for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++) + { +@@ -4730,13 +4796,14 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1) + } + } + ++ loongarch_extend_comparands (*code, op0, op1); + +- *op0 = force_reg (GET_MODE (*op0), *op0); +- if (*op1 != const0_rtx) +- *op1 = force_reg (GET_MODE (*op0), *op1); ++ *op0 = force_reg (word_mode, *op0); ++ if (*op1 != const0_rtx) ++ *op1 = force_reg (word_mode, *op1); + } + +-/* Like riscv_emit_int_compare, but for floating-point comparisons. */ ++/* Like loongarch_emit_int_compare, but for floating-point comparisons. */ + + static void + loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) +@@ -4749,7 +4816,7 @@ loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) + then compare that register against zero. + + Set CMP_CODE to the code of the comparison instruction and +- *CODE to the code that the branch or move should use. */ ++ *CODE to the code that the branch or move should use. */ + enum rtx_code cmp_code = *code; + /* Three FP conditions cannot be implemented by reversing the + operands for FCMP.cond.fmt, instead a reversed condition code is +@@ -4760,7 +4827,7 @@ loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1) + *op1 = const0_rtx; + loongarch_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1); + } +- ++ + /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2] + and OPERAND[3]. Store the result in OPERANDS[0]. 
+
+@@ -4775,14 +4842,15 @@ loongarch_expand_scc (rtx operands[])
+ rtx op0 = operands[2];
+ rtx op1 = operands[3];
+
++ loongarch_extend_comparands (code, &op0, &op1);
++ op0 = force_reg (word_mode, op0);
++
+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
+
+ if (code == EQ || code == NE)
+ {
+- {
+- rtx zie = loongarch_zero_if_equal (op0, op1);
+- loongarch_emit_binary (code, target, zie, const0_rtx);
+- }
++ rtx zie = loongarch_zero_if_equal (op0, op1);
++ loongarch_emit_binary (code, target, zie, const0_rtx);
+ }
+ else
+ loongarch_emit_int_order_test (code, 0, target, op0, op1);
+@@ -4804,49 +4872,65 @@ loongarch_expand_conditional_branch (rtx *operands)
+ else
+ loongarch_emit_int_compare (&code, &op0, &op1);
+
+- condition = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
++ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+ emit_jump_insn (gen_condjump (condition, operands[3]));
+ }
+
+ /* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0]
+ if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
+-
+-void
+-loongarch_expand_conditional_move (rtx *operands)
++bool
++loongarch_expand_conditional_move_la464 (rtx *operands)
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = XEXP (operands[1], 0);
+ rtx op1 = XEXP (operands[1], 1);
++ machine_mode cmp_mode = GET_MODE (op0);
++ machine_mode sel_mode = GET_MODE (operands[2]);
+
++ /* The "ffii" case: select between fixed-point (integer) values based on
++ the result of a floating-point comparison. */
+ if (FLOAT_MODE_P (GET_MODE (op1)))
+ loongarch_emit_float_compare (&code, &op0, &op1);
+ else
+ {
+- if (code == EQ || code == NE) /*see test-mask-1.c && test-mask-5.c*/
++ loongarch_extend_comparands (code, &op0, &op1);
++
++ op0 = force_reg (word_mode, op0);
++
++ if (code == EQ || code == NE)
++ {
++ op0 = loongarch_zero_if_equal (op0, op1);
++ op1 = const0_rtx;
++ /* Be careful with the "iiff" case: a floating-point selection
++ based on an integer comparison. */
++ if (FLOAT_MODE_P (sel_mode)) {
++ rtx target = gen_reg_rtx (GET_MODE (op0));
++ bool invert = false;
++ loongarch_emit_int_order_test (LTU, NULL, op0,
++ force_reg (GET_MODE (op0), const0_rtx),
++ op0);
++ op1 = const0_rtx;
++ }
++ }
++ else
+ {
+- op0 = loongarch_zero_if_equal(op0, op1);
++ /* The comparison needs a separate scc instruction. Store the
++ result of the scc in *OP0 and compare it against zero. */
++ bool invert = false;
++ rtx target = gen_reg_rtx (GET_MODE (op0));
++ loongarch_emit_int_order_test (code, &invert, target, op0, op1);
++ code = invert ? EQ : NE;
++ op0 = target;
+ op1 = const0_rtx;
+ }
+- else /*see test-mask-2.c*/
+- {
+- /* The comparison needs a separate scc instruction. Store the
+- result of the scc in *OP0 and compare it against zero. */
+- bool invert = false;
+- rtx target = gen_reg_rtx (GET_MODE (op0));
+- loongarch_emit_int_order_test (code, &invert, target, op0, op1);
+- code = invert ? EQ: NE;
+- op0 = target;
+- op1 = const0_rtx;
+- }
+ }
+
+ rtx cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
+ /* There is no direct support for general conditional GP move involving
+- two registers using SEL. see test-mask-3.c */
++ two registers using SEL. 
*/
++ if (INTEGRAL_MODE_P (cmp_mode)
++ && INTEGRAL_MODE_P (sel_mode)
+ && register_operand (operands[2], VOIDmode)
+- && register_operand (operands[3], VOIDmode))
+- {
++ && register_operand (operands[3], VOIDmode)) {
++
+ machine_mode mode = GET_MODE (operands[0]);
+ rtx temp = gen_reg_rtx (mode);
+ rtx temp2 = gen_reg_rtx (mode);
+@@ -4864,26 +4948,72 @@ loongarch_expand_conditional_move (rtx *operands)
+
+ /* Merge the two results, at least one is guaranteed to be zero. */
+ emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2)));
+- }
+- else
+- emit_insn (gen_rtx_SET (operands[0],
+- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
+- operands[2], operands[3])));
+-}
+-
+-
+-/* Initialize *CUM for a call to a function of type FNTYPE. */
+
+-void
+-loongarch_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
+-{
+- memset (cum, 0, sizeof (*cum));
+- cum->prototype = (fntype && prototype_p (fntype));
+- cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
++ return true;
++ /* For the "ffii" and "iiff" cases the movgr2fr/movfr2gr overhead is
++ relatively large, so we use a compromise sequence. */
++ } else if (INTEGRAL_MODE_P (cmp_mode)
++ && FLOAT_MODE_P (sel_mode)
++ && register_operand (operands[2], VOIDmode)
++ && register_operand (operands[3], VOIDmode)) {
++ rtx temp = gen_reg_rtx (sel_mode);
++ rtx fcc_reg = loongarch_allocate_fcc (FCCmode);
++ rtx diop0 = convert_to_mode (E_DImode, op0, true);
++ /* slt t0,i,i -> movgr2fr f0,t0 -> movfr2cf fcc0,f0 -> fsel f,f */
++ if (sel_mode == E_DFmode) {
++ emit_insn (gen_movdgr2frdf (temp, diop0));
++ emit_insn (gen_movfr2fccdf (fcc_reg, temp));
++ } else if (sel_mode == E_SFmode) {
++ emit_insn (gen_movdgr2frsf (temp, diop0));
++ emit_insn (gen_movfr2fccsf (fcc_reg, temp));
++ }
++ cond = gen_rtx_fmt_ee (code, GET_MODE (fcc_reg), fcc_reg, const0_rtx);
++
++ emit_insn (gen_rtx_SET (operands[0],
++ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
++ operands[2], operands[3])));
++ return true;
++ } else if (FLOAT_MODE_P (cmp_mode)
++ && INTEGRAL_MODE_P (sel_mode)) {
++ /* movgr2fr f0,i -> movgr2fr f1,i -> fcmp fcc0,f,f
++ -> fsel f3,f0,f1 -> movfr2gr t0,f3 */
++ machine_mode dst_mode = GET_MODE (operands[0]);
++ rtx temp = gen_reg_rtx (E_DFmode);
++ rtx temp2 = gen_reg_rtx (E_DFmode);
++ rtx temp3 = gen_reg_rtx (E_DFmode);
++
++ if (CONST_INT_P (operands[2])) {
++ operands[2] = copy_to_mode_reg (dst_mode, operands[2]);
++ }
++ if (CONST_INT_P (operands[3])) {
++ operands[3] = copy_to_mode_reg (dst_mode, operands[3]);
++ }
++ if (GET_MODE (operands[2]) != E_DImode)
++ operands[2] = convert_to_mode (E_DImode, operands[2], false);
++ if (GET_MODE (operands[3]) != E_DImode)
++ operands[3] = convert_to_mode (E_DImode, operands[3], false);
++
++ emit_insn (gen_movdgr2frdf (temp2, operands[2]));
++ emit_insn (gen_movdgr2frdf (temp3, operands[3]));
++
++ emit_insn (gen_rtx_SET (temp,
++ gen_rtx_IF_THEN_ELSE (E_DFmode, cond,
++ temp2, temp3)));
++ if (GET_MODE (operands[0]) == E_DImode)
++ emit_insn (gen_movdfr2grdi (operands[0], temp));
++ else if (GET_MODE (operands[0]) == E_SImode)
++ emit_insn (gen_movdfr2grsi (operands[0], temp));
++ return true;
++ } else if (FLOAT_MODE_P (cmp_mode)
++ && FLOAT_MODE_P (sel_mode)) {
++ emit_insn (gen_rtx_SET (operands[0],
++ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
++ operands[2], operands[3])));
++ return true;
++ }
++
++ return false;
+ }
+-
+-
+-
+ /* Implement TARGET_EXPAND_BUILTIN_VA_START. 
*/ + + static void +@@ -4893,100 +5023,15 @@ loongarch_va_start (tree valist, rtx nextarg) + std_expand_builtin_va_start (valist, nextarg); + } + +- +-/* Start a definition of function NAME. */ +- +-static void +-loongarch_start_function_definition (const char *name) +-{ +- ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function"); +- +- /* Start the definition proper. */ +- assemble_name (asm_out_file, name); +- fputs (":\n", asm_out_file); +-} +- +-/* End a function definition started by loongarch_start_function_definition. */ +- +-static void +-loongarch_end_function_definition (const char *name) +-{ +-} +- + /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */ + + static bool +-loongarch_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) ++loongarch_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED, ++ tree exp ATTRIBUTE_UNUSED) + { +- if (!TARGET_SIBCALLS) +- return false; +- +- /* Interrupt handlers need special epilogue code and therefore can't +- use sibcalls. */ +- if (loongarch_interrupt_type_p (TREE_TYPE (current_function_decl))) +- return false; +- +- /* Otherwise OK. */ ++ /* Always OK. */ + return true; + } +- +-/* Implement a handler for STORE_BY_PIECES operations +- for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */ +- +-bool +-loongarch_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align) +-{ +- /* Storing by pieces involves moving constants into registers +- of size MIN (ALIGN, BITS_PER_WORD), then storing them. +- We need to decide whether it is cheaper to load the address of +- constant data into a register and use a block move instead. */ +- +- /* If the data is only byte aligned, then: +- +- (a1) A block move of less than 4 bytes would involve three 3 LD.Bs and +- 3 ST.Bs. We might as well use 3 single-instruction LIs and 3 SD.Bs +- instead. +- +- (a2) A block move of 4 bytes from aligned source data can use an +- LD.W/ST.W sequence. This is often better than the 4 LIs and +- 4 SD.Bs that we would generate when storing by pieces. */ +- if (align <= BITS_PER_UNIT) +- return size < 4; +- +- /* If the data is 2-byte aligned, then: +- +- (b1) A block move of less than 4 bytes would use a combination of LD.Bs, +- LD.Hs, SD.Bs and SD.Hs. We get better code by using single-instruction +- LIs, SD.Bs and SD.Hs instead. +- +- (b2) A block move of 4 bytes from aligned source data would again use +- an LD.W/ST.W sequence. In most cases, loading the address of +- the source data would require at least one extra instruction. +- It is often more efficient to use 2 single-instruction LIs and +- 2 SHs instead. +- +- (b3) A block move of up to 3 additional bytes would be like (b1). +- +- (b4) A block move of 8 bytes from aligned source data can use two +- LD.W/ST.W sequences. Both sequences are better than the 4 LIs +- and 4 ST.Hs that we'd generate when storing by pieces. +- +- The reasoning for higher alignments is similar: +- +- (c1) A block move of less than 4 bytes would be the same as (b1). +- +- (c2) A block move of 4 bytes would use an LD.W/ST.W sequence. Again, +- loading the address of the source data would typically require +- at least one extra instruction. It is generally better to use +- LUI/ORI/SW instead. +- +- (c3) A block move of up to 3 additional bytes would be like (b1). +- +- (c4) A block move of 8 bytes can use two LD.W/ST.W sequences or a single +- LD.D/ST.D sequence, and in these cases we've traditionally preferred +- the memory copy over the more bulky constant moves. 
*/ +- return size < 8; +-} + + /* Emit straight-line code to move LENGTH bytes from SRC to DEST. + Assume that the areas do not overlap. */ +@@ -4999,20 +5044,13 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + int i; + machine_mode mode; + rtx *regs; ++ ++ if (STRICT_ALIGNMENT) ++ bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))); ++ else ++ bits = BITS_PER_WORD; + +- /* Work out how many bits to move at a time. If both operands have +- half-word alignment, it is usually better to move in half words. +- For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr +- and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr. +- Otherwise move word-sized chunks. +- +- For ISA_HAS_LWL_LWR we rely on the lwl/lwr & swl/swr load. Otherwise +- picking the minimum of alignment or BITS_PER_WORD gets us the +- desired size for bits. */ +- +- bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))); +- +- if (TARGET_LASX) ++ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) + { + bits = BITS_PER_WORD * 4; + mode = V4DImode; +@@ -5029,7 +5067,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + + /* Load as many BITS-sized chunks as possible. Use a normal load if + the source has enough alignment, otherwise use left/right pairs. */ +- if (TARGET_LASX) ++ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) + { + for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) + { +@@ -5047,7 +5085,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + } + + /* Copy the chunks to the destination. */ +- if (TARGET_LASX) ++ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) + { + + for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) +@@ -5065,9 +5103,9 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + /* Mop up any left-over bytes. 
*/ + if (offset < length) + { +- if (TARGET_LASX) ++ if (ISA_HAS_LASX && !STRICT_ALIGNMENT) + { +- if(length - offset >= 16) ++ if (length - offset >= 16) + { + rtx *regs_tmp = XALLOCAVEC (rtx, 1); + regs_tmp[0] = gen_reg_rtx (V2DImode); +@@ -5075,7 +5113,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + loongarch_emit_move (adjust_address (dest, V2DImode, offset), regs_tmp[0]); + offset += 16; + } +- if(length - offset >= 8) ++ if (length - offset >= 8) + { + rtx *regs_tmp = XALLOCAVEC (rtx, 1); + regs_tmp[0] = gen_reg_rtx (DImode); +@@ -5083,7 +5121,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + loongarch_emit_move (adjust_address (dest, DImode, offset), regs_tmp[0]); + offset += 8; + } +- if(length - offset >= 4) ++ if (length - offset >= 4) + { + rtx *regs_tmp = XALLOCAVEC (rtx, 1); + regs_tmp[0] = gen_reg_rtx (SImode); +@@ -5091,7 +5129,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + loongarch_emit_move (adjust_address (dest, SImode, offset), regs_tmp[0]); + offset += 4; + } +- if(length - offset >= 2) ++ if (length - offset >= 2) + { + rtx *regs_tmp = XALLOCAVEC (rtx, 1); + regs_tmp[0] = gen_reg_rtx (HImode); +@@ -5099,7 +5137,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + loongarch_emit_move (adjust_address (dest, HImode, offset), regs_tmp[0]); + offset += 2; + } +- if(length - offset >= 1) ++ if (length - offset >= 1) + { + rtx *regs_tmp = XALLOCAVEC (rtx, 1); + regs_tmp[0] = gen_reg_rtx (QImode); +@@ -5108,7 +5146,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + offset += 1; + } + +- if(length - offset != 0) ++ if (length - offset != 0) + gcc_unreachable (); + } + else +@@ -5131,8 +5169,8 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) + register. Store them in *LOOP_REG and *LOOP_MEM respectively. */ + + static void +-loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, +- rtx *loop_reg, rtx *loop_mem) ++loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, rtx *loop_reg, ++ rtx *loop_mem) + { + *loop_reg = copy_addr_to_reg (XEXP (mem, 0)); + +@@ -5148,7 +5186,7 @@ loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, + + static void + loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, +- HOST_WIDE_INT bytes_per_iter) ++ HOST_WIDE_INT bytes_per_iter) + { + rtx_code_label *label; + rtx src_reg, dest_reg, final_src, test; +@@ -5163,8 +5201,8 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, + + /* Calculate the value that SRC_REG should have after the last iteration + of the loop. */ +- final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), +- 0, 0, OPTAB_WIDEN); ++ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), 0, ++ 0, OPTAB_WIDEN); + + /* Emit the start of the loop. */ + label = gen_label_rtx (); +@@ -5174,8 +5212,10 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, + loongarch_block_move_straight (dest, src, bytes_per_iter); + + /* Move on to the next block. */ +- loongarch_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter)); +- loongarch_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter)); ++ loongarch_emit_move (src_reg, ++ plus_constant (Pmode, src_reg, bytes_per_iter)); ++ loongarch_emit_move (dest_reg, ++ plus_constant (Pmode, dest_reg, bytes_per_iter)); + + /* Emit the loop condition. 
*/ + test = gen_rtx_NE (VOIDmode, src_reg, final_src); +@@ -5198,12 +5238,12 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, + bool + loongarch_expand_block_move (rtx dest, rtx src, rtx length) + { +- +- int max_move_bytes = (TARGET_LASX ? \ ++ int max_move_bytes = (ISA_HAS_LASX ? \ + LARCH_MAX_MOVE_BYTES_STRAIGHT * 8 \ + : LARCH_MAX_MOVE_BYTES_STRAIGHT); + +- if (CONST_INT_P (length) && INTVAL (length) <= loongarch_max_inline_memcpy_size) ++ if (CONST_INT_P (length) ++ && INTVAL (length) <= loongarch_max_inline_memcpy_size) + { + if (INTVAL (length) <= max_move_bytes) + { +@@ -5213,13 +5253,12 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx length) + else if (optimize) + { + loongarch_block_move_loop (dest, src, INTVAL (length), +- LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); ++ LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER); + return true; + } + } + return false; + } +- + + /* Expand a QI or HI mode atomic memory operation. + +@@ -5239,13 +5278,12 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx length) + + void + loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, +- rtx result, rtx mem, rtx oldval, +- rtx newval, rtx model) ++ rtx result, rtx mem, rtx oldval, rtx newval, ++ rtx model) + { + rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask; + rtx unshifted_mask_reg, mask, inverted_mask, si_op; + rtx res = NULL; +- rtx tmp = NULL; + machine_mode mode; + + mode = GET_MODE (mem); +@@ -5253,7 +5291,7 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, + /* Compute the address of the containing SImode value. */ + orig_addr = force_reg (Pmode, XEXP (mem, 0)); + memsi_addr = loongarch_force_binary (Pmode, AND, orig_addr, +- force_reg (Pmode, GEN_INT (-4))); ++ force_reg (Pmode, GEN_INT (-4))); + + /* Create a memory reference for it. */ + memsi = gen_rtx_MEM (SImode, memsi_addr); +@@ -5263,7 +5301,6 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, + /* Work out the byte offset of the QImode or HImode value, + counting from the least significant byte. */ + shift = loongarch_force_binary (Pmode, AND, orig_addr, GEN_INT (3)); +- + /* Multiply by eight to convert the shift value from bytes to bits. */ + loongarch_emit_binary (ASHIFT, shift, shift, GEN_INT (3)); + +@@ -5301,14 +5338,13 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, + res = gen_reg_rtx (SImode); + + if (newval) +- si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, model); ++ si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, ++ model); + else if (result) + si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, model); + else + si_op = generator.fn_5 (memsi, mask, inverted_mask, oldval, model); + +- //si_op = generator.fn_7 (res, memsi, mask, inverted_mask, oldval, newval, model); +- + emit_insn (si_op); + + if (result) +@@ -5320,24 +5356,14 @@ loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs generator, + } + } + +-/* Return true if X is a MEM with the same size as MODE. */ +- +-bool +-loongarch_mem_fits_mode_p (machine_mode mode, rtx x) +-{ +- return (MEM_P (x) +- && MEM_SIZE_KNOWN_P (x) +- && MEM_SIZE (x) == GET_MODE_SIZE (mode)); +-} +- + /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the + source of an "ext" instruction or the destination of an "ins" + instruction. 
OP must be a register operand and the following + conditions must hold: + +- 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op)) +- 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) +- 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) ++ 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op)) ++ 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) ++ 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op)) + + Also reject lengths equal to a word as they are better handled + by the move patterns. */ +@@ -5358,31 +5384,11 @@ loongarch_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos) + return true; + } + +- +-/* Return true iff OP1 and OP2 are valid operands together for the +- *and3 patterns. For the cases to consider, +- see the table in the comment before the pattern. */ +- +-bool +-and_operands_ok (machine_mode mode, rtx op1, rtx op2) +-{ +- +- if (memory_operand (op1, mode)) +- { +- return and_load_operand (op2, mode); +- } +- else +- return and_reg_operand (op2, mode); +-} +- + /* Print the text for PRINT_OPERAND punctation character CH to FILE. + The punctuation characters are: + + '.' Print the name of the register with a hard-wired zero (zero or $r0). + '$' Print the name of the stack pointer register (sp or $r3). +- ':' Print "c" to use the compact version if the delay slot is a nop. +- '!' Print "s" to use the short version if the delay slot contains a +- 16-bit instruction. + + See also loongarch_init_print_operand_punct. */ + +@@ -5399,14 +5405,6 @@ loongarch_print_operand_punctuation (FILE *file, int ch) + fputs (reg_names[STACK_POINTER_REGNUM], file); + break; + +- case ':': +- /* When final_sequence is 0, the delay slot will be a nop. We can +- use the compact version where available. The %: formatter will +- only be present if a compact form of the branch is available. */ +- if (final_sequence == 0) +- putc ('c', file); +- break; +- + default: + gcc_unreachable (); + break; +@@ -5420,7 +5418,7 @@ loongarch_init_print_operand_punct (void) + { + const char *p; + +- for (p = ".$:"; *p; p++) ++ for (p = ".$"; *p; p++) + loongarch_print_operand_punct[(unsigned char) *p] = true; + } + +@@ -5429,7 +5427,8 @@ loongarch_init_print_operand_punct (void) + opcode to FILE. */ + + static void +-loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter) ++loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, ++ int letter) + { + switch (code) + { +@@ -5443,7 +5442,7 @@ loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter + case GEU: + case LTU: + case LEU: +- /* Conveniently, the LARCH names for these conditions are the same ++ /* Conveniently, the LoongArch names for these conditions are the same + as their RTL equivalents. */ + fputs (GET_RTX_NAME (code), file); + break; +@@ -5457,7 +5456,8 @@ loongarch_print_int_branch_condition (FILE *file, enum rtx_code code, int letter + /* Likewise floating-point branches. */ + + static void +-loongarch_print_float_branch_condition (FILE *file, enum rtx_code code, int letter) ++loongarch_print_float_branch_condition (FILE *file, enum rtx_code code, ++ int letter) + { + switch (code) + { +@@ -5487,20 +5487,22 @@ loongarch_print_operand_punct_valid_p (unsigned char code) + implement the release portion of memory model MODEL. 
*/ + + static bool +-loongarch_memmodel_needs_rel_and_acq_fence (enum memmodel model) ++loongarch_memmodel_needs_rel_acq_fence (enum memmodel model) + { + switch (model) + { + case MEMMODEL_ACQ_REL: + case MEMMODEL_SEQ_CST: + case MEMMODEL_SYNC_SEQ_CST: +- case MEMMODEL_RELEASE: + case MEMMODEL_SYNC_RELEASE: +- case MEMMODEL_ACQUIRE: +- case MEMMODEL_CONSUME: + case MEMMODEL_SYNC_ACQUIRE: + return true; + ++ case MEMMODEL_RELEASE: ++ case MEMMODEL_ACQUIRE: ++ case MEMMODEL_CONSUME: ++ if (!TARGET_uARCH_LA664) ++ return true; + case MEMMODEL_RELAXED: + return false; + +@@ -5517,25 +5519,25 @@ loongarch_memmodel_needs_release_fence (enum memmodel model) + { + switch (model) + { +- case MEMMODEL_ACQ_REL: +- case MEMMODEL_SEQ_CST: +- case MEMMODEL_SYNC_SEQ_CST: +- case MEMMODEL_RELEASE: +- case MEMMODEL_SYNC_RELEASE: +- return true; ++ case MEMMODEL_ACQ_REL: ++ case MEMMODEL_SEQ_CST: ++ case MEMMODEL_SYNC_SEQ_CST: ++ case MEMMODEL_RELEASE: ++ case MEMMODEL_SYNC_RELEASE: ++ return true; + +- case MEMMODEL_ACQUIRE: +- case MEMMODEL_CONSUME: +- case MEMMODEL_SYNC_ACQUIRE: +- case MEMMODEL_RELAXED: +- return false; ++ case MEMMODEL_ACQUIRE: ++ case MEMMODEL_CONSUME: ++ case MEMMODEL_SYNC_ACQUIRE: ++ case MEMMODEL_RELAXED: ++ return false; + +- default: +- gcc_unreachable (); ++ default: ++ gcc_unreachable (); + } + } + +-/* Implement TARGET_PRINT_OPERAND. The LARCH-specific operand codes are: ++/* Implement TARGET_PRINT_OPERAND. The LoongArch-specific operand codes are: + + 'E' Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal. + 'X' Print CONST_INT OP in hexadecimal format. +@@ -5674,7 +5676,8 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + break; + + case 'N': +- loongarch_print_int_branch_condition (file, reverse_condition (code), letter); ++ loongarch_print_int_branch_condition (file, reverse_condition (code), ++ letter); + break; + + case 'F': +@@ -5683,19 +5686,20 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + + case 'W': + loongarch_print_float_branch_condition (file, reverse_condition (code), +- letter); ++ letter); + break; + + case 'T': + case 't': + { + int truth = (code == NE) == (letter == 'T'); +- fputc ("zfnt"[truth * 2 + ST_REG_P (REGNO (XEXP (op, 0)))], file); ++ fputc ("zfnt"[truth * 2 + FCC_REG_P (REGNO (XEXP (op, 0)))], file); + } + break; + + case 'Y': +- if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (loongarch_fp_conditions)) ++ if (code == CONST_INT ++ && UINTVAL (op) < ARRAY_SIZE (loongarch_fp_conditions)) + fputs (loongarch_fp_conditions[UINTVAL (op)], file); + else + output_operand_lossage ("'%%%c' is not a valid operand prefix", +@@ -5750,18 +5754,36 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + break; + + case 'A': +- if (loongarch_memmodel_needs_rel_and_acq_fence ((enum memmodel) INTVAL (op))) ++ if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op))) + fputs ("_db", file); + break; + + case 'G': + if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op))) +- fputs ("dbar\t0", file); ++ fputs ("dbar\t0x11", file); ++ break; ++ ++ case 'J': ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (op)); ++ if (is_mm_release (model)) ++ fputs ("dbar\t0x12", file); ++ } ++ break; ++ ++ case 'K': ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (op)); ++ if (is_mm_acquire (model)) ++ fputs ("dbar\t0x18", file); ++ } + break; + + case 'i': + if (code != REG) +- fputs ("i", file); ++ fputs ("i", file); + break; + + 
default:
+@@ -5770,10 +5792,7 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ case REG:
+ {
+ unsigned int regno = REGNO (op);
+- if ((letter == 'M')
+- || letter == 'D')
+- regno++;
+- else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
++ if (letter && letter != 'z')
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ fprintf (file, "%s", reg_names[regno]);
+ }
+@@ -5781,8 +5800,8 @@
+
+ case MEM:
+ if (letter == 'D')
+- output_address (GET_MODE (op), plus_constant (Pmode,
+- XEXP (op, 0), 4));
++ output_address (GET_MODE (op),
++ plus_constant (Pmode, XEXP (op, 0), 4));
+ else if (letter == 'b')
+ {
+ gcc_assert (REG_P (XEXP (op, 0)));
+@@ -5809,7 +5828,7 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
+
+ static void
+-loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
++loongarch_print_operand_address (FILE *file, machine_mode /* mode */, rtx x)
+ {
+ struct loongarch_address_info addr;
+
+@@ -5821,6 +5840,11 @@ loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
+ loongarch_print_operand (file, addr.offset, 0);
+ return;
+
++ case ADDRESS_REG_REG:
++ fprintf (file, "%s,%s", reg_names[REGNO (addr.reg)],
++ reg_names[REGNO (addr.offset)]);
++ return;
++
+ case ADDRESS_CONST_INT:
+ fprintf (file, "%s,", reg_names[GP_REG_FIRST]);
+ output_addr_const (file, x);
+@@ -5830,37 +5854,17 @@ loongarch_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
+ output_addr_const (file, loongarch_strip_unspec_address (x));
+ return;
+ }
+- if (GET_CODE (x) == CONST_INT)
++ if (CONST_INT_P (x))
+ output_addr_const (file, x);
+ else
+ gcc_unreachable ();
+ }
+
+-
+-/* Implement TARGET_ENCODE_SECTION_INFO. */
+-
+-static void
+-loongarch_encode_section_info (tree decl, rtx rtl, int first)
+-{
+- default_encode_section_info (decl, rtl, first);
+-
+- if (TREE_CODE (decl) == FUNCTION_DECL)
+- {
+- rtx symbol = XEXP (rtl, 0);
+- tree type = TREE_TYPE (decl);
+-
+- /* Encode whether the symbol is short or long. */
+- if ((TARGET_LONG_CALLS && !loongarch_near_type_p (type))
+- || loongarch_far_type_p (type))
+- SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
+- }
+-}
+-
+-/* Implement TARGET_SELECT_RTX_SECTION. */
++/* Implement TARGET_ASM_SELECT_RTX_SECTION. */
+
+ static section *
+ loongarch_select_rtx_section (machine_mode mode, rtx x,
+- unsigned HOST_WIDE_INT align)
++ unsigned HOST_WIDE_INT align)
+ {
+ /* ??? Consider using mergeable small data sections. */
+ if (loongarch_rtx_constant_in_small_data_p (mode))
+@@ -5871,12 +5875,10 @@ loongarch_select_rtx_section (machine_mode mode, rtx x,
+
+ /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
+
+- The complication here is that, with the combination
+- !TARGET_ABSOLUTE_ABICALLS , jump tables will use
+- absolute addresses, and should therefore not be included in the
+- read-only part of a DSO. Handle such cases by selecting a normal
+- data section instead of a read-only one. The logic apes that in
+- default_function_rodata_section. */
++ The complication here is that jump tables will use absolute addresses,
++ and should therefore not be included in the read-only part of a DSO.
++ Handle such cases by selecting a normal data section instead of a
++ read-only one. The logic apes that in default_function_rodata_section. 
*/ + + static section * + loongarch_function_rodata_section (tree decl) +@@ -5889,17 +5891,11 @@ loongarch_function_rodata_section (tree decl) + static bool + loongarch_in_small_data_p (const_tree decl) + { +- unsigned HOST_WIDE_INT size; ++ int size; + + if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL) + return false; + +- /* We don't yet generate small-data references for +- VxWorks RTP code. See the related -G handling in +- loongarch_option_override. */ +- if (TARGET_VXWORKS_RTP) +- return false; +- + if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0) + { + const char *name; +@@ -5918,23 +5914,12 @@ loongarch_in_small_data_p (const_tree decl) + /* We have traditionally not treated zero-sized objects as small data, + so this is now effectively part of the ABI. */ + size = int_size_in_bytes (TREE_TYPE (decl)); +- return size > 0 && size <= loongarch_small_data_threshold; ++ return size > 0 && size <= g_switch_value; + } + +-/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use +- anchors for small data: the GP register acts as an anchor in that +- case. We also don't want to use them for PC-relative accesses, +- where the PC acts as an anchor. */ +- +-static bool +-loongarch_use_anchors_for_symbol_p (const_rtx symbol) +-{ +- return default_use_anchors_for_symbol_p (symbol); +-} +- +-/* The LARCH debug format wants all automatic variables and arguments ++/* The LoongArch debug format wants all automatic variables and arguments + to be in terms of the virtual frame pointer (stack pointer before +- any adjustment in the function), while the LARCH 3.0 linker wants ++ any adjustment in the function), while the LoongArch linker wants + the frame pointer to be the stack pointer after the initial + adjustment. So, we do the adjustment here. The arg pointer (which + is eliminated) points to the virtual frame pointer, while the frame +@@ -5961,7 +5946,7 @@ loongarch_debugger_offset (rtx addr, HOST_WIDE_INT offset) + + return offset; + } +- ++ + /* Implement ASM_OUTPUT_EXTERNAL. */ + + void +@@ -5971,7 +5956,7 @@ loongarch_output_external (FILE *file, tree decl, const char *name) + + /* We output the name if and only if TREE_SYMBOL_REFERENCED is + set in order to avoid putting out names that are never really +- used. */ ++ used. */ + if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) + { + if (loongarch_in_small_data_p (decl)) +@@ -6014,33 +5999,6 @@ loongarch_output_dwarf_dtprel (FILE *file, int size, rtx x) + fputs ("+0x8000", file); + } + +-/* Implement TARGET_DWARF_REGISTER_SPAN. */ +- +-static rtx +-loongarch_dwarf_register_span (rtx reg) +-{ +- rtx high, low; +- machine_mode mode; +- +- mode = GET_MODE (reg); +- +- return NULL_RTX; +-} +- +-/* Implement TARGET_DWARF_FRAME_REG_MODE. */ +- +-static machine_mode +-loongarch_dwarf_frame_reg_mode (int regno) +-{ +- machine_mode mode = default_dwarf_frame_reg_mode (regno); +- +- if (FP_REG_P (regno) && loongarch_abi == ABILP32 && TARGET_FLOAT64) +- mode = SImode; +- +- return mode; +-} +- +- + /* Implement ASM_OUTPUT_ASCII. */ + + void +@@ -6072,7 +6030,7 @@ loongarch_output_ascii (FILE *stream, const char *string, size_t len) + cur_pos += 4; + } + +- if (cur_pos > 72 && i+1 < len) ++ if (cur_pos > 72 && i + 1 < len) + { + cur_pos = 17; + fprintf (stream, "\"\n\t.ascii\t\""); +@@ -6081,194 +6039,6 @@ loongarch_output_ascii (FILE *stream, const char *string, size_t len) + fprintf (stream, "\"\n"); + } + +-/* Emit either a label, .comm, or .lcomm directive. 
When using assembler +- macros, mark the symbol as written so that loongarch_asm_output_external +- won't emit an .extern for it. STREAM is the output file, NAME is the +- name of the symbol, INIT_STRING is the string that should be written +- before the symbol and FINAL_STRING is the string that should be +- written after it. FINAL_STRING is a printf format that consumes the +- remaining arguments. */ +- +-void +-loongarch_declare_object (FILE *stream, const char *name, const char *init_string, +- const char *final_string, ...) +-{ +- va_list ap; +- +- fputs (init_string, stream); +- assemble_name (stream, name); +- va_start (ap, final_string); +- vfprintf (stream, final_string, ap); +- va_end (ap); +- +- tree name_tree = get_identifier (name); +- TREE_ASM_WRITTEN (name_tree) = 1; +-} +- +-/* Declare a common object of SIZE bytes using asm directive INIT_STRING. +- NAME is the name of the object and ALIGN is the required alignment +- in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third +- alignment argument. */ +- +-void +-loongarch_declare_common_object (FILE *stream, const char *name, +- const char *init_string, +- unsigned HOST_WIDE_INT size, +- unsigned int align, bool takes_alignment_p) +-{ +- if (!takes_alignment_p) +- { +- size += (align / BITS_PER_UNIT) - 1; +- size -= size % (align / BITS_PER_UNIT); +- loongarch_declare_object (stream, name, init_string, +- "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size); +- } +- else +- loongarch_declare_object (stream, name, init_string, +- "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", +- size, align / BITS_PER_UNIT); +-} +- +-/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the +- elfos.h version, but we also need to handle -muninit-const-in-rodata. */ +- +-void +-loongarch_output_aligned_decl_common (FILE *stream, tree decl, const char *name, +- unsigned HOST_WIDE_INT size, +- unsigned int align) +-{ +- loongarch_declare_common_object (stream, name, "\n\t.comm\t", +- size, align, true); +-} +- +-#ifdef ASM_OUTPUT_SIZE_DIRECTIVE +-extern int size_directive_output; +- +-/* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF +- definitions except that it uses loongarch_declare_object to emit the label. */ +- +-void +-loongarch_declare_object_name (FILE *stream, const char *name, +- tree decl ATTRIBUTE_UNUSED) +-{ +-#ifdef ASM_OUTPUT_TYPE_DIRECTIVE +-#ifdef USE_GNU_UNIQUE_OBJECT +- /* As in elfos.h. */ +- if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (decl) +- && (!DECL_ARTIFICIAL (decl) || !TREE_READONLY (decl))) +- ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "gnu_unique_object"); +- else +-#endif +- ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); +-#endif +- +- size_directive_output = 0; +- if (!flag_inhibit_size_directive && DECL_SIZE (decl)) +- { +- HOST_WIDE_INT size; +- +- size_directive_output = 1; +- size = int_size_in_bytes (TREE_TYPE (decl)); +- ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); +- } +- +- loongarch_declare_object (stream, name, "", ":\n"); +-} +- +-/* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. 
*/ +- +-void +-loongarch_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end) +-{ +- const char *name; +- +- name = XSTR (XEXP (DECL_RTL (decl), 0), 0); +- if (!flag_inhibit_size_directive +- && DECL_SIZE (decl) != 0 +- && !at_end +- && top_level +- && DECL_INITIAL (decl) == error_mark_node +- && !size_directive_output) +- { +- HOST_WIDE_INT size; +- +- size_directive_output = 1; +- size = int_size_in_bytes (TREE_TYPE (decl)); +- ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); +- } +-} +-#endif +- +-/* Mark text contents as code or data, mainly for the purpose of correct +- disassembly. Emit a local symbol and set its type appropriately for +- that purpose. Also emit `.insn' if marking contents as code so that +- the ISA mode is recorded and any padding that follows is disassembled +- as correct instructions. */ +- +-void +-loongarch_set_text_contents_type (FILE *file ATTRIBUTE_UNUSED, +- const char *prefix ATTRIBUTE_UNUSED, +- unsigned long num ATTRIBUTE_UNUSED, +- bool function_p ATTRIBUTE_UNUSED) +-{ +-#ifdef ASM_OUTPUT_TYPE_DIRECTIVE +- char buf[(sizeof (num) * 10) / 4 + 2]; +- const char *fnname; +- char *sname; +- rtx symbol; +- +- sprintf (buf, "%lu", num); +- symbol = XEXP (DECL_RTL (current_function_decl), 0); +- fnname = targetm.strip_name_encoding (XSTR (symbol, 0)); +- sname = ACONCAT ((prefix, fnname, "_", buf, NULL)); +- +- ASM_OUTPUT_TYPE_DIRECTIVE (file, sname, function_p ? "function" : "object"); +- assemble_name (file, sname); +- fputs (":\n", file); +-// if (function_p) +-// fputs ("\t.insn\n", file); +-#endif +-} +- +- +-/* Implement TARGET_ASM_FILE_START. */ +- +-static void +-loongarch_file_start (void) +-{ +- default_file_start (); +- +- /* Generate a special section to describe the ABI switches used to +- produce the resultant binary. */ +-} +- +- +-/* Return true if REGNO is a register that is ordinarily call-clobbered +- but must nevertheless be preserved by an interrupt handler. */ +- +-static bool +-loongarch_interrupt_extra_call_saved_reg_p (unsigned int regno) +-{ +- if (GP_REG_P (regno) +- && cfun->machine->use_shadow_register_set == SHADOW_SET_NO) +- { +- /* $0 is hard-wired. */ +- if (regno == GP_REG_FIRST) +- return false; +- +- /* The function will return the stack pointer to its original value +- anyway. */ +- if (regno == STACK_POINTER_REGNUM) +- return false; +- +- /* Otherwise, return true for registers that aren't ordinarily +- call-clobbered. */ +- return call_used_regs[regno]; +- } +- +- return false; +-} +- + /* Implement TARGET_FRAME_POINTER_REQUIRED. */ + + static bool +@@ -6282,17 +6052,20 @@ loongarch_frame_pointer_required (void) + return false; + } + +-/* Make sure that we're not trying to eliminate to the wrong hard frame +- pointer. */ ++/* Implement TARGET_CAN_ELIMINATE. Make sure that we're not trying ++ to eliminate to the wrong hard frame pointer. */ + + static bool + loongarch_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) + { ++ if (stack_realign_fp) ++ return ((from == ARG_POINTER_REGNUM ++ && to == HARD_FRAME_POINTER_REGNUM) ++ || (from == FRAME_POINTER_REGNUM ++ && to == STACK_POINTER_REGNUM)); + return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM); + } + +- +- + /* Implement RETURN_ADDR_RTX. We do not support moving back to a + previous frame. 
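+
+ In source terms, __builtin_return_address (0) is supported, while a
+ nonzero argument such as
+
+   void *outer = __builtin_return_address (1);
+
+ falls under the restriction above and, as on most RISC ports, can
+ presumably only yield a useless (typically zero) value.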
*/ + +@@ -6315,73 +6088,21 @@ loongarch_set_return_address (rtx address, rtx scratch) + rtx slot_address; + + gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM)); ++ + if (frame_pointer_needed) + slot_address = loongarch_add_offset (scratch, hard_frame_pointer_rtx, +- -UNITS_PER_WORD); ++ -UNITS_PER_WORD); + else + slot_address = loongarch_add_offset (scratch, stack_pointer_rtx, +- cfun->machine->frame.gp_sp_offset); +- loongarch_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address); +-} +- +- +-/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the +- cprestore slot. LOAD_P is true if the caller wants to load from +- the cprestore slot; it is false if the caller wants to store to +- the slot. */ +- +-static void +-loongarch_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset, +- bool load_p) +-{ +- const struct loongarch_frame_info *frame; +- +- frame = &cfun->machine->frame; +- /* .cprestore always uses the stack pointer instead of the frame pointer. +- We have a free choice for direct stores, +- Using the stack pointer would sometimes give more +- (early) scheduling freedom, but using the frame pointer would +- sometimes give more (late) scheduling freedom. It's hard to +- predict which applies to a given function, so let's keep things +- simple. +- +- Loads must always use the frame pointer in functions that call +- alloca, and there's little benefit to using the stack pointer +- otherwise. */ +- if (frame_pointer_needed) +- { +- *base = hard_frame_pointer_rtx; +- *offset = frame->args_size - frame->hard_frame_pointer_offset; +- } +- else +- { +- *base = stack_pointer_rtx; +- *offset = frame->args_size; +- } +-} ++ cfun->machine->frame.gp_sp_offset); + +-/* Return true if X is the load or store address of the cprestore slot; +- LOAD_P says which. */ +- +-bool +-loongarch_cprestore_address_p (rtx x, bool load_p) +-{ +- rtx given_base, required_base; +- HOST_WIDE_INT given_offset, required_offset; +- +- loongarch_split_plus (x, &given_base, &given_offset); +- loongarch_get_cprestore_base_and_offset (&required_base, &required_offset, load_p); +- return given_base == required_base && given_offset == required_offset; ++ loongarch_emit_move (gen_frame_mem (GET_MODE (address), slot_address), ++ address); + } + +- +-/* A function to save or store a register. The first argument is the +- register and the second is the stack slot. 
*/ +-typedef void (*loongarch_save_restore_fn) (rtx, rtx); +- + /* LOONGSON LA464 Emit insn pattern for gssq and gslq*/ + void +-loongarch_la464_emit_128bit_load(rtx operands[]) ++loongarch_la464_emit_128bit_load (rtx operands[]) + { + rtx op0; + rtx op1; +@@ -6389,9 +6110,9 @@ loongarch_la464_emit_128bit_load(rtx operands[]) + rtx op3; + + #if 0 /*for debug*/ +- printf("464po: emit 128 PO LOAD!\n"); +- printf("reg num of op0 is: %d\n",REGNO(operands[0])); +- printf("reg num of op2 is: %d\n",REGNO(operands[2])); ++ printf ("464po: emit 128 PO LOAD!\n"); ++ printf ("reg num of op0 is: %d\n",REGNO (operands[0])); ++ printf ("reg num of op2 is: %d\n",REGNO (operands[2])); + #endif + op0 = gen_rtx_REG (GET_MODE (operands[0]), REGNO (operands[0])); + op1 = operands[1]; +@@ -6403,8 +6124,8 @@ loongarch_la464_emit_128bit_load(rtx operands[]) + gen_rtx_SET (op2,op3)))); + } + +-void +-loongarch_la464_emit_128bit_store(rtx operands[]) ++void ++loongarch_la464_emit_128bit_store (rtx operands[]) + { + rtx op0; + rtx op1; +@@ -6412,10 +6133,10 @@ loongarch_la464_emit_128bit_store(rtx operands[]) + rtx op3; + + #if 0 /*for debug*/ +- printf("464po: emit 128 PO STORE!\n"); +- printf("reg num of op1 is: %d\n",REGNO(operands[1])); +- printf("reg num of op3 is: %d\n",REGNO(operands[3])); +-#endif ++ printf ("464po: emit 128 PO STORE!\n"); ++ printf ("reg num of op1 is: %d\n",REGNO (operands[1])); ++ printf ("reg num of op3 is: %d\n",REGNO (operands[3])); ++#endif + op0 = operands[0]; + op1 = gen_rtx_REG (GET_MODE (operands[1]), REGNO (operands[1])); + op2 = operands[2]; +@@ -6427,405 +6148,109 @@ loongarch_la464_emit_128bit_store(rtx operands[]) + + } + ++/* Return true if register REGNO can store a value of mode MODE. ++ The result of this function is cached in loongarch_hard_regno_mode_ok. */ + +- ++static bool ++loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) ++{ ++ unsigned int size; ++ enum mode_class mclass; + +-/* Implement ASM_DECLARE_FUNCTION_NAME. */ ++ if (mode == FCCmode) ++ return FCC_REG_P (regno); + +-void loongarch_declare_function_name(FILE *stream ATTRIBUTE_UNUSED, +- const char *name, tree fndecl ATTRIBUTE_UNUSED) +-{ +- loongarch_start_function_definition (name); +-} ++ size = GET_MODE_SIZE (mode); ++ mclass = GET_MODE_CLASS (mode); + +-/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */ ++ if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) && !LASX_SUPPORTED_MODE_P (mode)) ++ return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; + +-static void +-loongarch_output_function_prologue (FILE *file) +-{ ++ /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ ++ if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) ++ return true; ++ ++ /* For LASX, allow TImode and 256-bit vector modes in all FPR. FIXME: */ ++ if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) ++ return true; ++ ++ if (FP_REG_P (regno)) ++ { ++ if (mclass == MODE_FLOAT ++ || mclass == MODE_COMPLEX_FLOAT ++ || mclass == MODE_VECTOR_FLOAT) ++ return size <= UNITS_PER_FPVALUE; ++ ++ /* Allow integer modes that fit into a single register. We need ++ to put integers into FPRs when using instructions like CVT ++ and TRUNC. There's no point allowing sizes smaller than a word, ++ because the FPU has no appropriate load/store instructions. */ ++ if (mclass == MODE_INT) ++ return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; ++ } ++ ++ return false; + } + +-/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */ ++/* Implement TARGET_HARD_REGNO_MODE_OK. 
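++
++ This hook is just a table lookup; the table would be filled in once
++ at initialization, along these lines (a minimal sketch that mirrors
++ the uncached predicate above; the real loop presumably lives near
++ the option-override code, which is not part of this hunk):
++
++   for (int mode = 0; mode < MAX_MACHINE_MODE; mode++)
++     for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
++       loongarch_hard_regno_mode_ok_p[mode][regno]
++         = loongarch_hard_regno_mode_ok_uncached (regno,
++                                                  (machine_mode) mode);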
*/ + +-static void +-loongarch_output_function_epilogue (FILE *) ++static bool ++loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) + { +- const char *fnname; +- +- /* Get the function name the same way that toplev.c does before calling +- assemble_start_function. This is needed so that the name used here +- exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */ +- fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); +- loongarch_end_function_definition (fnname); ++ return loongarch_hard_regno_mode_ok_p[mode][regno]; + } +- + +-#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) ++static bool ++loongarch_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode) ++{ ++ if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) ++ return true; + +-#if PROBE_INTERVAL > 16384 +-#error Cannot use indexed addressing mode for stack probing +-#endif ++ return false; ++} + +-/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE, +- inclusive. These are offsets from the current stack pointer. */ ++/* Implement TARGET_HARD_REGNO_NREGS. */ + +-static void +-loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size) ++static unsigned int ++loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) + { ++ if (FCC_REG_P (regno)) ++ /* The size of FP status registers is always 4, because they only hold ++ FCCmode values, and FCCmode is always considered to be 4 bytes wide. */ ++ return (GET_MODE_SIZE (mode) + 3) / 4; + +- /* See if we have a constant small number of probes to generate. If so, +- that's the easy case. */ +- if ((TARGET_64BIT && (first + size <= 8 * PROBE_INTERVAL)) +- || (!TARGET_64BIT && (first + size <= 2048))) ++ if (FP_REG_P (regno)) + { +- HOST_WIDE_INT i; ++ if (LSX_SUPPORTED_MODE_P (mode)) ++ return 1; + +- /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until +- it exceeds SIZE. If only one probe is needed, this will not +- generate any code. Then probe at FIRST + SIZE. */ +- for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL) +- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, +- -(first + i))); ++ if (LASX_SUPPORTED_MODE_P (mode)) ++ return 1; + +- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx, +- -(first + size))); ++ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; + } + +- /* Otherwise, do the same as above, but in a loop. Note that we must be +- extra careful with variables wrapping around because we might be at +- the very top (or the very bottom) of the address space and we have +- to be able to handle this case properly; in particular, we use an +- equality test for the loop condition. */ +- else +- { +- HOST_WIDE_INT rounded_size; +- rtx r13 = LARCH_PROLOGUE_TEMP (Pmode); +- rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode); +- rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode); ++ /* All other registers are word-sized. */ ++ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; ++} + +- /* Sanity check for the addressing mode we're going to use. */ +- gcc_assert (first <= 16384); ++/* Implement CLASS_MAX_NREGS, taking the maximum of the cases ++ in loongarch_hard_regno_nregs. */ + ++int ++loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) ++{ ++ int size; ++ HARD_REG_SET left; + +- /* Step 1: round SIZE to the previous multiple of the interval. 
*/ ++ size = 0x8000; ++ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); ++ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FCC_REGS])) ++ { ++ if (loongarch_hard_regno_mode_ok (FCC_REG_FIRST, mode)) ++ size = MIN (size, 4); + +- rounded_size = ROUND_DOWN (size, PROBE_INTERVAL); +- /* TEST_ADDR = SP + FIRST */ +- if (first != 0) +- { +- emit_move_insn (r14, GEN_INT (first)); +- emit_insn (gen_rtx_SET (r13, gen_rtx_MINUS (Pmode, stack_pointer_rtx, r14))); +- } +- else +- emit_move_insn (r13, stack_pointer_rtx); +- +- /* Step 2: compute initial and final value of the loop counter. */ +- +- emit_move_insn (r14, GEN_INT (PROBE_INTERVAL)); +- if (rounded_size == 0) +- emit_move_insn (r12, r13); +- /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ +- else +- { +- emit_move_insn (r12, GEN_INT (rounded_size)); +- emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r13, r12))); +- /* Step 3: the loop +- +- do +- { +- TEST_ADDR = TEST_ADDR + PROBE_INTERVAL +- probe at TEST_ADDR +- } +- while (TEST_ADDR != LAST_ADDR) +- +- probes at FIRST + N * PROBE_INTERVAL for values of N from 1 +- until it is equal to ROUNDED_SIZE. */ +- +- emit_insn (PMODE_INSN (gen_probe_stack_range, (r13, r13, r12, r14))); +- } +- +- /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time +- that SIZE is equal to ROUNDED_SIZE. */ +- +- if (size != rounded_size) +- { +- if (TARGET_64BIT) +- emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size)); +- else +- { +- HOST_WIDE_INT i; +- for (i = 2048; i < (size - rounded_size); i += 2048 ) +- { +- emit_stack_probe (plus_constant (Pmode, r12, -i)); +- emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, r12, -2048))); +- } +- emit_stack_probe (plus_constant (Pmode, r12, -(size - rounded_size - i + 2048))); +- } +- } +- } +- +- /* Make sure nothing is scheduled before we are done. */ +- emit_insn (gen_blockage ()); +-} +- +-/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are +- absolute addresses. */ +- +-const char * +-loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3) +-{ +- static int labelno = 0; +- char loop_lab[32], tmp[64]; +- rtx xops[3]; +- +- ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++); +- +- /* Loop. */ +- ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab); +- +- /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ +- xops[0] = reg1; +- xops[1] = GEN_INT (-PROBE_INTERVAL); +- xops[2] = reg3; +- if (TARGET_64BIT) +- output_asm_insn ("sub.d\t%0,%0,%2", xops); +- else +- output_asm_insn ("sub.w\t%0,%0,%2", xops); +- +- /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch. */ +- xops[1] = reg2; +- strcpy (tmp, "bne\t%0,%1,"); +- if (TARGET_64BIT) +- output_asm_insn ("st.d\t$r0,%0,0", xops); +- else +- output_asm_insn ("st.w\t$r0,%0,0", xops); +- output_asm_insn (strcat (tmp, &loop_lab[1]), xops); +- +- return ""; +-} +- +-/* Expand the "prologue" pattern. 
*/ +- +-void +-loongarch_expand_prologue (void) +-{ +- struct loongarch_frame_info *frame = &cfun->machine->frame; +- HOST_WIDE_INT size = frame->total_size; +- unsigned mask = frame->mask; +- rtx insn; +- +- if (flag_stack_usage_info) +- current_function_static_stack_size = size; +- +- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK +- || flag_stack_clash_protection) +- { +- if (crtl->is_leaf && !cfun->calls_alloca) +- { +- if (size > PROBE_INTERVAL && size > get_stack_check_protect ()) +- loongarch_emit_probe_stack_range (get_stack_check_protect (), +- size - get_stack_check_protect ()); +- } +- else if (size > 0) +- loongarch_emit_probe_stack_range (get_stack_check_protect (), size); +- } +- +- /* When optimizing for size, call a subroutine to save the registers. */ +- if (loongarch_use_save_libcall (frame)) +- { +- rtx dwarf = NULL_RTX; +- dwarf = loongarch_adjust_libcall_cfi_prologue (); +- +- frame->mask = 0; /* Temporarily fib that we need not save GPRs. */ +- size -= frame->save_libcall_adjustment; +- insn = emit_insn (gen_gpr_save (GEN_INT (mask))); +- +- RTX_FRAME_RELATED_P (insn) = 1; +- REG_NOTES (insn) = dwarf; +- } +- +- /* Save the registers. */ +- if ((frame->mask | frame->fmask) != 0) +- { +- HOST_WIDE_INT step1 = MIN (size, loongarch_first_stack_step (frame)); +- +- insn = gen_add3_insn (stack_pointer_rtx, +- stack_pointer_rtx, +- GEN_INT (-step1)); +- RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; +- size -= step1; +- loongarch_for_each_saved_reg (size, loongarch_save_reg); +- } +- +- frame->mask = mask; /* Undo the above fib. */ +- +- /* Set up the frame pointer, if we're using one. */ +- if (frame_pointer_needed) +- { +- insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, +- GEN_INT (frame->hard_frame_pointer_offset - size)); +- RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; +- +- loongarch_emit_stack_tie (); +- } +- +- /* Allocate the rest of the frame. */ +- if (size > 0) +- { +- if (SMALL_OPERAND (-size)) +- { +- insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, +- GEN_INT (-size)); +- RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; +- } +- else +- { +- loongarch_emit_move (N_LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size)); +- emit_insn (gen_add3_insn (stack_pointer_rtx, +- stack_pointer_rtx, +- N_LARCH_PROLOGUE_TEMP (Pmode))); +- +- /* Describe the effect of the previous instructions. */ +- insn = plus_constant (Pmode, stack_pointer_rtx, -size); +- insn = gen_rtx_SET (stack_pointer_rtx, insn); +- loongarch_set_frame_expr (insn); +- } +- } +-} +- +- +-/* Return true if register REGNO can store a value of mode MODE. +- The result of this function is cached in loongarch_hard_regno_mode_ok. */ +- +-static bool +-loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode) +-{ +- unsigned int size; +- enum mode_class mclass; +- +- if (mode == FCCmode) +- return ST_REG_P (regno); +- +- size = GET_MODE_SIZE (mode); +- mclass = GET_MODE_CLASS (mode); +- +- if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode) && !LASX_SUPPORTED_MODE_P (mode)) +- return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; +- +- /* For LSX, allow TImode and 128-bit vector modes in all FPR. */ +- if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode)) +- return true; +- +- /* For LASX, allow TImode and 256-bit vector modes in all FPR. 
FIXME: */ +- if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode)) +- return true; +- +- if (FP_REG_P (regno) +- && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0 +- || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG))) +- { +- if (mclass == MODE_FLOAT +- || mclass == MODE_COMPLEX_FLOAT +- || mclass == MODE_VECTOR_FLOAT) +- return size <= UNITS_PER_FPVALUE; +- +- /* Allow integer modes that fit into a single register. We need +- to put integers into FPRs when using instructions like CVT +- and TRUNC. There's no point allowing sizes smaller than a word, +- because the FPU has no appropriate load/store instructions. */ +- if (mclass == MODE_INT) +- return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; +- } +- +- return false; +-} +- +-/* Implement TARGET_HARD_REGNO_MODE_OK. */ +- +-static bool +-loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode) +-{ +- return loongarch_hard_regno_mode_ok_p[mode][regno]; +-} +- +-/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */ +- +-bool +-loongarch_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED, +- unsigned int new_reg) +-{ +- /* Interrupt functions can only use registers that have already been +- saved by the prologue, even if they would normally be call-clobbered. */ +- if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (new_reg)) +- return false; +- +- return true; +-} +- +-/* Return nonzero if register REGNO can be used as a scratch register +- in peephole2. */ +- +-bool +-loongarch_hard_regno_scratch_ok (unsigned int regno) +-{ +- /* See loongarch_hard_regno_rename_ok. */ +- if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (regno)) +- return false; +- +- return true; +-} +- +-static bool +-loongarch_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode) +-{ +- if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8) +- return true; +- +- return false; +-} +- +-/* Implement TARGET_HARD_REGNO_NREGS. */ +- +-static unsigned int +-loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode) +-{ +- if (ST_REG_P (regno)) +- /* The size of FP status registers is always 4, because they only hold +- FCCmode values, and FCCmode is always considered to be 4 bytes wide. */ +- return (GET_MODE_SIZE (mode) + 3) / 4; +- +- if (FP_REG_P (regno)) +- { +- if (LSX_SUPPORTED_MODE_P (mode)) +- return 1; +- +- if (LASX_SUPPORTED_MODE_P (mode)) +- return 1; +- +- return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; +- } +- +- /* All other registers are word-sized. */ +- return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; +-} +- +-/* Implement CLASS_MAX_NREGS, taking the maximum of the cases +- in loongarch_hard_regno_nregs. */ +- +-int +-loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) +-{ +- int size; +- HARD_REG_SET left; +- +- size = 0x8000; +- COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); +- if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS])) +- { +- if (loongarch_hard_regno_mode_ok (ST_REG_FIRST, mode)) +- size = MIN (size, 4); +- +- AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]); ++ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FCC_REGS]); + } + if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS])) + { +@@ -6849,8 +6274,8 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode) + /* Implement TARGET_CAN_CHANGE_MODE_CLASS. 
*/ + + static bool +-loongarch_can_change_mode_class (machine_mode from, +- machine_mode to, reg_class_t rclass) ++loongarch_can_change_mode_class (machine_mode from, machine_mode to, ++ reg_class_t rclass) + { + /* Allow conversions between different Loongson integer vectors, + and between those vectors and DImode. */ +@@ -6866,42 +6291,10 @@ loongarch_can_change_mode_class (machine_mode from, + if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) + return true; + +- /* Otherwise, there are several problems with changing the modes of +- values in floating-point registers: +- +- - When a multi-word value is stored in paired floating-point +- registers, the first register always holds the low word. We +- therefore can't allow FPRs to change between single-word and +- multi-word modes on big-endian targets. +- +- - GCC assumes that each word of a multiword register can be +- accessed individually using SUBREGs. This is not true for +- floating-point registers if they are bigger than a word. +- +- - Loading a 32-bit value into a 64-bit floating-point register +- will not sign-extend the value, despite what LOAD_EXTEND_OP +- says. We can't allow FPRs to change from SImode to a wider +- mode on 64-bit targets. +- +- - If the FPU has already interpreted a value in one format, we +- must not ask it to treat the value as having a different +- format. +- +- We therefore disallow all mode changes involving FPRs. */ +- + return !reg_classes_intersect_p (FP_REGS, rclass); + } + +-/* Implement target hook small_register_classes_for_mode_p. */ +- +-static bool +-loongarch_small_register_classes_for_mode_p (machine_mode mode +- ATTRIBUTE_UNUSED) +-{ +- return 0; +-} +- +-/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction, ++/* Return true if moves in mode MODE can use the FPU's fmov.fmt instruction, + or use the LSX's move.v instruction. */ + + static bool +@@ -6909,6 +6302,7 @@ loongarch_mode_ok_for_mov_fmt_p (machine_mode mode) + { + switch (mode) + { ++ case E_FCCmode: + case E_SFmode: + return TARGET_HARD_FLOAT; + +@@ -6976,7 +6370,7 @@ loongarch_move_to_gpr_cost (reg_class_t from) + return 2; + + case FP_REGS: +- /* MFC1, etc. */ ++ /* MOVFR2GR, etc. */ + return 4; + + default: +@@ -6998,7 +6392,7 @@ loongarch_move_from_gpr_cost (reg_class_t to) + return 2; + + case FP_REGS: +- /* MTC1, etc. */ ++ /* MOVGR2FR, etc. */ + return 4; + + default: +@@ -7011,8 +6405,8 @@ loongarch_move_from_gpr_cost (reg_class_t to) + the maximum for us. */ + + static int +-loongarch_register_move_cost (machine_mode mode, +- reg_class_t from, reg_class_t to) ++loongarch_register_move_cost (machine_mode mode, reg_class_t from, ++ reg_class_t to) + { + reg_class_t dregs; + int cost1, cost2; +@@ -7024,7 +6418,7 @@ loongarch_register_move_cost (machine_mode mode, + if (from == FP_REGS) + { + if (to == FP_REGS && loongarch_mode_ok_for_mov_fmt_p (mode)) +- /* MOV.FMT. */ ++ /* FMOV.FMT. */ + return 4; + } + +@@ -7054,28 +6448,6 @@ loongarch_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in) + { + return (loongarch_cost->memory_latency + + memory_move_secondary_cost (mode, rclass, in)); +-} +- +-/* Implement TARGET_SECONDARY_MEMORY_NEEDED. +- +- When targeting the o32 FPXX ABI, all moves with a length of doubleword +- or greater must be performed by FR-mode-aware instructions. +- This can be achieved using MOVFRH2GR.S/MOVGR2FRH.W when these instructions are +- available but otherwise moves must go via memory. 
+- Using MOVGR2FR/MOVFR2GR to access the lower-half of these registers would require +- a forbidden single-precision access. We require all double-word moves to use +- memory because adding even and odd floating-point registers classes +- would have a significant impact on the backend. */ +- +-static bool +-loongarch_secondary_memory_needed (machine_mode mode, reg_class_t class1, +- reg_class_t class2) +-{ +- /* Ignore spilled pseudos. */ +- if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS)) +- return false; +- +- return false; + } + + /* Return the register class required for a secondary register when +@@ -7084,9 +6456,10 @@ loongarch_secondary_memory_needed (machine_mode mode, reg_class_t class1, + is the destination. Return NO_REGS if no secondary register is + needed. */ + +-enum reg_class +-loongarch_secondary_reload_class (enum reg_class rclass, +- machine_mode mode, rtx x, bool) ++static reg_class_t ++loongarch_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x, ++ reg_class_t rclass, machine_mode mode, ++ secondary_reload_info *sri ATTRIBUTE_UNUSED) + { + int regno; + +@@ -7094,15 +6467,12 @@ loongarch_secondary_reload_class (enum reg_class rclass, + + /* Copying from accumulator registers to anywhere other than a general + register requires a temporary general register. */ +-// if (reg_class_subset_p (rclass, ACC_REGS)) ?????? +-// return GP_REG_P (regno) ? NO_REGS : GR_REGS; + if (reg_class_subset_p (rclass, FP_REGS)) + { + if (regno < 0 + || (MEM_P (x) + && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))) +- /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use +- pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */ ++ /* In this case we can use fld.s, fst.s, fld.d or fst.d. */ + return NO_REGS; + + if (MEM_P (x) && LSX_SUPPORTED_MODE_P (mode)) +@@ -7110,17 +6480,18 @@ loongarch_secondary_reload_class (enum reg_class rclass, + return NO_REGS; + + if (GP_REG_P (regno) || x == CONST0_RTX (mode)) +- /* In this case we can use movgr2fr.s, movfr2gr.s, movgr2fr.d or movfr2gr.d. */ ++ /* In this case we can use movgr2fr.s, movfr2gr.s, movgr2fr.d or ++ * movfr2gr.d. */ + return NO_REGS; + + if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x)) +- /* We can force the constant to memory and use lwc1 +- and ldc1. As above, we will use pairs of lwc1s if ++ /* We can force the constant to memory and use fld.s ++ and fld.d. As above, we will use pairs of lwc1s if + ldc1 is not supported. */ + return NO_REGS; + + if (FP_REG_P (regno) && loongarch_mode_ok_for_mov_fmt_p (mode)) +- /* In this case we can use mov.fmt. */ ++ /* In this case we can use fmov.{s/d}. */ + return NO_REGS; + + /* Otherwise, we need to reload through an integer register. */ +@@ -7132,7 +6503,19 @@ loongarch_secondary_reload_class (enum reg_class rclass, + return NO_REGS; + } + +- ++/* Implement TARGET_MODE_REP_EXTENDED */ ++ ++static int ++loongarch_mode_rep_extended (scalar_int_mode mode, scalar_int_mode mode_rep) ++{ ++ /* On 64-bit targets, SImode register values are sign-extended to DImode. */ ++ if (TARGET_64BIT && mode == SImode && mode_rep == DImode) ++ return SIGN_EXTEND; ++ ++ return UNKNOWN; ++} ++ ++ + /* Implement TARGET_VALID_POINTER_MODE. */ + + static bool +@@ -7160,7 +6543,7 @@ loongarch_scalar_mode_supported_p (scalar_mode mode) + + return default_scalar_mode_supported_p (mode); + } +- ++ + /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. 
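+
+ For a loop over 32-bit elements such as
+
+   for (i = 0; i < n; i++)
+     a[i] = b[i] + c[i];
+
+ the natural choice (an assumption consistent with the 128-bit LSX
+ and 256-bit LASX vector modes handled elsewhere in this file) would
+ be V4SImode under -mlsx and V8SImode under -mlasx, with word_mode
+ as the scalar fallback.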
*/ + + static machine_mode +@@ -7233,17 +6616,15 @@ loongarch_adjust_insn_length (rtx_insn *insn, int length) + length += 4; + + /* See how many nops might be needed to avoid hardware hazards. */ +- if (!cfun->machine->ignore_hazard_length_p +- && INSN_P (insn) ++ if (INSN_P (insn) + && INSN_CODE (insn) >= 0) + switch (get_attr_hazard (insn)) + { + case HAZARD_NONE: + break; + +- case HAZARD_DELAY: + case HAZARD_FORBIDDEN_SLOT: +- length += NOP_INSN_LENGTH; ++ length += 4; + break; + } + +@@ -7258,8 +6639,8 @@ loongarch_adjust_insn_length (rtx_insn *insn, int length) + + const char * + loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, +- const char *branch_if_true, +- const char *branch_if_false) ++ const char *branch_if_true, ++ const char *branch_if_false) + { + unsigned int length; + rtx taken; +@@ -7272,8 +6653,7 @@ loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, + return branch_if_true; + } + +- /* Generate a reversed branch around a direct jump. This fallback does +- not use branch-likely instructions. */ ++ /* Generate a reversed branch around a direct jump. */ + rtx_code_label *not_taken = gen_label_rtx (); + taken = operands[0]; + +@@ -7281,37 +6661,7 @@ loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, + operands[0] = not_taken; + output_asm_insn (branch_if_false, operands); + +- /* If INSN has a delay slot, we must provide delay slots for both the +- branch to NOT_TAKEN and the conditional jump. We must also ensure +- that INSN's delay slot is executed in the appropriate cases. */ +- if (final_sequence) +- { +- /* This first delay slot will always be executed, so use INSN's +- delay slot if is not annulled. */ +- if (!INSN_ANNULLED_BRANCH_P (insn)) +- { +- final_scan_insn (final_sequence->insn (1), +- asm_out_file, optimize, 1, NULL); +- final_sequence->insn (1)->set_deleted (); +- } +- fprintf (asm_out_file, "\n"); +- } +- +- output_asm_insn (LARCH_ABSOLUTE_JUMP ("b\t%0"), &taken); +- +- /* Now deal with its delay slot; see above. */ +- if (final_sequence) +- { +- /* This delay slot will only be executed if the branch is taken. +- Use INSN's delay slot if is annulled. */ +- if (INSN_ANNULLED_BRANCH_P (insn)) +- { +- final_scan_insn (final_sequence->insn (1), +- asm_out_file, optimize, 1, NULL); +- final_sequence->insn (1)->set_deleted (); +- } +- fprintf (asm_out_file, "\n"); +- } ++ output_asm_insn ("b\t%0", &taken); + + /* Output NOT_TAKEN. */ + targetm.asm_out.internal_label (asm_out_file, "L", +@@ -7326,21 +6676,23 @@ loongarch_output_conditional_branch (rtx_insn *insn, rtx *operands, + OPERANDS[3] is the second operand and may be zero or a register. 
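+
+ When the target label is too far away for a conditional branch, the
+ generic helper above rewrites, for instance,
+
+   beq	$r4,$r5,.Lfar
+
+ into the inverted sequence
+
+   bne	$r4,$r5,.Lskip
+   b	.Lfar
+ .Lskip:
+
+ (an illustrative sketch of the branch_if_true/branch_if_false
+ mechanism; the exact mnemonics come from LARCH_BRANCH below).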
*/ + + const char * +-loongarch_output_equal_conditional_branch (rtx_insn* insn, rtx *operands, +- bool inverted_p) ++loongarch_output_equal_conditional_branch (rtx_insn *insn, rtx *operands, ++ bool inverted_p) + { + const char *branch[2]; + if (operands[3] == const0_rtx) + { + branch[!inverted_p] = LARCH_BRANCH ("b%C1z", "%2,%0"); + branch[inverted_p] = LARCH_BRANCH ("b%N1z", "%2,%0"); +- } else ++ } ++ else + { + branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%z3,%0"); + branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%z3,%0"); + } + +- return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); ++ return loongarch_output_conditional_branch (insn, operands, branch[1], ++ branch[0]); + } + + /* Return the assembly code for INSN, which branches to OPERANDS[0] +@@ -7351,7 +6703,7 @@ loongarch_output_equal_conditional_branch (rtx_insn* insn, rtx *operands, + + const char * + loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, +- bool inverted_p) ++ bool inverted_p) + { + const char *branch[2]; + +@@ -7377,7 +6729,7 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, + branch[!inverted_p] = LARCH_BRANCH ("b", "%0"); + branch[inverted_p] = "\t# branch never"; + break; +- default: ++ default: + gcc_unreachable (); + } + } +@@ -7385,31 +6737,19 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, + { + switch (GET_CODE (operands[1])) + { +- case LE: +- branch[!inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); +- branch[inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); +- break; +- case LEU: +- branch[!inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); +- branch[inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); +- break; +- case GT: +- branch[!inverted_p] = LARCH_BRANCH ("blt", "%3,%2,%0"); +- branch[inverted_p] = LARCH_BRANCH ("bge", "%3,%2,%0"); +- break; +- case GTU: +- branch[!inverted_p] = LARCH_BRANCH ("bltu", "%3,%2,%0"); +- branch[inverted_p] = LARCH_BRANCH ("bgeu", "%3,%2,%0"); +- break; +- case LT: +- case LTU: +- case GE: +- case GEU: +- branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%3,%0"); +- branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%3,%0"); +- break; +- default: +- gcc_unreachable (); ++ case LE: ++ case LEU: ++ case GT: ++ case GTU: ++ case LT: ++ case LTU: ++ case GE: ++ case GEU: ++ branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,%3,%0"); ++ branch[inverted_p] = LARCH_BRANCH ("b%N1", "%2,%3,%0"); ++ break; ++ default: ++ gcc_unreachable (); + } + } + } +@@ -7419,30 +6759,11 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, + { + /* These cases are equivalent to comparisons against zero. */ + case LEU: +- inverted_p = !inverted_p; +- /* Fall through. */ + case GTU: +- branch[!inverted_p] = LARCH_BRANCH ("bne", "%2,%.,%0"); +- branch[inverted_p] = LARCH_BRANCH ("beq", "%2,%.,%0"); +- break; +- +- /* These cases are always true or always false. */ + case LTU: +- inverted_p = !inverted_p; +- /* Fall through. 
*/ + case GEU: +- branch[!inverted_p] = LARCH_BRANCH ("beq", "%.,%.,%0"); +- branch[inverted_p] = LARCH_BRANCH ("bne", "%.,%.,%0"); +- break; +- +- case LE: +- branch[!inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); +- branch[inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); +- break; ++ case LE: + case GT: +- branch[!inverted_p] = LARCH_BRANCH ("blt", "$r0,%2,%0"); +- branch[inverted_p] = LARCH_BRANCH ("bge", "$r0,%2,%0"); +- break; + case LT: + case GE: + branch[!inverted_p] = LARCH_BRANCH ("b%C1", "%2,$r0,%0"); +@@ -7451,98 +6772,14 @@ loongarch_output_order_conditional_branch (rtx_insn *insn, rtx *operands, + default: + gcc_unreachable (); + } +- } +- return loongarch_output_conditional_branch (insn, operands, branch[1], branch[0]); ++ } ++ return loongarch_output_conditional_branch (insn, operands, branch[1], ++ branch[0]); + } +- +-/* Return the assembly code for DIV or DDIV instruction DIVISION, which has +- the operands given by OPERANDS. Add in a divide-by-zero check if needed. + +- When working around R4000 and R4400 errata, we need to make sure that +- the division is not immediately followed by a shift[1][2]. We also +- need to stop the division from being put into a branch delay slot[3]. +- The easiest way to avoid both problems is to add a nop after the +- division. When a divide-by-zero check is needed, this nop can be +- used to fill the branch delay slot. +- +- [1] If a double-word or a variable shift executes immediately +- after starting an integer division, the shift may give an +- incorrect result. See quotations of errata #16 and #28 from +- "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" +- in loongarch.md for details. +- +- [2] A similar bug to [1] exists for all revisions of the +- R4000 and the R4400 when run in an MC configuration. +- From "LARCH R4000MC Errata, Processor Revision 2.2 and 3.0": +- +- "19. In this following sequence: +- +- ddiv (or ddivu or div or divu) +- dsll32 (or dsrl32, dsra32) +- +- if an MPT stall occurs, while the divide is slipping the cpu +- pipeline, then the following double shift would end up with an +- incorrect result. +- +- Workaround: The compiler needs to avoid generating any +- sequence with divide followed by extended double shift." +- +- This erratum is also present in "LARCH R4400MC Errata, Processor +- Revision 1.0" and "LARCH R4400MC Errata, Processor Revision 2.0 +- & 3.0" as errata #10 and #4, respectively. +- +- [3] From "LARCH R4000PC/SC Errata, Processor Revision 2.2 and 3.0" +- (also valid for LARCH R4000MC processors): +- +- "52. R4000SC: This bug does not apply for the R4000PC. +- +- There are two flavors of this bug: +- +- 1) If the instruction just after divide takes an RF exception +- (tlb-refill, tlb-invalid) and gets an instruction cache +- miss (both primary and secondary) and the line which is +- currently in secondary cache at this index had the first +- data word, where the bits 5..2 are set, then R4000 would +- get a wrong result for the div. +- +- ##1 +- nop +- div r8, r9 +- ------------------- # end-of page. -tlb-refill +- nop +- ##2 +- nop +- div r8, r9 +- ------------------- # end-of page. -tlb-invalid +- nop +- +- 2) If the divide is in the taken branch delay slot, where the +- target takes RF exception and gets an I-cache miss for the +- exception vector or where I-cache miss occurs for the +- target address, under the above mentioned scenarios, the +- div would get wrong results. 
+- +- ##1 +- j r2 # to next page mapped or unmapped +- div r8,r9 # this bug would be there as long +- # as there is an ICache miss and +- nop # the "data pattern" is present +- +- ##2 +- beq r0, r0, NextPage # to Next page +- div r8,r9 +- nop +- +- This bug is present for div, divu, ddiv, and ddivu +- instructions. +- +- Workaround: For item 1), OS could make sure that the next page +- after the divide instruction is also mapped. For item 2), the +- compiler could make sure that the divide instruction is not in +- the branch delay slot." +- +- These processors have PRId values of 0x00004220 and 0x00004300 for +- the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */ ++/* Return the assembly code for DIV.{W/D} instruction DIVISION, which has ++ the operands given by OPERANDS. Add in a divide-by-zero check if needed. ++ */ + + const char * + loongarch_output_division (const char *division, rtx *operands) +@@ -7571,13 +6808,13 @@ loongarch_lsx_output_division (const char *division, rtx *operands) + s = division; + if (TARGET_CHECK_ZERO_DIV) + { +- if(ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) ++ if (ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32) + { + output_asm_insn ("xvsetallnez.%v0\t$fcc7,%u2",operands); + output_asm_insn (s, operands); + output_asm_insn ("bcnez\t$fcc7,1f", operands); + } +- else if(ISA_HAS_LSX) ++ else if (ISA_HAS_LSX) + { + output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands); + output_asm_insn (s, operands); +@@ -7587,80 +6824,13 @@ loongarch_lsx_output_division (const char *division, rtx *operands) + } + return s; + } +- +-/* Return true if destination of IN_INSN is used as add source in +- OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example: +- madd.s dst, x, y, z +- madd.s a, dst, b, c */ +- +-bool +-loongarch_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn) +-{ +- int dst_reg, src_reg; +- +- gcc_assert (get_attr_type (in_insn) == TYPE_FMADD); +- gcc_assert (get_attr_type (out_insn) == TYPE_FMADD); +- +- extract_insn (in_insn); +- dst_reg = REG_P (recog_data.operand[0]); +- +- extract_insn (out_insn); +- src_reg = REG_P (recog_data.operand[1]); +- +- if (dst_reg == src_reg) +- return true; +- +- return false; +-} +- +-/* Return true if IN_INSN is a multiply-add or multiply-subtract +- instruction and if OUT_INSN assigns to the accumulator operand. */ +- +-bool +-loongarch_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn) +-{ +- enum attr_accum_in accum_in; +- int accum_in_opnum; +- rtx accum_in_op; +- +- if (recog_memoized (in_insn) < 0) +- return false; +- +- accum_in = get_attr_accum_in (in_insn); +- if (accum_in == ACCUM_IN_NONE) +- return false; +- +- accum_in_opnum = accum_in - ACCUM_IN_0; +- +- extract_insn (in_insn); +- gcc_assert (accum_in_opnum < recog_data.n_operands); +- accum_in_op = recog_data.operand[accum_in_opnum]; +- +- return reg_set_p (accum_in_op, out_insn); +-} +- +-/* True if the dependency between OUT_INSN and IN_INSN is on the store +- data rather than the address. We need this because the cprestore +- pattern is type "store", but is defined using an UNSPEC_VOLATILE, +- which causes the default routine to abort. We just return false +- for that case. */ +- +-bool +-loongarch_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn) +-{ +- if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE) +- return false; +- +- return store_data_bypass_p (out_insn, in_insn); +-} +- + + /* Implement TARGET_SCHED_ADJUST_COST. 
We assume that anti and output
+- dependencies have no cost, except on the 20Kc where output-dependence
+- is treated like input-dependence. */
++ dependencies have no cost. */
+
+ static int
+-loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned int)
++loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost,
++ unsigned int)
+ {
+ if (dep_type != 0 && (dep_type != REG_DEP_OUTPUT))
+ return 0;
+@@ -7672,15 +6842,10 @@ loongarch_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned
+ static int
+ loongarch_issue_rate (void)
+ {
+- switch (loongarch_tune)
+- {
+- case PROCESSOR_LOONGARCH64:
+- case PROCESSOR_LA464:
+- return 4;
+-
+- default:
+- return 1;
+- }
++ if ((unsigned long) la_target.cpu_tune < N_TUNE_TYPES)
++ return loongarch_cpu_issue_rate[la_target.cpu_tune];
++ else
++ return 1;
+ }
+
+ /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
+@@ -7689,24 +6854,20 @@ loongarch_issue_rate (void)
+ static int
+ loongarch_multipass_dfa_lookahead (void)
+ {
+- if (TUNE_LOONGARCH64 || TUNE_LA464)
+- return 4;
+-
+- return 0;
+-}
+-
+-
+-static void
+-loongarch_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
+- int max_ready ATTRIBUTE_UNUSED)
+-{
++ if ((unsigned long) la_target.cpu_tune < N_TUNE_TYPES)
++ return loongarch_cpu_multipass_dfa_lookahead[la_target.cpu_tune];
++ else
++ return 0;
+ }
+
+ /* Implement TARGET_SCHED_REORDER. */
+
+ static int
+-loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
+- rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
++loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED,
++ int verbose ATTRIBUTE_UNUSED,
++ rtx_insn **ready ATTRIBUTE_UNUSED,
++ int *nreadyp ATTRIBUTE_UNUSED,
++ int cycle ATTRIBUTE_UNUSED)
+ {
+ return loongarch_issue_rate ();
+ }
+@@ -7714,17 +6875,29 @@ loongarch_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUS
+ /* Implement TARGET_SCHED_REORDER2. */
+
+ static int
+-loongarch_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
+- rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
++loongarch_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED,
++ int verbose ATTRIBUTE_UNUSED,
++ rtx_insn **ready ATTRIBUTE_UNUSED,
++ int *nreadyp ATTRIBUTE_UNUSED,
++ int cycle ATTRIBUTE_UNUSED)
+ {
+ return cached_can_issue_more;
+ }
+
++/* Implement TARGET_SCHED_INIT. */
++
++static void
++loongarch_sched_init (FILE *file ATTRIBUTE_UNUSED,
++ int verbose ATTRIBUTE_UNUSED,
++ int max_ready ATTRIBUTE_UNUSED)
++{}
++
+ /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
+
+ static int
+-loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
+- rtx_insn *insn, int more)
++loongarch_variable_issue (FILE *file ATTRIBUTE_UNUSED,
++ int verbose ATTRIBUTE_UNUSED, rtx_insn *insn,
++ int more)
+ {
+ /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
+ if (USEFUL_INSN_P (insn))
+@@ -7742,1243 +6915,2339 @@ loongarch_variable_issue (FILE *file ATTRIBUTE_UNU
+ cached_can_issue_more = more;
+ return more;
+ }
+-
+-/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
+- return the first operand of the associated PREF or PREFX insn. */
+-
+-rtx
+-loongarch_prefetch_cookie (rtx write, rtx locality)
+-{
+- /* store_streamed / load_streamed. */
+- if (INTVAL (locality) <= 0)
+- return GEN_INT (INTVAL (write) + 4);
+-
+- /* store / load.
*/ +- if (INTVAL (locality) <= 2) +- return write; +- +- /* store_retained / load_retained. */ +- return GEN_INT (INTVAL (write) + 6); +-} +- +- +-/* Return whether CFG is used in loongarch_reorg. */ + +-static bool +-loongarch_cfg_in_reorg (void) +-{ +- return (TARGET_RELAX_PIC_CALLS); +-} +- +-/* If INSN is a call, return the underlying CALL expr. Return NULL_RTX +- otherwise. If INSN has two call rtx, then store the second one in +- SECOND_CALL. */ ++/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text ++ in order to avoid duplicating too much logic from elsewhere. */ + +-static rtx +-loongarch_call_expr_from_insn (rtx_insn *insn, rtx *second_call) ++static void ++loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, ++ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, ++ tree function) + { +- rtx x; +- rtx x2; +- +- if (!CALL_P (insn)) +- return NULL_RTX; +- +- x = PATTERN (insn); +- if (GET_CODE (x) == PARALLEL) +- { +- /* Calls returning complex values have two CALL rtx. Look for the second +- one here, and return it via the SECOND_CALL arg. */ +- x2 = XVECEXP (x, 0, 1); +- if (GET_CODE (x2) == SET) +- x2 = XEXP (x2, 1); +- if (GET_CODE (x2) == CALL) +- *second_call = x2; +- +- x = XVECEXP (x, 0, 0); +- } +- if (GET_CODE (x) == SET) +- x = XEXP (x, 1); +- gcc_assert (GET_CODE (x) == CALL); ++ rtx this_rtx, temp1, temp2, fnaddr; ++ rtx_insn *insn; ++ bool use_sibcall_p; + +- return x; +-} ++ /* Pretend to be a post-reload pass while generating rtl. */ ++ reload_completed = 1; + +-/* REG is set in DEF. See if the definition is one of the ways we load a +- register with a symbol address for a loongarch_use_pic_fn_addr_reg_p call. +- If it is, return the symbol reference of the function, otherwise return +- NULL_RTX. ++ /* Mark the end of the (empty) prologue. */ ++ emit_note (NOTE_INSN_PROLOGUE_END); + +- If RECURSE_P is true, use loongarch_find_pic_call_symbol to interpret +- the values of source registers, otherwise treat such registers as +- having an unknown value. */ ++ /* Determine if we can use a sibcall to call FUNCTION directly. */ ++ fnaddr = XEXP (DECL_RTL (function), 0); ++ use_sibcall_p = const_call_insn_operand (fnaddr, Pmode); + +-static rtx +-loongarch_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p) +-{ +- rtx_insn *def_insn; +- rtx set; ++ /* We need two temporary registers in some cases. */ ++ temp1 = gen_rtx_REG (Pmode, 12); ++ temp2 = gen_rtx_REG (Pmode, 13); + +- if (DF_REF_IS_ARTIFICIAL (def)) +- return NULL_RTX; ++ /* Find out which register contains the "this" pointer. */ ++ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) ++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); ++ else ++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); + +- def_insn = DF_REF_INSN (def); +- set = single_set (def_insn); +- if (set && rtx_equal_p (SET_DEST (set), reg)) ++ /* Add DELTA to THIS_RTX. */ ++ if (delta != 0) + { +- rtx note, src, symbol; +- +- /* First see whether the source is a plain symbol. This is used +- when calling symbols that are not lazily bound. */ +- src = SET_SRC (set); +- if (GET_CODE (src) == SYMBOL_REF) +- return src; +- +- /* Handle %call16 references. 
*/ +- symbol = loongarch_strip_unspec_call (src); +- if (symbol) ++ rtx offset = GEN_INT (delta); ++ if (!IMM12_OPERAND (delta)) + { +- gcc_assert (GET_CODE (symbol) == SYMBOL_REF); +- return symbol; ++ loongarch_emit_move (temp1, offset); ++ offset = temp1; + } +- +- /* If we have something more complicated, look for a +- REG_EQUAL or REG_EQUIV note. */ +- note = find_reg_equal_equiv_note (def_insn); +- if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF) +- return XEXP (note, 0); +- +- /* Follow at most one simple register copy. Such copies are +- interesting in cases like: +- +- for (...) +- { +- locally_binding_fn (...); +- } +- +- and: +- +- locally_binding_fn (...); +- ... +- locally_binding_fn (...); +- +- where the load of locally_binding_fn can legitimately be +- hoisted or shared. However, we do not expect to see complex +- chains of copies, so a full worklist solution to the problem +- would probably be overkill. */ +- if (recurse_p && REG_P (src)) +- return loongarch_find_pic_call_symbol (def_insn, src, false); ++ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); + } + +- return NULL_RTX; +-} ++ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */ ++ if (vcall_offset != 0) ++ { ++ rtx addr; + +-/* Find the definition of the use of REG in INSN. See if the definition +- is one of the ways we load a register with a symbol address for a +- loongarch_use_pic_fn_addr_reg_p call. If it is return the symbol reference +- of the function, otherwise return NULL_RTX. RECURSE_P is as for +- loongarch_pic_call_symbol_from_set. */ ++ /* Set TEMP1 to *THIS_RTX. */ ++ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); + +-static rtx +-loongarch_find_pic_call_symbol (rtx_insn *insn, rtx reg, bool recurse_p) +-{ +- df_ref use; +- struct df_link *defs; +- rtx symbol; ++ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */ ++ addr = loongarch_add_offset (temp2, temp1, vcall_offset); + +- use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]); +- if (!use) +- return NULL_RTX; +- defs = DF_REF_CHAIN (use); +- if (!defs) +- return NULL_RTX; +- symbol = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); +- if (!symbol) +- return NULL_RTX; ++ /* Load the offset and add it to THIS_RTX. */ ++ loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); ++ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); ++ } + +- /* If we have more than one definition, they need to be identical. */ +- for (defs = defs->next; defs; defs = defs->next) ++ /* Jump to the target function. Use a sibcall if direct jumps are ++ allowed, otherwise load the address into a register first. */ ++ if (use_sibcall_p) + { +- rtx other; +- +- other = loongarch_pic_call_symbol_from_set (defs->ref, reg, recurse_p); +- if (!rtx_equal_p (symbol, other)) +- return NULL_RTX; ++ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); ++ SIBLING_CALL_P (insn) = 1; ++ } ++ else ++ { ++ loongarch_emit_move (temp1, fnaddr); ++ emit_jump_insn (gen_indirect_jump (temp1)); + } + +- return symbol; +-} +- +-/* Replace the args_size operand of the call expression CALL with the +- call-attribute UNSPEC and fill in SYMBOL as the function symbol. */ +- +-static void +-loongarch_annotate_pic_call_expr (rtx call, rtx symbol) +-{ +- rtx args_size; ++ /* Run just enough of rest_of_compilation. This sequence was ++ "borrowed" from alpha.c. 
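++
++ By this point the rtl built above is, in C terms, equivalent to
++ the following shape (a sketch only; FN stands for the target
++ FUNCTION):
++
++   this += delta;
++   if (vcall_offset)
++     this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
++   return fn (this, ...);
++
++ and the calls below just run the minimal pass pipeline needed to
++ turn that rtl into assembly.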
*/ ++ insn = get_insns (); ++ split_all_insns_noflow (); ++ shorten_branches (insn); ++ final_start_function (insn, file, 1); ++ final (insn, file, 1); ++ final_end_function (); + +- args_size = XEXP (call, 1); +- XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size), +- gen_rtvec (2, args_size, symbol), +- UNSPEC_CALL_ATTR); ++ /* Stop pretending to be a post-reload pass. */ ++ reload_completed = 0; + } + +-/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See +- if instead of the arg_size argument it contains the call attributes. If +- yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function +- symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is +- -1. */ ++/* Allocate a chunk of memory for per-function machine-dependent data. */ + +-bool +-loongarch_get_pic_call_symbol (rtx *operands, int args_size_opno) ++static struct machine_function * ++loongarch_init_machine_status (void) + { +- rtx args_size, symbol; +- +- if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1) +- return false; +- +- args_size = operands[args_size_opno]; +- if (GET_CODE (args_size) != UNSPEC) +- return false; +- gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR); +- +- symbol = XVECEXP (args_size, 0, 1); +- gcc_assert (GET_CODE (symbol) == SYMBOL_REF); +- +- operands[args_size_opno] = symbol; +- return true; ++ return ggc_cleared_alloc (); + } + +-/* Use DF to annotate PIC indirect calls with the function symbol they +- dispatch to. */ +- + static void +-loongarch_annotate_pic_calls (void) ++loongarch_cpu_option_override (struct loongarch_target *target, ++ struct gcc_options *opts, ++ struct gcc_options *opts_set) + { +- basic_block bb; +- rtx_insn *insn; +- +- FOR_EACH_BB_FN (bb, cfun) +- FOR_BB_INSNS (bb, insn) ++ /* strict alignment */ ++ switch (target->cpu_arch) + { +- rtx call, reg, symbol, second_call; ++ case CPU_LA264: ++ /* Using -mstrict-align is recommended for la264 cores. */ ++ if (!opts_set->x_TARGET_STRICT_ALIGN) ++ { ++ opts->x_TARGET_STRICT_ALIGN = 1; ++ opts_set->x_TARGET_STRICT_ALIGN = 1; ++ } ++ break; ++ } + +- second_call = 0; +- call = loongarch_call_expr_from_insn (insn, &second_call); +- if (!call) +- continue; +- gcc_assert (MEM_P (XEXP (call, 0))); +- reg = XEXP (XEXP (call, 0), 0); +- if (!REG_P (reg)) +- continue; ++ /* software prefetching parameters (-fprefetch-loop-arrays) */ ++ maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, ++ loongarch_cpu_cache[target->cpu_tune].simultaneous_prefetches, ++ opts->x_param_values, opts_set->x_param_values); + +- symbol = loongarch_find_pic_call_symbol (insn, reg, true); +- if (symbol) +- { +- loongarch_annotate_pic_call_expr (call, symbol); +- if (second_call) +- loongarch_annotate_pic_call_expr (second_call, symbol); +- } +- } +-} +- ++ maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, ++ loongarch_cpu_cache[target->cpu_tune].l1d_line_size, ++ opts->x_param_values, opts_set->x_param_values); + +-/* A structure representing the state of the processor pipeline. +- Used by the loongarch_sim_* family of functions. */ +-struct loongarch_sim { +- /* The maximum number of instructions that can be issued in a cycle. +- (Caches loongarch_issue_rate.) */ +- unsigned int issue_rate; +- +- /* The current simulation time. */ +- unsigned int time; +- +- /* How many more instructions can be issued in the current cycle. */ +- unsigned int insns_left; +- +- /* LAST_SET[X].INSN is the last instruction to set register X. +- LAST_SET[X].TIME is the time at which that instruction was issued. 
+- INSN is null if no instruction has yet set register X. */ +- struct { +- rtx_insn *insn; +- unsigned int time; +- } last_set[FIRST_PSEUDO_REGISTER]; +- +- /* The pipeline's current DFA state. */ +- state_t dfa_state; +-}; ++ maybe_set_param_value (PARAM_L1_CACHE_SIZE, ++ loongarch_cpu_cache[target->cpu_tune].l1d_size, ++ opts->x_param_values, opts_set->x_param_values); + +-/* Reset STATE to the initial simulation state. */ ++ maybe_set_param_value (PARAM_L2_CACHE_SIZE, ++ loongarch_cpu_cache[target->cpu_tune].l2d_size, ++ opts->x_param_values, opts_set->x_param_values); ++} + + static void +-loongarch_sim_reset (struct loongarch_sim *state) ++loongarch_option_override_internal (struct gcc_options *opts, ++ struct gcc_options *opts_set) + { +- curr_state = state->dfa_state; ++ int i, regno, mode; + +- state->time = 0; +- state->insns_left = state->issue_rate; +- memset (&state->last_set, 0, sizeof (state->last_set)); +- state_reset (curr_state); ++ if (flag_pic) ++ g_switch_value = 0; + +- targetm.sched.init (0, false, 0); +- advance_state (curr_state); +-} ++ loongarch_init_target (&la_target, ++ la_opt_cpu_arch, la_opt_cpu_tune, la_opt_fpu, ++ la_opt_simd, la_opt_abi_base, la_opt_abi_ext, ++ la_opt_cmodel); + +-/* Initialize STATE before its first use. DFA_STATE points to an +- allocated but uninitialized DFA state. */ ++ /* Handle target-specific options: compute defaults/conflicts etc. */ ++ loongarch_config_target (&la_target, NULL, 0); + +-static void +-loongarch_sim_init (struct loongarch_sim *state, state_t dfa_state) +-{ +- if (targetm.sched.init_dfa_pre_cycle_insn) +- targetm.sched.init_dfa_pre_cycle_insn (); ++ loongarch_update_gcc_opt_status (&la_target, opts, opts_set); ++ loongarch_cpu_option_override (&la_target, opts, opts_set); + +- if (targetm.sched.init_dfa_post_cycle_insn) +- targetm.sched.init_dfa_post_cycle_insn (); ++ if (TARGET_ABI_LP64) ++ flag_pcc_struct_return = 0; + +- state->issue_rate = loongarch_issue_rate (); +- state->dfa_state = dfa_state; +- loongarch_sim_reset (state); +-} ++ /* Decide which rtx_costs structure to use. */ ++ if (optimize_size) ++ loongarch_cost = &loongarch_rtx_cost_optimize_size; ++ else ++ loongarch_cost = &loongarch_cpu_rtx_cost_data[la_target.cpu_tune]; + +- ++ /* If the user hasn't specified a branch cost, use the processor's ++ default. */ ++ if (loongarch_branch_cost == 0) ++ loongarch_branch_cost = loongarch_cost->branch_cost; + +-/* Set up costs based on the current architecture and tuning settings. */ ++ if (loongarch_vector_access_cost == 0) ++ loongarch_vector_access_cost = 5; + +-static void +-loongarch_set_tuning_info (void) +-{ + +- loongarch_tuning_info.arch = loongarch_arch; +- loongarch_tuning_info.tune = loongarch_tune; +- loongarch_tuning_info.initialized_p = true; ++ /* Enable sw prefetching at -O3 and higher. */ ++ if (opts->x_flag_prefetch_loop_arrays < 0 ++ && (opts->x_optimize >= 3 || opts->x_flag_profile_use) ++ && !opts->x_optimize_size) ++ opts->x_flag_prefetch_loop_arrays = 1; + +- dfa_start (); ++ switch (la_target.cmodel) ++ { ++ case CMODEL_TINY_STATIC: ++ case CMODEL_EXTREME: ++ if (opts->x_flag_plt) ++ error ("code model %qs and %qs not support %s mode", ++ "tiny-static", "extreme", "plt"); ++ break; + +- struct loongarch_sim state; +- loongarch_sim_init (&state, alloca (state_size ())); ++ case CMODEL_NORMAL: ++ case CMODEL_TINY: ++ case CMODEL_LARGE: ++ break; + +- dfa_finish (); +-} ++ default: ++ gcc_unreachable (); ++ } + +-/* Implement TARGET_EXPAND_TO_RTL_HOOK. 
*/ ++ loongarch_init_print_operand_punct (); + +-static void +-loongarch_expand_to_rtl_hook (void) +-{ +- /* We need to call this at a point where we can safely create sequences +- of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also +- need to call it at a point where the DFA infrastructure is not +- already in use, so we can't just call it lazily on demand. +- +- At present, loongarch_tuning_info is only needed during post-expand +- RTL passes such as split_insns, so this hook should be early enough. +- We may need to move the call elsewhere if loongarch_tuning_info starts +- to be used for other things (such as rtx_costs, or expanders that +- could be called during gimple optimization). */ +- loongarch_set_tuning_info (); +-} +- +-/* This structure records that the current function has a LO_SUM +- involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is +- the largest offset applied to BASE by all such LO_SUMs. */ +-struct loongarch_lo_sum_offset { +- rtx base; +- HOST_WIDE_INT offset; +-}; ++ /* Set up array to map GCC register number to debug register number. ++ Ignore the special purpose register numbers. */ + +-/* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */ ++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ { ++ if (GP_REG_P (i) || FP_REG_P (i)) ++ loongarch_dwarf_regno[i] = i; ++ else ++ loongarch_dwarf_regno[i] = INVALID_REGNUM; ++ } + +-static hashval_t +-loongarch_hash_base (rtx base) +-{ +- int do_not_record_p; ++ /* Set up loongarch_hard_regno_mode_ok. */ ++ for (mode = 0; mode < MAX_MACHINE_MODE; mode++) ++ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) ++ loongarch_hard_regno_mode_ok_p[mode][regno] ++ = loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode); + +- return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false); +-} ++ /* Function to allocate machine-dependent function status. */ ++ init_machine_status = &loongarch_init_machine_status; + +-/* Hashtable helpers. */ ++ /* If not optimizing for size, set the default ++ alignment to what the target wants. */ ++ if (!opts->x_optimize_size) ++ { ++ if (opts->x_align_loops <= 0) ++ opts->x_align_loops = 16; ++ if (opts->x_align_jumps <= 0) ++ opts->x_align_jumps = 16; ++ if (opts->x_align_functions <= 0) ++ opts->x_align_functions = 16; ++ } + +-struct loongarch_lo_sum_offset_hasher : free_ptr_hash +-{ +- typedef rtx_def *compare_type; +- static inline hashval_t hash (const loongarch_lo_sum_offset *); +- static inline bool equal (const loongarch_lo_sum_offset *, const rtx_def *); +-}; ++ if (loongarch_veclibabi_name ++ && strcmp (loongarch_veclibabi_name, "sleef") != 0) ++ { ++ error ("unknown vectorization library ABI type (%qs) for " ++ "%qs", loongarch_veclibabi_name, "-mveclibabi="); ++ inform (input_location, ++ "valid arguments to %<-mveclibabi=%> are: %s", "sleef"); ++ } ++ if (!ISA_HAS_LASX) ++ loongarch_stack_realign = 0; + +-/* Hash-table callbacks for loongarch_lo_sum_offsets. */ ++ /* -mrecip options. 
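++
++   The parser below accepts a comma-separated list of the names in
++   recip_options, each optionally prefixed with '!' to clear the mask
++   bits instead of setting them; "default" is accepted as a synonym for
++   "all".  Illustrative invocations (examples, not taken from this
++   patch):
++
++     -mrecip=all,!sqrt     everything except scalar square root
++     -mrecip=vec-div       only vectorized division
++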
*/ ++ static struct ++ { ++ const char *string; /* option name */ ++ unsigned int mask; /* mask bits to set */ ++ } ++ const recip_options[] = ++ { ++ { "all", RECIP_MASK_ALL }, ++ { "none", RECIP_MASK_NONE }, ++ { "div", RECIP_MASK_DIV }, ++ { "sqrt", RECIP_MASK_SQRT }, ++ { "rsqrt", RECIP_MASK_RSQRT }, ++ { "vec-div", RECIP_MASK_VEC_DIV }, ++ { "vec-sqrt", RECIP_MASK_VEC_SQRT }, ++ { "vec-rsqrt", RECIP_MASK_VEC_RSQRT }, ++ }; + +-inline hashval_t +-loongarch_lo_sum_offset_hasher::hash (const loongarch_lo_sum_offset *entry) +-{ +- return loongarch_hash_base (entry->base); +-} ++ if (loongarch_recip_name) ++ { ++ char *p = ASTRDUP (loongarch_recip_name); ++ char *q; ++ unsigned int mask, i; ++ bool invert; ++ ++ while ((q = strtok (p, ",")) != NULL) ++ { ++ p = NULL; ++ if (*q == '!') ++ { ++ invert = true; ++ q++; ++ } ++ else ++ invert = false; ++ ++ if (!strcmp (q, "default")) ++ mask = RECIP_MASK_ALL; ++ else ++ { ++ for (i = 0; i < ARRAY_SIZE (recip_options); i++) ++ if (!strcmp (q, recip_options[i].string)) ++ { ++ mask = recip_options[i].mask; ++ break; ++ } ++ ++ if (i == ARRAY_SIZE (recip_options)) ++ { ++ error ("unknown option for -mrecip=%s", q); ++ invert = false; ++ mask = RECIP_MASK_NONE; ++ } ++ } ++ ++ if (invert) ++ recip_mask &= ~mask; ++ else ++ recip_mask |= mask; ++ } ++ } ++ if (loongarch_recip) ++ recip_mask |= RECIP_MASK_ALL; + +-inline bool +-loongarch_lo_sum_offset_hasher::equal (const loongarch_lo_sum_offset *entry, +- const rtx_def *value) +-{ +- return rtx_equal_p (entry->base, value); + } + +-typedef hash_table loongarch_offset_table; +- + +-/* Subroutine of loongarch_reorg to manage passes that require DF. */ ++/* Implement TARGET_OPTION_OVERRIDE. */ + + static void +-loongarch_df_reorg (void) ++loongarch_option_override (void) + { +- /* Create def-use chains. */ +- df_set_flags (DF_EQ_NOTES); +- df_chain_add_problem (DF_UD_CHAIN); +- df_analyze (); +- +- if (TARGET_RELAX_PIC_CALLS) +- loongarch_annotate_pic_calls (); +- +- df_finish_pass (false); ++ loongarch_option_override_internal (&global_options, &global_options_set); + } + +- +-/* Implement TARGET_MACHINE_DEPENDENT_REORG. */ ++/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ + + static void +-loongarch_reorg (void) ++loongarch_conditional_register_usage (void) + { +- /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.DF insn info is only kept up +- to date if the CFG is available. */ +- if (loongarch_cfg_in_reorg ()) +- compute_bb_for_insn (); +- if (loongarch_cfg_in_reorg ()) ++ if (!TARGET_HARD_FLOAT) + { +- loongarch_df_reorg (); +- free_bb_for_insn (); ++ AND_COMPL_HARD_REG_SET (accessible_reg_set, ++ reg_class_contents[(int) FP_REGS]); ++ AND_COMPL_HARD_REG_SET (accessible_reg_set, ++ reg_class_contents[(int) FCC_REGS]); + } ++ + } + +-/* We use a machine specific pass to do a second machine dependent reorg +- pass after delay branch scheduling. */ ++/* Implement EH_USES. */ + +-static unsigned int +-loongarch_machine_reorg2 (void) ++bool ++loongarch_eh_uses (unsigned int regno ATTRIBUTE_UNUSED) + { +-// loongarch_insert_insn_pseudos (); +- return 0; ++ return false; + } + +-namespace { +- +-const pass_data pass_data_loongarch_machine_reorg2 = +-{ +- RTL_PASS, /* type */ +- "mach2", /* name */ +- OPTGROUP_NONE, /* optinfo_flags */ +- TV_MACH_DEP, /* tv_id */ +- 0, /* properties_required */ +- 0, /* properties_provided */ +- 0, /* properties_destroyed */ +- 0, /* todo_flags_start */ +- 0, /* todo_flags_finish */ +-}; ++/* Implement EPILOGUE_USES. 
*/ + +-class pass_loongarch_machine_reorg2 : public rtl_opt_pass ++bool ++loongarch_epilogue_uses (unsigned int regno) + { +-public: +- pass_loongarch_machine_reorg2(gcc::context *ctxt) +- : rtl_opt_pass(pass_data_loongarch_machine_reorg2, ctxt) +- {} +- +- /* opt_pass methods: */ +- virtual unsigned int execute (function *) { return loongarch_machine_reorg2 (); } +- +-}; // class pass_loongarch_machine_reorg2 +- +-} // anon namespace ++ /* Say that the epilogue uses the return address register. Note that ++ in the case of sibcalls, the values "used by the epilogue" are ++ considered live at the start of the called function. */ ++ if (regno == RETURN_ADDR_REGNUM) ++ return true; + +-rtl_opt_pass * +-make_pass_loongarch_machine_reorg2 (gcc::context *ctxt) +-{ +- return new pass_loongarch_machine_reorg2 (ctxt); ++ return false; + } + +- +-/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text +- in order to avoid duplicating too much logic from elsewhere. */ +- +-static void +-loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, +- HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, +- tree function) ++bool ++loongarch_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p) + { +- rtx this_rtx, temp1, temp2, fnaddr; +- rtx_insn *insn; +- bool use_sibcall_p; +- +- /* Pretend to be a post-reload pass while generating rtl. */ +- reload_completed = 1; +- +- /* Mark the end of the (empty) prologue. */ +- emit_note (NOTE_INSN_PROLOGUE_END); +- +- /* Determine if we can use a sibcall to call FUNCTION directly. */ +- fnaddr = XEXP (DECL_RTL (function), 0); +- use_sibcall_p = (loongarch_function_ok_for_sibcall (function, NULL) +- && const_call_insn_operand (fnaddr, Pmode)); +- +-// /* Determine if we need to load FNADDR from the GOT. */ +-// if (!use_sibcall_p +-// && (loongarch_got_symbol_type_p +-// (loongarch_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA)))) +-// { +-// /* Pick a global pointer. Use a call-clobbered register if +-// TARGET_CALL_SAVED_GP. */ +-// cfun->machine->global_pointer +-// = GLOBAL_POINTER_REGNUM; +-// cfun->machine->must_initialize_gp_p = true; +-// SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer); +-// +-// /* Set up the global pointer for n32 or n64 abicalls. */ +-// loongarch_emit_loadgp (); +-// } +- +- /* We need two temporary registers in some cases. */ +- temp1 = gen_rtx_REG (Pmode, 12); +- temp2 = gen_rtx_REG (Pmode, 13); +- +- /* Find out which register contains the "this" pointer. */ +- if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) +- this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); +- else +- this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); +- +- /* Add DELTA to THIS_RTX. */ +- if (delta != 0) +- { +- rtx offset = GEN_INT (delta); +- if (!SMALL_OPERAND (delta)) +- { +- loongarch_emit_move (temp1, offset); +- offset = temp1; +- } +- emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); +- } +- +- /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */ +- if (vcall_offset != 0) +- { +- rtx addr; +- +- /* Set TEMP1 to *THIS_RTX. */ +- loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); +- +- /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */ +- addr = loongarch_add_offset (temp2, temp1, vcall_offset); +- +- /* Load the offset and add it to THIS_RTX. 
*/ +- loongarch_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); +- emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); +- } ++ rtx reg1, reg2, mem1, mem2, base1, base2; ++ enum reg_class rc1, rc2; ++ HOST_WIDE_INT offset1, offset2; + +- /* Jump to the target function. Use a sibcall if direct jumps are +- allowed, otherwise load the address into a register first. */ +- if (use_sibcall_p) ++ if (load_p) + { +- insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); +- SIBLING_CALL_P (insn) = 1; ++ reg1 = operands[0]; ++ reg2 = operands[2]; ++ mem1 = operands[1]; ++ mem2 = operands[3]; + } + else + { +- loongarch_emit_move (temp1, fnaddr); +- emit_jump_insn (gen_indirect_jump (temp1)); ++ reg1 = operands[1]; ++ reg2 = operands[3]; ++ mem1 = operands[0]; ++ mem2 = operands[2]; + } + +- /* Run just enough of rest_of_compilation. This sequence was +- "borrowed" from alpha.c. */ +- insn = get_insns (); +- split_all_insns_noflow (); +- shorten_branches (insn); +- final_start_function (insn, file, 1); +- final (insn, file, 1); +- final_end_function (); ++ if (loongarch_address_insns (XEXP (mem1, 0), mode, false) == 0 ++ || loongarch_address_insns (XEXP (mem2, 0), mode, false) == 0) ++ return false; + +- /* Clean up the vars set above. Note that final_end_function resets +- the global pointer for us. */ +- reload_completed = 0; +-} +- ++ loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); ++ loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); + +-/* Allocate a chunk of memory for per-function machine-dependent data. */ ++ /* Base regs do not match. */ ++ if (!REG_P (base1) || !rtx_equal_p (base1, base2)) ++ return false; + +-static struct machine_function * +-loongarch_init_machine_status (void) +-{ +- return ggc_cleared_alloc (); +-} ++ /* Either of the loads is clobbering base register. It is legitimate to bond ++ loads if second load clobbers base register. However, hardware does not ++ support such bonding. */ ++ if (load_p ++ && (REGNO (reg1) == REGNO (base1) || (REGNO (reg2) == REGNO (base1)))) ++ return false; + +-/* Return the processor associated with the given ISA level, or null +- if the ISA isn't valid. */ ++ /* Loading in same registers. */ ++ if (load_p && REGNO (reg1) == REGNO (reg2)) ++ return false; + +-static const struct loongarch_cpu_info * +-loongarch_cpu_info_from_isa (int isa) +-{ +- unsigned int i; ++ /* The loads/stores are not of same type. */ ++ rc1 = REGNO_REG_CLASS (REGNO (reg1)); ++ rc2 = REGNO_REG_CLASS (REGNO (reg2)); ++ if (rc1 != rc2 && !reg_class_subset_p (rc1, rc2) ++ && !reg_class_subset_p (rc2, rc1)) ++ return false; + +- for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) +- if (loongarch_cpu_info_table[i].isa == isa) +- return loongarch_cpu_info_table + i; ++ if (abs (offset1 - offset2) != GET_MODE_SIZE (mode)) ++ return false; + +- return NULL; ++ return true; + } + +-/* Return a loongarch_cpu_info entry determined by an option valued +- OPT. */ ++/* Implement TARGET_TRAMPOLINE_INIT. 
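++
++   The trampoline written below has the following layout, inferred from
++   the offsets computed in this function (a sketch of the generated
++   object, not an ABI statement):
++
++     offset 0:                     code, four instructions
++                                   (pcaddi / ld.[dw] / ld.[dw] / jirl)
++     offset TRAMPOLINE_CODE_SIZE:  static chain pointer
++     offset TRAMPOLINE_CODE_SIZE
++       + GET_MODE_SIZE (ptr_mode): target function address
++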
*/ + +-static const struct loongarch_cpu_info * +-loongarch_cpu_info_from_opt (int opt) ++static void ++loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) + { +- switch (opt) +- { +- case LARCH_ARCH_OPTION_NATIVE: +- gcc_unreachable (); +- +- default: +- return &loongarch_cpu_info_table[opt]; +- } +-} ++ rtx addr, end_addr, mem; ++ rtx trampoline[8]; ++ unsigned int i, j; ++ HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset; + +-/* Return a default loongarch_cpu_info entry, given that no -march= option +- was explicitly specified. */ ++ /* Work out the offsets of the pointers from the start of the ++ trampoline code. */ ++ end_addr_offset = TRAMPOLINE_CODE_SIZE; ++ static_chain_offset = end_addr_offset; ++ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); + +-static const struct loongarch_cpu_info * +-loongarch_default_arch (void) +-{ +-#if defined (LARCH_CPU_STRING_DEFAULT) +- unsigned int i; +- for (i = 0; i < ARRAY_SIZE (loongarch_cpu_info_table); i++) +- if (strcmp (loongarch_cpu_info_table[i].name, LARCH_CPU_STRING_DEFAULT) == 0) +- return loongarch_cpu_info_table + i; +- gcc_unreachable (); +-#elif defined (LARCH_ISA_DEFAULT) +- return loongarch_cpu_info_from_isa (LARCH_ISA_DEFAULT); +-#else +- gcc_unreachable (); +-#endif +-} ++ /* Get pointers to the beginning and end of the code block. */ ++ addr = force_reg (Pmode, XEXP (m_tramp, 0)); ++ end_addr ++ = loongarch_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset)); + +-/* Set up globals to generate code for the ISA or processor +- described by INFO. */ ++#define OP(X) gen_int_mode (X, SImode) + +-static void +-loongarch_set_architecture (const struct loongarch_cpu_info *info) +-{ +- if (info != 0) +- { +- loongarch_arch_info = info; +- loongarch_arch = info->cpu; +- loongarch_isa = info->isa; +- if (loongarch_isa < 32) +- loongarch_isa_rev = 0; +- else +- loongarch_isa_rev = (loongarch_isa & 31) + 1; +- } +-} ++ /* Build up the code in TRAMPOLINE. */ ++ i = 0; ++ /*pcaddi $static_chain,0 ++ ld.[dw] $tmp,$static_chain,target_function_offset ++ ld.[dw] $static_chain,$static_chain,static_chain_offset ++ jirl $r0,$tmp,0 */ ++ trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST)); ++ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) ++ | 19 /* $t7 */ ++ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) ++ | ((target_function_offset & 0xfff) << 10)); ++ trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) ++ | (STATIC_CHAIN_REGNUM - GP_REG_FIRST) ++ | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) ++ | ((static_chain_offset & 0xfff) << 10)); ++ trampoline[i++] = OP (0x4c000000 | (19 << 5)); ++#undef OP + +-/* Likewise for tuning. */ ++ for (j = 0; j < i; j++) ++ { ++ mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); ++ loongarch_emit_move (mem, trampoline[j]); ++ } + +-static void +-loongarch_set_tune (const struct loongarch_cpu_info *info) +-{ +- if (info != 0) +- { +- loongarch_tune_info = info; +- loongarch_tune = info->cpu; +- } +-} ++ /* Set up the static chain pointer field. */ ++ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); ++ loongarch_emit_move (mem, chain_value); + +-/* Implement TARGET_OPTION_OVERRIDE. */ ++ /* Set up the target function field. 
*/ ++ mem = adjust_address (m_tramp, ptr_mode, target_function_offset); ++ loongarch_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); + +-static void +-loongarch_option_override (void) +-{ +- int i, start, regno, mode; ++ /* Flush the code part of the trampoline. */ ++ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); ++ emit_insn (gen_clear_cache (addr, end_addr)); ++} + +-#ifdef SUBTARGET_OVERRIDE_OPTIONS +- SUBTARGET_OVERRIDE_OPTIONS; +-#endif ++/* Generate or test for an insn that supports a constant permutation. */ + ++#define MAX_VECT_LEN 32 + +- /* -mno-float overrides -mhard-float and -msoft-float. */ +- if (TARGET_NO_FLOAT) +- { +- target_flags |= MASK_SOFT_FLOAT_ABI; +- target_flags_explicit |= MASK_SOFT_FLOAT_ABI; +- } +- +- +- /* Set the small data limit. */ +- loongarch_small_data_threshold = (global_options_set.x_g_switch_value +- ? g_switch_value +- : LARCH_DEFAULT_GVALUE); +- +- /* The following code determines the architecture and register size. +- Similar code was added to GAS 2.14 (see tc-loongarch.c:md_after_parse_args()). +- The GAS and GCC code should be kept in sync as much as possible. */ +- +- if (global_options_set.x_loongarch_arch_option) +- loongarch_set_architecture (loongarch_cpu_info_from_opt (loongarch_arch_option)); ++struct expand_vec_perm_d ++{ ++ rtx target, op0, op1; ++ unsigned char perm[MAX_VECT_LEN]; ++ machine_mode vmode; ++ unsigned char nelt; ++ bool one_vector_p; ++ bool testing_p; ++}; + +- if (loongarch_arch_info == 0) +- loongarch_set_architecture (loongarch_default_arch ()); ++/* Construct (set target (vec_select op0 (parallel perm))) and ++ return true if that's a valid instruction in the active ISA. */ + +- /* Optimize for loongarch_arch, unless -mtune selects a different processor. */ +- if (global_options_set.x_loongarch_tune_option) +- loongarch_set_tune (loongarch_cpu_info_from_opt (loongarch_tune_option)); ++static bool ++loongarch_expand_vselect (rtx target, rtx op0, ++ const unsigned char *perm, unsigned nelt) ++{ ++ rtx rperm[MAX_VECT_LEN], x; ++ rtx_insn *insn; ++ unsigned i; + +- if (loongarch_tune_info == 0) +- loongarch_set_tune (loongarch_arch_info); ++ for (i = 0; i < nelt; ++i) ++ rperm[i] = GEN_INT (perm[i]); + +- if ((target_flags_explicit & MASK_64BIT) == 0) +- { +- /* Infer the integer register size from the ABI and processor. +- Restrict ourselves to 32-bit registers if that's all the +- processor has, or if the ABI cannot handle 64-bit registers. */ +- if (loongarch_abi == ABILP32) +- target_flags &= ~MASK_64BIT; +- else +- target_flags |= MASK_64BIT; +- } ++ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm)); ++ x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x); ++ x = gen_rtx_SET (target, x); + +- if ((target_flags_explicit & MASK_FLOAT64) != 0) +- { +- if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64) +- error ("unsupported combination: %s", "-mfp64 -msingle-float"); +- } +- else ++ insn = emit_insn (x); ++ if (recog_memoized (insn) < 0) + { +- /* -msingle-float selects 32-bit float registers. On r6 and later, +- -mdouble-float selects 64-bit float registers, since the old paired +- register model is not supported. In other cases the float registers +- should be the same size as the integer ones. 
*/ +- if (TARGET_64BIT && TARGET_DOUBLE_FLOAT) +- target_flags |= MASK_FLOAT64; +- else if (loongarch_abi == ABILP32 && ISA_HAS_LSX) +- target_flags |= MASK_FLOAT64; +- else +- target_flags &= ~MASK_FLOAT64; ++ remove_insn (insn); ++ return false; + } ++ return true; ++} + +- /* End of code shared with GAS. */ +- +- if (!TARGET_OLDABI) +- flag_pcc_struct_return = 0; ++/* Similar, but generate a vec_concat from op0 and op1 as well. */ + +- /* Decide which rtx_costs structure to use. */ +- if (optimize_size) +- loongarch_cost = &loongarch_rtx_cost_optimize_size; +- else +- loongarch_cost = &loongarch_rtx_cost_data[loongarch_tune]; ++static bool ++loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1, ++ const unsigned char *perm, unsigned nelt) ++{ ++ machine_mode v2mode; ++ rtx x; + +- /* If the user hasn't specified a branch cost, use the processor's +- default. */ +- if (loongarch_branch_cost == 0) +- loongarch_branch_cost = loongarch_cost->branch_cost; ++ if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode)) ++ return false; ++ x = gen_rtx_VEC_CONCAT (v2mode, op0, op1); ++ return loongarch_expand_vselect (target, x, perm, nelt); ++} + +- /* Prefer a call to memcpy over inline code when optimizing for size, +- though see MOVE_RATIO in loongarch.h. */ +- if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0) +- target_flags |= MASK_MEMCPY; ++/* Construct (set target (vec_select op0 (parallel selector))) and ++ return true if that's a valid instruction in the active ISA. */ + +- /* If we have a nonzero small-data limit, check that the -mgpopt +- setting is consistent with the other target flags. */ +- if (loongarch_small_data_threshold > 0) +- { +- if (TARGET_VXWORKS_RTP) +- warning (0, "cannot use small-data accesses for %qs", "-mrtp"); +- } ++static bool ++loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) ++{ ++ rtx x, elts[MAX_VECT_LEN]; ++ rtvec v; ++ rtx_insn *insn; ++ unsigned i; + +- /* Make sure that when ISA_HAS_LSX is true, TARGET_FLOAT64 and +- TARGET_HARD_FLOAT_ABI and both true. */ +- if (ISA_HAS_LSX && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI)) +- error ("%<-mlsx%> must be used with %<-mfp64%> and %<-mhard-float%>"); ++ if (!ISA_HAS_LSX && !ISA_HAS_LASX) ++ return false; + +- /* If TARGET_LASX, enable TARGET_LSX. */ +- if (TARGET_LASX) +- target_flags |= MASK_LSX; ++ for (i = 0; i < d->nelt; i++) ++ elts[i] = GEN_INT (d->perm[i]); + +- /* .cfi_* directives generate a read-only section, so fall back on +- manual .eh_frame creation if we need the section to be writable. */ +- if (TARGET_WRITABLE_EH_FRAME) +- flag_dwarf2_cfi_asm = 0; ++ v = gen_rtvec_v (d->nelt, elts); ++ x = gen_rtx_PARALLEL (VOIDmode, v); + +- loongarch_init_print_operand_punct (); ++ if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) ++ return false; + +- /* Set up array to map GCC register number to debug register number. +- Ignore the special purpose register numbers. 
*/ ++ x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); ++ x = gen_rtx_SET (d->target, x); + +- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ insn = emit_insn (x); ++ if (recog_memoized (insn) < 0) + { +- loongarch_dbx_regno[i] = IGNORED_DWARF_REGNUM; +- if (GP_REG_P (i) || FP_REG_P (i)) +- loongarch_dwarf_regno[i] = i; +- else +- loongarch_dwarf_regno[i] = INVALID_REGNUM; ++ remove_insn (insn); ++ return false; + } ++ return true; ++} + +- start = GP_DBX_FIRST - GP_REG_FIRST; +- for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++) +- loongarch_dbx_regno[i] = i + start; +- +- start = FP_DBX_FIRST - FP_REG_FIRST; +- for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++) +- loongarch_dbx_regno[i] = i + start; +- +- /* Set up loongarch_hard_regno_mode_ok. */ +- for (mode = 0; mode < MAX_MACHINE_MODE; mode++) +- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) +- loongarch_hard_regno_mode_ok_p[mode][regno] +- = loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode); ++/* Try to simplify a two vector permutation using 2 intra-lane interleave ++ insns and cross-lane shuffle for 32-byte vectors. */ + +- /* Function to allocate machine-dependent function status. */ +- init_machine_status = &loongarch_init_machine_status; +- target_flags &= ~MASK_RELAX_PIC_CALLS; +- +- /* We register a second machine specific reorg pass after delay slot +- filling. Registering the pass must be done at start up. It's +- convenient to do it here. */ +- opt_pass *new_pass = make_pass_loongarch_machine_reorg2 (g); +- struct register_pass_info insert_pass_loongarch_machine_reorg2 = +- { +- new_pass, /* pass */ +- "dbr", /* reference_pass_name */ +- 1, /* ref_pass_instance_number */ +- PASS_POS_INSERT_AFTER /* po_op */ +- }; +- register_pass (&insert_pass_loongarch_machine_reorg2); ++static bool ++loongarch_expand_vec_perm_interleave (struct expand_vec_perm_d *d) ++{ ++ unsigned i, nelt; ++ rtx t1,t2,t3; ++ rtx (*gen_high) (rtx, rtx, rtx); ++ rtx (*gen_low) (rtx, rtx, rtx); ++ machine_mode mode = GET_MODE (d->target); + +- loongarch_register_frame_header_opt (); +-} ++ if (d->one_vector_p) ++ return false; ++ if (ISA_HAS_LASX && GET_MODE_SIZE (d->vmode) == 32) ++ ; ++ else ++ return false; + ++ nelt = d->nelt; ++ if (d->perm[0] != 0 && d->perm[0] != nelt / 2) ++ return false; ++ for (i = 0; i < nelt; i += 2) ++ if (d->perm[i] != d->perm[0] + i / 2 ++ || d->perm[i + 1] != d->perm[0] + i / 2 + nelt) ++ return false; + +-/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ ++ if (d->testing_p) ++ return true; + +-static void +-loongarch_conditional_register_usage (void) +-{ +- if (!TARGET_HARD_FLOAT) ++ switch (d->vmode) + { +- AND_COMPL_HARD_REG_SET (accessible_reg_set, +- reg_class_contents[(int) FP_REGS]); +- AND_COMPL_HARD_REG_SET (accessible_reg_set, +- reg_class_contents[(int) ST_REGS]); ++ case E_V32QImode: ++ gen_high = gen_lasx_xvilvh_b; ++ gen_low = gen_lasx_xvilvl_b; ++ break; ++ case E_V16HImode: ++ gen_high = gen_lasx_xvilvh_h; ++ gen_low = gen_lasx_xvilvl_h; ++ break; ++ case E_V8SImode: ++ gen_high = gen_lasx_xvilvh_w; ++ gen_low = gen_lasx_xvilvl_w; ++ break; ++ case E_V4DImode: ++ gen_high = gen_lasx_xvilvh_d; ++ gen_low = gen_lasx_xvilvl_d; ++ break; ++ case E_V8SFmode: ++ gen_high = gen_lasx_xvilvh_w_f; ++ gen_low = gen_lasx_xvilvl_w_f; ++ break; ++ case E_V4DFmode: ++ gen_high = gen_lasx_xvilvh_d_f; ++ gen_low = gen_lasx_xvilvl_d_f; ++ break; ++ default: ++ gcc_unreachable (); + } +-} + +-/* Implement EH_USES. 
*/ +- +-bool +-loongarch_eh_uses (unsigned int regno) +-{ +- return false; ++ t1 = gen_reg_rtx (mode); ++ t2 = gen_reg_rtx (mode); ++ emit_insn (gen_high (t1, d->op0, d->op1)); ++ emit_insn (gen_low (t2, d->op0, d->op1)); ++ if(mode == V4DFmode || mode == V8SFmode) ++ { ++ t3 = gen_reg_rtx (V4DFmode); ++ if (d->perm[0]) ++ emit_insn(gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1), ++ gen_lowpart (V4DFmode, t2),GEN_INT(0x31))); ++ else ++ emit_insn(gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1), ++ gen_lowpart (V4DFmode, t2),GEN_INT(0x20))); ++ } ++ else ++ { ++ t3 = gen_reg_rtx (V4DImode); ++ if (d->perm[0]) ++ emit_insn(gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1), ++ gen_lowpart (V4DImode, t2),GEN_INT(0x31))); ++ else ++ emit_insn(gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1), ++ gen_lowpart (V4DImode, t2),GEN_INT(0x20))); ++ } ++ emit_move_insn (d->target, gen_lowpart (mode, t3)); ++ return true; + } + +-/* Implement EPILOGUE_USES. */ ++/* Implement extract-even and extract-odd permutations.*/ + +-bool +-loongarch_epilogue_uses (unsigned int regno) ++static bool ++loongarch_expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd) + { +- /* Say that the epilogue uses the return address register. Note that +- in the case of sibcalls, the values "used by the epilogue" are +- considered live at the start of the called function. */ +- if (regno == RETURN_ADDR_REGNUM) +- return true; ++ rtx t1; ++ machine_mode mode = GET_MODE (d->target); ++ t1 = gen_reg_rtx (mode); + +- /* An interrupt handler must preserve some registers that are +- ordinarily call-clobbered. */ +- if (cfun->machine->interrupt_handler_p +- && loongarch_interrupt_extra_call_saved_reg_p (regno)) ++ if (d->testing_p) + return true; + +- return false; +-} ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ /* Shuffle the lanes around into { 0 4 2 6 } and { 1 5 3 7 }. */ ++ if (odd) ++ emit_insn (gen_lasx_xvilvh_d_f (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvilvl_d_f (t1, d->op0, d->op1)); + +-/* Return true if MEM1 and MEM2 use the same base register, and the +- offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the +- register into (from) which the contents of MEM1 will be loaded +- (stored), depending on the value of LOAD_P. +- SWAP_P is true when the 1st and 2nd instructions are swapped. */ ++ /* Shuffle within the 256-bit lanes to produce the result required. ++ { 0 2 4 6 } | { 1 3 5 7 }. */ ++ emit_insn (gen_lasx_xvpermi_d_v4df (d->target, t1, GEN_INT (0xd8))); ++ break; + +-static bool +-loongarch_load_store_pair_p_1 (bool load_p, bool swap_p, +- rtx first_reg, rtx mem1, rtx mem2) +-{ +- rtx base1, base2; +- HOST_WIDE_INT offset1, offset2; ++ case E_V4DImode: ++ if (odd) ++ emit_insn (gen_lasx_xvilvh_d (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvilvl_d (t1, d->op0, d->op1)); + +- if (!MEM_P (mem1) || !MEM_P (mem2)) +- return false; ++ emit_insn (gen_lasx_xvpermi_d_v4di (d->target, t1, GEN_INT (0xd8))); ++ break; + +- loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); +- loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); ++ case E_V8SFmode: ++ /* Shuffle the lanes around into: ++ { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }. */ ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_w_f (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_w_f (t1, d->op0, d->op1)); + +- if (!REG_P (base1) || !rtx_equal_p (base1, base2)) +- return false; ++ /* Shuffle within the 256-bit lanes to produce the result required. 
++ { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }. */ ++ emit_insn (gen_lasx_xvpermi_d_v8sf (d->target, t1, GEN_INT (0xd8))); ++ break; + +- /* Avoid invalid load pair instructions. */ +- if (load_p && REGNO (first_reg) == REGNO (base1)) +- return false; ++ case E_V8SImode: ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_w (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_w (t1, d->op0, d->op1)); + +- /* We must avoid this case for anti-dependence. +- Ex: lw $3, 4($3) +- lw $2, 0($3) +- first_reg is $2, but the base is $3. */ +- if (load_p +- && swap_p +- && REGNO (first_reg) + 1 == REGNO (base1)) +- return false; ++ emit_insn (gen_lasx_xvpermi_d_v8si (d->target, t1, GEN_INT (0xd8))); ++ break; + +- if (offset2 != offset1 + 4) +- return false; ++ case E_V16HImode: ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_h (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_h (t1, d->op0, d->op1)); + +- if (!ULARCH_12BIT_OFFSET_P (offset1)) +- return false; ++ emit_insn (gen_lasx_xvpermi_d_v16hi (d->target, t1, GEN_INT (0xd8))); ++ break; + +- return true; +-} ++ case E_V32QImode: ++ if (odd) ++ emit_insn (gen_lasx_xvpickod_b (t1, d->op0, d->op1)); ++ else ++ emit_insn (gen_lasx_xvpickev_b (t1, d->op0, d->op1)); + +-bool +-loongarch_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p) +-{ +- rtx reg1, reg2, mem1, mem2, base1, base2; +- enum reg_class rc1, rc2; +- HOST_WIDE_INT offset1, offset2; ++ emit_insn (gen_lasx_xvpermi_d_v32qi (d->target, t1, GEN_INT (0xd8))); ++ break; + +- if (load_p) +- { +- reg1 = operands[0]; +- reg2 = operands[2]; +- mem1 = operands[1]; +- mem2 = operands[3]; +- } +- else +- { +- reg1 = operands[1]; +- reg2 = operands[3]; +- mem1 = operands[0]; +- mem2 = operands[2]; ++ default: ++ gcc_unreachable (); + } + +- if (loongarch_address_insns (XEXP (mem1, 0), mode, false) == 0 +- || loongarch_address_insns (XEXP (mem2, 0), mode, false) == 0) +- return false; +- +- loongarch_split_plus (XEXP (mem1, 0), &base1, &offset1); +- loongarch_split_plus (XEXP (mem2, 0), &base2, &offset2); +- +- /* Base regs do not match. */ +- if (!REG_P (base1) || !rtx_equal_p (base1, base2)) +- return false; ++ return true; ++} + +- /* Either of the loads is clobbering base register. It is legitimate to bond +- loads if second load clobbers base register. However, hardware does not +- support such bonding. */ +- if (load_p +- && (REGNO (reg1) == REGNO (base1) +- || (REGNO (reg2) == REGNO (base1)))) +- return false; ++/* Pattern match extract-even and extract-odd permutations. */ + +- /* Loading in same registers. */ +- if (load_p +- && REGNO (reg1) == REGNO (reg2)) ++static bool ++loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d) ++{ ++ unsigned i, odd, nelt = d->nelt; ++ if(!ISA_HAS_LASX) + return false; + +- /* The loads/stores are not of same type. */ +- rc1 = REGNO_REG_CLASS (REGNO (reg1)); +- rc2 = REGNO_REG_CLASS (REGNO (reg2)); +- if (rc1 != rc2 +- && !reg_class_subset_p (rc1, rc2) +- && !reg_class_subset_p (rc2, rc1)) ++ odd = d->perm[0]; ++ if (odd != 0 && odd != 1) + return false; + +- if (abs (offset1 - offset2) != GET_MODE_SIZE (mode)) +- return false; ++ for (i = 1; i < nelt; ++i) ++ if (d->perm[i] != 2 * i + odd) ++ return false; + +- return true; ++ return loongarch_expand_vec_perm_even_odd_1 (d, odd); + } + +-/* OPERANDS describes the operands to a pair of SETs, in the order +- dest1, src1, dest2, src2. Return true if the operands can be used +- in an LWP or SWP instruction; LOAD_P says which. 
*/ ++/* Expand a variable vector permutation for LASX. */ + +-bool +-loongarch_load_store_pair_p (bool load_p, rtx *operands) ++void ++loongarch_expand_vec_perm_1 (rtx operands[]) + { +- rtx reg1, reg2, mem1, mem2; ++ rtx target = operands[0]; ++ rtx op0 = operands[1]; ++ rtx op1 = operands[2]; ++ rtx mask = operands[3]; ++ bool one_operand_shuffle = rtx_equal_p (op0, op1); ++ rtx t1, t2, t3, t4, t5, t6, vt, vec[32]; ++ machine_mode mode = GET_MODE (op0); ++ machine_mode maskmode = GET_MODE (mask); ++ int w, i; ++ ++ /* Number of elements in the vector. */ ++ w = GET_MODE_NUNITS (mode); ++ ++ if (mode == V4DImode || mode == V4DFmode) ++ { ++ maskmode = mode = V8SImode; ++ w = 8; ++ t1 = gen_reg_rtx (maskmode); ++ ++ /* Replicate the low bits of the V4DImode mask into V8SImode: ++ mask = { A B C D } ++ t1 = { A A B B C C D D }. */ ++ for (i = 0; i < w / 2; ++i) ++ vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2); ++ vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec)); ++ vt = force_reg (maskmode, vt); ++ mask = gen_lowpart (maskmode, mask); ++ emit_insn (gen_lasx_xvperm_w (t1, mask, vt)); ++ ++ /* Multiply the shuffle indicies by two. */ ++ t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1, ++ OPTAB_DIRECT); ++ ++ /* Add one to the odd shuffle indicies: ++ t1 = { A*2, A*2+1, B*2, B*2+1, ... }. */ ++ for (i = 0; i < w / 2; ++i) ++ { ++ vec[i * 2] = const0_rtx; ++ vec[i * 2 + 1] = const1_rtx; ++ } ++ vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec)); ++ vt = validize_mem (force_const_mem (maskmode, vt)); ++ t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1, ++ OPTAB_DIRECT); + +- if (load_p) +- { +- reg1 = operands[0]; +- reg2 = operands[2]; +- mem1 = operands[1]; +- mem2 = operands[3]; ++ /* Continue as if V8SImode (resp. V32QImode) was used initially. 
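++
++     As a worked illustration (assuming a V4DImode selector):
++       mask = { 1 3 0 2 }
++       replicate low words:   t1 = { 1 1 3 3 0 0 2 2 }
++       double:                t1 = { 2 2 6 6 0 0 4 4 }
++       add { 0 1 0 1 ... }:   t1 = { 2 3 6 7 0 1 4 5 }
++     which selects the same 64-bit elements as pairs of 32-bit words.
++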
*/ ++ operands[3] = mask = t1; ++ target = gen_reg_rtx (mode); ++ op0 = gen_lowpart (mode, op0); ++ op1 = gen_lowpart (mode, op1); + } +- else ++ switch (mode) + { +- reg1 = operands[1]; +- reg2 = operands[3]; +- mem1 = operands[0]; +- mem2 = operands[2]; ++ case E_V8SImode: ++ if (one_operand_shuffle) ++ { ++ emit_insn (gen_lasx_xvperm_w (target, op0, mask)); ++ if (target != operands[0]) ++ emit_move_insn (operands[0], ++ gen_lowpart (GET_MODE (operands[0]), target)); ++ } ++ else ++ { ++ t1 = gen_reg_rtx (V8SImode); ++ t2 = gen_reg_rtx (V8SImode); ++ emit_insn (gen_lasx_xvperm_w (t1, op0, mask)); ++ emit_insn (gen_lasx_xvperm_w (t2, op1, mask)); ++ goto merge_two; ++ } ++ return; ++ ++ case E_V8SFmode: ++ mask = gen_lowpart (V8SImode, mask); ++ if (one_operand_shuffle) ++ emit_insn (gen_lasx_xvperm_w_f (target, op0, mask)); ++ else ++ { ++ t1 = gen_reg_rtx (V8SFmode); ++ t2 = gen_reg_rtx (V8SFmode); ++ emit_insn (gen_lasx_xvperm_w_f (t1, op0, mask)); ++ emit_insn (gen_lasx_xvperm_w_f (t2, op1, mask)); ++ goto merge_two; ++ } ++ return; ++ ++ case E_V16HImode: ++ if (one_operand_shuffle) ++ { ++ t1 = gen_reg_rtx (V16HImode); ++ t2 = gen_reg_rtx (V16HImode); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t1, op0, GEN_INT(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t2, op0, GEN_INT(0xee))); ++ emit_insn (gen_lasx_xvshuf_h (target, mask, t2, t1)); ++ } ++ else ++ { ++ t1 = gen_reg_rtx (V16HImode); ++ t2 = gen_reg_rtx (V16HImode); ++ t3 = gen_reg_rtx (V16HImode); ++ t4 = gen_reg_rtx (V16HImode); ++ t5 = gen_reg_rtx (V16HImode); ++ t6 = gen_reg_rtx (V16HImode); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t3, op0, GEN_INT(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t4, op0, GEN_INT(0xee))); ++ emit_insn (gen_lasx_xvshuf_h (t1, mask, t4, t3)); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t5, op1, GEN_INT(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (t6, op1, GEN_INT(0xee))); ++ emit_insn (gen_lasx_xvshuf_h (t2, mask, t6, t5)); ++ goto merge_two; ++ } ++ return; ++ ++ case E_V32QImode: ++ if (one_operand_shuffle) ++ { ++ t1 = gen_reg_rtx (V32QImode); ++ t2 = gen_reg_rtx (V32QImode); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t1, op0, GEN_INT(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t2, op0, GEN_INT(0xee))); ++ emit_insn (gen_lasx_xvshuf_b (target, t2, t1, mask)); ++ } ++ else ++ { ++ t1 = gen_reg_rtx (V32QImode); ++ t2 = gen_reg_rtx (V32QImode); ++ t3 = gen_reg_rtx (V32QImode); ++ t4 = gen_reg_rtx (V32QImode); ++ t5 = gen_reg_rtx (V32QImode); ++ t6 = gen_reg_rtx (V32QImode); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t3, op0, GEN_INT(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t4, op0, GEN_INT(0xee))); ++ emit_insn (gen_lasx_xvshuf_b (t1, t4, t3, mask)); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t5, op1, GEN_INT(0x44))); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (t6, op1, GEN_INT(0xee))); ++ emit_insn (gen_lasx_xvshuf_b (t2, t6, t5, mask)); ++ goto merge_two; ++ } ++ return; ++ ++ default: ++ gcc_assert (GET_MODE_SIZE (mode) == 32); ++ break; + } + +- if (REGNO (reg2) == REGNO (reg1) + 1) +- return loongarch_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2); ++merge_two: ++ /* Then merge them together. The key is whether any given control ++ element contained a bit set that indicates the second word. 
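++
++   Concretely, a result element must come from t1 when its selector
++   element is < w (it indexed op0) and from t2 when it is >= w (it
++   indexed op1); e.g. with w = 8, selector element 11 satisfies 11 >= 8
++   and so picks the t2 lane.  The GE comparison built below produces
++   exactly that mask, and loongarch_expand_vec_cond_expr performs the
++   final select.
++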
*/ ++ rtx xops[6]; ++ mask = operands[3]; ++ vt = GEN_INT (w); ++ vt = gen_const_vec_duplicate (maskmode, vt); ++ vt = force_reg (maskmode, vt); ++ if (GET_MODE (target) != mode) ++ target = gen_reg_rtx (mode); ++ xops[0] = target; ++ xops[1] = gen_lowpart (mode, t2); ++ xops[2] = gen_lowpart (mode, t1); ++ xops[3] = gen_rtx_GE (maskmode, mask, vt); ++ xops[4] = mask; ++ xops[5] = vt; ++ ++ loongarch_expand_vec_cond_expr (mode, maskmode, xops); ++ if (target != operands[0]) ++ emit_move_insn (operands[0], ++ gen_lowpart (GET_MODE (operands[0]), target)); ++} + +- if (REGNO (reg1) == REGNO (reg2) + 1) +- return loongarch_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1); ++void ++loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) ++{ ++ machine_mode vmode = GET_MODE (target); + +- return false; ++ gcc_checking_assert (vmode == E_V16QImode ++ || vmode == E_V2DImode || vmode == E_V2DFmode ++ || vmode == E_V4SImode || vmode == E_V4SFmode ++ || vmode == E_V8HImode); ++ gcc_checking_assert (GET_MODE (op0) == vmode); ++ gcc_checking_assert (GET_MODE (op1) == vmode); ++ gcc_checking_assert (GET_MODE (sel) == vmode); ++ gcc_checking_assert (ISA_HAS_LSX); ++ ++ switch (vmode) ++ { ++ case E_V16QImode: ++ emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel)); ++ break; ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0)); ++ break; ++ case E_V2DImode: ++ emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0)); ++ break; ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0)); ++ break; ++ case E_V4SImode: ++ emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0)); ++ break; ++ case E_V8HImode: ++ emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0)); ++ break; ++ default: ++ break; ++ } + } + +-/* Return true if REG1 and REG2 match the criteria for a movep insn. */ ++static bool ++loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d) ++{ ++ int i; ++ rtx target, op0, op1, sel, tmp; ++ rtx rperm[MAX_VECT_LEN]; + +-bool +-loongarch_movep_target_p (rtx reg1, rtx reg2) +-{ +- int regno1, regno2, pair; +- unsigned int i; +- static const int match[8] = { +- 0x00000060, /* 5, 6 */ +- 0x000000a0, /* 5, 7 */ +- 0x000000c0, /* 6, 7 */ +- 0x00200010, /* 4, 21 */ +- 0x00400010, /* 4, 22 */ +- 0x00000030, /* 4, 5 */ +- 0x00000050, /* 4, 6 */ +- 0x00000090 /* 4, 7 */ +- }; +- +- if (!REG_P (reg1) || !REG_P (reg2)) +- return false; ++ if (d->vmode == E_V2DImode || d->vmode == E_V2DFmode ++ || d->vmode == E_V4SImode || d->vmode == E_V4SFmode ++ || d->vmode == E_V8HImode || d->vmode == E_V16QImode) ++ { ++ target = d->target; ++ op0 = d->op0; ++ op1 = d->one_vector_p ? 
d->op0 : d->op1; + +- regno1 = REGNO (reg1); +- regno2 = REGNO (reg2); ++ if (GET_MODE (op0) != GET_MODE (op1) ++ || GET_MODE (op0) != GET_MODE (target)) ++ return false; + +- if (!GP_REG_P (regno1) || !GP_REG_P (regno2)) +- return false; ++ if (d->testing_p) ++ return true; + +- pair = (1 << regno1) | (1 << regno2); ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (d->perm[i]); ++ } + +- for (i = 0; i < ARRAY_SIZE (match); i++) +- if (pair == match[i]) +- return true; ++ if (d->vmode == E_V2DFmode) ++ { ++ sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V2DImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ } ++ else if (d->vmode == E_V4SFmode) ++ { ++ sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V4SImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ } ++ else ++ { ++ sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm)); ++ emit_move_insn (d->target, sel); ++ } ++ ++ switch (d->vmode) ++ { ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0)); ++ break; ++ case E_V2DImode: ++ emit_insn (gen_lsx_vshuf_d (target, target, op1, op0)); ++ break; ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0)); ++ break; ++ case E_V4SImode: ++ emit_insn (gen_lsx_vshuf_w (target, target, op1, op0)); ++ break; ++ case E_V8HImode: ++ emit_insn (gen_lsx_vshuf_h (target, target, op1, op0)); ++ break; ++ case E_V16QImode: ++ emit_insn (gen_lsx_vshuf_b (target, op1, op0, target)); ++ break; ++ default: ++ break; ++ } + ++ return true; ++ } + return false; + } +- +-/* Return the size in bytes of the trampoline code, padded to +- TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target +- function address immediately follow. */ + +-int +-loongarch_trampoline_code_size (void) ++static bool ++loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) + { +- return 4 * 4; +-} ++ unsigned int i, nelt = d->nelt; ++ unsigned char perm2[MAX_VECT_LEN]; + +-/* Implement TARGET_TRAMPOLINE_INIT. */ ++ if (d->one_vector_p) ++ { ++ /* Try interleave with alternating operands. */ ++ memcpy (perm2, d->perm, sizeof(perm2)); ++ for (i = 1; i < nelt; i += 2) ++ perm2[i] += nelt; ++ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt)) ++ return true; ++ } ++ else ++ { ++ if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, ++ d->perm, nelt)) ++ return true; + +-static void +-loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) +-{ +- rtx addr, end_addr, high, low, opcode, mem; +- rtx trampoline[8]; +- unsigned int i, j; +- HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset; ++ /* Try again with swapped operands. */ ++ for (i = 0; i < nelt; ++i) ++ perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); ++ if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt)) ++ return true; ++ } + +- /* Work out the offsets of the pointers from the start of the +- trampoline code. */ +- end_addr_offset = loongarch_trampoline_code_size (); +- static_chain_offset = end_addr_offset; +- target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); ++ if (loongarch_expand_lsx_shuffle (d)) ++ return true; ++ if (loongarch_expand_vec_perm_even_odd(d)) ++ return true; ++ if (loongarch_expand_vec_perm_interleave(d)) ++ return true; ++ return false; ++} + +- /* Get pointers to the beginning and end of the code block. 
*/ +- addr = force_reg (Pmode, XEXP (m_tramp, 0)); +- end_addr = loongarch_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset)); ++// Following are the assist function for const vector permutation support. ++static bool ++loongarch_is_quad_duplicate (struct expand_vec_perm_d *d) ++{ ++ if (d->perm[0] >= d->nelt / 2) ++ return false; + +-#define OP(X) gen_int_mode (X, SImode) ++ bool result = true; ++ unsigned char lhs = d->perm[0]; ++ unsigned char rhs = d->perm[d->nelt / 2]; + +- /* Build up the code in TRAMPOLINE. */ +- i = 0; +- /* +- pcaddi $static_chain,0 +- ld.[dw] $tmp,$static_chain,target_function_offset +- ld.[dw] $static_chain,$static_chain,static_chain_offset +- jirl $r0,$tmp,0 +- */ +- trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST)); +- trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) +- | 19 /* $t7 */ +- | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) +- | ((target_function_offset & 0xfff) << 10)); +- trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000) +- | (STATIC_CHAIN_REGNUM - GP_REG_FIRST) +- | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5) +- | ((static_chain_offset & 0xfff) << 10)); +- trampoline[i++] = OP (0x4c000000 | (19 << 5)); +-#undef OP ++ if ((rhs - lhs) != d->nelt / 2) ++ return false; + +- for (j = 0; j < i; j++) ++ for (int i = 1; i < d->nelt; i += 1) + { +- mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); +- loongarch_emit_move (mem, trampoline[j]); ++ if ((i < d->nelt / 2) && (d->perm[i] != lhs)) ++ { ++ result = false; ++ break; ++ } ++ if ((i > d->nelt / 2) && (d->perm[i] != rhs)) ++ { ++ result = false; ++ break; ++ } + } + +- /* Set up the static chain pointer field. */ +- mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); +- loongarch_emit_move (mem, chain_value); +- +- /* Set up the target function field. */ +- mem = adjust_address (m_tramp, ptr_mode, target_function_offset); +- loongarch_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); +- +- /* Flush the code part of the trampoline. */ +- emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); +- emit_insn (gen_clear_cache (addr, end_addr)); ++ return result; + } + +- +-/* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default +- behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even +- when TARGET_LOONGSON_MMI is true. */ +- +-static unsigned HOST_WIDE_INT +-loongarch_shift_truncation_mask (machine_mode mode) ++static bool ++loongarch_is_double_duplicate (struct expand_vec_perm_d *d) + { +- return GET_MODE_BITSIZE (mode) - 1; +-} ++ if (!d->one_vector_p) ++ return false; + +- +-/* Generate or test for an insn that supports a constant permutation. */ ++ if (d->nelt < 8) ++ return false; + +-#define MAX_VECT_LEN 32 ++ bool result = true; ++ unsigned char buf = d->perm[0]; + +-struct expand_vec_perm_d +-{ +- rtx target, op0, op1; +- unsigned char perm[MAX_VECT_LEN]; +- machine_mode vmode; +- unsigned char nelt; +- bool one_vector_p; +- bool testing_p; +-}; ++ for (int i = 1; i < d->nelt; i += 2) ++ { ++ if (d->perm[i] != buf) ++ { ++ result = false; ++ break; ++ } ++ if (d->perm[i - 1] != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += d->nelt / 4; ++ } + +-/* Construct (set target (vec_select op0 (parallel perm))) and +- return true if that's a valid instruction in the active ISA. 
*/
++  return result;
++}
+
+ static bool
+-loongarch_expand_vselect (rtx target, rtx op0,
+-                          const unsigned char *perm, unsigned nelt)
++loongarch_is_odd_extraction (struct expand_vec_perm_d *d)
+ {
+-  rtx rperm[MAX_VECT_LEN], x;
+-  rtx_insn *insn;
+-  unsigned i;
++  bool result = true;
++  unsigned char buf = 1;
+
+-  for (i = 0; i < nelt; ++i)
+-    rperm[i] = GEN_INT (perm[i]);
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++        {
++          result = false;
++          break;
++        }
++      buf += 2;
++    }
+
+-  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
+-  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
+-  x = gen_rtx_SET (target, x);
++  return result;
++}
+
+-  insn = emit_insn (x);
+-  if (recog_memoized (insn) < 0)
++static bool
++loongarch_is_even_extraction (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = 0;
++
++  for (int i = 0; i < d->nelt; i += 1)
+    {
+-      remove_insn (insn);
+-      return false;
++      if (buf != d->perm[i])
++        {
++          result = false;
++          break;
++        }
++      buf += 2;
+    }
+
+-  return true;
+-}
++  return result;
++}
+
+-/* Similar, but generate a vec_concat from op0 and op1 as well.  */
+
+ static bool
+-loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
+-                                  const unsigned char *perm, unsigned nelt)
++loongarch_is_extraction_permutation (struct expand_vec_perm_d *d)
+ {
+-  machine_mode v2mode;
+-  rtx x;
++  bool result = true;
++  unsigned char buf = d->perm[0];
+
+-  if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode))
++  if (buf != 0 && buf != d->nelt)
+    return false;
+-  x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
+-  return loongarch_expand_vselect (target, x, perm, nelt);
+-}
+
+-/* Construct (set target (vec_select op0 (parallel selector))) and
+-   return true if that's a valid instruction in the active ISA.
*/ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ return result; ++} + + static bool +-loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d) ++loongarch_is_center_extraction (struct expand_vec_perm_d *d) + { +- rtx x, elts[MAX_VECT_LEN]; +- rtvec v; +- rtx_insn *insn; +- unsigned i; ++ bool result = true; ++ unsigned buf = d->nelt / 2; + +- if (!ISA_HAS_LSX && !ISA_HAS_LASX) ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_reversing_permutation (struct expand_vec_perm_d *d) ++{ ++ if (!d->one_vector_p) + return false; + +- for (i = 0; i < d->nelt; i++) +- elts[i] = GEN_INT (d->perm[i]); ++ bool result = true; ++ unsigned char buf = d->nelt - 1; + +- v = gen_rtvec_v (d->nelt, elts); +- x = gen_rtx_PARALLEL (VOIDmode, v); ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (d->perm[i] != buf) ++ { ++ result = false; ++ break; ++ } + +- if (!loongarch_const_vector_shuffle_set_p (x, d->vmode)) ++ buf -= 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_di_misalign_extract (struct expand_vec_perm_d *d) ++{ ++ if (d->nelt != 4 && d->nelt != 8) + return false; + +- x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x); +- x = gen_rtx_SET (d->target, x); ++ bool result = true; ++ unsigned char buf; + +- insn = emit_insn (x); +- if (recog_memoized (insn) < 0) ++ if (d->nelt == 4) + { +- remove_insn (insn); +- return false; ++ buf = 1; ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ ++ buf += 1; ++ } ++ } ++ else if (d->nelt == 8) ++ { ++ buf = 2; ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_si_misalign_extract (struct expand_vec_perm_d *d) ++{ ++ if (d->vmode != E_V8SImode && d->vmode != E_V8SFmode) ++ return false; ++ bool result = true; ++ unsigned char buf = 1; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_lowpart_interleave (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = 0; ++ ++ for (int i = 0;i < d->nelt; i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ if (result) ++ { ++ buf = d->nelt; ++ for (int i = 1; i < d->nelt; i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_lowpart_interleave_2 (struct expand_vec_perm_d *d) ++{ ++ if (d->vmode != E_V32QImode) ++ return false; ++ bool result = true; ++ unsigned char buf = 0; ++ ++#define COMPARE_SELECTOR(INIT, BEGIN, END) \ ++ buf = INIT; \ ++ for (int i = BEGIN; i < END && result; i += 1) \ ++ { \ ++ if (buf != d->perm[i]) \ ++ { \ ++ result = false; \ ++ break; \ ++ } \ ++ buf += 1; \ ++ } ++ ++ COMPARE_SELECTOR (0, 0, 8); ++ COMPARE_SELECTOR (32, 8, 16); ++ COMPARE_SELECTOR (8, 16, 24); ++ COMPARE_SELECTOR (40, 24, 32); ++ ++#undef COMPARE_SELECTOR ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_lowpart_extract (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = 0; ++ ++ for (int i = 0; i < d->nelt / 2; i += 1) ++ { ++ if (buf != 
d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ if (result) ++ { ++ buf = d->nelt; ++ for (int i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_highpart_interleave (expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = d->nelt / 2; ++ ++ for (int i = 0; i < d->nelt; i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ ++ if (result) ++ { ++ buf = d->nelt + d->nelt / 2; ++ for (int i = 1; i < d->nelt;i += 2) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ buf += 1; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_lasx_highpart_interleave_2 (struct expand_vec_perm_d *d) ++{ ++ if (d->vmode != E_V32QImode) ++ return false; ++ ++ bool result = true; ++ unsigned char buf = 0; ++ ++#define COMPARE_SELECTOR(INIT, BEGIN, END) \ ++ buf = INIT; \ ++ for (int i = BEGIN; i < END && result; i += 1) \ ++ { \ ++ if (buf != d->perm[i]) \ ++ { \ ++ result = false; \ ++ break; \ ++ } \ ++ buf += 1; \ ++ } ++ ++ COMPARE_SELECTOR (16, 0, 8); ++ COMPARE_SELECTOR (48, 8, 16); ++ COMPARE_SELECTOR (24, 16, 24); ++ COMPARE_SELECTOR (56, 24, 32); ++ ++#undef COMPARE_SELECTOR ++ return result; ++} ++ ++static bool ++loongarch_is_elem_duplicate (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ unsigned char buf = d->perm[0]; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (buf != d->perm[i]) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ return result; ++} ++ ++inline bool ++loongarch_is_op_reverse_perm (struct expand_vec_perm_d *d) ++{ ++ return (d->vmode == E_V4DFmode) ++ && d->perm[0] == 2 && d->perm[1] == 3 ++ && d->perm[2] == 0 && d->perm[3] == 1; ++} ++ ++static bool ++loongarch_is_single_op_perm (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ ++ for (int i = 0; i < d->nelt; i += 1) ++ { ++ if (d->perm[i] >= d->nelt) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ return result; ++} ++ ++static bool ++loongarch_is_divisible_perm (struct expand_vec_perm_d *d) ++{ ++ bool result = true; ++ ++ for (int i = 0; i < d->nelt / 2; i += 1) ++ { ++ if (d->perm[i] >= d->nelt) ++ { ++ result = false; ++ break; ++ } ++ } ++ ++ if (result) ++ { ++ for (int i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ if (d->perm[i] < d->nelt) ++ { ++ result = false; ++ break; ++ } ++ } ++ } ++ ++ return result; ++} ++ ++inline bool ++loongarch_is_triple_stride_extract (struct expand_vec_perm_d *d) ++{ ++ return (d->vmode == E_V4DImode || d->vmode == E_V4DFmode) ++ && d->perm[0] == 1 && d->perm[1] == 4 ++ && d->perm[2] == 7 && d->perm[3] == 0; ++} ++ ++/* In LASX, xvshuf.* insn does not have the behavior that gcc expects when ++ * compiler wants to emit a vector permutation. ++ * ++ * 1. What GCC provides via vectorize_vec_perm_const()'s paramater: ++ * When GCC wants to performs a vector permutation, it provides two op ++ * reigster, one target register, and a selector. ++ * In const vector permutation case, GCC provides selector as a char array ++ * that contains original value; in variable vector permuatation ++ * (performs via vec_perm insn template), it provides a vector register. ++ * We assume that nelt is the elements numbers inside single vector in current ++ * 256bit vector mode. ++ * ++ * 2. 
What GCC expects to perform: ++ * Two op registers(op0, op1) will "combine" into a 512bit temp vector storage ++ * that has 2*nelt elements inside it; the low 256bit is op0, and high 256bit ++ * is op1, then the elements are indexed as below: ++ * 0 ~ nelt - 1 nelt ~ 2 * nelt - 1 ++ * |-------------------------|-------------------------| ++ * Low 256bit (op0) High 256bit(op1) ++ * For example, the second element in op1(V8SImode) will be indexed with 9. ++ * Selector is a vector that has the same mode and number of elements with ++ * op0,op1 and target, it's look like this: ++ * 0 ~ nelt - 1 ++ * |-------------------------| ++ * 256bit (selector) ++ * It describes which element from 512bit temp vector storage will fit into ++ * target's every element slot. ++ * GCC expects that every element in selector can be ANY indices of 512bit ++ * vector storage(Selector can pick literally any element from op0 and op1, and ++ * then fits into any place of target register). This is also what LSX 128bit ++ * vshuf.* instruction do similarly, so we can handle 128bit vector permutation ++ * by single instruction easily. ++ * ++ * 3. What xvshuf.* instruction does: ++ * In short, it just do TWO 128bit vector permuatation, it's the reason that we ++ * need to do these jobs. We will explain it. ++ * op0, op1, target, and selector will be separate into high 128bit and low ++ * 128bit, and do permutation as the description below: ++ * ++ * a) op0's low 128bit and op1's low 128bit "combines" into a 256bit temp ++ * vector storage(TVS1), elements are indexed as below: ++ * 0 ~ nelt / 2 - 1 nelt / 2 ~ nelt - 1 ++ * |---------------------|---------------------| TVS1 ++ * op0's low 128bit op1's low 128bit ++ * op0's high 128bit and op1's high 128bit are "combined" into TVS2 in the ++ * same way. ++ * 0 ~ nelt / 2 - 1 nelt / 2 ~ nelt - 1 ++ * |---------------------|---------------------| TVS2 ++ * op0's high 128bit op1's high 128bit ++ * b) Selector's low 128bit describes which elements from TVS1 will fit into ++ * target vector's low 128bit. No TVS2 elements are allowed. ++ * c) Selector's high 128bit describes which elements from TVS2 will fit into ++ * target vector's high 128bit. No TVS1 elements are allowed. ++ * ++ * As we can see, if we want to handle vector permutation correctly, we can ++ * achieve it in three ways: ++ * a) Modify selector's elements, to make sure that every elements can inform ++ * correct value that will put into target vector. ++ b) Generate extra instruction before/after xvshuf.* instruction, for ++ adjusting op vector or target vector, to make sure target vector's value is ++ what GCC expects. ++ c) Use other instructions to process op and put correct result into target. ++ */ ++ ++/* Implementation of constant vector permuatation. This function identifies ++ * recognized pattern of permuation selector argument, and use one or more ++ * instruction(s) to finish the permutation job correctly. For unsupported ++ * patterns, it will return false. */ ++ ++static bool ++loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) ++{ ++ // Although we have the LSX vec_perm template, there's still some ++ // 128bit vector permuatation operations send to vectorize_vec_perm_const. ++ // In this case, we just simpliy wrap them by single vshuf.* instruction, ++ // because LSX vshuf.* instruction just have the same behavior that GCC ++ // expects. 
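/* Editor's sketch: a scalar model of the lane-split xvshuf semantics
   described in the long comment above, for E_V8SImode (nelt = 8).
   Target slots 0..3 can only read TVS1 = { op0[0..3], op1[0..3] },
   and slots 4..7 only TVS2 = { op0[4..7], op1[4..7] }:

     void
     xvshuf_w_model (int tgt[8], const unsigned char sel[8],
                     const int op0[8], const int op1[8])
     {
       for (int i = 0; i < 8; i++)
         {
           int lane = i / 4;      // 0 = low 128 bits, 1 = high
           int s = sel[i] % 8;    // index into TVS1 resp. TVS2
           tgt[i] = s < 4 ? op0[lane * 4 + s] : op1[lane * 4 + (s - 4)];
         }
     }

   Every "remapped[]" assignment below exists to translate a flat GCC
   selector (indices 0 .. 2*nelt-1) into this per-lane view.  */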
++ if (d->vmode != E_V32QImode && d->vmode != E_V16HImode ++ && d->vmode != E_V4DImode && d->vmode != E_V4DFmode ++ && d->vmode != E_V8SImode && d->vmode != E_V8SFmode) ++ return loongarch_try_expand_lsx_vshuf_const (d); ++ ++ bool ok = false, reverse_hi_lo = false, extract_ev_od = false, ++ use_alt_op = false; ++ unsigned char idx; ++ int i; ++ rtx target, op0, op1, sel, tmp; ++ rtx op0_alt = NULL_RTX, op1_alt = NULL_RTX; ++ rtx rperm[MAX_VECT_LEN]; ++ unsigned char remapped[MAX_VECT_LEN]; ++ ++ // Try to figure out whether is a recognized permutation selector pattern, if ++ // yes, we will reassign some elements with new value in selector argument, ++ // and in some cases we will generate some assist insn to complete the ++ // permutation. (Even in some cases, we use other insn to impl permutation ++ // instead of xvshuf!) ++ ++ // Make sure to check d->testing_p is false everytime if you want to emit new ++ // insn, unless you want to crash into ICE directly. ++ if (loongarch_is_quad_duplicate (d)) ++ { ++ // Selector example: E_V8SImode, { 0, 0, 0, 0, 4, 4, 4, 4 } ++ // copy first elem from original selector to all elem in new selector. ++ idx = d->perm[0]; ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ remapped[i] = idx; ++ } ++ // Selector after: { 0, 0, 0, 0, 0, 0, 0, 0 } ++ } ++ else if (loongarch_is_double_duplicate (d)) ++ { ++ // Selector example: E_V8SImode, { 1, 1, 3, 3, 5, 5, 7, 7 } ++ // one_vector_p == true ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ idx = d->perm[i]; ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ } ++ // Selector after: { 1, 1, 3, 3, 1, 1, 3, 3 } ++ } ++ else if (loongarch_is_odd_extraction (d) ++ || loongarch_is_even_extraction (d)) ++ { ++ // Odd extraction selector sample: E_V4DImode, { 1, 3, 5, 7 } ++ // Selector after: { 1, 3, 1, 3 } ++ // Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 } ++ // Selector after: { 0, 2, 0, 2 } ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ idx = d->perm[i]; ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ } ++ // Additional insn is required for correct result. See codes below. ++ extract_ev_od = true; ++ } ++ else if (loongarch_is_extraction_permutation (d)) ++ { ++ // Selector sample: E_V8SImode, { 0, 1, 2, 3, 4, 5, 6, 7 } ++ if (d->perm[0] == 0) ++ { ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ remapped[i] = i; ++ remapped[i + d->nelt / 2] = i; ++ } ++ } ++ else ++ { ++ // { 8, 9, 10, 11, 12, 13, 14, 15 } ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ idx = i + d->nelt / 2; ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ } ++ } ++ // Selector after: { 0, 1, 2, 3, 0, 1, 2, 3 } ++ // { 8, 9, 10, 11, 8, 9, 10, 11 } ++ } ++ else if (loongarch_is_center_extraction (d)) ++ { ++ // sample: E_V4DImode, { 2, 3, 4, 5 } ++ // In this condition, we can just copy high 128bit of op0 and low 128bit ++ // of op1 to the target register by using xvpermi.q insn. 
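/* Editor's note on the xvpermi.q immediates used throughout this
   function (inferred from the operand values traced in the comments
   of the interleave cases below, so treat it as an assumption): the
   four 128-bit lanes { xj.lo, xj.hi, xd.lo, xd.hi } are numbered
   0..3; imm[1:0] picks the result's low lane and imm[5:4] its high
   lane.  With xd holding a copy of op1 and xj = op0, the 0x21 used
   here decodes as

     imm[1:0] = 1  ->  op0's high 128 bits  ->  target.lo
     imm[5:4] = 2  ->  op1's low  128 bits  ->  target.hi

   which is exactly the { 2, 3, 4, 5 } center extraction.  */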
++ if (!d->testing_p) ++ { ++ emit_move_insn (d->target, d->op1); ++ switch (d->vmode) ++ { ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0, GEN_INT (0x21))); ++ break; ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0, GEN_INT (0x21))); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target, d->op0, GEN_INT (0x21))); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target, d->op0, GEN_INT (0x21))); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target, d->op0, GEN_INT (0x21))); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target, d->op0, GEN_INT (0x21))); ++ break; ++ default: ++ break; ++ } ++ } ++ ok = true; ++ // Finish the funtion directly. ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_reversing_permutation (d)) ++ { ++ // Selector sample: E_V8SImode, { 7, 6, 5, 4, 3, 2, 1, 0 } ++ // one_vector_p == true ++ idx = d->nelt / 2 - 1; ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ remapped[i] = idx; ++ remapped[i + d->nelt / 2] = idx; ++ idx -= 1; ++ } ++ // Selector after: { 3, 2, 1, 0, 3, 2, 1, 0 } ++ // Additional insn will be generated to swap hi and lo 128bit of target ++ // register. ++ reverse_hi_lo = true; ++ } ++ else if (loongarch_is_di_misalign_extract (d) ++ || loongarch_is_si_misalign_extract (d)) ++ { ++ // Selector Sample: ++ // DI misalign: E_V4DImode, { 1, 2, 3, 4 } ++ // SI misalign: E_V8SImode, { 1, 2, 3, 4, 5, 6, 7, 8 } ++ if (!d->testing_p) ++ { ++ // Copy original op0/op1 value to new temp register. ++ // In some cases, operand register may be used in multiple place, so ++ // we need new regiter instead modify original one, to avoid runtime ++ // crashing or wrong value after execution. ++ use_alt_op = true; ++ op1_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ ++ // Adjust op1 for selecting correct value in high 128bit of target ++ // register. ++ // op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 } ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x21))); ++ ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ remapped[i] = d->perm[i]; ++ remapped[i + d->nelt / 2] = d->perm[i]; ++ } ++ // Selector after: ++ // DI misalign: { 1, 2, 1, 2 } ++ // SI misalign: { 1, 2, 3, 4, 1, 2, 3, 4 } ++ } ++ } ++ else if (loongarch_is_lasx_lowpart_interleave (d)) ++ { ++ // Elements from op0's low 18bit and op1's 128bit are inserted into ++ // target register alternately. ++ //sample: E_V4DImode, { 0, 4, 1, 5 } ++ if (!d->testing_p) ++ { ++ // Prepare temp register instead of modify original op. ++ use_alt_op = true; ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ // Generate subreg for fitting into insn gen function. ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ ++ // Adjust op value in temp register. 
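/* Editor's worked example for this branch (E_V4DImode,
   d->perm = { 0, 4, 1, 5 }): the two xvpermi.q insns below first turn
   the operands into op0' = { 0, 1, 4, 5 } and op1' = { 4, 5, 0, 1 }
   (element labels, as traced in the comments), and the remapping
   loops then compute, with nelt/2 = 2:

     remapped[0] = 0            (even slot, low half: kept)
     remapped[1] = 4 - 2 = 2    (odd slot, low half: minus nelt/2)
     remapped[2] = 1 + 6 = 7    (even slot, high half: plus 3*nelt/2)
     remapped[3] = 5            (odd slot, high half: kept)

   so the final xvshuf.d sees { 0, 2, 7, 5 } instead of the flat
   { 0, 4, 1, 5 } that GCC handed us.  */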
++ // op0 = {0,1,2,3}, op1 = {4,5,0,1} ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x02))); ++ // op0 = {0,1,4,5}, op1 = {4,5,0,1} ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ ++ // Remap indices in selector based on the location of index inside ++ // selector, and vector element numbers in current vector mode. ++ ++ // Filling low 128bit of new selector. ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ // value in odd-indexed slot of low 128bit part of selector ++ // vector. ++ remapped[i] = i % 2 != 0 ? d->perm[i] - d->nelt / 2 : d->perm[i]; ++ } ++ // Then filling the high 128bit. ++ for (i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ // value in even-indexed slot of high 128bit part of ++ // selector vector. ++ remapped[i] = i % 2 == 0 ? d->perm[i] + (d->nelt / 2) * 3 : d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_lasx_lowpart_interleave_2 (d)) ++ { ++ // Special lowpart interleave case in V32QI vector mode. It does the same ++ // thing as we can see in if branch that above this line. ++ // Selector sample: E_V32QImode, ++ // {0, 1, 2, 3, 4, 5, 6, 7, 32, 33, 34, 35, 36, 37, 38, 39, 8, 9, 10, ++ // 11, 12, 13, 14, 15, 40, 41, 42, 43, 44, 45, 46, 47} ++ if (!d->testing_p) ++ { ++ // Solution for this case in very simple - covert op into V4DI mode, ++ // and do same thing as previous if branch. ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x02))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ remapped[0] = 0; ++ remapped[1] = 4; ++ remapped[2] = 1; ++ remapped[3] = 5; ++ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (remapped[i]); ++ } ++ ++ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v(4, rperm)); ++ sel = force_reg (E_V4DImode, sel); ++ emit_insn (gen_lasx_xvshuf_d (conv_target, sel, ++ conv_op1, conv_op0)); ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_lasx_lowpart_extract (d)) ++ { ++ // Copy op0's low 128bit to target's low 128bit, and copy op1's low ++ // 128bit to target's high 128bit. ++ // Selector sample: E_V4DImode, { 0, 1, 4 ,5 } ++ if (!d->testing_p) ++ { ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ ++ // We can achieve the expectation by using sinple xvpermi.q insn. ++ emit_move_insn (conv_target, conv_op1); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_target, conv_target, ++ conv_op0, GEN_INT(0x20))); ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_lasx_highpart_interleave (d)) ++ { ++ // Similar to lowpart interleave, elements from op0's high 128bit and ++ // op1's high 128bit are inserted into target regiter alternately. ++ // Selector sample: E_V8SImode, { 4, 12, 5, 13, 6, 14, 7, 15 } ++ if (!d->testing_p) ++ { ++ // Prepare temp op register. 
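/* Editor's note: this branch mirrors the lowpart case above with the
   lane moves flipped; the immediates 0x13 / 0x01 below bring the two
   HIGH 128-bit halves within reach, and the remapping subtracts
   nelt/2 from even slots and 3*nelt/2 from odd slots instead.  For
   the E_V8SImode sample { 4, 12, 5, 13, 6, 14, 7, 15 } this yields
   remapped = { 0, 12, 1, 13, 6, 2, 7, 3 }.  */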
++ use_alt_op = true; ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ // Adjust op value in temp regiter. ++ // op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 } ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x13))); ++ // op0 = { 2, 3, 6, 7 }, op1 = { 6, 7, 2, 3 } ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ // Remap indices in selector based on the location of index inside ++ // selector, and vector element numbers in current vector mode. ++ ++ // Filling low 128bit of new selector. ++ for (i = 0; i < d->nelt / 2; i += 1) ++ { ++ // value in even-indexed slot of low 128bit part of selector ++ // vector. ++ remapped[i] = i % 2 == 0 ? d->perm[i] - d->nelt / 2 : d->perm[i]; ++ } ++ // Then filling the high 128bit. ++ for (i = d->nelt / 2; i < d->nelt; i += 1) ++ { ++ // value in odd-indexed slot of high 128bit part of selector ++ // vector. ++ remapped[i] = i % 2 != 0 ? d->perm[i] - (d->nelt / 2) * 3 : d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_lasx_highpart_interleave_2 (d)) ++ { ++ // Special highpart interleave case in V32QI vector mode. It does the ++ // same thing as the normal version above. ++ // Selector sample: E_V32QImode, ++ // {16, 17, 18, 19, 20, 21, 22, 23, 48, 49, 50, 51, 52, 53, 54, 55, 24, ++ // 25, 26, 27, 28, 29, 30, 31, 56, 57, 58, 59, 60, 61, 62, 63} ++ if (!d->testing_p) ++ { ++ // Convert op into V4DImode and do the things. ++ op1_alt = gen_reg_rtx (d->vmode); ++ op0_alt = gen_reg_rtx (d->vmode); ++ emit_move_insn (op1_alt, d->op1); ++ emit_move_insn (op0_alt, d->op0); ++ ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, ++ conv_op0, GEN_INT (0x13))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0, ++ conv_op1, GEN_INT (0x01))); ++ remapped[0] = 2; ++ remapped[1] = 6; ++ remapped[2] = 3; ++ remapped[3] = 7; ++ ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (remapped[i]); ++ } ++ ++ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v(4, rperm)); ++ sel = force_reg (E_V4DImode, sel); ++ emit_insn (gen_lasx_xvshuf_d (conv_target, sel, ++ conv_op1, conv_op0)); ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_elem_duplicate (d)) ++ { ++ // Brocast single element (from op0 or op1) to all slot of target ++ // register. ++ // Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 } ++ if (!d->testing_p) ++ { ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx temp_reg = gen_reg_rtx (d->vmode); ++ rtx conv_temp = gen_rtx_SUBREG (E_V4DImode, temp_reg, 0); ++ ++ emit_move_insn (temp_reg, d->op0); ++ ++ idx = d->perm[0]; ++ // We will use xvrepl128vei.* insn to achieve the result, but we need ++ // to make the high/low 128bit has the same contents that contain the ++ // value that we need to broardcast, because xvrepl128vei does the ++ // broardcast job from every 128bit of source register to ++ // corresponded part of target register! (A deep sigh.) 
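/* Editor's sketch of the xvrepl128vei.w behaviour the comment above
   complains about: the immediate indexes WITHIN each 128-bit half
   independently, which is why both halves of temp_reg must first be
   loaded with the half that contains the wanted element:

     void
     xvrepl128vei_w_model (int tgt[8], const int src[8], int ui2)
     {
       for (int i = 0; i < 8; i++)
         tgt[i] = src[(i / 4) * 4 + ui2];   // lane base + immediate
     }
*/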
++ if (/*idx >= 0 &&*/ idx < d->nelt / 2) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op0, GEN_INT (0x0))); ++ } ++ else if (idx >= d->nelt / 2 && idx < d->nelt) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op0, GEN_INT (0x11))); ++ idx -= d->nelt / 2; ++ } ++ else if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op1, GEN_INT (0x0))); ++ } ++ else if (idx >= (d->nelt + d->nelt / 2) && idx < d->nelt * 2) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp, ++ conv_op1, GEN_INT (0x11))); ++ idx -= d->nelt / 2; ++ } ++ ++ // Then we can finally generate this insn. ++ switch (d->vmode) ++ { ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvrepl128vei_d (d->target, temp_reg, GEN_INT (idx))); ++ break; ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvrepl128vei_d_f (d->target, temp_reg, GEN_INT (idx))); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvrepl128vei_w (d->target, temp_reg, GEN_INT (idx))); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvrepl128vei_w_f (d->target, temp_reg, GEN_INT (idx))); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvrepl128vei_h (d->target, temp_reg, GEN_INT (idx))); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvrepl128vei_b (d->target, temp_reg, GEN_INT(idx))); ++ break; ++ default: ++ gcc_unreachable (); ++ break; ++ } ++ ++ // finish func directly. ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ } ++ else if (loongarch_is_op_reverse_perm (d)) ++ { ++ // reverse high 128bit and low 128bit in op0. ++ // Selector sample: E_V4DFmode, { 2, 3, 0, 1 } ++ // Use xvpermi.q for doing this job. ++ if (!d->testing_p) ++ { ++ if (d->vmode == E_V4DImode) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0, ++ GEN_INT (0x01))); ++ } ++ else if (d->vmode == E_V4DFmode) ++ { ++ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0, ++ GEN_INT (0x01))); ++ } ++ else ++ { ++ gcc_unreachable (); ++ } ++ } ++ ++ ok = true; ++ goto expand_perm_const_2_end; ++ } ++ else if (loongarch_is_single_op_perm (d)) ++ { ++ //Permutation that only select elements from op0. ++ if (!d->testing_p) ++ { ++ // Prepare temp register instead of modify original op. ++ use_alt_op = true; ++ op0_alt = gen_reg_rtx (d->vmode); ++ op1_alt = gen_reg_rtx (d->vmode); ++ ++ emit_move_insn (op0_alt, d->op0); ++ emit_move_insn (op1_alt, d->op1); ++ ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ ++ // Duplicate op0's low 128bit in op0, then duplicate high 128bit ++ // in op1. After this, xvshuf.* insn's selector argument can ++ // access all elements we need for correct permutation result. ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op0, ++ GEN_INT (0x00))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0, ++ GEN_INT (0x11))); ++ ++ // In this case, there's no need to remap selector's indices. ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ remapped[i] = d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_divisible_perm (d)) ++ { ++ // Divisible perm: ++ // Low 128bit of selector only selects elements of op0, ++ // and high 128bit of selector only selects elements of op1. ++ ++ if (!d->testing_p) ++ { ++ // Prepare temp register instead of modify original op. 
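/* Editor's trace of the two xvpermi.q immediates used below, under
   the lane numbering assumed earlier (an inference, not taken from a
   spec):

     0x02 (xd = copy of op0, xj = op1):  op0' = { op0.lo, op1.lo }
     0x31 (xd = copy of op1, xj = op0):  op1' = { op0.hi, op1.hi }

   Afterwards the selector's low lane (which reads op0'.lo | op1'.lo)
   can see all of op0, and its high lane (op0'.hi | op1'.hi) all of
   op1, which is exactly what a "divisible" selector needs, hence the
   indices are used unmodified.  */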
++ use_alt_op = true; ++ op0_alt = gen_reg_rtx (d->vmode); ++ op1_alt = gen_reg_rtx (d->vmode); ++ ++ emit_move_insn (op0_alt, d->op0); ++ emit_move_insn (op1_alt, d->op1); ++ ++ rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ ++ // Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure ++ //that selector's low 128bit can access all op0's elements, and ++ //selector's high 128bit can access all op1's elements. ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op1, ++ GEN_INT (0x02))); ++ emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0, ++ GEN_INT (0x31))); ++ ++ // No need to modify indices. ++ for (i = 0; i < d->nelt;i += 1) ++ { ++ remapped[i] = d->perm[i]; ++ } ++ } ++ } ++ else if (loongarch_is_triple_stride_extract (d)) ++ { ++ // Selector sample: E_V4DFmode, { 1, 4, 7, 0 } ++ if (!d->testing_p) ++ { ++ // Resolve it with brute force modification. ++ remapped[0] = 1; ++ remapped[1] = 2; ++ remapped[2] = 3; ++ remapped[3] = 0; ++ } ++ } ++ else ++ { ++ // When all of the detections above are failed, we will try last ++ // strategy. ++ // The for loop tries to detect following rules based on indices' value ++ // , its position inside of selector vector ,and strange behavior of xvshuf.* insn; ++ // Then we take corresponding action. (Replace with new value, or give up ++ // whole permutation expansion.) ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ idx = d->perm[i]/* % (2 * d->nelt)*/; ++ ++ // if index is located in low 128bit of selector vector ++ if (i < d->nelt / 2) ++ { ++ // Fail case 1: index tries to reach element that located in op0's ++ // high 128bit. ++ if (idx >= d->nelt / 2 && idx < d->nelt) ++ { ++ goto expand_perm_const_2_end; ++ } ++ // Fail case 2: index tries to reach element that located in ++ // op1's high 128bit. ++ if (idx >= (d->nelt + d->nelt / 2)) ++ { ++ goto expand_perm_const_2_end; ++ } ++ ++ // Success case: index tries to reach elements that located in ++ // op1's low 128bit. Apply - (nelt / 2) offset to original value. ++ if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) ++ { ++ idx -= d->nelt / 2; ++ } ++ } ++ // if index is located in high 128bit of selector vector ++ else ++ { ++ // Fail case 1: index tries to reach element that located in ++ // op1's low 128bit. ++ if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2)) ++ { ++ goto expand_perm_const_2_end; ++ } ++ // Fail case 2: index tries to reach element that located in ++ // op0's low 128bit. ++ if (idx < (d->nelt / 2)) ++ { ++ goto expand_perm_const_2_end; ++ } ++ // Success case: index tries to reach element that located in ++ // op0's high 128bit. ++ if (idx >= d->nelt / 2 && idx < d->nelt) ++ { ++ idx -= d->nelt / 2; ++ } ++ } ++ // No need to process other case that we did not mentioned. ++ ++ // Assign with original or processed value. ++ remapped[i] = idx; ++ } ++ } ++ ++ ok = true; ++ // If testing_p is true, compiler is trying to figure out that backend can ++ // handle this permutation, but doesn't want to generate actual insn. So if ++ // true, exit directly. ++ if (d->testing_p) ++ { ++ goto expand_perm_const_2_end; ++ } ++ ++ // Convert remapped selector array to RTL array. ++ for (i = 0; i < d->nelt; i += 1) ++ { ++ rperm[i] = GEN_INT (remapped[i]); ++ } ++ ++ // Copy selector vector from memory to vector regiter for later insn gen ++ // function. 
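/* Editor's summary of the last-resort loop above: a selector slot in
   the LOW half of the vector may only name an element that already
   lives in a low 128-bit half, and a HIGH-half slot one from a high
   128-bit half; op1 indices are folded down by nelt/2 so they land in
   the per-lane range:

     low  slot:  idx <  nelt/2               op0.lo, kept
                 nelt <= idx < 3*nelt/2      op1.lo, idx -= nelt/2
     high slot:  nelt/2 <= idx < nelt        op0.hi, idx -= nelt/2
                 idx >= 3*nelt/2             op1.hi, kept

   Anything else cannot be reached by a single xvshuf, and the
   expansion bails out.  */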
++ // if vector's element in floating point value, we cannot fit selector ++ // argument into insn gen function directly, because of the insn template ++ // definition. As a solution, generate a integral mode subreg of target, ++ // then copy selector vector(that is in integral mode) to this subreg. ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ break; ++ case E_V8SFmode: ++ sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm)); ++ tmp = gen_rtx_SUBREG (E_V8SImode, d->target, 0); ++ emit_move_insn (tmp, sel); ++ break; ++ default: ++ sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm)); ++ emit_move_insn (d->target, sel); ++ break; + } +- return true; +-} +- +-static bool +-loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) +-{ +- unsigned int i, nelt = d->nelt; +- unsigned char perm2[MAX_VECT_LEN]; + +- if (d->one_vector_p) ++ target = d->target; ++ // If temp op registers are requested in previous if branch, then use temp ++ // register intead of original one. ++ if (use_alt_op) + { +- /* Try interleave with alternating operands. */ +- memcpy (perm2, d->perm, sizeof(perm2)); +- for (i = 1; i < nelt; i += 2) +- perm2[i] += nelt; +- if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt)) +- return true; ++ op0 = op0_alt != NULL_RTX ? op0_alt : d->op0; ++ op1 = op1_alt != NULL_RTX ? op1_alt : d->op1; + } + else + { +- if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, +- d->perm, nelt)) +- return true; ++ op0 = d->op0; ++ op1 = d->one_vector_p ? d->op0 : d->op1; ++ } + +- /* Try again with swapped operands. */ +- for (i = 0; i < nelt; ++i) +- perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1); +- if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt)) +- return true; ++ // We FINALLY can generate xvshuf.* insn. ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0)); ++ break; ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0)); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0)); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0)); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0)); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target)); ++ break; ++ default: ++ gcc_unreachable (); ++ break; + } + +- if (loongarch_expand_lsx_shuffle (d)) +- return true; +- return false; ++ // extra insn for swapping the hi/lo 128bit of target vector register. 
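/* Editor's note: under the lane numbering assumed earlier, xvpermi.q
   with immediate 0x1 and xd = xj = target selects lane 1 (target.hi)
   as the new low half and lane 0 (target.lo) as the new high half,
   i.e. a plain 128-bit swap.  The odd/even case below instead uses
   xvpermi.d with 0xD8 = 0b11011000, a doubleword permute whose four
   2-bit fields reorder the 64-bit elements as { 0, 2, 1, 3 },
   swapping the middle two as the comment says.  */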
++ if (reverse_hi_lo) ++ { ++ switch (d->vmode) ++ { ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->target, GEN_INT (0x1))); ++ break; ++ case E_V4DImode: ++ emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->target, GEN_INT (0x1))); ++ break; ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target, d->target, GEN_INT (0x1))); ++ break; ++ case E_V8SImode: ++ emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target, d->target, GEN_INT (0x1))); ++ break; ++ case E_V16HImode: ++ emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target, d->target, GEN_INT (0x1))); ++ break; ++ case E_V32QImode: ++ emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target, d->target, GEN_INT (0x1))); ++ break; ++ default: ++ break; ++ } ++ } ++ // extra insn required by odd/even extraction. Swapping the second and third ++ // 64bit in target vector register. ++ else if (extract_ev_od) ++ { ++ rtx converted = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted, GEN_INT (0xD8))); ++ } ++ ++expand_perm_const_2_end: ++ return ok; + } + + /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */ +@@ -9043,13 +9312,19 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, + if (!d.one_vector_p) + d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); + ++ ok = loongarch_expand_vec_perm_const_2 (&d); ++ if (ok) ++ return ok; ++ + start_sequence (); + ok = loongarch_expand_vec_perm_const_1 (&d); + end_sequence (); + return ok; + } + +- ok = loongarch_expand_vec_perm_const_1 (&d); ++ ok = loongarch_expand_vec_perm_const_2 (&d); ++ if (!ok) ++ ok = loongarch_expand_vec_perm_const_1 (&d); + + /* If we were given a two-vector permutation which just happened to + have both input vectors equal, we folded this into a one-vector +@@ -9070,16 +9345,18 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, + return ok; + } + +-/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. */ +- + static int +-loongarch_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED, +- machine_mode mode) ++loongarch_cpu_sched_reassociation_width (struct loongarch_target *target, ++ unsigned int opc, machine_mode mode) + { +- switch (loongarch_tune) ++ /* unreferenced argument */ ++ (void) opc; ++ ++ switch (target->cpu_tune) + { +- case PROCESSOR_LOONGARCH64: +- case PROCESSOR_LA464: ++ case CPU_LOONGARCH64: ++ case CPU_LA464: ++ case CPU_LA664: + /* Vector part. */ + if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)) + { +@@ -9094,10 +9371,164 @@ loongarch_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED, + else if (FLOAT_MODE_P (mode)) + return 4; + break; ++ } ++ ++ /* default is 1 */ ++ return 1; ++} ++ ++/* Implement TARGET_SCHED_REASSOCIATION_WIDTH. 
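   (Editor's addition.)  The width returned above is the number of
   independent accumulator chains the reassociation pass may build for
   one operation/mode pair.  With a width of 4, for example,

     s = a + b + c + d + e + f + g + h;

   may be rewritten as four partial sums

     s0 = a + e;  s1 = b + f;  s2 = c + g;  s3 = d + h;
     s = (s0 + s1) + (s2 + s3);

   so the additions overlap in the pipeline instead of forming one
   serial dependency chain.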
*/ ++ ++static int ++loongarch_sched_reassociation_width (unsigned int opc, machine_mode mode) ++{ ++ return loongarch_cpu_sched_reassociation_width (&la_target, opc, mode); ++} ++ ++/* Implement extract a scalar element from vecotr register */ ++ ++void ++loongarch_expand_vector_extract (rtx target, rtx vec, int elt) ++{ ++ machine_mode mode = GET_MODE (vec); ++ machine_mode inner_mode = GET_MODE_INNER (mode); ++ rtx tmp; ++ ++ switch (mode) ++ { ++ case E_V8HImode: ++ case E_V16QImode: ++ break; ++ ++ case E_V32QImode: ++ if (ISA_HAS_LASX) ++ { ++ if (elt >= 16) ++ { ++ tmp = gen_reg_rtx (V32QImode); ++ emit_insn (gen_lasx_xvpermi_d_v32qi (tmp, vec, GEN_INT (0xe))); ++ loongarch_expand_vector_extract (target, gen_lowpart (V16QImode, tmp), elt & 15); ++ } ++ else ++ loongarch_expand_vector_extract (target, gen_lowpart (V16QImode, vec), elt & 15); ++ return; ++ } ++ break; ++ ++ case E_V16HImode: ++ if (ISA_HAS_LASX) ++ { ++ if (elt >= 8) ++ { ++ tmp = gen_reg_rtx (V16HImode); ++ emit_insn (gen_lasx_xvpermi_d_v16hi (tmp, vec, GEN_INT (0xe))); ++ loongarch_expand_vector_extract (target, gen_lowpart (V8HImode, tmp), elt & 7); ++ } ++ else ++ loongarch_expand_vector_extract (target, gen_lowpart (V8HImode, vec), elt & 7); ++ return; ++ } ++ break; ++ + default: + break; + } +- return 1; ++ ++ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt))); ++ tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp); ++ ++ /* Let the rtl optimizers know about the zero extension performed. */ ++ if (inner_mode == QImode || inner_mode == HImode) ++ { ++ tmp = gen_rtx_ZERO_EXTEND (SImode, tmp); ++ target = gen_lowpart (SImode, target); ++ } ++ if (inner_mode == SImode || inner_mode == DImode) ++ { ++ tmp = gen_rtx_SIGN_EXTEND (inner_mode, tmp); ++ } ++ ++ emit_insn (gen_rtx_SET (target, tmp)); ++} ++ ++/* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC ++ to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode. ++ The upper bits of DEST are undefined, though they shouldn't cause ++ exceptions (some bits from src or all zeros are ok). */ ++ ++static void ++emit_reduc_half (rtx dest, rtx src, int i) ++{ ++ rtx tem, d = dest; ++ switch (GET_MODE (src)) ++ { ++ case E_V4SFmode: ++ tem = gen_lsx_vbsrl_w_f (dest, src, GEN_INT (i == 128 ? 8 : 4)); ++ break; ++ case E_V2DFmode: ++ tem = gen_lsx_vbsrl_d_f (dest, src, GEN_INT (8)); ++ break; ++ case E_V8SFmode: ++ if (i == 256) ++ tem = gen_lasx_xvpermi_d_v8sf (dest, src, GEN_INT (0xe)); ++ else ++ tem = gen_lasx_xvshuf4i_w_f (dest, src, ++ GEN_INT (i == 128 ? 2 + (3 << 2) : 1)); ++ break; ++ case E_V4DFmode: ++ if (i == 256) ++ tem = gen_lasx_xvpermi_d_v4df (dest, src, GEN_INT (0xe)); ++ else ++ tem = gen_lasx_xvpermi_d_v4df (dest, src, const1_rtx); ++ break; ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ d = gen_reg_rtx (V4DImode); ++ if (i == 256) ++ tem = gen_lasx_xvpermi_d_v4di (d, gen_lowpart (V4DImode, src), GEN_INT (0xe)); ++ else ++ tem = gen_lasx_xvbsrl_d (d, gen_lowpart (V4DImode, src), GEN_INT (i/16)); ++ break; ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ d = gen_reg_rtx (V2DImode); ++ tem = gen_lsx_vbsrl_d (d, gen_lowpart (V2DImode, src), GEN_INT (i/16)); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ emit_insn (tem); ++ if (d != dest) ++ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), d)); ++} ++ ++/* Expand a vector reduction. FN is the binary pattern to reduce; ++ DEST is the destination; IN is the input vector. 
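   (Editor's addition.)  Each emit_reduc_half call folds the upper
   half of the remaining data onto the lower half, so the loop runs
   log2(nelt) times; a V8SI sum reduction, for instance, performs

     v = fn (v, v with its high 128 bits moved down)   -- 4 partial sums
     v = fn (v, v >> 64 bits)                          -- 2 partial sums
     v = fn (v, v >> 32 bits)                          -- final sum

   leaving the scalar result in element 0 of DEST.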
*/ ++ ++void ++loongarch_expand_vector_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in) ++{ ++ rtx half, dst, vec = in; ++ machine_mode mode = GET_MODE (in); ++ int i; ++ ++ for (i = GET_MODE_BITSIZE (mode); ++ i > GET_MODE_UNIT_BITSIZE (mode); ++ i >>= 1) ++ { ++ half = gen_reg_rtx (mode); ++ emit_reduc_half (half, vec, i); ++ if (i == GET_MODE_UNIT_BITSIZE (mode) * 2) ++ dst = dest; ++ else ++ dst = gen_reg_rtx (mode); ++ emit_insn (fn (dst, half, vec)); ++ vec = dst; ++ } + } + + /* Expand an integral vector unpack operation. */ +@@ -9110,14 +9541,14 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) + rtx (*extend) (rtx, rtx); + rtx (*cmpFunc) (rtx, rtx, rtx); + rtx (*swap_hi_lo) (rtx, rtx, rtx, rtx); +- rtx tmp, dest, zero; +- machine_mode halfmode = BLKmode; ++ rtx tmp, dest /*, zero */; ++ /* machine_mode halfmode = BLKmode; */ + + if (ISA_HAS_LASX && GET_MODE_SIZE (imode) == 32) + { + switch (imode) + { +- ++ + case E_V8SImode: + if (unsigned_p) + extend = gen_lasx_vext2xv_du_wu; +@@ -9125,7 +9556,7 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) + extend = gen_lasx_vext2xv_d_w; + swap_hi_lo = gen_lasx_xvpermi_q_v8si; + break; +- ++ + case E_V16HImode: + if (unsigned_p) + extend = gen_lasx_vext2xv_wu_hu; +@@ -9133,7 +9564,7 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) + extend = gen_lasx_vext2xv_w_h; + swap_hi_lo = gen_lasx_xvpermi_q_v16hi; + break; +- ++ + case E_V32QImode: + if (unsigned_p) + extend = gen_lasx_vext2xv_hu_bu; +@@ -9141,7 +9572,7 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) + extend = gen_lasx_vext2xv_h_b; + swap_hi_lo = gen_lasx_xvpermi_q_v32qi; + break; +- ++ + default: + gcc_unreachable (); + break; +@@ -9268,7 +9699,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) + machine_mode vmode = GET_MODE (target); + machine_mode imode = GET_MODE_INNER (vmode); + unsigned i, nelt = GET_MODE_NUNITS (vmode); +- unsigned nvar = 0, one_var = -1u; ++ unsigned nvar = 0 /*, one_var = -1u*/ ; + bool all_same = true; + rtx x; + +@@ -9276,7 +9707,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) + { + x = XVECEXP (vals, 0, i); + if (!loongarch_constant_elt_p (x)) +- nvar++, one_var = i; ++ nvar++ /*, one_var = i */ ; + if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) + all_same = false; + } +@@ -9311,7 +9742,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) + temp2 = same; + else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) + { +- if(GET_CODE (same) == MEM) ++ if (GET_CODE (same) == MEM) + { + rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); + loongarch_emit_move (reg_tmp, same); +@@ -9322,7 +9753,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) + } + else + { +- if(GET_CODE (same) == MEM) ++ if (GET_CODE (same) == MEM) + { + rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); + loongarch_emit_move (reg_tmp, same); +@@ -9505,7 +9936,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) + temp2 = same; + else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) + { +- if(GET_CODE (same) == MEM) ++ if (GET_CODE (same) == MEM) + { + rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); + loongarch_emit_move (reg_tmp, same); +@@ -9516,7 +9947,7 @@ loongarch_expand_vector_init (rtx target, rtx vals) + } + else + { +- if(GET_CODE (same) == MEM) ++ if (GET_CODE (same) == MEM) + { + rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); + loongarch_emit_move (reg_tmp, same); +@@ -9614,9 +10045,8 @@ loongarch_expand_vector_init (rtx target, rtx vals) + /* 
Implement HARD_REGNO_CALLER_SAVE_MODE. */ + + machine_mode +-loongarch_hard_regno_caller_save_mode (unsigned int regno, +- unsigned int nregs, +- machine_mode mode) ++loongarch_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs, ++ machine_mode mode) + { + /* For performance, avoid saving/restoring upper parts of a register + by returning MODE as save mode when the mode is known. */ +@@ -9785,7 +10215,8 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, + if (mode != vimode) + { + xop1 = gen_reg_rtx (vimode); +- emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); ++ emit_move_insn (xop1, simplify_gen_subreg (vimode, operands[1], ++ GET_MODE (operands[1]), 0)); + } + emit_move_insn (src1, xop1); + } +@@ -9802,7 +10233,8 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, + if (mode != vimode) + { + xop2 = gen_reg_rtx (vimode); +- emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); ++ emit_move_insn (xop2, simplify_gen_subreg (vimode, operands[2], ++ GET_MODE (operands[2]), 0)); + } + emit_move_insn (src2, xop2); + } +@@ -9821,13 +10253,14 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode, + gen_rtx_AND (vimode, mask, src1)); + /* The result is placed back to a register with the mask. */ + emit_insn (gen_rtx_SET (mask, bsel)); +- emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); ++ emit_move_insn (operands[0], simplify_gen_subreg (mode, mask, ++ GET_MODE (mask), 0)); + } + } + + /* Expand integer vector comparison */ + bool +-loongarch_expand_int_vec_cmp(rtx operands[]) ++loongarch_expand_int_vec_cmp (rtx operands[]) + { + + rtx_code code = GET_CODE (operands[1]); +@@ -9837,7 +10270,7 @@ loongarch_expand_int_vec_cmp(rtx operands[]) + + /* Expand integer vector comparison */ + bool +-loongarch_expand_fp_vec_cmp(rtx operands[]) ++loongarch_expand_fp_vec_cmp (rtx operands[]) + { + rtx_code code = GET_CODE (operands[1]); + loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]); +@@ -9845,61 +10278,16 @@ loongarch_expand_fp_vec_cmp(rtx operands[]) + } + + +-/* Implement TARGET_CASE_VALUES_THRESHOLD. */ +- +-unsigned int +-loongarch_case_values_threshold (void) +-{ +- return default_case_values_threshold (); +-} +- +- + /* Implement TARGET_SPILL_CLASS. */ + + static reg_class_t + loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED, +- machine_mode mode ATTRIBUTE_UNUSED) ++ machine_mode mode ATTRIBUTE_UNUSED) + { + return NO_REGS; + } + +-/* Implement TARGET_LRA_P. */ +- +-static bool +-loongarch_lra_p (void) +-{ +- return loongarch_lra_flag; +-} +- +-/* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS. */ +- +-static reg_class_t +-loongarch_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class, +- reg_class_t best_class ATTRIBUTE_UNUSED) +-{ +- /* LRA will allocate an FPR for an integer mode pseudo instead of spilling +- to memory if an FPR is present in the allocno class. It is rare that +- we actually need to place an integer mode value in an FPR so where +- possible limit the allocation to GR_REGS. This will slightly pessimize +- code that involves integer to/from float conversions as these will have +- to reload into FPRs in LRA. Such reloads are sometimes eliminated and +- sometimes only partially eliminated. We choose to take this penalty +- in order to eliminate usage of FPRs in code that does not use floating +- point data. 
+- +- This change has a similar effect to increasing the cost of FPR->GPR +- register moves for integer modes so that they are higher than the cost +- of memory but changing the allocno class is more reliable. +- +- This is also similar to forbidding integer mode values in FPRs entirely +- but this would lead to an inconsistency in the integer to/from float +- instructions that say integer mode values must be placed in FPRs. */ +- if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno)) && allocno_class == ALL_REGS) +- return GR_REGS; +- return allocno_class; +-} +- +-/* Implement TARGET_PROMOTE_FUNCTION_MODE */ ++/* Implement TARGET_PROMOTE_FUNCTION_MODE. */ + + /* This function is equivalent to default_promote_function_mode_always_promote + except that it returns a promoted mode even if type is NULL_TREE. This is +@@ -9909,10 +10297,10 @@ loongarch_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class, + + static machine_mode + loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, +- machine_mode mode, +- int *punsignedp ATTRIBUTE_UNUSED, +- const_tree fntype ATTRIBUTE_UNUSED, +- int for_return ATTRIBUTE_UNUSED) ++ machine_mode mode, ++ int *punsignedp ATTRIBUTE_UNUSED, ++ const_tree fntype ATTRIBUTE_UNUSED, ++ int for_return ATTRIBUTE_UNUSED) + { + int unsignedp; + +@@ -9933,16 +10321,6 @@ loongarch_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec) + return !TARGET_64BIT || inprec <= 32 || outprec > 32; + } + +-/* Implement TARGET_CONSTANT_ALIGNMENT. */ +- +-static HOST_WIDE_INT +-loongarch_constant_alignment (const_tree exp, HOST_WIDE_INT align) +-{ +- if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR) +- return MAX (align, BITS_PER_WORD); +- return align; +-} +- + /* Implement TARGET_STARTING_FRAME_OFFSET. See loongarch_compute_frame_info + for details about the frame layout. */ + +@@ -9963,8 +10341,10 @@ loongarch_la464_128_store_p (rtx operands[]) + int offset1; + rtx dst0 = operands[0]; + rtx dst1 = operands[2]; ++ /* + rtx src0 = operands[1]; + rtx src1 = operands[3]; ++ */ + int base_reg0; + int base_reg1; + +@@ -10030,13 +10410,15 @@ loongarch_la464_128_load_p (rtx operands[]) + int offset0; + int offset1; + rtx dst0 = operands[0]; ++ /* + rtx dst1 = operands[2]; ++ */ + rtx src0 = operands[1]; + rtx src1 = operands[3]; + int base_reg0; + int base_reg1; + int dst_reg0; +- ++ + dst_reg0 = REGNO (dst0); + + if (GET_CODE (XEXP (src0, 0)) == PLUS) +@@ -10209,6 +10591,138 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + return force_reg (vec_mode, v); + } + ++/* Use rsqrte instruction and Newton-Rhapson to compute the approximation of ++ a single precision floating point [reciprocal] square root. */ ++ ++void loongarch_emit_swrsqrtsf (rtx res, rtx a, machine_mode mode, bool recip) ++{ ++ rtx x0, e0, e1, e2, mhalf, monehalf; ++ REAL_VALUE_TYPE r; ++ machine_mode imode; ++ int unspec; ++ ++ x0 = gen_reg_rtx (mode); ++ e0 = gen_reg_rtx (mode); ++ e1 = gen_reg_rtx (mode); ++ e2 = gen_reg_rtx (mode); ++ ++ real_arithmetic (&r, ABS_EXPR, &dconsthalf, NULL); ++ mhalf = const_double_from_real_value (r, SFmode); ++ ++ real_arithmetic (&r, PLUS_EXPR, &dconsthalf, &dconst1); ++ monehalf = const_double_from_real_value (r, SFmode); ++ unspec = UNSPEC_RSQRTE; ++ ++ if (VECTOR_MODE_P (mode)) ++ { ++ mhalf = loongarch_build_const_vector (mode, true, mhalf); ++ monehalf = loongarch_build_const_vector (mode, true, monehalf); ++ if (GET_MODE_SIZE (mode) == 32) ++ imode = mode == V4DFmode ? 
V4DImode : V8SImode; ++ if (GET_MODE_SIZE (mode) == 16) ++ imode = mode == V2DFmode ? V2DImode : V4SImode; ++ } ++ ++ /* rsqrt(a) = rsqrte(a) * (1.5 - 0.5 * a * rsqrte(a) * rsqrte(a)) ++ sqrt(a) = a * rsqrte(a) * (1.5 - 0.5 * a * rsqrte(a) * rsqrte(a))*/ ++ ++ a = force_reg (mode, a); ++ ++ /* x0 = rsqrt(a) estimate */ ++ emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, a), ++ unspec))); ++ ++ /* If (a == 0.0) Filter out infinity to prevent NaN for sqrt(0.0). */ ++ if (!recip) ++ { ++ rtx zero = force_reg (mode, CONST0_RTX(mode)); ++ ++ if (VECTOR_MODE_P (mode)) ++ { ++ rtx mask = gen_reg_rtx (imode); ++ emit_insn (gen_rtx_SET (mask, gen_rtx_NE (imode, a, zero))); ++ emit_insn (gen_rtx_SET (x0, gen_rtx_AND (mode, x0, gen_lowpart(mode, mask)))); ++ } ++ else ++ { ++ rtx target = emit_conditional_move (x0, GT, a, zero, mode, ++ x0, zero, mode, 0); ++ if (target != x0) ++ emit_move_insn (x0, target); ++ } ++ } ++ ++ /* e0 = x0 * a */ ++ emit_insn (gen_rtx_SET (e0, gen_rtx_MULT (mode, x0, a))); ++ /* e1 = e0 * x0 */ ++ emit_insn (gen_rtx_SET (e1, gen_rtx_MULT (mode, e0, x0))); ++ ++ /* e2 = 1.5 - e1 * 0.5 */ ++ mhalf = force_reg (mode, mhalf); ++ monehalf = force_reg (mode, monehalf); ++ emit_insn (gen_rtx_SET (e2, gen_rtx_FMA (mode, gen_rtx_NEG(mode, e1), mhalf, monehalf))); ++ ++ if (recip) ++ /* res = e2 * x0 */ ++ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, x0, e2))); ++ else ++ /* res = e2 * e0 */ ++ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e2, e0))); ++} ++ ++/* Use recipe instruction and Newton-Rhapson to compute the approximation of ++ a single precision floating point divide. */ ++ ++void loongarch_emit_swdivsf (rtx res, rtx a, rtx b, machine_mode mode) ++{ ++ rtx x0, x1, e0, mtwo; ++ REAL_VALUE_TYPE r; ++ x0 = gen_reg_rtx (mode); ++ e0 = gen_reg_rtx (mode); ++ x1 = gen_reg_rtx (mode); ++ ++ real_arithmetic (&r, ABS_EXPR, &dconst2, NULL); ++ mtwo = const_double_from_real_value (r, SFmode); ++ ++ if (VECTOR_MODE_P (mode)) ++ mtwo = loongarch_build_const_vector (mode, true, mtwo); ++ ++ mtwo = force_reg (mode, mtwo); ++ ++ /* a / b = a * recipe(b) * (2.0 - b * recipe(b)) */ ++ ++ /* x0 = 1./b estimate */ ++ emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, b), ++ UNSPEC_RECIPE))); ++ /* 2.0 - b * x0; */ ++ emit_insn (gen_rtx_SET (e0, gen_rtx_FMA (mode,gen_rtx_NEG(mode, b), x0, mtwo))); ++ ++ /* x1 = x0 * e0 */ ++ emit_insn (gen_rtx_SET (x1, gen_rtx_MULT (mode, x0, e0))); ++ ++ /* res = a * x1 */ ++ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, a, x1))); ++} ++ ++/* LoongArch only implements preld hint=0 (prefetch for load) and hint=8 ++ (prefetch for store), other hint just scale to hint = 0 and hint = 1. */ ++ ++rtx ++loongarch_prefetch_cookie (rtx write, rtx locality) ++{ ++ if (INTVAL (locality) == 1 && INTVAL (write) == 0) ++ return GEN_INT (INTVAL (write) + 2); ++ ++ /* store. */ ++ if (INTVAL (write) == 1) ++ return GEN_INT (INTVAL (write) + 7); ++ ++ /* load. */ ++ if (INTVAL (write) == 0) ++ return GEN_INT (INTVAL (write)); ++ ++ gcc_unreachable (); ++} + + + /* Initialize the GCC target structure. 
*/ +@@ -10225,10 +10739,6 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_LEGITIMIZE_ADDRESS + #define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address + +-#undef TARGET_ASM_FUNCTION_PROLOGUE +-#define TARGET_ASM_FUNCTION_PROLOGUE loongarch_output_function_prologue +-#undef TARGET_ASM_FUNCTION_EPILOGUE +-#define TARGET_ASM_FUNCTION_EPILOGUE loongarch_output_function_epilogue + #undef TARGET_ASM_SELECT_RTX_SECTION + #define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section + #undef TARGET_ASM_FUNCTION_RODATA_SECTION +@@ -10249,19 +10759,12 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD + #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ + loongarch_multipass_dfa_lookahead +-#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P +-#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \ +- loongarch_small_register_classes_for_mode_p + + #undef TARGET_FUNCTION_OK_FOR_SIBCALL + #define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall + +-#undef TARGET_INSERT_ATTRIBUTES +-#define TARGET_INSERT_ATTRIBUTES loongarch_insert_attributes +-#undef TARGET_MERGE_DECL_ATTRIBUTES +-#define TARGET_MERGE_DECL_ATTRIBUTES loongarch_merge_decl_attributes +-#undef TARGET_CAN_INLINE_P +-#define TARGET_CAN_INLINE_P loongarch_can_inline_p ++#undef TARGET_GET_DRAP_RTX ++#define TARGET_GET_DRAP_RTX loongarch_get_drap_rtx + + #undef TARGET_VALID_POINTER_MODE + #define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode +@@ -10276,43 +10779,49 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST + #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ + loongarch_builtin_vectorization_cost ++#undef TARGET_VECTORIZE_ADD_STMT_COST ++#define TARGET_VECTORIZE_ADD_STMT_COST loongarch_add_stmt_cost + ++#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT ++#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT loongarch_builtin_support_vector_misalignment ++#undef TARGET_MODE_REP_EXTENDED ++#define TARGET_MODE_REP_EXTENDED loongarch_mode_rep_extended + + #undef TARGET_IN_SMALL_DATA_P + #define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p + +-#undef TARGET_MACHINE_DEPENDENT_REORG +-#define TARGET_MACHINE_DEPENDENT_REORG loongarch_reorg +- +-#undef TARGET_PREFERRED_RELOAD_CLASS ++#undef TARGET_PREFERRED_RELOAD_CLASS + #define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class + +-#undef TARGET_EXPAND_TO_RTL_HOOK +-#define TARGET_EXPAND_TO_RTL_HOOK loongarch_expand_to_rtl_hook +-#undef TARGET_ASM_FILE_START +-#define TARGET_ASM_FILE_START loongarch_file_start + #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE + #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true + + #undef TARGET_EXPAND_BUILTIN_VA_START + #define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start + +-#undef TARGET_PROMOTE_FUNCTION_MODE ++#undef TARGET_PROMOTE_FUNCTION_MODE + #define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode + #undef TARGET_RETURN_IN_MEMORY + #define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory + ++#undef TARGET_FUNCTION_VALUE ++#define TARGET_FUNCTION_VALUE loongarch_function_value ++#undef TARGET_LIBCALL_VALUE ++#define TARGET_LIBCALL_VALUE loongarch_libcall_value ++ + #undef TARGET_ASM_OUTPUT_MI_THUNK + #define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk + #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK +-#define TARGET_ASM_CAN_OUTPUT_MI_THUNK 
hook_bool_const_tree_hwi_hwi_const_tree_true ++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ ++ hook_bool_const_tree_hwi_hwi_const_tree_true + + #undef TARGET_PRINT_OPERAND + #define TARGET_PRINT_OPERAND loongarch_print_operand + #undef TARGET_PRINT_OPERAND_ADDRESS + #define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address + #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P +-#define TARGET_PRINT_OPERAND_PUNCT_VALID_P loongarch_print_operand_punct_valid_p ++#define TARGET_PRINT_OPERAND_PUNCT_VALID_P \ ++ loongarch_print_operand_punct_valid_p + + #undef TARGET_SETUP_INCOMING_VARARGS + #define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs +@@ -10344,6 +10853,10 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \ + loongarch_autovectorize_vector_sizes + ++#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ++#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \ ++ loongarch_builtin_vectorized_function ++ + #undef TARGET_INIT_BUILTINS + #define TARGET_INIT_BUILTINS loongarch_init_builtins + #undef TARGET_BUILTIN_DECL +@@ -10351,8 +10864,11 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_EXPAND_BUILTIN + #define TARGET_EXPAND_BUILTIN loongarch_expand_builtin + ++/* The generic ELF target does not always have TLS support. */ ++#ifdef HAVE_AS_TLS + #undef TARGET_HAVE_TLS + #define TARGET_HAVE_TLS HAVE_AS_TLS ++#endif + + #undef TARGET_CANNOT_FORCE_CONST_MEM + #define TARGET_CANNOT_FORCE_CONST_MEM loongarch_cannot_force_const_mem +@@ -10360,35 +10876,24 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_LEGITIMATE_CONSTANT_P + #define TARGET_LEGITIMATE_CONSTANT_P loongarch_legitimate_constant_p + +-#undef TARGET_ENCODE_SECTION_INFO +-#define TARGET_ENCODE_SECTION_INFO loongarch_encode_section_info +- +-#undef TARGET_ATTRIBUTE_TABLE +-#define TARGET_ATTRIBUTE_TABLE loongarch_attribute_table + /* All our function attributes are related to how out-of-line copies should + be compiled or called. They don't in themselves prevent inlining. 
*/ + #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P + #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true + + #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P +-#define TARGET_USE_BLOCKS_FOR_CONSTANT_P loongarch_use_blocks_for_constant_p +-#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P +-#define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p +- +-#undef TARGET_COMP_TYPE_ATTRIBUTES +-#define TARGET_COMP_TYPE_ATTRIBUTES loongarch_comp_type_attributes ++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true + + #ifdef HAVE_AS_DTPRELWORD + #undef TARGET_ASM_OUTPUT_DWARF_DTPREL + #define TARGET_ASM_OUTPUT_DWARF_DTPREL loongarch_output_dwarf_dtprel + #endif +-#undef TARGET_DWARF_REGISTER_SPAN +-#define TARGET_DWARF_REGISTER_SPAN loongarch_dwarf_register_span +-#undef TARGET_DWARF_FRAME_REG_MODE +-#define TARGET_DWARF_FRAME_REG_MODE loongarch_dwarf_frame_reg_mode + + #undef TARGET_LEGITIMATE_ADDRESS_P +-#define TARGET_LEGITIMATE_ADDRESS_P loongarch_legitimate_address_p ++#define TARGET_LEGITIMATE_ADDRESS_P loongarch_legitimate_address_p ++ ++#undef TARGET_COMPUTE_FRAME_LAYOUT ++#define TARGET_COMPUTE_FRAME_LAYOUT loongarch_compute_frame_info + + #undef TARGET_FRAME_POINTER_REQUIRED + #define TARGET_FRAME_POINTER_REQUIRED loongarch_frame_pointer_required +@@ -10402,18 +10907,12 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_TRAMPOLINE_INIT + #define TARGET_TRAMPOLINE_INIT loongarch_trampoline_init + +-#undef TARGET_SHIFT_TRUNCATION_MASK +-#define TARGET_SHIFT_TRUNCATION_MASK loongarch_shift_truncation_mask +- + #undef TARGET_VECTORIZE_VEC_PERM_CONST + #define TARGET_VECTORIZE_VEC_PERM_CONST loongarch_vectorize_vec_perm_const + + #undef TARGET_SCHED_REASSOCIATION_WIDTH + #define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width + +-#undef TARGET_CASE_VALUES_THRESHOLD +-#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold +- + #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV + #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv + +@@ -10422,13 +10921,6 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + + #undef TARGET_SPILL_CLASS + #define TARGET_SPILL_CLASS loongarch_spill_class +-#undef TARGET_LRA_P +-#define TARGET_LRA_P loongarch_lra_p +-#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS +-#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS loongarch_ira_change_pseudo_allocno_class +- +-#undef TARGET_HARD_REGNO_SCRATCH_OK +-#define TARGET_HARD_REGNO_SCRATCH_OK loongarch_hard_regno_scratch_ok + + #undef TARGET_HARD_REGNO_NREGS + #define TARGET_HARD_REGNO_NREGS loongarch_hard_regno_nregs +@@ -10445,9 +10937,6 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS + #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2 + +-#undef TARGET_SECONDARY_MEMORY_NEEDED +-#define TARGET_SECONDARY_MEMORY_NEEDED loongarch_secondary_memory_needed +- + #undef TARGET_CAN_CHANGE_MODE_CLASS + #define TARGET_CAN_CHANGE_MODE_CLASS loongarch_can_change_mode_class + +@@ -10460,6 +10949,9 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert) + #undef TARGET_STARTING_FRAME_OFFSET + #define TARGET_STARTING_FRAME_OFFSET loongarch_starting_frame_offset + ++#undef TARGET_SECONDARY_RELOAD ++#define TARGET_SECONDARY_RELOAD loongarch_secondary_reload ++ + struct gcc_target targetm = TARGET_INITIALIZER; +- ++ + #include "gt-loongarch.h" +diff --git a/gcc/config/loongarch/loongarch.h 
b/gcc/config/loongarch/loongarch.h +index 18d17afb8..1b26230cb 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1,9 +1,7 @@ +-/* Definitions of target machine for GNU compiler. LARCH version. +- Copyright (C) 1989-2018 Free Software Foundation, Inc. +- Contributed by A. Lichnewsky (lich@inria.inria.fr). +- Changed by Michael Meissner (meissner@osf.org). +- 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and +- Brendan Eich (brendan@microunity.com). ++/* Definitions of target machine for GNU compiler. LoongArch version. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Technology Co. Ltd. ++ Based on MIPS and RISC-V target for GNU compiler. + + This file is part of GCC. + +@@ -21,318 +19,36 @@ You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +- +-#include "config/vxworks-dummy.h" +- +-#ifdef GENERATOR_FILE +-/* This is used in some insn conditions, so needs to be declared, but +- does not need to be defined. */ +-extern int target_flags_explicit; +-#endif +- +-/* LARCH external variables defined in loongarch.c. */ +- +-/* Which ABI to use. ABILP32 (original 32, or o32), ABILPX32 (n32), +- ABILP64 (n64) are all defined by SGI. */ +- +-#define ABILP32 0 +-#define ABILPX32 1 +-#define ABILP64 2 +- +-/* Information about one recognized processor. Defined here for the +- benefit of TARGET_CPU_CPP_BUILTINS. */ +-struct loongarch_cpu_info { +- /* The 'canonical' name of the processor as far as GCC is concerned. +- It's typically a manufacturer's prefix followed by a numerical +- designation. It should be lowercase. */ +- const char *name; +- +- /* The internal processor number that most closely matches this +- entry. Several processors can have the same value, if there's no +- difference between them from GCC's point of view. */ +- enum processor cpu; +- +- /* The ISA level that the processor implements. */ +- int isa; +- +- /* A mask of PTF_* values. */ +- unsigned int tune_flags; +-}; ++/* LoongArch external variables defined in loongarch.c. */ + + #include "config/loongarch/loongarch-opts.h" + + /* Macros to silence warnings about numbers being signed in traditional + C and unsigned in ISO C when compiled on 32-bit hosts. */ + +-#define BITMASK_HIGH (((unsigned long)1) << 31) /* 0x80000000 */ +-#define BITMASK_UPPER16 ((unsigned long)0xffff << 16) /* 0xffff0000 */ +-#define BITMASK_LOWER16 ((unsigned long)0xffff) /* 0x0000ffff */ ++#define BITMASK_HIGH (((unsigned long) 1) << 31) /* 0x80000000 */ + +- + /* Run-time compilation parameters selecting different hardware subsets. */ + +-/* True if we are generating position-independent VxWorks RTP code. */ +-#define TARGET_RTP_PIC (TARGET_VXWORKS_RTP && flag_pic) +- +-/* True if we can optimize sibling calls. For simplicity, we only +- handle cases in which call_insn_operand will reject invalid +- sibcall addresses. There are two cases in which this isn't true: +- +- - TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS. call_insn_operand +- accepts global constants, but all sibcalls must be indirect. */ +-#define TARGET_SIBCALLS (1) +- +-/* True if we can use the J and JAL instructions. */ +-#define TARGET_ABSOLUTE_JUMPS (!flag_pic) +- +-/* True if the output must have a writable .eh_frame. +- See ASM_PREFERRED_EH_DATA_FORMAT for details. 
*/ +-#ifdef HAVE_LD_PERSONALITY_RELAXATION +-#define TARGET_WRITABLE_EH_FRAME 0 +-#else +-#define TARGET_WRITABLE_EH_FRAME (flag_pic && TARGET_SHARED) +-#endif +- +- +-/* ISA has LSA available. */ +-#define ISA_HAS_LSA (1) +- +-/* ISA has DLSA available. */ +-#define ISA_HAS_DLSA (TARGET_64BIT) +- +-/* Architecture target defines. */ +-#define TARGET_LOONGARCH64 (loongarch_arch == PROCESSOR_LOONGARCH64) +-#define TUNE_LOONGARCH64 (loongarch_tune == PROCESSOR_LOONGARCH64) +-#define TARGET_LA464 (loongarch_arch == PROCESSOR_LA464) +-#define TUNE_LA464 (loongarch_tune == PROCESSOR_LA464) +-/* True if the pre-reload scheduler should try to create chains of +- multiply-add or multiply-subtract instructions. For example, +- suppose we have: +- +- t1 = a * b +- t2 = t1 + c * d +- t3 = e * f +- t4 = t3 - g * h +- +- t1 will have a higher priority than t2 and t3 will have a higher +- priority than t4. However, before reload, there is no dependence +- between t1 and t3, and they can often have similar priorities. +- The scheduler will then tend to prefer: +- +- t1 = a * b +- t3 = e * f +- t2 = t1 + c * d +- t4 = t3 - g * h +- +- which stops us from making full use of macc/madd-style instructions. +- This sort of situation occurs frequently in Fourier transforms and +- in unrolled loops. +- +- To counter this, the TUNE_MACC_CHAINS code will reorder the ready +- queue so that chained multiply-add and multiply-subtract instructions +- appear ahead of any other instruction that is likely to clobber lo. +- In the example above, if t2 and t3 become ready at the same time, +- the code ensures that t2 is scheduled first. +- +- Multiply-accumulate instructions are a bigger win for some targets +- than others, so this macro is defined on an opt-in basis. */ +-#define TUNE_MACC_CHAINS 0 +- +-#define TARGET_OLDABI (loongarch_abi == ABILP32) +-#define TARGET_NEWABI (loongarch_abi == ABILPX32 || loongarch_abi == ABILP64) +- +-/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is +- directly accessible, while the command-line options select +- TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI +- in use. */ +-#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_ABI) +-#define TARGET_SOFT_FLOAT (TARGET_SOFT_FLOAT_ABI) +- +-/* False if SC acts as a memory barrier with respect to itself, +- otherwise a SYNC will be emitted after SC for atomic operations +- that require ordering between the SC and following loads and +- stores. It does not tell anything about ordering of loads and +- stores prior to and following the SC, only about the SC itself and +- those loads and stores follow it. */ +-#define TARGET_SYNC_AFTER_SC (1) +- +-/* Define preprocessor macros for the -march and -mtune options. +- PREFIX is either _LARCH_ARCH or _LARCH_TUNE, INFO is the selected +- processor. If INFO's canonical name is "foo", define PREFIX to +- be "foo", and define an additional macro PREFIX_FOO. */ +-#define LARCH_CPP_SET_PROCESSOR(PREFIX, INFO) \ +- do \ +- { \ +- char *macro, *p; \ +- \ +- macro = concat ((PREFIX), "_", (INFO)->name, NULL); \ +- for (p = macro; *p != 0; p++) \ +- if (*p == '+') \ +- *p = 'P'; \ +- else \ +- *p = TOUPPER (*p); \ +- \ +- builtin_define (macro); \ +- builtin_define_with_value ((PREFIX), (INFO)->name, 1); \ +- free (macro); \ +- } \ +- while (0) +- + /* Target CPU builtins. */ +-#define TARGET_CPU_CPP_BUILTINS() loongarch_cpu_cpp_builtins (pfile) +- +-/* Target CPU versions for D. 
*/ +-#define TARGET_D_CPU_VERSIONS loongarch_d_target_versions ++#define TARGET_CPU_CPP_BUILTINS() loongarch_cpu_cpp_builtins (pfile) + +-/* Default target_flags if no switches are specified */ +- +-#ifndef TARGET_DEFAULT +-#define TARGET_DEFAULT 0 +-#endif +- +-#ifndef TARGET_CPU_DEFAULT +-#define TARGET_CPU_DEFAULT 0 +-#endif ++/* Default target_flags if no switches are specified. */ + + #ifdef IN_LIBGCC2 + #undef TARGET_64BIT +-/* Make this compile time constant for libgcc2 */ ++/* Make this compile time constant for libgcc2. */ + #ifdef __loongarch64 +-#define TARGET_64BIT 1 ++#define TARGET_64BIT 1 + #else +-#define TARGET_64BIT 0 ++#define TARGET_64BIT 0 + #endif +-#endif /* IN_LIBGCC2 */ ++#endif /* IN_LIBGCC2 */ + + #define TARGET_LIBGCC_SDATA_SECTION ".sdata" + +-#ifndef MULTILIB_ISA_DEFAULT +-#if LARCH_ISA_DEFAULT == 0 +-#define MULTILIB_ISA_DEFAULT "loongarch64" +-#endif +-#endif +- +-#ifndef LARCH_ABI_DEFAULT +-#define LARCH_ABI_DEFAULT ABILP32 +-#endif +- +-/* Use the most portable ABI flag for the ASM specs. */ +- +-#if LARCH_ABI_DEFAULT == ABILP32 +-#define MULTILIB_ABI_DEFAULT "mabi=lp32" +-#elif LARCH_ABI_DEFAULT == ABILP64 +-#define MULTILIB_ABI_DEFAULT "mabi=lp64" +-#endif +- +-#ifndef MULTILIB_DEFAULTS +-#define MULTILIB_DEFAULTS \ +- {MULTILIB_ISA_DEFAULT, MULTILIB_ABI_DEFAULT } +-#endif +- +-/* A spec condition that matches all -loongarch arguments. */ +- +-#define LARCH_ISA_LEVEL_OPTION_SPEC \ +- "loongarch" +- +-/* A spec condition that matches all architecture arguments. */ +- +-#define LARCH_ARCH_OPTION_SPEC \ +- LARCH_ISA_LEVEL_OPTION_SPEC "|march=*" +- +-/* A spec that infers a -loongarch argument from an -march argument. */ +- +-#define LARCH_ISA_LEVEL_SPEC \ +- "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;:}" +- +-/* A spec that injects the default multilib ISA if no architecture is +- specified. */ +- +-#define LARCH_DEFAULT_ISA_LEVEL_SPEC \ +- "%{" LARCH_ISA_LEVEL_OPTION_SPEC ":;: \ +- %{!march=*: -" MULTILIB_ISA_DEFAULT "}}" +- +-/* A spec that infers a -mhard-float or -msoft-float setting from an +- -march argument. Note that soft-float and hard-float code are not +- link-compatible. */ +- +-#define LARCH_ARCH_FLOAT_SPEC \ +- "%{mhard-float|msoft-float|mno-float|march=loongarch*:; \ +- march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \ +- |march=34kc|march=34kn|march=74kc|march=1004kc|march=5kc \ +- |march=m14k*|march=m5101|march=octeon|march=xlr: -msoft-float; \ +- march=*: -mhard-float}" +- +-/* A spec condition that matches 32-bit options. It only works if +- LARCH_ISA_LEVEL_SPEC has been applied. */ +- +-#define LARCH_32BIT_OPTION_SPEC \ +- "loongarch1|loongarch2|loongarch32*|mgp32" +- +-#if (LARCH_ABI_DEFAULT == ABILPX32 \ +- || LARCH_ABI_DEFAULT == ABILP64) +-#define OPT_ARCH64 "mabi=32|mgp32:;" +-#define OPT_ARCH32 "mabi=32|mgp32" +-#else +-#define OPT_ARCH64 "mabi=o64|mabi=n32|mabi=64|mgp64" +-#define OPT_ARCH32 "mabi=o64|mabi=n32|mabi=64|mgp64:;" +-#endif +- +-/* Support for a compile-time default CPU, et cetera. The rules are: +- --with-arch is ignored if -march is specified or a -loongarch is specified +- ; likewise --with-arch-32 and --with-arch-64. +- --with-tune is ignored if -mtune is specified; likewise +- --with-tune-32 and --with-tune-64. +- --with-abi is ignored if -mabi is specified. +- --with-float is ignored if -mhard-float or -msoft-float are +- specified. +- --with-fpu is ignored if -msoft-float, -msingle-float or -mdouble-float are +- specified. 
+- --with-fp-32 is ignored if -msoft-float, -msingle-float, -mlsx or -mfp are +- specified. +- --with-divide is ignored if -mdivide-traps or -mdivide-breaks are +- specified. */ +-#define OPTION_DEFAULT_SPECS \ +- {"arch", "%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \ +- {"arch_32", "%{" OPT_ARCH32 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ +- {"arch_64", "%{" OPT_ARCH64 ":%{" LARCH_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \ +- {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ +- {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ +- {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ +- {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \ +- {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \ +- {"fpu", "%{!msoft-float:%{!msingle-float:%{!mdouble-float:-m%(VALUE)-float}}}" }, \ +- {"fp_32", "%{" OPT_ARCH32 \ +- ":%{!msoft-float:%{!msingle-float:%{!mfp*:%{!mlsx:%{!mloongson-asx:-mfp%(VALUE)}}}}}" }, \ +- {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" } +- +-/* A spec that infers the: +- -mlsx setting from a -march=la464 argument. +- -mlasx setting from a -march=la464 argument. */ +-#define BASE_DRIVER_SELF_SPECS \ +- LARCH_ASE_LSX_SPEC \ +- LARCH_ASE_LASX_SPEC +- +-#define LARCH_ASE_LSX_SPEC \ +- "%{!mno-lsx: \ +- %{march=la464: -mlsx}}" +- +-#define LARCH_ASE_LASX_SPEC \ +- "%{!mno-lasx: \ +- %{march=la464: -mlasx}}" +- +-#define DRIVER_SELF_SPECS \ +- BASE_DRIVER_SELF_SPECS +- +-/* from N_LARCH */ +-#define ABI_SPEC \ +- "%{mabi=lp32:32}" \ +- "%{mabi=lp64:64}" \ +- +-#define STARTFILE_PREFIX_SPEC \ +- "/lib" ABI_SPEC "/ " \ +- "/usr/lib" ABI_SPEC "/ " \ +- "/lib/ " \ +- "/usr/lib/ " ++/* Driver native functions for SPEC processing in the GCC driver. */ ++#include "loongarch-driver.h" + + /* This definition replaces the formerly used 'm' constraint with a + different constraint letter in order to avoid changing semantics of +@@ -341,71 +57,11 @@ struct loongarch_cpu_info { + must not be used in insn definitions or inline assemblies. */ + #define TARGET_MEM_CONSTRAINT 'w' + +-/* True if the file format uses 64-bit symbols. At present, this is +- only true for n64, which uses 64-bit ELF. */ +-#define FILE_HAS_64BIT_SYMBOLS (loongarch_abi == ABILP64) +- +-/* True if symbols are 64 bits wide. This is usually determined by +- the ABI's file format, but it can be overridden by -msym32. Note that +- overriding the size with -msym32 changes the ABI of relocatable objects, +- although it doesn't change the ABI of a fully-linked object. */ +-#define ABI_HAS_64BIT_SYMBOLS (FILE_HAS_64BIT_SYMBOLS \ +- && Pmode == DImode) +- +-/* ISA supports instructions DMUL, DMULU, DMUH, DMUHU. */ +-#define ISA_HAS_DMUL (TARGET_64BIT) +- +-/* ISA has floating-point RECIP.fmt and RSQRT.fmt instructions. The +- LARCH64 rev. 1 ISA says that RECIP.D and RSQRT.D are unpredictable when +- doubles are stored in pairs of FPRs, so for safety's sake, we apply +- this restriction to the LARCH IV ISA too. */ +-#define ISA_HAS_FP_RECIP_RSQRT(MODE) \ +- ((MODE) == SFmode \ +- || (TARGET_FLOAT64 \ +- && (MODE) == DFmode)) +- +-/* The LSX ASE is available. */ +-#define ISA_HAS_LSX (TARGET_LSX) +- +-/* The LASX ASE is available. */ +-#define ISA_HAS_LASX (TARGET_LASX) +- + /* Tell collect what flags to pass to nm. */ + #ifndef NM_FLAGS + #define NM_FLAGS "-Bn" + #endif + +- +-/* SUBTARGET_ASM_DEBUGGING_SPEC handles passing debugging options to +- the assembler. It may be overridden by subtargets. 
+- +- Beginning with gas 2.13, -mdebug must be passed to correctly handle +- COFF debugging info. */ +- +-#ifndef SUBTARGET_ASM_DEBUGGING_SPEC +-#define SUBTARGET_ASM_DEBUGGING_SPEC "\ +-%{g} %{g0} %{g1} %{g2} %{g3} \ +-%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \ +-%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \ +-%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3}" +-#endif +- +-/* FP_ASM_SPEC represents the floating-point options that must be passed +- to the assembler when FPXX support exists. Prior to that point the +- assembler could accept the options but were not required for +- correctness. We only add the options when absolutely necessary +- because passing -msoft-float to the assembler will cause it to reject +- all hard-float instructions which may require some user code to be +- updated. */ +- +-#ifdef HAVE_AS_DOT_MODULE +-#define FP_ASM_SPEC "\ +-%{mhard-float} %{msoft-float} \ +-%{msingle-float} %{mdouble-float}" +-#else +-#define FP_ASM_SPEC +-#endif +- + /* SUBTARGET_ASM_SPEC is always passed to the assembler. It may be + overridden by subtargets. */ + +@@ -414,29 +70,21 @@ struct loongarch_cpu_info { + #endif + + #undef ASM_SPEC +-#define ASM_SPEC "\ +-%{mabi=*} %{!mabi=*: %(asm_abi_default_spec)} \ +-" ++#define ASM_SPEC "%{mabi=lp64d:-mabi=lp64} %{subtarget_asm_spec}" ++ + /* Extra switches sometimes passed to the linker. */ + + #ifndef LINK_SPEC + #define LINK_SPEC "" +-#endif /* LINK_SPEC defined */ +- ++#endif /* LINK_SPEC defined */ + +-/* Specs for the compiler proper */ +- +-/* SUBTARGET_CC1_SPEC is passed to the compiler proper. It may be +- overridden by subtargets. */ +-#ifndef SUBTARGET_CC1_SPEC +-#define SUBTARGET_CC1_SPEC "" +-#endif ++/* Specs for the compiler proper. */ + + /* CC1_SPEC is the set of arguments to pass to the compiler proper. */ + + #undef CC1_SPEC + #define CC1_SPEC "\ +-%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}} \ ++%{G*} \ + %(subtarget_cc1_spec)" + + /* Preprocessor specs. */ +@@ -459,63 +107,38 @@ struct loongarch_cpu_info { + + Do not define this macro if it does not need to do anything. */ + +-#define EXTRA_SPECS \ +- { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC }, \ +- { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \ +- { "subtarget_asm_debugging_spec", SUBTARGET_ASM_DEBUGGING_SPEC }, \ +- { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \ +- { "asm_abi_default_spec", "-" MULTILIB_ABI_DEFAULT }, \ +- SUBTARGET_EXTRA_SPECS +- +-#ifndef SUBTARGET_EXTRA_SPECS +-#define SUBTARGET_EXTRA_SPECS +-#endif +- +-#define DBX_DEBUGGING_INFO 1 /* generate stabs (OSF/rose) */ +-#define DWARF2_DEBUGGING_INFO 1 /* dwarf2 debugging info */ +- +-#ifndef PREFERRED_DEBUGGING_TYPE +-#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG +-#endif +- +-/* The size of DWARF addresses should be the same as the size of symbols +- in the target file format. They shouldn't depend on things like -msym32, +- because many DWARF consumers do not allow the mixture of address sizes +- that one would then get from linking -msym32 code with -msym64 code. +-*/ +-#define DWARF2_ADDR_SIZE (FILE_HAS_64BIT_SYMBOLS ? 8 : 4) +- +-/* By default, turn on GDB extensions. */ +-#define DEFAULT_GDB_EXTENSIONS 1 ++#define EXTRA_SPECS \ ++ {"subtarget_cc1_spec", SUBTARGET_CC1_SPEC}, \ ++ {"subtarget_cpp_spec", SUBTARGET_CPP_SPEC}, \ ++ {"subtarget_asm_spec", SUBTARGET_ASM_SPEC}, + + /* Registers may have a prefix which can be ignored when matching + user asm and register definitions. 
*/ + #ifndef REGISTER_PREFIX +-#define REGISTER_PREFIX "$" ++#define REGISTER_PREFIX "$" + #endif + + /* Local compiler-generated symbols must have a prefix that the assembler +- understands. By default, this is $, although some targets (e.g., +- NetBSD-ELF) need to override this. */ ++ understands. */ + +-#ifndef LOCAL_LABEL_PREFIX +-#define LOCAL_LABEL_PREFIX "$" +-#endif ++#define LOCAL_LABEL_PREFIX "." + + /* By default on the loongarch, external symbols do not have an underscore +- prepended, but some targets (e.g., NetBSD) require this. */ ++ prepended. */ + +-#ifndef USER_LABEL_PREFIX +-#define USER_LABEL_PREFIX "" ++#define USER_LABEL_PREFIX "" ++ ++#ifndef PREFERRED_DEBUGGING_TYPE ++#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG + #endif + +-/* On Sun 4, this limit is 2048. We use 1500 to be safe, +- since the length can run past this up to a continuation point. */ +-#undef DBX_CONTIN_LENGTH +-#define DBX_CONTIN_LENGTH 1500 ++/* The size of DWARF addresses should be the same as the size of symbols ++ in the target file format. */ ++#define DWARF2_ADDR_SIZE (TARGET_64BIT ? 8 : 4) + +-/* How to renumber registers for dbx and gdb. */ +-#define DBX_REGISTER_NUMBER(REGNO) loongarch_dbx_regno[REGNO] ++/* By default, produce dwarf version 2 format debugging output in response ++ to the ‘-g’ option. */ ++#define DWARF2_DEBUGGING_INFO 1 + + /* The mapping from gcc register number to DWARF 2 CFA column number. */ + #define DWARF_FRAME_REGNUM(REGNO) loongarch_dwarf_regno[REGNO] +@@ -530,7 +153,7 @@ struct loongarch_cpu_info { + #define EH_RETURN_DATA_REGNO(N) \ + ((N) < (4) ? (N) + GP_ARG_FIRST : INVALID_REGNUM) + +-#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) ++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) + + #define EH_USES(N) loongarch_eh_uses (N) + +@@ -539,19 +162,7 @@ struct loongarch_cpu_info { + SFmode register saves. */ + #define DWARF_CIE_DATA_ALIGNMENT -4 + +-/* Correct the offset of automatic variables and arguments. Note that +- the LARCH debug format wants all automatic variables and arguments +- to be in terms of the virtual frame pointer (stack pointer before +- any adjustment in the function), while the LARCH 3.0 linker wants +- the frame pointer to be the stack pointer after the initial +- adjustment. */ +- +-#define DEBUGGER_AUTO_OFFSET(X) \ +- loongarch_debugger_offset (X, (HOST_WIDE_INT) 0) +-#define DEBUGGER_ARG_OFFSET(OFFSET, X) \ +- loongarch_debugger_offset (X, (HOST_WIDE_INT) OFFSET) +- +-/* Target machine storage layout */ ++/* Target machine storage layout. */ + + #define BITS_BIG_ENDIAN 0 + #define BYTES_BIG_ENDIAN 0 +@@ -576,27 +187,19 @@ struct loongarch_cpu_info { + #define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT) + + /* For LARCH, width of a floating point register. */ +-#define UNITS_PER_FPREG (TARGET_FLOAT64 ? 8 : 4) +- +-/* The number of consecutive floating-point registers needed to store the +- largest format supported by the FPU. */ +-#define MAX_FPRS_PER_FMT (TARGET_FLOAT64 || TARGET_SINGLE_FLOAT ? 1 : 2) +- +-/* The number of consecutive floating-point registers needed to store the +- smallest format supported by the FPU. */ +-#define MIN_FPRS_PER_FMT 1 ++#define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4) + + /* The largest size of value that can be held in floating-point + registers and moved with a single instruction. */ + #define UNITS_PER_HWFPVALUE \ +- (TARGET_SOFT_FLOAT_ABI ? 0 : MAX_FPRS_PER_FMT * UNITS_PER_FPREG) ++ (TARGET_SOFT_FLOAT ? 
0 : UNITS_PER_FPREG) + + /* The largest size of value that can be held in floating-point + registers. */ +-#define UNITS_PER_FPVALUE \ +- (TARGET_SOFT_FLOAT_ABI ? 0 \ +- : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \ +- : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) ++#define UNITS_PER_FPVALUE \ ++ (TARGET_SOFT_FLOAT ? 0 \ ++ : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \ ++ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) + + /* The number of bytes in a double. */ + #define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT) +@@ -609,7 +212,7 @@ struct loongarch_cpu_info { + + #define FLOAT_TYPE_SIZE 32 + #define DOUBLE_TYPE_SIZE 64 +-#define LONG_DOUBLE_TYPE_SIZE (TARGET_NEWABI ? 128 : 64) ++#define LONG_DOUBLE_TYPE_SIZE (TARGET_64BIT ? 128 : 64) + + /* Define the sizes of fixed-point types. */ + #define SHORT_FRACT_TYPE_SIZE 8 +@@ -620,8 +223,6 @@ struct loongarch_cpu_info { + #define SHORT_ACCUM_TYPE_SIZE 16 + #define ACCUM_TYPE_SIZE 32 + #define LONG_ACCUM_TYPE_SIZE 64 +-/* FIXME. LONG_LONG_ACCUM_TYPE_SIZE should be 128 bits, but GCC +- doesn't support 128-bit integers for LARCH32 currently. */ + #define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64) + + /* long double is not a fixed mode, but the idea is that, if we +@@ -630,7 +231,7 @@ struct loongarch_cpu_info { + + /* Width in bits of a pointer. */ + #ifndef POINTER_SIZE +-#define POINTER_SIZE ((TARGET_64BIT) ? 64 : 32) ++#define POINTER_SIZE (TARGET_64BIT ? 64 : 32) + #endif + + /* Allocation boundary (in *bits*) for storing arguments in argument list. */ +@@ -642,8 +243,8 @@ struct loongarch_cpu_info { + /* Alignment of field after `int : 0' in a structure. */ + #define EMPTY_FIELD_BOUNDARY 32 + +-/* Every structure's size must be a multiple of this. */ +-/* 8 is observed right on a DECstation and on riscos 4.02. */ ++/* Number of bits which any structure or union's size must be a multiple of. ++ Each structure or union's size is rounded up to a multiple of this. */ + #define STRUCTURE_SIZE_BOUNDARY 8 + + /* There is no point aligning anything to a rounder boundary than +@@ -655,6 +256,9 @@ struct loongarch_cpu_info { + /* All accesses must be aligned. */ + #define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN) + ++/* Glibc align malloc to 128 from glibc/sysdeps/generic/malloc-alignment.h. */ ++#define MALLOC_ABI_ALIGNMENT 128 ++ + /* Define this if you wish to imitate the way many other C compilers + handle alignment of bitfields and the structures that contain + them. +@@ -699,22 +303,17 @@ struct loongarch_cpu_info { + /* We need this for the same reason as DATA_ALIGNMENT, namely to cause + character arrays to be word-aligned so that `strcpy' calls that copy + constants to character arrays can be done inline, and 'strcmp' can be +- optimised to use word loads. */ +-#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ +- DATA_ALIGNMENT (TYPE, ALIGN) +- +-#define PAD_VARARGS_DOWN \ +- (targetm.calls.function_arg_padding (TYPE_MODE (type), type) == PAD_DOWNWARD) ++ optimised to use word loads. */ ++#define LOCAL_ALIGNMENT(TYPE, ALIGN) DATA_ALIGNMENT (TYPE, ALIGN) + + /* Define if operations between registers always perform the operation + on the full register even if a narrower mode is specified. */ + #define WORD_REGISTER_OPERATIONS 1 + +-/* When in 64-bit mode, move insns will sign extend SImode and CCmode ++/* When in 64-bit mode, move insns will sign extend SImode and FCCmode + moves. All other references are zero extended. */ + #define LOAD_EXTEND_OP(MODE) \ +- (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \ +- ? 
SIGN_EXTEND : ZERO_EXTEND) ++ ((TARGET_64BIT && (MODE) == SImode) ? SIGN_EXTEND : UNKNOWN) + + /* Define this macro if it is advisable to hold scalars in registers + in a wider mode than that declared by the program. In such cases, +@@ -722,13 +321,13 @@ struct loongarch_cpu_info { + type, but kept valid in the wider mode. The signedness of the + extension may differ from that of the type. */ + +-#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ +- if (GET_MODE_CLASS (MODE) == MODE_INT \ ++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ ++ if (GET_MODE_CLASS (MODE) == MODE_INT \ + && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ +- { \ +- if ((MODE) == SImode) \ +- (UNSIGNEDP) = 0; \ +- (MODE) = Pmode; \ ++ { \ ++ if ((MODE) == SImode) \ ++ (UNSIGNEDP) = 0; \ ++ (MODE) = Pmode; \ + } + + /* Pmode is always the same as ptr_mode, but not always the same as word_mode. +@@ -738,11 +337,11 @@ struct loongarch_cpu_info { + /* Define if loading short immediate values into registers sign extends. */ + #define SHORT_IMMEDIATES_SIGN_EXTEND 1 + +-/* The [d]clz instructions have the natural values at 0. */ ++/* The clz.{w/d} instructions have the natural values at 0. */ + + #define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ + ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2) +- ++ + /* Standard register usage. */ + + /* Number of hardware registers. We have: +@@ -757,57 +356,39 @@ struct loongarch_cpu_info { + + #define FIRST_PSEUDO_REGISTER 74 + +-/* By default, fix the kernel registers ($26 and $27), the global +- pointer ($28) and the stack pointer ($29). This can change +- depending on the command-line options. +- +- Regarding coprocessor registers: without evidence to the contrary, +- it's best to assume that each coprocessor register has a unique +- use. This can be overridden, in, e.g., loongarch_option_override or +- TARGET_CONDITIONAL_REGISTER_USAGE should the assumption be +- inappropriate for a particular target. */ +- ++/* zero, tp, sp and x are fixed. */ + #define FIXED_REGISTERS \ +-{ \ ++{ /* General-purpose registers. */ \ + 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ /* Floating-point registers. */ \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ /* Others. */ \ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1} + +- +-/* Set up this array for o32 by default. +- +- Note that we don't mark $31 as a call-clobbered register. The idea is +- that it's really the call instructions themselves which clobber $31. +- We don't care what the called function does with it afterwards. +- +- This approach makes it easier to implement sibcalls. Unlike normal +- calls, sibcalls don't clobber $31, so the register reaches the +- called function in tact. EPILOGUE_USES says that $31 is useful +- to the called function. */ +- ++/* The call RTLs themselves clobber ra. */ + #define CALL_USED_REGISTERS \ +-{ \ ++{ /* General registers. */ \ + 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ + 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ /* Floating-point registers. */ \ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ + 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ /* Others. */ \ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + + /* Internal macros to classify a register number as to whether it's a +- general purpose register, a floating point register, a +- multiply/divide register, or a status register. */ ++ general purpose register, a floating point register, or a status ++ register. 
*/ + + #define GP_REG_FIRST 0 +-#define GP_REG_LAST 31 +-#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1) +-#define GP_DBX_FIRST 0 ++#define GP_REG_LAST 31 ++#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1) + + #define FP_REG_FIRST 32 +-#define FP_REG_LAST 63 +-#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) +-#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32) ++#define FP_REG_LAST 63 ++#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1) + + #define LSX_REG_FIRST FP_REG_FIRST + #define LSX_REG_LAST FP_REG_LAST +@@ -823,20 +404,16 @@ struct loongarch_cpu_info { + would need to be handled by the DWARF unwinder. */ + #define DWARF_ALT_FRAME_RETURN_COLUMN 72 + +-#define ST_REG_FIRST 64 +-#define ST_REG_LAST 71 +-#define ST_REG_NUM (ST_REG_LAST - ST_REG_FIRST + 1) ++#define FCC_REG_FIRST 64 ++#define FCC_REG_LAST 71 ++#define FCC_REG_NUM (FCC_REG_LAST - FCC_REG_FIRST + 1) + +-#define GP_REG_P(REGNO) \ ++#define GP_REG_P(REGNO) \ + ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM) +-#define M16_REG_P(REGNO) \ +- (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 16 || (REGNO) == 17) +-#define M16STORE_REG_P(REGNO) \ +- (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 0 || (REGNO) == 17) +-#define FP_REG_P(REGNO) \ ++#define FP_REG_P(REGNO) \ + ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM) +-#define ST_REG_P(REGNO) \ +- ((unsigned int) ((int) (REGNO) - ST_REG_FIRST) < ST_REG_NUM) ++#define FCC_REG_P(REGNO) \ ++ ((unsigned int) ((int) (REGNO) - FCC_REG_FIRST) < FCC_REG_NUM) + #define LSX_REG_P(REGNO) \ + ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM) + #define LASX_REG_P(REGNO) \ +@@ -846,10 +423,6 @@ struct loongarch_cpu_info { + #define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X))) + #define LASX_REG_RTX_P(X) (REG_P (X) && LASX_REG_P (REGNO (X))) + +- +-#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \ +- loongarch_hard_regno_rename_ok (OLD_REG, NEW_REG) +- + /* Select a register mode required for caller save of hard regno REGNO. */ + #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ + loongarch_hard_regno_caller_save_mode (REGNO, NREGS, MODE) +@@ -862,35 +435,34 @@ struct loongarch_cpu_info { + #define ARG_POINTER_REGNUM 72 + #define FRAME_POINTER_REGNUM 73 + +-#define HARD_FRAME_POINTER_REGNUM \ +- (GP_REG_FIRST + 22) +- +-/* FIXME: */ +-/* #define HARD_FRAME_POINTER_IS_FRAME_POINTER (HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM) */ +-/* #define HARD_FRAME_POINTER_IS_ARG_POINTER (HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM) */ ++#define HARD_FRAME_POINTER_REGNUM (GP_REG_FIRST + 22) + + #define HARD_FRAME_POINTER_IS_FRAME_POINTER 0 + #define HARD_FRAME_POINTER_IS_ARG_POINTER 0 + +-/* FIXME: */ + /* Register in which static-chain is passed to a function. */ +-#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 20) /* $t8 */ +- +-#define LARCH_PROLOGUE_TEMP_REGNUM \ +- (GP_REG_FIRST + 13) +-#define LARCH_PROLOGUE_TEMP2_REGNUM \ +- (GP_REG_FIRST + 12) +-#define LARCH_PROLOGUE_TEMP3_REGNUM \ +- (GP_REG_FIRST + 14) +-#define LARCH_EPILOGUE_TEMP_REGNUM \ +- (GP_REG_FIRST + (12)) +- +-#define LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP_REGNUM) ++#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 20) /* $t8 */ ++ ++/* DRAP register if static-chain register is unavailable. 
*/ ++#define DRAP_REGNUM (GP_REG_FIRST + 15) /* $t3 */ ++ ++#define GP_TEMP_FIRST (GP_REG_FIRST + 12) ++#define LARCH_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1) ++#define LARCH_PROLOGUE_TEMP2_REGNUM (GP_TEMP_FIRST) ++#define LARCH_PROLOGUE_TEMP3_REGNUM (GP_TEMP_FIRST + 2) ++#define LARCH_EPILOGUE_TEMP_REGNUM (GP_TEMP_FIRST) ++ ++#define CALLEE_SAVED_REG_NUMBER(REGNO) \ ++ ((REGNO) >= 22 && (REGNO) <= 31 ? (REGNO) - 22 : -1) ++ ++#define LARCH_PROLOGUE_TEMP(MODE) \ ++ gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP_REGNUM) + #define LARCH_PROLOGUE_TEMP2(MODE) \ + gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP2_REGNUM) + #define LARCH_PROLOGUE_TEMP3(MODE) \ + gen_rtx_REG (MODE, LARCH_PROLOGUE_TEMP3_REGNUM) +-#define LARCH_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, LARCH_EPILOGUE_TEMP_REGNUM) ++#define LARCH_EPILOGUE_TEMP(MODE) \ ++ gen_rtx_REG (MODE, LARCH_EPILOGUE_TEMP_REGNUM) + + /* Define this macro if it is as good or better to call a constant + function address than to call an address kept in a register. */ +@@ -898,7 +470,6 @@ struct loongarch_cpu_info { + + #define THREAD_POINTER_REGNUM (GP_REG_FIRST + 2) + +- + /* Define the classes of registers for register constraints in the + machine description. Also define ranges of constants. + +@@ -908,7 +479,7 @@ struct loongarch_cpu_info { + + The name GENERAL_REGS must be the name of a class (or an alias for + another name such as ALL_REGS). This is the class of registers +- that is allowed by "g" or "r" in a register constraint. ++ that is allowed by "r" in a register constraint. + Also, registers outside this class are allocated only when + instructions express preferences for them. + +@@ -921,16 +492,16 @@ struct loongarch_cpu_info { + + enum reg_class + { +- NO_REGS, /* no registers in set */ +- SIBCALL_REGS, /* SIBCALL_REGS */ +- JALR_REGS, /* JALR_REGS */ +- GR_REGS, /* integer registers */ +- CSR_REGS, /* integer registers except for $r0 and $r1 for csr. */ +- FP_REGS, /* floating point registers */ +- ST_REGS, /* status registers (fp status) */ +- FRAME_REGS, /* arg pointer and frame pointer */ +- ALL_REGS, /* all registers */ +- LIM_REG_CLASSES /* max value + 1 */ ++ NO_REGS, /* no registers in set */ ++ SIBCALL_REGS, /* registers used by indirect sibcalls */ ++ JIRL_REGS, /* registers used by indirect calls */ ++ CSR_REGS, /* integer registers except for $r0 and $r1 for lcsr. 
*/ ++ GR_REGS, /* integer registers */ ++ FP_REGS, /* floating point registers */ ++ FCC_REGS, /* status registers (fp status) */ ++ FRAME_REGS, /* arg pointer and frame pointer */ ++ ALL_REGS, /* all registers */ ++ LIM_REG_CLASSES /* max value + 1 */ + }; + + #define N_REG_CLASSES (int) LIM_REG_CLASSES +@@ -945,11 +516,11 @@ enum reg_class + { \ + "NO_REGS", \ + "SIBCALL_REGS", \ +- "JALR_REGS", \ +- "GR_REGS", \ ++ "JIRL_REGS", \ + "CSR_REGS", \ ++ "GR_REGS", \ + "FP_REGS", \ +- "ST_REGS", \ ++ "FCC_REGS", \ + "FRAME_REGS", \ + "ALL_REGS" \ + } +@@ -968,29 +539,28 @@ enum reg_class + #define REG_CLASS_CONTENTS \ + { \ + { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \ +- { 0x001ff000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \ +- { 0xff9ffff0, 0x00000000, 0x00000000 }, /* JALR_REGS */ \ +- { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \ ++ { 0x001fd000, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \ ++ { 0xff9ffff0, 0x00000000, 0x00000000 }, /* JIRL_REGS */ \ + { 0xfffffffc, 0x00000000, 0x00000000 }, /* CSR_REGS */ \ ++ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \ + { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \ +- { 0x00000000, 0x00000000, 0x000000ff }, /* ST_REGS */ \ ++ { 0x00000000, 0x00000000, 0x000000ff }, /* FCC_REGS */ \ + { 0x00000000, 0x00000000, 0x00000300 }, /* FRAME_REGS */ \ + { 0xffffffff, 0xffffffff, 0x000003ff } /* ALL_REGS */ \ + } + +- + /* A C expression whose value is a register class containing hard + register REGNO. In general there is more that one such class; + choose a class which is "minimal", meaning that no smaller class + also contains the register. */ + +-#define REGNO_REG_CLASS(REGNO) loongarch_regno_to_class[ (REGNO) ] ++#define REGNO_REG_CLASS(REGNO) loongarch_regno_to_class[(REGNO)] + + /* A macro whose definition is the name of the class to which a + valid base register must belong. A base register is one used in + an address which is the register value plus a displacement. */ + +-#define BASE_REG_CLASS (GR_REGS) ++#define BASE_REG_CLASS (GR_REGS) + + /* A macro whose definition is the name of the class to which a + valid index register must belong. An index register is one used +@@ -998,7 +568,7 @@ enum reg_class + factor or added to another register (as well as added to a + displacement). */ + +-#define INDEX_REG_CLASS NO_REGS ++#define INDEX_REG_CLASS GR_REGS + + /* We generally want to put call-clobbered registers ahead of + call-saved ones. (IRA expects this.) */ +@@ -1006,10 +576,6 @@ enum reg_class + #define REG_ALLOC_ORDER \ + { /* Call-clobbered GPRs. */ \ + 12, 13, 14, 15, 16, 17, 18, 19, 20, 4, 5, 6, 7, 8, 9, 10, 11, 1, \ +- /* The global pointer. This is call-clobbered for o32 and o64 \ +- abicalls, call-saved for n32 and n64 abicalls, and a program \ +- invariant otherwise. Putting it between the call-clobbered \ +- and call-saved registers should cope with all eventualities. */ \ + /* Call-saved GPRs. */ \ + 23, 24, 25, 26, 27, 28, 29, 30, 31, \ + /* GPRs that can never be exposed to the register allocator. */ \ +@@ -1017,31 +583,27 @@ enum reg_class + /* Call-clobbered FPRs. */ \ + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \ + 48, 49, 50, 51,52, 53, 54, 55, \ +- /* FPRs that are usually call-saved. The odd ones are actually \ +- call-clobbered for n32, but listing them ahead of the even \ +- registers might encourage the register allocator to fragment \ +- the available FPR pairs. 
We need paired FPRs to store long \ +- doubles, so it isn't clear that using a different order \ +- for n32 would be a win. */ \ + 56, 57, 58, 59, 60, 61, 62, 63, \ + /* None of the remaining classes have defined call-saved \ + registers. */ \ + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73} + ++#define IMM_BITS 12 ++#define IMM_REACH (HOST_WIDE_INT_1 << IMM_BITS) ++#define HWIT_1U HOST_WIDE_INT_1U ++ + /* True if VALUE is an unsigned 6-bit number. */ + +-#define UIMM6_OPERAND(VALUE) \ +- (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0) ++#define UIMM6_OPERAND(VALUE) (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0) + + /* True if VALUE is a signed 10-bit number. */ + +-#define IMM10_OPERAND(VALUE) \ +- ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400) ++#define IMM10_OPERAND(VALUE) ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400) + + /* True if VALUE is a signed 12-bit number. */ + + #define IMM12_OPERAND(VALUE) \ +- ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000) ++ ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH / 2 < IMM_REACH) + + /* True if VALUE is a signed 13-bit number. */ + +@@ -1053,67 +615,51 @@ enum reg_class + #define IMM16_OPERAND(VALUE) \ + ((unsigned HOST_WIDE_INT) (VALUE) + 0x8000 < 0x10000) + +- +-/* True if VALUE is a signed 12-bit number. */ +- +-#define SMALL_OPERAND(VALUE) \ +- ((unsigned HOST_WIDE_INT) (VALUE) + 0x800 < 0x1000) +- + /* True if VALUE is an unsigned 12-bit number. */ + +-#define SMALL_OPERAND_UNSIGNED(VALUE) \ +- (((VALUE) & ~(unsigned HOST_WIDE_INT) 0xfff) == 0) ++#define IMM12_OPERAND_UNSIGNED(VALUE) \ ++ (((VALUE) & ~(unsigned HOST_WIDE_INT) (IMM_REACH - 1)) == 0) + +-/* True if VALUE can be loaded into a register using LUI. */ ++/* True if VALUE can be loaded into a register using LU12I. */ + +-#define LUI_OPERAND(VALUE) \ +- (((VALUE) | 0x7ffff000) == 0x7ffff000 \ +- || ((VALUE) | 0x7ffff000) + 0x1000 == 0) ++#define LU12I_OPERAND(VALUE) \ ++ (((VALUE) | ((HWIT_1U << 31) - IMM_REACH)) == ((HWIT_1U << 31) - IMM_REACH) \ ++ || ((VALUE) | ((HWIT_1U << 31) - IMM_REACH)) + IMM_REACH == 0) + +-/* True if VALUE can be loaded into a register using LUI. */ ++/* True if VALUE can be loaded into a register using LU32I. */ + +-#define LU32I_OPERAND(VALUE) \ +- ((((VALUE) | 0x7ffff00000000) == 0x7ffff00000000) \ +- || ((VALUE) | 0x7ffff00000000) + 0x100000000 == 0) ++#define LU32I_OPERAND(VALUE) \ ++ (((VALUE) | (((HWIT_1U << 19) - 1) << 32)) == (((HWIT_1U << 19) - 1) << 32) \ ++ || ((VALUE) | (((HWIT_1U << 19) - 1) << 32)) + (HWIT_1U << 32) == 0) + +-/* True if VALUE can be loaded into a register using LUI. */ ++/* True if VALUE can be loaded into a register using LU52I. */ + +-#define LU52I_OPERAND(VALUE) \ +- ((((VALUE) | 0xfff0000000000000) == 0xfff0000000000000)) ++#define HWIT_UC_0xFFF HOST_WIDE_INT_UC(0xfff) ++#define LU52I_OPERAND(VALUE) \ ++ (((VALUE) | (HWIT_UC_0xFFF << 52)) == (HWIT_UC_0xFFF << 52)) + + /* Return a value X with the low 12 bits clear, and such that + VALUE - X is a signed 12-bit value. 
*/ + +-#define CONST_HIGH_PART(VALUE) \ +- (((VALUE) + 0x800) & ~(unsigned HOST_WIDE_INT) 0xfff) ++#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH / 2)) & ~(IMM_REACH - 1)) + +-#define CONST_LOW_PART(VALUE) \ +- ((VALUE) - CONST_HIGH_PART (VALUE)) ++#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE)) + +-#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X)) +-#define SMALL_INT_UNSIGNED(X) SMALL_OPERAND_UNSIGNED (INTVAL (X)) +-#define LUI_INT(X) LUI_OPERAND (INTVAL (X)) ++#define IMM12_INT(X) IMM12_OPERAND (INTVAL (X)) ++#define IMM12_INT_UNSIGNED(X) IMM12_OPERAND_UNSIGNED (INTVAL (X)) ++#define LU12I_INT(X) LU12I_OPERAND (INTVAL (X)) + #define LU32I_INT(X) LU32I_OPERAND (INTVAL (X)) + #define LU52I_INT(X) LU52I_OPERAND (INTVAL (X)) +-#define ULARCH_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) ++#define LARCH_U12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047)) + #define LARCH_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255)) +-#define LISA_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) +-#define LISA_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) +- +-/* The HI and LO registers can only be reloaded via the general +- registers. Condition code registers can only be loaded to the +- general registers, and from the floating point registers. */ +- +-#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \ +- loongarch_secondary_reload_class (CLASS, MODE, X, true) +-#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \ +- loongarch_secondary_reload_class (CLASS, MODE, X, false) ++#define LARCH_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767)) ++#define LARCH_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0) + + /* Return the maximum number of consecutive registers + needed to represent mode MODE in a register of class CLASS. */ + + #define CLASS_MAX_NREGS(CLASS, MODE) loongarch_class_max_nregs (CLASS, MODE) +- ++ + /* Stack layout; function entry, exit and calling. */ + + #define STACK_GROWS_DOWNWARD 1 +@@ -1127,11 +673,13 @@ enum reg_class + + #define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta + +-#define ELIMINABLE_REGS \ +-{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ +- { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ +- { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ +- { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM},} ++#define ELIMINABLE_REGS \ ++ { \ ++ {ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ ++ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ ++ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ ++ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ ++ } + + #define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ + (OFFSET) = loongarch_initial_elimination_offset ((FROM), (TO)) +@@ -1142,11 +690,7 @@ enum reg_class + /* The argument pointer always points to the first argument. */ + #define FIRST_PARM_OFFSET(FNDECL) 0 + +-/* o32 and o64 reserve stack space for all argument registers. */ +-#define REG_PARM_STACK_SPACE(FNDECL) \ +- (TARGET_OLDABI \ +- ? (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD) \ +- : 0) ++#define REG_PARM_STACK_SPACE(FNDECL) 0 + + /* Define this if it is the responsibility of the caller to + allocate the area reserved for arguments passed in registers. +@@ -1155,22 +699,25 @@ enum reg_class + `crtl->outgoing_args_size'. */ + #define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1 + +-#define STACK_BOUNDARY (TARGET_NEWABI ? 128 : 64) +- ++#define STACK_BOUNDARY (TARGET_ABI_LP64 ? 128 : 64) ++ ++/* Maximum stack alignment. */ ++#define MAX_STACK_ALIGNMENT (loongarch_stack_realign ? 
MAX_OFILE_ALIGNMENT : STACK_BOUNDARY) ++ + /* Symbolic macros for the registers used to return integer and floating + point values. */ + + #define GP_RETURN (GP_REG_FIRST + 4) + #define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : (FP_REG_FIRST + 0)) + +-#define MAX_ARGS_IN_REGISTERS (TARGET_OLDABI ? 4 : 8) ++#define MAX_ARGS_IN_REGISTERS 8 + + /* Symbolic macros for the first/last argument registers. */ + + #define GP_ARG_FIRST (GP_REG_FIRST + 4) +-#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) ++#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) + #define FP_ARG_FIRST (FP_REG_FIRST + 0) +-#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) ++#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) + + /* True if MODE is vector and supported in a LSX vector register. */ + #define LSX_SUPPORTED_MODE_P(MODE) \ +@@ -1188,60 +735,39 @@ enum reg_class + && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ + || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)) + ++#define RECIP_MASK_NONE 0x00 ++#define RECIP_MASK_DIV 0x01 ++#define RECIP_MASK_SQRT 0x02 ++#define RECIP_MASK_RSQRT 0x04 ++#define RECIP_MASK_VEC_DIV 0x08 ++#define RECIP_MASK_VEC_SQRT 0x10 ++#define RECIP_MASK_VEC_RSQRT 0x20 ++#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \ ++ | RECIP_MASK_RSQRT | RECIP_MASK_VEC_SQRT \ ++ | RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_RSQRT) ++ ++#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0 || TARGET_uARCH_LA664) ++#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0 || TARGET_uARCH_LA664) ++#define TARGET_RECIP_RSQRT ((recip_mask & RECIP_MASK_RSQRT) != 0 || TARGET_uARCH_LA664) ++#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0 || TARGET_uARCH_LA664) ++#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0 || TARGET_uARCH_LA664) ++#define TARGET_RECIP_VEC_RSQRT ((recip_mask & RECIP_MASK_VEC_RSQRT) != 0 || TARGET_uARCH_LA664) ++ + /* 1 if N is a possible register number for function argument passing. + We have no FP argument registers when soft-float. */ + + /* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI. */ +-#define FUNCTION_ARG_REGNO_P(N) \ +- (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \ ++#define FUNCTION_ARG_REGNO_P(N) \ ++ (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \ + || (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST))) + +- +-/* This structure has to cope with two different argument allocation +- schemes. Most LARCH ABIs view the arguments as a structure, of which +- the first N words go in registers and the rest go on the stack. If I +- < N, the Ith word might go in Ith integer argument register or in a +- floating-point register. For these ABIs, we only need to remember +- the offset of the current argument into the structure. +- +- So for the standard ABIs, the first N words are allocated to integer +- registers, and loongarch_function_arg decides on an argument-by-argument +- basis whether that argument should really go in an integer register, +- or in a floating-point one. */ +- +-typedef struct loongarch_args { +- /* Always true for varargs functions. Otherwise true if at least +- one argument has been passed in an integer register. */ +- int gp_reg_found; +- +- /* The number of arguments seen so far. */ +- unsigned int arg_number; +- +- /* The number of integer registers used so far. This is the number +- of words that have been added to the argument structure, limited +- to MAX_ARGS_IN_REGISTERS. 
*/ ++typedef struct { ++ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */ + unsigned int num_gprs; + ++ /* Number of floating-point registers used so far, likewise. */ + unsigned int num_fprs; + +- /* The number of words passed on the stack. */ +- unsigned int stack_words; +- +- /* On the loongarch16, we need to keep track of which floating point +- arguments were passed in general registers, but would have been +- passed in the FP regs if this were a 32-bit function, so that we +- can move them to the FP regs if we wind up calling a 32-bit +- function. We record this information in fp_code, encoded in base +- four. A zero digit means no floating point argument, a one digit +- means an SFmode argument, and a two digit means a DFmode argument, +- and a three digit is not used. The low order digit is the first +- argument. Thus 6 == 1 * 4 + 2 means a DFmode argument followed by +- an SFmode argument. ??? A more sophisticated approach will be +- needed if LARCH_ABI != ABILP32. */ +- int fp_code; +- +- /* True if the function has a prototype. */ +- int prototype; + } CUMULATIVE_ARGS; + + /* Initialize a variable CUM of type CUMULATIVE_ARGS +@@ -1251,48 +777,37 @@ typedef struct loongarch_args { + #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ + memset (&(CUM), 0, sizeof (CUM)) + +- +-#define EPILOGUE_USES(REGNO) loongarch_epilogue_uses (REGNO) ++#define EPILOGUE_USES(REGNO) loongarch_epilogue_uses (REGNO) + ++#define STACK_ALIGN_SIZE_INTERNAL \ ++ (crtl->stack_realign_needed) \ ++? (crtl->stack_alignment_needed / BITS_PER_UNIT) \ ++: (TARGET_ABI_LP64 ? 16 : 8) + /* Treat LOC as a byte offset from the stack pointer and round it up + to the next fully-aligned offset. */ + #define LARCH_STACK_ALIGN(LOC) \ +- (TARGET_NEWABI ? ROUND_UP ((LOC), 16) : ROUND_UP ((LOC), 8)) ++ ROUND_UP ((LOC), TARGET_ABI_LP64 ? 16 : 8) + +- +-/* Output assembler code to FILE to increment profiler label # LABELNO +- for profiling a function entry. */ ++#define LARCH_STACK_ALIGN2(LOC) \ ++ ROUND_UP ((LOC), STACK_ALIGN_SIZE_INTERNAL) + + #define MCOUNT_NAME "_mcount" + + /* Emit rtl for profiling. Output assembler code to FILE + to call "_mcount" for profiling a function entry. */ +-#define PROFILE_HOOK(LABEL) \ +- { \ +- rtx fun, ra; \ +- ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \ +- fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ +- emit_library_call (fun, LCT_NORMAL, VOIDmode, ra, Pmode); \ ++#define PROFILE_HOOK(LABEL) \ ++ { \ ++ rtx fun, ra; \ ++ ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \ ++ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ ++ emit_library_call (fun, LCT_NORMAL, VOIDmode, ra, Pmode); \ + } + + /* All the work done in PROFILE_HOOK, but still required. */ + #define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0) + +- +-/* The profiler preserves all interesting registers, including $31. */ +-#define LARCH_SAVE_REG_FOR_PROFILING_P(REGNO) false +- +-/* No loongarch port has ever used the profiler counter word, so don't emit it +- or the label for it. */ +- + #define NO_PROFILE_COUNTERS 1 + +-/* Define this macro if the code for function profiling should come +- before the function prologue. Normally, the profiling code comes +- after. */ +- +-/* #define PROFILE_BEFORE_PROLOGUE */ +- + /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, + the stack pointer does not matter. The value is tested only in + functions that have frame pointers. 
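An aside on the immediate-operand hunks earlier in this loongarch.h diff: the new IMM_REACH-based macros all encode the same fact, namely that LoongArch arithmetic immediates are signed 12-bit values. The standalone sketch below is illustration only, not part of the patch; HOST_WIDE_INT is approximated by int64_t, and the main driver is hypothetical. It shows how CONST_HIGH_PART and CONST_LOW_PART round a constant to the nearest multiple of IMM_REACH (0x1000) so that the remainder always fits a signed 12-bit immediate:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for GCC's host-side integer type; in GCC proper this comes
   from hwint.h (assumption for this sketch).  */
typedef int64_t HOST_WIDE_INT;
#define HOST_WIDE_INT_1 ((HOST_WIDE_INT) 1)

/* Mirrors of the definitions added by the hunks above.  */
#define IMM_BITS 12
#define IMM_REACH (HOST_WIDE_INT_1 << IMM_BITS)	/* 0x1000 */

/* True if VALUE is a signed 12-bit number; relies on unsigned
   wraparound to fold the range check into one comparison.  */
#define IMM12_OPERAND(VALUE) \
  ((uint64_t) (VALUE) + IMM_REACH / 2 < (uint64_t) IMM_REACH)

/* Round VALUE to the nearest multiple of IMM_REACH; the remainder is
   then a signed 12-bit number by construction.  */
#define CONST_HIGH_PART(VALUE) \
  (((VALUE) + (IMM_REACH / 2)) & ~(IMM_REACH - 1))
#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))

int
main (void)
{
  HOST_WIDE_INT v = 0x12945;		  /* arbitrary example constant */
  HOST_WIDE_INT hi = CONST_HIGH_PART (v); /* 0x13000 */
  HOST_WIDE_INT lo = CONST_LOW_PART (v);  /* -0x6bb, i.e. -1723 */
  printf ("hi=0x%llx lo=%lld ok=%d\n", (unsigned long long) hi,
          (long long) lo, (int) (hi + lo == v && IMM12_OPERAND (lo)));
  return 0;
}

Adding IMM_REACH / 2 before masking (round-to-nearest rather than truncation) is what keeps the low part inside the signed range [-2048, 2047], matching the "VALUE - X is a signed 12-bit value" comment in the hunk above.
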
+@@ -1300,16 +815,13 @@ typedef struct loongarch_args { + + #define EXIT_IGNORE_STACK 1 + +- + /* Trampolines are a block of code followed by two pointers. */ + ++#define TRAMPOLINE_CODE_SIZE 16 + #define TRAMPOLINE_SIZE \ +- (loongarch_trampoline_code_size () + GET_MODE_SIZE (ptr_mode) * 2) +- +-/* Forcing a 64-bit alignment for 32-bit targets allows us to load two +- pointers from a single LUI base. */ +- +-#define TRAMPOLINE_ALIGNMENT 64 ++ ((Pmode == SImode) ? TRAMPOLINE_CODE_SIZE \ ++ : (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)) ++#define TRAMPOLINE_ALIGNMENT POINTER_SIZE + + /* loongarch_trampoline_init calls this library function to flush + program and data caches. */ +@@ -1318,96 +830,64 @@ typedef struct loongarch_args { + #define CACHE_FLUSH_FUNC "_flush_cache" + #endif + +-#define LARCH_ICACHE_SYNC(ADDR, SIZE) \ +- /* Flush both caches. We need to flush the data cache in case \ +- the system has a write-back cache. */ \ +- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, loongarch_cache_flush_func), \ +- LCT_NORMAL, VOIDmode, ADDR, Pmode, SIZE, Pmode, \ +- GEN_INT (3), TYPE_MODE (integer_type_node)) +- +- + /* Addressing modes, and classification of registers for them. */ + +-#define REGNO_OK_FOR_INDEX_P(REGNO) 0 ++#define REGNO_OK_FOR_INDEX_P(REGNO) \ ++ loongarch_regno_mode_ok_for_base_p (REGNO, VOIDmode, 1) ++ + #define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ + loongarch_regno_mode_ok_for_base_p (REGNO, MODE, 1) +- ++ + /* Maximum number of registers that can appear in a valid memory address. */ + +-#define MAX_REGS_PER_ADDRESS 1 ++#define MAX_REGS_PER_ADDRESS 2 + + /* Check for constness inline but use loongarch_legitimate_address_p + to check whether a constant really is an address. */ + +-#define CONSTANT_ADDRESS_P(X) \ +- (CONSTANT_P (X) && memory_address_p (SImode, X)) ++#define CONSTANT_ADDRESS_P(X) (CONSTANT_P (X) && memory_address_p (SImode, X)) + + /* This handles the magic '..CURRENT_FUNCTION' symbol, which means + 'the start of the function that this code is output in'. */ + +-#define ASM_OUTPUT_LABELREF(FILE,NAME) \ +- do { \ +- if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \ +- asm_fprintf ((FILE), "%U%s", \ +- XSTR (XEXP (DECL_RTL (current_function_decl), \ +- 0), 0)); \ +- else \ +- asm_fprintf ((FILE), "%U%s", (NAME)); \ +- } while (0) +- +-/* Flag to mark a function decl symbol that requires a long call. */ +-#define SYMBOL_FLAG_LONG_CALL (SYMBOL_FLAG_MACH_DEP << 0) +-#define SYMBOL_REF_LONG_CALL_P(X) \ +- ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_LONG_CALL) != 0) +- +-/* This flag marks functions that cannot be lazily bound. */ +-#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1) +-#define SYMBOL_REF_BIND_NOW_P(RTX) \ +- ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0) +- +-/* True if we're generating a form of LARCH16 code in which jump tables +- are stored in the text section and encoded as 16-bit PC-relative +- offsets. This is only possible when general text loads are allowed, +- since the table access itself will be an "lh" instruction. If the +- PC-relative offsets grow too large, 32-bit offsets are used instead. */ +- +- +-#define CASE_VECTOR_MODE (ptr_mode) ++#define ASM_OUTPUT_LABELREF(FILE, NAME) \ ++ do \ ++ { \ ++ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \ ++ asm_fprintf ((FILE), "%U%s", \ ++ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \ ++ else \ ++ asm_fprintf ((FILE), "%U%s", (NAME)); \ ++ } \ ++ while (0) + +-/* Only use short offsets if their range will not overflow. 
*/ +-#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) \ +- (ptr_mode ? HImode : SImode) ++#define CASE_VECTOR_MODE Pmode + ++#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) Pmode + + /* Define this as 1 if `char' should by default be signed; else as 0. */ + #ifndef DEFAULT_SIGNED_CHAR + #define DEFAULT_SIGNED_CHAR 1 + #endif + +-/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets, +- we generally don't want to use them for copying arbitrary data. +- A single N-word move is usually the same cost as N single-word moves. */ ++/* The SPARC port says: ++ The maximum number of bytes that a single instruction ++ can move quickly between memory and registers or between ++ two memory locations. */ + #define MOVE_MAX UNITS_PER_WORD + /* We don't modify it for LSX as it is only used by the classic reload. */ + #define MAX_MOVE_MAX 8 + +-/* Define this macro as a C expression which is nonzero if +- accessing less than a word of memory (i.e. a `char' or a +- `short') is no faster than accessing a word of memory, i.e., if +- such access require more than one instruction or if there is no +- difference in cost between byte and (aligned) word loads. +- +- On RISC machines, it tends to generate better code to define +- this as 1, since it avoids making a QI or HI mode register. +- +-*/ +-#define SLOW_BYTE_ACCESS (1) +- +-/* Standard LARCH integer shifts truncate the shift amount to the +- width of the shifted operand. However, Loongson MMI shifts +- do not truncate the shift amount at all. */ +-#define SHIFT_COUNT_TRUNCATED (1) ++/* The SPARC port says: ++ Nonzero if access to memory by bytes is slow and undesirable. ++ For RISC chips, it means that access to memory by bytes is no ++ better than access by words when possible, so grab a whole word ++ and maybe make use of that. */ ++#define SLOW_BYTE_ACCESS 1 + ++/* Standard LoongArch integer shifts truncate the shift amount to the ++ width of the shifted operand. */ ++#define SHIFT_COUNT_TRUNCATED 1 + + /* Specify the machine mode that pointers have. + After generation of rtl, the compiler makes no further distinction +@@ -1422,7 +902,6 @@ typedef struct loongarch_args { + + #define FUNCTION_MODE SImode + +- + /* We allocate $fcc registers by hand and can't cope with moves of + CCmode registers to and from pseudos (or memory). */ + #define AVOID_CCMODE_COPIES +@@ -1433,14 +912,6 @@ typedef struct loongarch_args { + #define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost + #define LOGICAL_OP_NON_SHORT_CIRCUIT 0 + +-/* The LARCH port has several functions that return an instruction count. +- Multiplying the count by this value gives the number of bytes that +- the instructions occupy. */ +-#define BASE_INSN_LENGTH (4) +- +-/* The length of a NOP in bytes. */ +-#define NOP_INSN_LENGTH (4) +- + /* If defined, modifies the length assigned to instruction INSN as a + function of the context in which it is used. LENGTH is an lvalue + that contains the initially computed length of the insn and should +@@ -1451,17 +922,8 @@ typedef struct loongarch_args { + /* Return the asm template for a conditional branch instruction. + OPCODE is the opcode's mnemonic and OPERANDS is the asm template for + its operands. */ +-#define LARCH_BRANCH(OPCODE, OPERANDS) \ +- OPCODE "\t" OPERANDS ++#define LARCH_BRANCH(OPCODE, OPERANDS) OPCODE "\t" OPERANDS + +-#define LARCH_BRANCH_C(OPCODE, OPERANDS) \ +- OPCODE "%:\t" OPERANDS +- +-/* Return an asm string that forces INSN to be treated as an absolute +- J or JAL instruction instead of an assembler macro. 
*/ +-#define LARCH_ABSOLUTE_JUMP(INSN) INSN +- +- + /* Control the assembler format that we output. */ + + /* Output to assembler file text saying following lines +@@ -1478,20 +940,19 @@ typedef struct loongarch_args { + #define ASM_APP_OFF " #NO_APP\n" + #endif + +-#define REGISTER_NAMES \ +-{ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", \ +- "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", \ +- "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", \ +- "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", \ +- "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \ +- "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ +- "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \ +- "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \ +- "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4","$fcc5","$fcc6","$fcc7", \ ++#define REGISTER_NAMES \ ++{ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", \ ++ "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", \ ++ "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", \ ++ "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", \ ++ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \ ++ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \ ++ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \ ++ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \ ++ "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4","$fcc5","$fcc6","$fcc7", \ + "$arg", "$frame"} + +-/* List the "software" names for each register. Also list the numerical +- names for $fp and $sp. */ ++/* This macro defines additional names for hard registers. */ + + #define ADDITIONAL_REGISTER_NAMES \ + { \ +@@ -1595,61 +1056,17 @@ typedef struct loongarch_args { + { "xr31", 31 + FP_REG_FIRST } \ + } + +-#define DBR_OUTPUT_SEQEND(STREAM) \ +-do \ +- { \ +- /* Emit a blank line after the delay slot for emphasis. */ \ +- fputs ("\n", STREAM); \ +- } \ +-while (0) +- +-/* The LARCH implementation uses some labels for its own purpose. The +- following lists what labels are created, and are all formed by the +- pattern $L[a-z].*. The machine independent portion of GCC creates +- labels matching: $L[A-Z][0-9]+ and $L[0-9]+. +- +- LM[0-9]+ Silicon Graphics/ECOFF stabs label before each stmt. +- $Lb[0-9]+ Begin blocks for LARCH debug support +- $Lc[0-9]+ Label for use in s operation. +- $Le[0-9]+ End blocks for LARCH debug support */ +- +-#undef ASM_DECLARE_OBJECT_NAME +-#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ +- loongarch_declare_object (STREAM, NAME, "", ":\n") +- + /* Globalizing directive for a label. */ + #define GLOBAL_ASM_OP "\t.globl\t" + +-/* This says how to define a global common symbol. */ +- +-#define ASM_OUTPUT_ALIGNED_DECL_COMMON loongarch_output_aligned_decl_common +- +-/* This says how to define a local common symbol (i.e., not visible to +- linker). */ +- +-#ifndef ASM_OUTPUT_ALIGNED_LOCAL +-#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \ +- loongarch_declare_common_object (STREAM, NAME, "\n\t.lcomm\t", SIZE, ALIGN, false) +-#endif +- + /* This says how to output an external. It would be possible not to +- output anything and let undefined symbol become external. However ++ output anything and let undefined symbol become external. However + the assembler uses length information on externals to allocate in + data/sdata bss/sbss, thereby saving exec time. 
*/ + + #undef ASM_OUTPUT_EXTERNAL +-#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \ +- loongarch_output_external(STREAM,DECL,NAME) +- +-/* This is how to declare a function name. The actual work of +- emitting the label is moved to function_prologue, so that we can +- get the line number correctly emitted before the .ent directive, +- and after any .file directives. Define as empty so that the function +- is not declared before the .ent directive elsewhere. */ +- +-#undef ASM_DECLARE_FUNCTION_NAME +-#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \ +- loongarch_declare_function_name(STREAM,NAME,DECL) ++#define ASM_OUTPUT_EXTERNAL(STREAM, DECL, NAME) \ ++ loongarch_output_external (STREAM, DECL, NAME) + + /* This is how to store into the string LABEL + the symbol_ref name of an internal numbered label where +@@ -1657,8 +1074,8 @@ while (0) + This is suitable for output with `assemble_name'. */ + + #undef ASM_GENERATE_INTERNAL_LABEL +-#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \ +- sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM)) ++#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \ ++ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long) (NUM)) + + /* Print debug labels as "foo = ." rather than "foo:" because they should + represent a byte pointer rather than an ISA-encoded address. This is +@@ -1677,159 +1094,108 @@ while (0) + At the time of writing, this hook is not used for the function end + label: + +- $LFExxx: ++ $LFExxx: + .end foo + + */ + +-#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ ++#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ + fprintf (FILE, "%s%s%d = .\n", LOCAL_LABEL_PREFIX, PREFIX, NUM) + + /* This is how to output an element of a case-vector that is absolute. */ + +-#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ +- fprintf (STREAM, "\t%s\t%sL%d\n", \ +- ptr_mode == DImode ? ".dword" : ".word", \ +- LOCAL_LABEL_PREFIX, \ +- VALUE) +- +-/* This is how to output an element of a case-vector. We can make the +- entries GP-relative when .gp(d)word is supported. */ +- +-#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ +-do { \ +- if (TARGET_RTP_PIC) \ +- { \ +- /* Make the entry relative to the start of the function. */ \ +- rtx fnsym = XEXP (DECL_RTL (current_function_decl), 0); \ +- fprintf (STREAM, "\t%s\t%sL%d-", \ +- Pmode == DImode ? ".dword" : ".word", \ +- LOCAL_LABEL_PREFIX, VALUE); \ +- assemble_name (STREAM, XSTR (fnsym, 0)); \ +- fprintf (STREAM, "\n"); \ +- } \ +- else \ +- fprintf (STREAM, "\t%s\t%sL%d-%sL%d\n", \ +- ptr_mode == DImode ? ".dword" : ".word", \ +- LOCAL_LABEL_PREFIX, VALUE, \ +- LOCAL_LABEL_PREFIX, REL); \ +-} while (0) +- +-/* Mark inline jump tables as data for the purpose of disassembly. For +- simplicity embed the jump table's label number in the local symbol +- produced so that multiple jump tables within a single function end +- up marked with unique symbols. Retain the alignment setting from +- `elfos.h' as we are replacing the definition from there. */ +- +-#undef ASM_OUTPUT_BEFORE_CASE_LABEL +-#define ASM_OUTPUT_BEFORE_CASE_LABEL(STREAM, PREFIX, NUM, TABLE) \ +- do \ +- { \ +- ASM_OUTPUT_ALIGN ((STREAM), 2); \ +- if (JUMP_TABLES_IN_TEXT_SECTION) \ +- loongarch_set_text_contents_type (STREAM, "__jump_", NUM, FALSE); \ +- } \ +- while (0) ++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \ ++ fprintf (STREAM, "\t%s\t%sL%d\n", ptr_mode == DImode ? 
".dword" : ".word", \ ++ LOCAL_LABEL_PREFIX, VALUE) + +-/* Reset text marking to code after an inline jump table. Like with +- the beginning of a jump table use the label number to keep symbols +- unique. */ ++/* This is how to output an element of a case-vector. */ + +-#define ASM_OUTPUT_CASE_END(STREAM, NUM, TABLE) \ +- do \ +- if (JUMP_TABLES_IN_TEXT_SECTION) \ +- loongarch_set_text_contents_type (STREAM, "__jend_", NUM, TRUE); \ ++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ ++ do \ ++ { \ ++ fprintf (STREAM, "\t%s\t%sL%d-%sL%d\n", \ ++ ptr_mode == DImode ? ".dword" : ".word", LOCAL_LABEL_PREFIX, \ ++ VALUE, LOCAL_LABEL_PREFIX, REL); \ ++ } \ + while (0) + ++#define JUMP_TABLES_IN_TEXT_SECTION 0 ++ + /* This is how to output an assembler line + that says to advance the location counter + to a multiple of 2**LOG bytes. */ + +-#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ +- fprintf (STREAM, "\t.align\t%d\n", (LOG)) ++#define ASM_OUTPUT_ALIGN(STREAM, LOG) fprintf (STREAM, "\t.align\t%d\n", (LOG)) + +-#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM,LOG) \ ++/* "nop" instruction 54525952 (andi $r0,$r0,0) is ++ used for padding. */ ++#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, LOG) \ + fprintf (STREAM, "\t.align\t%d,54525952,4\n", (LOG)) + +- + /* This is how to output an assembler line to advance the location + counter by SIZE bytes. */ + + #undef ASM_OUTPUT_SKIP +-#define ASM_OUTPUT_SKIP(STREAM,SIZE) \ +- fprintf (STREAM, "\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)) ++#define ASM_OUTPUT_SKIP(STREAM, SIZE) \ ++ fprintf (STREAM, "\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n", (SIZE)) + + /* This is how to output a string. */ + #undef ASM_OUTPUT_ASCII + #define ASM_OUTPUT_ASCII loongarch_output_ascii + +- +-/* Default to -G 8 */ +-#ifndef LARCH_DEFAULT_GVALUE +-#define LARCH_DEFAULT_GVALUE 8 +-#endif +- + /* Define the strings to put out for each section in the object file. */ +-#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */ +-#define DATA_SECTION_ASM_OP "\t.data" /* large data */ ++#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */ ++#define DATA_SECTION_ASM_OP "\t.data" /* large data */ + + #undef READONLY_DATA_SECTION_ASM_OP +-#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* read-only data */ +- +-#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \ +-do \ +- { \ +- fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \ +- TARGET_64BIT ? "daddiu" : "addiu", \ +- reg_names[STACK_POINTER_REGNUM], \ +- reg_names[STACK_POINTER_REGNUM], \ +- TARGET_64BIT ? "sd" : "sw", \ +- reg_names[REGNO], \ +- reg_names[STACK_POINTER_REGNUM]); \ +- } \ +-while (0) +- +-#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \ +-do \ +- { \ +- loongarch_push_asm_switch (&loongarch_noreorder); \ +- fprintf (STREAM, "\t%s\t%s,0(%s)\n\t%s\t%s,%s,8\n", \ +- TARGET_64BIT ? "ld" : "lw", \ +- reg_names[REGNO], \ +- reg_names[STACK_POINTER_REGNUM], \ +- TARGET_64BIT ? "daddu" : "addu", \ +- reg_names[STACK_POINTER_REGNUM], \ +- reg_names[STACK_POINTER_REGNUM]); \ +- loongarch_pop_asm_switch (&loongarch_noreorder); \ +- } \ +-while (0) ++#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* read-only data */ ++ ++#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \ ++ do \ ++ { \ ++ fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,%s,0\n", \ ++ TARGET_64BIT ? "addi.d" : "addi.w", \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ TARGET_64BIT ? 
"st.d" : "st.w", reg_names[REGNO], \ ++ reg_names[STACK_POINTER_REGNUM]); \ ++ } \ ++ while (0) ++ ++#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \ ++ do \ ++ { \ ++ fprintf (STREAM, "\t%s\t%s,%s,0\n\t%s\t%s,%s,8\n", \ ++ TARGET_64BIT ? "ld.d" : "ld.w", reg_names[REGNO], \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ TARGET_64BIT ? "addi.d" : "addi.w", \ ++ reg_names[STACK_POINTER_REGNUM], \ ++ reg_names[STACK_POINTER_REGNUM]); \ ++ } \ ++ while (0) + + /* How to start an assembler comment. +- The leading space is important (the loongarch native assembler requires it). */ ++ The leading space is important (the loongarch native assembler requires it). ++ */ + #ifndef ASM_COMMENT_START + #define ASM_COMMENT_START " #" + #endif +- ++ + #undef SIZE_TYPE + #define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int") + + #undef PTRDIFF_TYPE + #define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int") + +-/* The minimum alignment of any expanded block move. */ +-#define LARCH_MIN_MOVE_MEM_ALIGN 16 +- + /* The maximum number of bytes that can be copied by one iteration of + a movmemsi loop; see loongarch_block_move_loop. */ +-#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER \ +- (UNITS_PER_WORD * 4) ++#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4) + + /* The maximum number of bytes that can be copied by a straight-line + implementation of movmemsi; see loongarch_block_move_straight. We want + to make sure that any loop-based implementation will iterate at + least twice. */ +-#define LARCH_MAX_MOVE_BYTES_STRAIGHT \ +- (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) ++#define LARCH_MAX_MOVE_BYTES_STRAIGHT (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) + + /* The base cost of a memcpy call, for MOVE_RATIO and friends. These + values were determined experimentally by benchmarking with CSiBE. +@@ -1847,73 +1213,29 @@ while (0) + we'll have to generate a load/store pair for each, halve the + value of LARCH_CALL_RATIO to take that into account. */ + +-#define MOVE_RATIO(speed) \ +- (HAVE_movmemsi \ ++#define MOVE_RATIO(speed) \ ++ (HAVE_movmemsi \ + ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \ + : CLEAR_RATIO (speed) / 2) + + /* For CLEAR_RATIO, when optimizing for size, give a better estimate + of the length of a memset call, but use the default otherwise. */ + +-#define CLEAR_RATIO(speed)\ +- ((speed) ? 15 : LARCH_CALL_RATIO) ++#define CLEAR_RATIO(speed) ((speed) ? 15 : LARCH_CALL_RATIO) + + /* This is similar to CLEAR_RATIO, but for a non-zero constant, so when + optimizing for size adjust the ratio to account for the overhead of + loading the constant and replicating it across the word. */ + +-#define SET_RATIO(speed) \ +- ((speed) ? 15 : LARCH_CALL_RATIO - 2) +- +-/* Since the bits of the _init and _fini function is spread across +- many object files, each potentially with its own GP, we must assume +- we need to load our GP. We don't preserve $gp or $ra, since each +- init/fini chunk is supposed to initialize $gp, and crti/crtn +- already take care of preserving $ra and, when appropriate, $gp. */ +-#if (defined _ABI64 && _LARCH_SIM == _ABI64) +-#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ +- asm (SECTION_OP "\n\ +- .set push\n\ +- la $r20, " USER_LABEL_PREFIX #FUNC "\n\ +- jirl $r1, $r20, 0\n\ +- .set pop\n\ +- " TEXT_SECTION_ASM_OP); +-#endif +-#ifndef HAVE_AS_TLS +-#define HAVE_AS_TLS 0 +-#endif +- +-#ifndef HAVE_AS_NAN +-#define HAVE_AS_NAN 0 +-#endif ++#define SET_RATIO(speed) ((speed) ? 
15 : LARCH_CALL_RATIO - 2) + + #ifndef USED_FOR_TARGET +-/* Information about ".set noFOO; ...; .set FOO" blocks. */ +-struct loongarch_asm_switch { +- /* The FOO in the description above. */ +- const char *name; +- +- /* The current block nesting level, or 0 if we aren't in a block. */ +- int nesting_level; +-}; +- + extern const enum reg_class loongarch_regno_to_class[]; +-extern const char *current_function_file; /* filename current function is in */ +-extern int num_source_filenames; /* current .file # */ +-extern int loongarch_dbx_regno[]; + extern int loongarch_dwarf_regno[]; +-extern bool loongarch_split_p[]; +-extern bool loongarch_use_pcrel_pool_p[]; +-extern enum processor loongarch_arch; /* which cpu to codegen for */ +-extern enum processor loongarch_tune; /* which cpu to schedule for */ +-extern int loongarch_isa; /* architectural level */ +-extern int loongarch_isa_rev; +-extern const struct loongarch_cpu_info *loongarch_arch_info; +-extern const struct loongarch_cpu_info *loongarch_tune_info; +-extern unsigned int loongarch_base_compression_flags; + + /* Information about a function's frame layout. */ +-struct GTY(()) loongarch_frame_info { ++struct GTY (()) loongarch_frame_info ++{ + /* The size of the frame in bytes. */ + HOST_WIDE_INT total_size; + +@@ -1930,216 +1252,67 @@ struct GTY(()) loongarch_frame_info { + /* Bit X is set if the function saves or restores GPR X. */ + unsigned int mask; + ++ unsigned int gpr_saved_num; ++ + /* Likewise FPR X. */ + unsigned int fmask; + +- /* Likewise doubleword accumulator X ($acX). */ +- unsigned int acc_mask; +- +- /* The number of GPRs, FPRs, doubleword accumulators and COP0 +- registers saved. */ +- unsigned int num_gp; +- unsigned int num_fp; +- unsigned int num_acc; +- unsigned int num_cop0_regs; +- +- /* The offset of the topmost GPR, FPR, accumulator and COP0-register +- save slots from the top of the frame, or zero if no such slots are +- needed. */ +- HOST_WIDE_INT gp_save_offset; +- HOST_WIDE_INT fp_save_offset; +- HOST_WIDE_INT acc_save_offset; +- HOST_WIDE_INT cop0_save_offset; +- +- /* Likewise, but giving offsets from the bottom of the frame. */ ++ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */ ++ unsigned save_libcall_adjustment; ++ ++ /* Offsets of fixed-point and floating-point save areas from frame ++ bottom. */ + HOST_WIDE_INT gp_sp_offset; + HOST_WIDE_INT fp_sp_offset; +- HOST_WIDE_INT acc_sp_offset; +- HOST_WIDE_INT cop0_sp_offset; + +- /* Similar, but the value passed to _mcount. */ +- HOST_WIDE_INT ra_fp_offset; +- +- /* The offset of arg_pointer_rtx from the bottom of the frame. */ +- HOST_WIDE_INT arg_pointer_offset; ++ /* Offset of virtual frame pointer from stack pointer/frame bottom. */ ++ HOST_WIDE_INT frame_pointer_offset; + +- /* The offset of hard_frame_pointer_rtx from the bottom of the frame. */ ++ /* Offset of hard frame pointer from stack pointer/frame bottom. */ + HOST_WIDE_INT hard_frame_pointer_offset; + +- /* How much the GPR save/restore routines adjust sp (or 0 if unused). */ +- unsigned save_libcall_adjustment; +- +- /* Offset of virtual frame pointer from stack pointer/frame bottom */ +- HOST_WIDE_INT frame_pointer_offset; +-}; +- +-/* Enumeration for masked vectored (VI) and non-masked (EIC) interrupts. 
*/ +-enum loongarch_int_mask +-{ +- INT_MASK_EIC = -1, +- INT_MASK_SW0 = 0, +- INT_MASK_SW1 = 1, +- INT_MASK_HW0 = 2, +- INT_MASK_HW1 = 3, +- INT_MASK_HW2 = 4, +- INT_MASK_HW3 = 5, +- INT_MASK_HW4 = 6, +- INT_MASK_HW5 = 7 ++ /* The offset of arg_pointer_rtx from the bottom of the frame. */ ++ HOST_WIDE_INT arg_pointer_offset; + }; + +-/* Enumeration to mark the existence of the shadow register set. +- SHADOW_SET_INTSTACK indicates a shadow register set with a valid stack +- pointer. */ +-enum loongarch_shadow_set ++struct GTY (()) machine_function + { +- SHADOW_SET_NO, +- SHADOW_SET_YES, +- SHADOW_SET_INTSTACK +-}; +- +-struct GTY(()) machine_function { + /* The next floating-point condition-code register to allocate +- for 8CC targets, relative to ST_REG_FIRST. */ ++ for 8CC targets, relative to FCC_REG_FIRST. */ + unsigned int next_fcc; + + /* The number of extra stack bytes taken up by register varargs. + This area is allocated by the callee at the very top of the frame. */ + int varargs_size; + +- /* The current frame information, calculated by loongarch_compute_frame_info. */ ++ /* The current frame information, calculated by loongarch_compute_frame_info. ++ */ + struct loongarch_frame_info frame; +- +- /* How many instructions it takes to load a label into $AT, or 0 if +- this property hasn't yet been calculated. */ +- unsigned int load_label_num_insns; +- +- /* True if loongarch_adjust_insn_length should ignore an instruction's +- hazard attribute. */ +- bool ignore_hazard_length_p; +- +- /* True if the whole function is suitable for .set noreorder and +- .set nomacro. */ +- bool all_noreorder_p; +- +- /* True if the function has "inflexible" and "flexible" references +- to the global pointer. See loongarch_cfun_has_inflexible_gp_ref_p +- and loongarch_cfun_has_flexible_gp_ref_p for details. */ +- bool has_inflexible_gp_insn_p; +- bool has_flexible_gp_insn_p; +- +- /* True if the function's prologue must load the global pointer +- value into pic_offset_table_rtx and store the same value in +- the function's cprestore slot (if any). Even if this value +- is currently false, we may decide to set it to true later; +- see loongarch_must_initialize_gp_p () for details. */ +- bool must_initialize_gp_p; +- +- /* True if the current function must restore $gp after any potential +- clobber. This value is only meaningful during the first post-epilogue +- split_insns pass; see loongarch_must_initialize_gp_p () for details. */ +- bool must_restore_gp_when_clobbered_p; +- +- /* True if this is an interrupt handler. */ +- bool interrupt_handler_p; +- +- /* Records the way in which interrupts should be masked. Only used if +- interrupts are not kept masked. */ +- enum loongarch_int_mask int_mask; +- +- /* Records if this is an interrupt handler that uses shadow registers. */ +- enum loongarch_shadow_set use_shadow_register_set; +- +- /* True if this is an interrupt handler that should keep interrupts +- masked. */ +- bool keep_interrupts_masked_p; +- +- /* True if this is an interrupt handler that should use DERET +- instead of ERET. */ +- bool use_debug_exception_return_p; +- +- /* True if at least one of the formal parameters to a function must be +- written to the frame header (probably so its address can be taken). */ +- bool does_not_use_frame_header; +- +- /* True if none of the functions that are called by this function need +- stack space allocated for their arguments. 
*/ +- bool optimize_call_stack; +- +- /* True if one of the functions calling this function may not allocate +- a frame header. */ +- bool callers_may_not_allocate_frame; +- +- /* True if GCC stored callee saved registers in the frame header. */ +- bool use_frame_header_for_callee_saved_regs; + }; + #endif + +-/* Enable querying of DFA units. */ +-#define CPU_UNITS_QUERY 0 +- +-/* As on most targets, we want the .eh_frame section to be read-only where +- possible. And as on most targets, this means two things: +- +- (a) Non-locally-binding pointers must have an indirect encoding, +- so that the addresses in the .eh_frame section itself become +- locally-binding. +- +- (b) A shared library's .eh_frame section must encode locally-binding +- pointers in a relative (relocation-free) form. +- +- However, LARCH has traditionally not allowed directives like: +- +- .long x-. +- +- in cases where "x" is in a different section, or is not defined in the +- same assembly file. We are therefore unable to emit the PC-relative +- form required by (b) at assembly time. +- +- Fortunately, the linker is able to convert absolute addresses into +- PC-relative addresses on our behalf. Unfortunately, only certain +- versions of the linker know how to do this for indirect pointers, +- and for personality data. We must fall back on using writable +- .eh_frame sections for shared libraries if the linker does not +- support this feature. */ +-#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ ++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ + (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr) + +-#define SWITCHABLE_TARGET 1 +- +-/* Several named LARCH patterns depend on Pmode. These patterns have the +- form _si for Pmode == SImode and _di for Pmode == DImode. ++/* Several named LoongArch patterns depend on Pmode. These patterns have the ++ form si for Pmode == SImode and di for Pmode == DImode. + Add the appropriate suffix to generator function NAME and invoke it + with arguments ARGS. */ + #define PMODE_INSN(NAME, ARGS) \ +- (Pmode == SImode ? NAME ## _si ARGS : NAME ## _di ARGS) ++ (Pmode == SImode ? NAME##si ARGS : NAME##di ARGS) ++ ++/* Do emit .note.GNU-stack by default. */ ++#ifndef NEED_INDICATE_EXEC_STACK ++#define NEED_INDICATE_EXEC_STACK 1 ++#endif + +-/***********************/ +-/* N_LARCH-PORT */ +-/***********************/ + /* The `Q' extension is not yet supported. */ +-/* TODO: according to march */ ++/* TODO: according to march. */ + #define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4) + + /* The largest type that can be passed in floating-point registers. */ +-/* TODO: according to mabi */ +-#define UNITS_PER_FP_ARG (TARGET_HARD_FLOAT ? (TARGET_64BIT ? 8 : 4) : 0) +- +-/* Internal macros to classify an ISA register's type. */ +- +-#define GP_TEMP_FIRST (GP_REG_FIRST + 12) +- +-#define CALLEE_SAVED_REG_NUMBER(REGNO) \ +- ((REGNO) >= 22 && (REGNO) <= 31 ? (REGNO) - 22 : -1) +- +-#define N_LARCH_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1) +-#define N_LARCH_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, N_LARCH_PROLOGUE_TEMP_REGNUM) +- +-#define LIBCALL_VALUE(MODE) \ +- loongarch_function_value (NULL_TREE, NULL_TREE, MODE) +- +-#define FUNCTION_VALUE(VALTYPE, FUNC) \ +- loongarch_function_value (VALTYPE, FUNC, VOIDmode) +- +-#define FRAME_GROWS_DOWNWARD 1 ++/* TODO: according to mabi. */ ++#define UNITS_PER_FP_ARG \ ++ (TARGET_HARD_FLOAT ? (TARGET_DOUBLE_FLOAT ? 
8 : 4) : 0) + + #define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index be950c9e4..097c9f4db 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -1,7 +1,7 @@ +-;; Loongarch.md Machine Description for LARCH based processors +-;; Copyright (C) 1989-2018 Free Software Foundation, Inc. +-;; Contributed by A. Lichnewsky, lich@inria.inria.fr +-;; Changes by Michael Meissner, meissner@osf.org ++;; Machine Description for LoongArch for GNU compiler. ++;; Copyright (C) 2020-2022 Free Software Foundation, Inc. ++;; Contributed by Loongson Ltd. ++;; Based on MIPS target for GNU compiler. + + ;; This file is part of GCC. + +@@ -19,118 +19,96 @@ + ;; along with GCC; see the file COPYING3. If not see + ;; . + +-(define_enum "processor" [ +- loongarch +- loongarch64 +- la464 +-]) +- + (define_c_enum "unspec" [ + ;; Integer operations that are too cumbersome to describe directly. +- UNSPEC_WSBH +- UNSPEC_DSBH +- UNSPEC_DSHD ++ UNSPEC_REVB_2H ++ UNSPEC_REVB_4H ++ UNSPEC_REVH_D + + ;; Floating-point moves. + UNSPEC_LOAD_LOW + UNSPEC_LOAD_HIGH + UNSPEC_STORE_WORD + UNSPEC_MOVGR2FRH ++ UNSPEC_MOVGR2FR + UNSPEC_MOVFRH2GR ++ UNSPEC_MOVFR2GR ++ UNSPEC_MOVFCC2GR ++ UNSPEC_MOVGR2FCC ++ UNSPEC_MOVFR2FCC + +- ;; Floating-point environment. +- UNSPEC_MOVFCSR2GR +- UNSPEC_MOVGR2FCSR ++ ;; Floating point unspecs. ++ UNSPEC_FRINT ++ UNSPEC_FCLASS ++ UNSPEC_FCOPYSIGN + +- ;; GP manipulation. ++ ;; Override return address for exception handling. + UNSPEC_EH_RETURN + +- ;; +- UNSPEC_FRINT +- UNSPEC_FCLASS ++ ;; Bit operation + UNSPEC_BYTEPICK_W + UNSPEC_BYTEPICK_D + UNSPEC_BITREV_4B + UNSPEC_BITREV_8B + +- ;; Symbolic accesses. +- UNSPEC_LOAD_CALL +- +- ;; Blockage and synchronisation. +- UNSPEC_BLOCKAGE +- UNSPEC_DBAR +- UNSPEC_IBAR +- +- ;; CPUCFG +- UNSPEC_CPUCFG +- UNSPEC_ASRTLE_D +- UNSPEC_ASRTGT_D +- +- UNSPEC_CSRRD +- UNSPEC_CSRWR +- UNSPEC_CSRXCHG +- UNSPEC_IOCSRRD +- UNSPEC_IOCSRWR +- +- ;; cacop +- UNSPEC_CACOP +- +- ;; pte +- UNSPEC_LDDIR +- UNSPEC_LDPTE +- +- ;; Cache manipulation. +- UNSPEC_LARCH_CACHE +- +- ;; Interrupt handling. +- UNSPEC_ERTN +- UNSPEC_DI +- UNSPEC_EHB +- UNSPEC_RDPGPR +- +- ;; Used in a call expression in place of args_size. It's present for PIC +- ;; indirect calls where it contains args_size and the function symbol. +- UNSPEC_CALL_ATTR +- +- +- ;; Stack checking. +- UNSPEC_PROBE_STACK_RANGE +- +- ;; The `.insn' pseudo-op. +- UNSPEC_INSN_PSEUDO +- + ;; TLS + UNSPEC_TLS_GD + UNSPEC_TLS_LD + UNSPEC_TLS_LE + UNSPEC_TLS_IE + +- UNSPEC_LU52I_D +- ++ ;; Stack tie + UNSPEC_TIE + + ;; CRC + UNSPEC_CRC + UNSPEC_CRCC +- UNSPEC_ADDRESS_FIRST +-]) + +-(define_c_enum "unspecv" [ +- ;; Register save and restore. +- UNSPECV_GPR_SAVE +- UNSPECV_GPR_RESTORE ++ ;; RSQRT ++ UNSPEC_RSQRT ++ UNSPEC_RSQRTE + +- UNSPECV_MOVE_EXTREME ++ ;; RECIP ++ UNSPEC_RECIPE + ]) + ++(define_c_enum "unspecv" [ ++ ;; Blockage and synchronisation. 
++ UNSPECV_BLOCKAGE ++ UNSPECV_DBAR ++ UNSPECV_IBAR ++ ++ ;; Privileged instructions ++ UNSPECV_CSRRD ++ UNSPECV_CSRWR ++ UNSPECV_CSRXCHG ++ UNSPECV_IOCSRRD ++ UNSPECV_IOCSRWR ++ UNSPECV_CACOP ++ UNSPECV_LDDIR ++ UNSPECV_LDPTE ++ UNSPECV_ERTN ++ ++ ;; Stack checking ++ UNSPECV_PROBE_STACK_RANGE ++ ++ ;; Floating-point environment ++ UNSPECV_MOVFCSR2GR ++ UNSPECV_MOVGR2FCSR ++ ++ ;; Others ++ UNSPECV_CPUCFG ++ UNSPECV_ASRTLE_D ++ UNSPECV_ASRTGT_D ++ UNSPECV_SYSCALL ++ UNSPECV_BREAK ++]) + + (define_constants + [(RETURN_ADDR_REGNUM 1) + (T0_REGNUM 12) + (T1_REGNUM 13) + (S0_REGNUM 23) +- (S1_REGNUM 24) +- (S2_REGNUM 25) + + ;; PIC long branch sequences are never longer than 100 bytes. + (MAX_PIC_BRANCH_LENGTH 100) +@@ -148,9 +126,9 @@ + (define_attr "got" "unset,load" + (const_string "unset")) + +-;; For jal instructions, this attribute is DIRECT when the target address ++;; For jirl instructions, this attribute is DIRECT when the target address + ;; is symbolic and INDIRECT when it is a register. +-(define_attr "jal" "unset,direct,indirect" ++(define_attr "jirl" "unset,direct,indirect" + (const_string "unset")) + + +@@ -158,7 +136,7 @@ + ;; are as for "type" (see below) but there are also the following + ;; move-specific values: + ;; +-;; sll0 "sll DEST,SRC,0", which on 64-bit targets is guaranteed ++;; sll0 "slli.w DEST,SRC,0", which on 64-bit targets is guaranteed + ;; to produce a sign-extended DEST, even if SRC is not + ;; properly sign-extended + ;; pick_ins BSTRPICK.W, BSTRPICK.D, BSTRINS.W or BSTRINS.D instruction +@@ -207,59 +185,6 @@ + (const_string "yes")] + (const_string "no"))) + +-;; Attributes describing a sync loop. These loops have the form: +-;; +-;; if (RELEASE_BARRIER == YES) sync +-;; 1: OLDVAL = *MEM +-;; if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2 +-;; CMP = 0 [delay slot] +-;; $TMP1 = OLDVAL & EXCLUSIVE_MASK +-;; $TMP2 = INSN1 (OLDVAL, INSN1_OP2) +-;; $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK) +-;; $AT |= $TMP1 | $TMP3 +-;; if (!commit (*MEM = $AT)) goto 1. +-;; if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot] +-;; CMP = 1 +-;; if (ACQUIRE_BARRIER == YES) sync +-;; 2: +-;; +-;; where "$" values are temporaries and where the other values are +-;; specified by the attributes below. Values are specified as operand +-;; numbers and insns are specified as enums. If no operand number is +-;; specified, the following values are used instead: +-;; +-;; - OLDVAL: $AT +-;; - CMP: NONE +-;; - NEWVAL: $AT +-;; - INCLUSIVE_MASK: -1 +-;; - REQUIRED_OLDVAL: OLDVAL & INCLUSIVE_MASK +-;; - EXCLUSIVE_MASK: 0 +-;; +-;; MEM and INSN1_OP2 are required. +-;; +-;; Ideally, the operand attributes would be integers, with -1 meaning "none", +-;; but the gen* programs don't yet support that. +-(define_attr "sync_mem" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_oldval" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_cmp" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_newval" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_inclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_exclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_required_oldval" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_insn1_op2" "none,0,1,2,3,4,5" (const_string "none")) +-(define_attr "sync_insn1" "move,li,addu,addiu,subu,and,andi,or,ori,xor,xori" +- (const_string "move")) +-(define_attr "sync_insn2" "nop,and,xor,not" +- (const_string "nop")) +-;; Memory model specifier. 
+-;; "0"-"9" values specify the operand that stores the memory model value. +-;; "10" specifies MEMMODEL_ACQ_REL, +-;; "11" specifies MEMMODEL_ACQUIRE. +-(define_attr "sync_memmodel" "" (const_int 10)) +- +-;; Accumulator operand for madd patterns. +-(define_attr "accum_in" "none,0,1,2,3,4,5" (const_string "none")) +- + ;; Classification of each insn. + ;; branch conditional branch + ;; jump unconditional jump +@@ -273,8 +198,8 @@ + ;; prefetch memory prefetch (register + offset) + ;; prefetchx memory indexed prefetch (register + register) + ;; condmove conditional moves +-;; mgtf move generate register to float register +-;; mftg move float register to generate register ++;; mgtf move general-purpose register to floating point register ++;; mftg move floating point register to general-purpose register + ;; const load constant + ;; arith integer arithmetic instructions + ;; logical integer logical instructions +@@ -283,10 +208,9 @@ + ;; signext sign extend instructions + ;; clz the clz and clo instructions + ;; trap trap if instructions +-;; imul integer multiply 2 operands +-;; imul3 integer multiply 3 operands +-;; idiv3 integer divide 3 operands +-;; move integer register move ({,D}ADD{,U} with rt = 0) ++;; imul integer multiply ++;; idiv integer divide ++;; move integer move + ;; fmove floating point register move + ;; fadd floating point add/subtract + ;; fmul floating point multiply +@@ -296,9 +220,11 @@ + ;; fabs floating point absolute value + ;; fneg floating point negation + ;; fcmp floating point compare ++;; fcopysign floating point copysign + ;; fcvt floating point convert + ;; fsqrt floating point square root + ;; frsqrt floating point reciprocal square root ++;; frsqrte float point reciprocal square root approximate + ;; multi multiword sequence (or user asm statements) + ;; atomic atomic memory update instruction + ;; syncloop memory atomic operation implemented as a sync loop +@@ -307,16 +233,15 @@ + (define_attr "type" + "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, + prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical, +- shift,slt,signext,clz,trap,imul,imul3,idiv3,move, +- fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcvt,fsqrt, +- frsqrt,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat, +- multi,atomic,syncloop,nop,ghost, ++ shift,slt,signext,clz,trap,imul,idiv,move, ++ fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fsqrt, ++ frsqrt,frsqrte,accext,accmod,multi,atomic,syncloop,nop,ghost, + simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd, + simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp, + simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill, + simd_permute,simd_shf,simd_sat,simd_pcnt,simd_copy,simd_branch,simd_clsx, + simd_fminmax,simd_logic,simd_move,simd_load,simd_store" +- (cond [(eq_attr "jal" "!unset") (const_string "call") ++ (cond [(eq_attr "jirl" "!unset") (const_string "call") + (eq_attr "got" "load") (const_string "load") + + (eq_attr "alu_type" "add,sub") (const_string "arith") +@@ -362,35 +287,22 @@ + (eq_attr "dword_mode" "yes")) + (const_string "multi") + (eq_attr "move_type" "move") (const_string "move") +- (eq_attr "move_type" "const") (const_string "const") +- (eq_attr "sync_mem" "!none") (const_string "syncloop")] ++ (eq_attr "move_type" "const") (const_string "const")] + (const_string "unknown"))) + +-(define_attr "compact_form" "always,maybe,never" +- (cond [(eq_attr "jal" "direct") +- (const_string "always") +- (eq_attr "jal" 
"indirect") +- (const_string "maybe") +- (eq_attr "type" "jump") +- (const_string "maybe")] +- (const_string "never"))) +- + ;; Mode for conversion types (fcvt) +-;; I2S integer to float single (SI/DI to SF) +-;; I2D integer to float double (SI/DI to DF) +-;; S2I float to integer (SF to SI/DI) +-;; D2I float to integer (DF to SI/DI) +-;; D2S double to float single +-;; S2D float single to double +- +-(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" ++;; I2S integer to float single (SI/DI to SF) ++;; I2D integer to float double (SI/DI to DF) ++;; S2I float to integer (SF to SI/DI) ++;; D2I float to integer (DF to SI/DI) ++;; D2S double to float single ++;; S2D float single to double ++;; C2D fcc to DI ++ ++(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" + (const_string "unknown")) + +-(define_attr "compression" "none,all" +- (const_string "none")) +- +-;; The number of individual instructions that a non-branch pattern generates, +-;; using units of BASE_INSN_LENGTH. ++;; The number of individual instructions that a non-branch pattern generates + (define_attr "insn_count" "" + (cond [;; "Ghost" instructions occupy no space. + (eq_attr "type" "ghost") +@@ -425,84 +337,30 @@ + (eq_attr "move_type" "store,fpstore") + (symbol_ref "loongarch_load_store_insns (operands[0], insn)") + +- (eq_attr "type" "idiv3") ++ (eq_attr "type" "idiv") + (symbol_ref "loongarch_idiv_insns (GET_MODE (PATTERN (insn)))")] + (const_int 1))) + +-;; Length of instruction in bytes. The default is derived from "insn_count", +-;; but there are special cases for branches (which must be handled here) +-;; and for compressed single instructions. +- +- +- ++;; Length of instruction in bytes. + (define_attr "length" "" + (cond [ +- ;; Branch instructions have a range of [-0x20000,0x1fffc]. +- ;; If a branch is outside this range, we have a choice of two +- ;; sequences. +- ;; +- ;; For PIC, an out-of-range branch like: +- ;; +- ;; bne r1,r2,target +- ;; +- ;; becomes the equivalent of: +- ;; +- ;; beq r1,r2,1f +- ;; la rd,target +- ;; jr rd +- ;; 1: +- ;; +- ;; The non-PIC case is similar except that we use a direct +- ;; jump instead of an la/jr pair. Since the target of this +- ;; jump is an absolute 28-bit bit address (the other bits +- ;; coming from the address of the delay slot) this form cannot +- ;; cross a 256MB boundary. We could provide the option of +- ;; using la/jr in this case too, but we do not do so at +- ;; present. +- ;; +- ;; from the shorten_branches reference address. +- (eq_attr "type" "branch") +- (cond [;; Any variant can handle the 17-bit range. +- (and (le (minus (match_dup 0) (pc)) (const_int 65532)) +- (le (minus (pc) (match_dup 0)) (const_int 65534))) +- (const_int 4) +- +- ;; The non-PIC case: branch, and J. +- (match_test "TARGET_ABSOLUTE_JUMPS") +- (const_int 8)] +- +- ;; Use MAX_PIC_BRANCH_LENGTH as a (gross) overestimate. +- ;; loongarch_adjust_insn_length substitutes the correct length. +- ;; +- ;; Note that we can't simply use (symbol_ref ...) here +- ;; because genattrtab needs to know the maximum length +- ;; of an insn. +- (const_int MAX_PIC_BRANCH_LENGTH)) +- ] +- (symbol_ref "get_attr_insn_count (insn) * BASE_INSN_LENGTH"))) +- +-;; Attribute describing the processor. +-(define_enum_attr "cpu" "processor" +- (const (symbol_ref "loongarch_tune"))) ++ ;; Branching further than +/- 128 KiB requires two instructions. 
++ (eq_attr "type" "branch") ++ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 131064)) ++ (le (minus (pc) (match_dup 0)) (const_int 131068))) ++ (const_int 4) ++ (const_int 8))] ++ (symbol_ref "get_attr_insn_count (insn) * 4"))) + + ;; The type of hardware hazard associated with this instruction. + ;; DELAY means that the next instruction cannot read the result + ;; of this one. +-(define_attr "hazard" "none,delay,forbidden_slot" ++(define_attr "hazard" "none,forbidden_slot" + (const_string "none")) + +-;; Can the instruction be put into a delay slot? +-(define_attr "can_delay" "no,yes" +- (if_then_else (and (eq_attr "type" "!branch,call,jump") +- (eq_attr "hazard" "none") +- (match_test "get_attr_insn_count (insn) == 1")) +- (const_string "yes") +- (const_string "no"))) +- + ;; Describe a user's asm statement. + (define_asm_attributes +- [(set_attr "type" "multi") +- (set_attr "can_delay" "no")]) ++ [(set_attr "type" "multi")]) + + ;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated + ;; from the same template. +@@ -512,141 +370,99 @@ + ;; modes. + (define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")]) + +-;; Likewise, but for XLEN-sized quantities. +-(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")]) +- +-(define_mode_iterator MOVEP1 [SI SF]) +-(define_mode_iterator MOVEP2 [SI SF]) ++;; This mode iterator allows 16-bit and 32-bit GPR patterns and 32-bit 64-bit ++;; FPR patterns to be generated from the same template. + (define_mode_iterator JOIN_MODE [HI + SI + (SF "TARGET_HARD_FLOAT") +- (DF "TARGET_HARD_FLOAT +- && TARGET_DOUBLE_FLOAT")]) ++ (DF "TARGET_DOUBLE_FLOAT")]) + + ;; This mode iterator allows :P to be used for patterns that operate on + ;; pointer-sized quantities. Exactly one of the two alternatives will match. + (define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")]) + +-;; 32-bit integer moves for which we provide move patterns. +-(define_mode_iterator IMOVE32 +- [SI]) ++;; Likewise, but for XLEN-sized quantities. ++(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")]) + + ;; 64-bit modes for which we provide move patterns. +-(define_mode_iterator MOVE64 +- [DI DF]) ++(define_mode_iterator MOVE64 [DI DF]) + + ;; 128-bit modes for which we provide move patterns on 64-bit targets. + (define_mode_iterator MOVE128 [TI TF]) + +-;; This mode iterator allows the QI and HI extension patterns to be +-;; defined from the same template. ++;; Iterator for sub-32-bit integer modes. + (define_mode_iterator SHORT [QI HI]) + + ;; Likewise the 64-bit truncate-and-shift patterns. + (define_mode_iterator SUBDI [QI HI SI]) + +-;; This mode iterator allows the QI HI SI and DI extension patterns to be ++;; Iterator for scalar fixed-point modes. + (define_mode_iterator QHWD [QI HI SI (DI "TARGET_64BIT")]) + +- +-;; This mode iterator allows :ANYF to be used wherever a scalar or vector +-;; floating-point mode is allowed. ++;; Iterator for hardware-supported floating-point modes. + (define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT") +- (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")]) +- +-;; Like ANYF, but only applies to scalar modes. +-(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT") +- (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")]) ++ (DF "TARGET_DOUBLE_FLOAT")]) + + ;; A floating-point mode for which moves involving FPRs may need to be split. 
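(Editorial aside, not part of the patch: SPLITF names the FP modes whose moves may have to be split into word-sized pieces on 32-bit targets, with HALFMODE, defined further below, supplying the SImode half. A small C illustration, assuming a little-endian layout, of carving one 64-bit scalar into two 32-bit halves.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  double d = 1.5;
  uint32_t half[2];                /* two HALFMODE (SImode) pieces */
  memcpy (half, &d, sizeof half);  /* half[0] is the low word on little-endian */
  printf ("lo=0x%08x hi=0x%08x\n", (unsigned) half[0], (unsigned) half[1]);
  return 0;
}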
+ (define_mode_iterator SPLITF + [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT") + (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT") +- (TF "TARGET_64BIT && TARGET_FLOAT64")]) ++ (TF "TARGET_64BIT && TARGET_DOUBLE_FLOAT")]) + +-;; In GPR templates, a string like "mul." will expand to "mul" in the +-;; 32-bit "mul.w" and "mul.d" in the 64-bit version. ++;; In GPR templates, a string like "mul." will expand to "mul.w" in the ++;; 32-bit version and "mul.d" in the 64-bit version. + (define_mode_attr d [(SI "w") (DI "d")]) + +-;; Same as d but upper-case. +-(define_mode_attr D [(SI "") (DI "D")]) +- + ;; This attribute gives the length suffix for a load or store instruction. + ;; The same suffixes work for zero and sign extensions. + (define_mode_attr size [(QI "b") (HI "h") (SI "w") (DI "d")]) + (define_mode_attr SIZE [(QI "B") (HI "H") (SI "W") (DI "D")]) + +-;; This attributes gives the mode mask of a SHORT. ++;; This attribute gives the mode mask of a SHORT. + (define_mode_attr mask [(QI "0x00ff") (HI "0xffff")]) + +-;; This attributes gives the size (bits) of a SHORT. +-(define_mode_attr qi_hi [(QI "7") (HI "15")]) +- +-;; Mode attributes for GPR loads. +-(define_mode_attr load [(SI "lw") (DI "ld")]) ++;; This attribute gives the size (bits) of a SHORT. ++(define_mode_attr 7_or_15 [(QI "7") (HI "15")]) + +-(define_mode_attr load_l [(SI "ld.w") (DI "ld.d")]) + ;; Instruction names for stores. + (define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd")]) + +-;; Similarly for LARCH IV indexed FPR loads and stores. +-(define_mode_attr floadx [(SF "fldx.s") (DF "fldx.d") (V2SF "fldx.d")]) +-(define_mode_attr fstorex [(SF "fstx.s") (DF "fstx.d") (V2SF "fstx.d")]) +- +-;; Similarly for LOONGSON indexed GPR loads and stores. ++;; Similarly for LoongArch indexed GPR loads and stores. + (define_mode_attr loadx [(QI "ldx.b") +- (HI "ldx.h") +- (SI "ldx.w") +- (DI "ldx.d")]) ++ (HI "ldx.h") ++ (SI "ldx.w") ++ (DI "ldx.d")]) + (define_mode_attr storex [(QI "stx.b") +- (HI "stx.h") +- (SI "stx.w") +- (DI "stx.d")]) +- +-;; This attribute gives the best constraint to use for registers of +-;; a given mode. +-(define_mode_attr reg [(SI "d") (DI "d") (FCC "z")]) ++ (HI "stx.h") ++ (SI "stx.w") ++ (DI "stx.d")]) + + ;; This attribute gives the format suffix for floating-point operations. + (define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")]) ++(define_mode_attr ifmt [(SI "w") (DI "l")]) + + ;; This attribute gives the upper-case mode name for one unit of a + ;; floating-point mode or vector mode. + (define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF") +- (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") +- (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) ++ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") ++ (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")]) + + ;; As above, but in lower case. + (define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf") +- (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") +- (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") +- (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") ++ (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi") ++ (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df") ++ (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi") + (V8SF "sf") (V4DF "df")]) + + ;; This attribute gives the integer mode that has half the size of + ;; the controlling mode. 
+ (define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI") +- (V2SI "SI") (V4HI "SI") (V8QI "SI") +- (TF "DI")]) ++ (V2SI "SI") (V4HI "SI") (V8QI "SI") ++ (TF "DI")]) + ++;; This attribute gives the integer prefix for some instructions templates. + (define_mode_attr p [(SI "") (DI "d")]) + +-;; This attribute works around the early SB-1 rev2 core "F2" erratum: +-;; +-;; In certain cases, div.s and div.ps may have a rounding error +-;; and/or wrong inexact flag. +-;; +-;; Therefore, we only allow div.s if not working around SB-1 rev2 +-;; errata or if a slight loss of precision is OK. +-(define_mode_attr divide_condition +- [DF (SF "flag_unsafe_math_optimizations") +- (V2SF "TARGET_SB1 && (flag_unsafe_math_optimizations)")]) +- +-;; This attribute gives the conditions under which SQRT.fmt instructions +-;; can be used. +-(define_mode_attr sqrt_condition +- [SF DF (V2SF "TARGET_SB1")]) +- + ;; This code iterator allows signed and unsigned widening multiplications + ;; to use the same template. + (define_code_iterator any_extend [sign_extend zero_extend]) +@@ -659,13 +475,10 @@ + ;; from the same template. + (define_code_iterator any_shift [ashift ashiftrt lshiftrt]) + +-;; This code iterator allows unsigned and signed division to be generated +-;; from the same template. +-(define_code_iterator any_div [div udiv]) +- +-;; This code iterator allows unsigned and signed modulus to be generated ++;; This code iterator allows the three bitwise instructions to be generated + ;; from the same template. +-(define_code_iterator any_mod [mod umod]) ++(define_code_iterator any_bitwise [and ior xor]) ++(define_code_iterator neg_bitwise [and ior]) + + ;; This code iterator allows addition and subtraction to be generated + ;; from the same template. +@@ -679,13 +492,14 @@ + ;; from the same template + (define_code_iterator addsubmul [plus minus mult]) + ++;; This code iterator allows unsigned and signed division to be generated ++;; from the same template. ++(define_code_iterator any_div [div udiv mod umod]) ++ + ;; This code iterator allows all native floating-point comparisons to be + ;; generated from the same template. +-(define_code_iterator fcond [unordered uneq unlt unle eq lt le ordered ltgt ne]) +- +-;; This code iterator is used for comparisons that can be implemented +-;; by swapping the operands. +-(define_code_iterator swapped_fcond [ge gt unge ungt]) ++(define_code_iterator fcond [unordered uneq unlt unle eq lt le ++ ordered ltgt ne ge gt unge ungt]) + + ;; Equality operators. + (define_code_iterator equality_op [eq ne]) +@@ -725,6 +539,10 @@ + (plus "add") + (minus "sub") + (mult "mul") ++ (div "div") ++ (udiv "udiv") ++ (mod "mod") ++ (umod "umod") + (return "return") + (simple_return "simple_return")]) + +@@ -736,15 +554,13 @@ + (xor "xor") + (and "and") + (plus "addu") +- (minus "subu")]) +- +-;; expands to the name of the insn that implements +-;; a particular code to operate on immediate values. +-(define_code_attr immediate_insn [(ior "ori") +- (xor "xori") +- (and "andi")]) ++ (minus "subu") ++ (div "div") ++ (udiv "div") ++ (mod "mod") ++ (umod "mod")]) + +-;; is the c.cond.fmt condition associated with a particular code. ++;; is the fcmp.cond.fmt condition associated with a particular code. + (define_code_attr fcond [(unordered "cun") + (uneq "cueq") + (unlt "cult") +@@ -754,48 +570,17 @@ + (le "sle") + (ordered "cor") + (ltgt "sne") +- (ne "cune")]) +- +-;; Similar, but for swapped conditions. 
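(Editorial aside, not part of the patch: stepping back to the GPR iterator and the <d> mode attribute defined earlier in this hunk, one template stands for both the .w and the .d flavour of an instruction. A rough C analogy of that substitution idea, using the preprocessor; this is only an analogy, not how the .md machinery actually works.)

#include <stdint.h>
#include <stdio.h>

/* One template, two expansions, like add<mode>3 yielding add.w and add.d.  */
#define DEFINE_ADD(suffix, type) \
  static type add_##suffix (type a, type b) { return a + b; }

DEFINE_ADD (w, int32_t)   /* SImode flavour */
DEFINE_ADD (d, int64_t)   /* DImode flavour */

int main (void)
{
  printf ("%d %lld\n", add_w (1, 2), (long long) add_d (3, 4));
  return 0;
}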
+-(define_code_attr swapped_fcond [(ge "sle") +- (gt "slt") +- (unge "cule") +- (ungt "cult")]) +- +-;; The value of the bit when the branch is taken for branch_bit patterns. +-;; Comparison is always against zero so this depends on the operator. +-(define_code_attr bbv [(eq "0") (ne "1")]) +- +-;; This is the inverse value of bbv. +-(define_code_attr bbinv [(eq "1") (ne "0")]) ++ (ne "cune") ++ (ge "sge") ++ (gt "sgt") ++ (unge "cuge") ++ (ungt "cugt")]) + + ;; The sel mnemonic to use depending on the condition test. + (define_code_attr sel [(eq "masknez") (ne "maskeqz")]) ++(define_code_attr fsel_invert [(eq "%2,%3") (ne "%3,%2")]) + (define_code_attr selinv [(eq "maskeqz") (ne "masknez")]) +- +-;; Pipeline descriptions. +-;; +-;; generic.md provides a fallback for processors without a specific +-;; pipeline description. It is derived from the old define_function_unit +-;; version and uses the "alu" and "imuldiv" units declared below. +-;; +-;; Some of the processor-specific files are also derived from old +-;; define_function_unit descriptions and simply override the parts of +-;; generic.md that don't apply. The other processor-specific files +-;; are self-contained. +-(define_automaton "alu,imuldiv") + +-(define_cpu_unit "alu" "alu") +-(define_cpu_unit "imuldiv" "imuldiv") +- +-;; Ghost instructions produce no real code and introduce no hazards. +-;; They exist purely to express an effect on dataflow. +-(define_insn_reservation "ghost" 0 +- (eq_attr "type" "ghost") +- "nothing") +- +-(include "generic.md") +- + ;; + ;; .................... + ;; +@@ -831,37 +616,22 @@ + [(set_attr "type" "fadd") + (set_attr "mode" "")]) + +-(define_expand "add3" +- [(set (match_operand:GPR 0 "register_operand") +- (plus:GPR (match_operand:GPR 1 "register_operand") +- (match_operand:GPR 2 "arith_operand")))] +- "") +- +-(define_insn "*add3" ++(define_insn "add3" + [(set (match_operand:GPR 0 "register_operand" "=r,r") + (plus:GPR (match_operand:GPR 1 "register_operand" "r,r") +- (match_operand:GPR 2 "arith_operand" "r,Q")))] ++ (match_operand:GPR 2 "arith_operand" "r,I")))] + "" +-{ +- if (which_alternative == 0) +- return "add.\t%0,%1,%2"; +- else +- return "addi.\t%0,%1,%2"; +-} ++ "add%i2.\t%0,%1,%2"; + [(set_attr "alu_type" "add") +- (set_attr "compression" "*,*") + (set_attr "mode" "")]) + +- + (define_insn "*addsi3_extended" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (sign_extend:DI + (plus:SI (match_operand:SI 1 "register_operand" "r,r") +- (match_operand:SI 2 "arith_operand" "r,Q"))))] ++ (match_operand:SI 2 "arith_operand" "r,I"))))] + "TARGET_64BIT" +- "@ +- add.w\t%0,%1,%2 +- addi.w\t%0,%1,%2" ++ "add%i2.w\t%0,%1,%2" + [(set_attr "alu_type" "add") + (set_attr "mode" "SI")]) + +@@ -885,23 +655,23 @@ + + (define_insn "sub3" + [(set (match_operand:GPR 0 "register_operand" "=r") +- (minus:GPR (match_operand:GPR 1 "register_operand" "r") ++ (minus:GPR (match_operand:GPR 1 "register_operand" "rJ") + (match_operand:GPR 2 "register_operand" "r")))] + "" +- "sub.\t%0,%1,%2" ++ "sub.\t%0,%z1,%2" + [(set_attr "alu_type" "sub") +- (set_attr "compression" "*") + (set_attr "mode" "")]) + ++ + (define_insn "*subsi3_extended" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI +- (minus:SI (match_operand:SI 1 "register_operand" "r") +- (match_operand:SI 2 "register_operand" "r"))))] ++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") ++ (match_operand:SI 2 "register_operand" "r"))))] + "TARGET_64BIT" +- "sub.w\t%0,%1,%2" +- [(set_attr "alu_type" "sub") +- (set_attr 
"mode" "DI")]) ++ "sub.w\t%0,%z1,%2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "SI")]) + + ;; + ;; .................... +@@ -911,17 +681,10 @@ + ;; .................... + ;; + +-(define_expand "mul3" +- [(set (match_operand:SCALARF 0 "register_operand") +- (mult:SCALARF (match_operand:SCALARF 1 "register_operand") +- (match_operand:SCALARF 2 "register_operand")))] +- "" +- "") +- +-(define_insn "*mul3" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f") +- (match_operand:SCALARF 2 "register_operand" "f")))] ++(define_insn "mul3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] + "" + "fmul.\t%0,%1,%2" + [(set_attr "type" "fmul") +@@ -933,20 +696,27 @@ + (match_operand:GPR 2 "register_operand" "r")))] + "" + "mul.\t%0,%1,%2" +- [(set_attr "type" "imul3") ++ [(set_attr "type" "imul") + (set_attr "mode" "")]) + +- +- + (define_insn "mulsidi3_64bit" + [(set (match_operand:DI 0 "register_operand" "=r") + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] +- "" ++ "TARGET_64BIT" + "mul.d\t%0,%1,%2" +- [(set_attr "type" "imul3") ++ [(set_attr "type" "imul") + (set_attr "mode" "DI")]) + ++(define_insn "*mulsi3_extended" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (mult:SI (match_operand:SI 1 "register_operand" "r") ++ (match_operand:SI 2 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "mul.w\t%0,%1,%2" ++ [(set_attr "type" "imul") ++ (set_attr "mode" "SI")]) + + ;; + ;; ........................ +@@ -956,9 +726,8 @@ + ;; ........................ + ;; + +- + (define_expand "mulditi3" +- [(set (match_operand:TI 0 "register_operand") ++ [(set (match_operand:TI 0 "register_operand") + (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand")) + (any_extend:TI (match_operand:DI 2 "register_operand"))))] + "TARGET_64BIT" +@@ -975,7 +744,7 @@ + }) + + (define_insn "muldi3_highpart" +- [(set (match_operand:DI 0 "register_operand" "=r") ++ [(set (match_operand:DI 0 "register_operand" "=r") + (truncate:DI + (lshiftrt:TI + (mult:TI (any_extend:TI +@@ -989,7 +758,7 @@ + (set_attr "mode" "DI")]) + + (define_expand "mulsidi3" +- [(set (match_operand:DI 0 "register_operand" "=r") ++ [(set (match_operand:DI 0 "register_operand" "=r") + (mult:DI (any_extend:DI + (match_operand:SI 1 "register_operand" " r")) + (any_extend:DI +@@ -1005,7 +774,7 @@ + }) + + (define_insn "mulsi3_highpart" +- [(set (match_operand:SI 0 "register_operand" "=r") ++ [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI + (lshiftrt:DI + (mult:DI (any_extend:DI +@@ -1018,97 +787,35 @@ + [(set_attr "type" "imul") + (set_attr "mode" "SI")]) + +-;; Floating point multiply accumulate instructions. + +-(define_expand "fma4" +- [(set (match_operand:ANYF 0 "register_operand") +- (fma:ANYF (match_operand:ANYF 1 "register_operand") +- (match_operand:ANYF 2 "register_operand") +- (match_operand:ANYF 3 "register_operand")))] +- "TARGET_HARD_FLOAT") ++;; .................... ++;; ++;; FLOATING POINT COPYSIGN ++;; ++;; .................... ++ ++;; FLOATING POINT COPYSIGN ++;; ++;; .................... 
+ +-(define_insn "*fma4_madd4" ++(define_insn "copysign3" + [(set (match_operand:ANYF 0 "register_operand" "=f") +- (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") +- (match_operand:ANYF 2 "register_operand" "f") +- (match_operand:ANYF 3 "register_operand" "f")))] ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")] ++ UNSPEC_FCOPYSIGN))] + "TARGET_HARD_FLOAT" +- "fmadd.\t%0,%1,%2,%3" +- [(set_attr "type" "fmadd") ++ "fcopysign.\t%0,%1,%2" ++ [(set_attr "type" "fcopysign") + (set_attr "mode" "")]) + +-;; The fms, fnma, and fnms instructions can be used even when HONOR_NANS +-;; is true because while IEEE 754-2008 requires the negate operation to +-;; negate the sign of a NAN and the LARCH neg instruction does not do this, +-;; the fma part of the instruction has no requirement on how the sign of +-;; a NAN is handled and so the final sign bit of the entire operation is +-;; undefined. +- +-(define_expand "fms4" +- [(set (match_operand:ANYF 0 "register_operand") +- (fma:ANYF (match_operand:ANYF 1 "register_operand") +- (match_operand:ANYF 2 "register_operand") +- (neg:ANYF (match_operand:ANYF 3 "register_operand"))))] +- "TARGET_HARD_FLOAT") +- + +-(define_insn "*fms4_msub4" +- [(set (match_operand:ANYF 0 "register_operand" "=f") +- (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") +- (match_operand:ANYF 2 "register_operand" "f") +- (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] +- "TARGET_HARD_FLOAT" +- "fmsub.\t%0,%1,%2,%3" +- [(set_attr "type" "fmadd") +- (set_attr "mode" "")]) ++;; + +-;; fnma is defined in GCC as (fma (neg op1) op2 op3) +-;; (-op1 * op2) + op3 ==> -(op1 * op2) + op3 ==> -((op1 * op2) - op3) +-;; The loongarch nmsub instructions implement -((op1 * op2) - op3) +-;; This transformation means we may return the wrong signed zero +-;; so we check HONOR_SIGNED_ZEROS. + +-(define_expand "fnma4" +- [(set (match_operand:ANYF 0 "register_operand") +- (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand")) +- (match_operand:ANYF 2 "register_operand") +- (match_operand:ANYF 3 "register_operand")))] +- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)") + +-(define_insn "*fnma4_nmsub4" +- [(set (match_operand:ANYF 0 "register_operand" "=f") +- (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) +- (match_operand:ANYF 2 "register_operand" "f") +- (match_operand:ANYF 3 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)" +- "fnmsub.\t%0,%1,%2,%3" +- [(set_attr "type" "fmadd") +- (set_attr "mode" "")]) + +-;; fnms is defined as: (fma (neg op1) op2 (neg op3)) +-;; ((-op1) * op2) - op3 ==> -(op1 * op2) - op3 ==> -((op1 * op2) + op3) +-;; The loongarch nmadd instructions implement -((op1 * op2) + op3) +-;; This transformation means we may return the wrong signed zero +-;; so we check HONOR_SIGNED_ZEROS. 
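(Editorial aside, not part of the patch: the deleted comments above explain why fnma/fnms may only use fnmsub/fnmadd when signed zeros do not matter. A quick C check of the corner case using the standard fma: with a = 1, b = 0, c = 0 the two forms agree in value but differ in the sign of zero.)

#include <math.h>
#include <stdio.h>

int main (void)
{
  double a = 1.0, b = 0.0, c = 0.0;
  double fnma  = fma (-a, b, c);    /* (-a)*b + c   = +0.0 here */
  double nmsub = -fma (a, b, -c);   /* -((a*b) - c) = -0.0 here */
  printf ("fnma = %g, nmsub = %g\n", fnma, nmsub);
  return 0;
}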
+ +-(define_expand "fnms4" +- [(set (match_operand:ANYF 0 "register_operand") +- (fma:ANYF +- (neg:ANYF (match_operand:ANYF 1 "register_operand")) +- (match_operand:ANYF 2 "register_operand") +- (neg:ANYF (match_operand:ANYF 3 "register_operand"))))] +- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)") + +-(define_insn "*fnms4_nmadd4" +- [(set (match_operand:ANYF 0 "register_operand" "=f") +- (fma:ANYF +- (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) +- (match_operand:ANYF 2 "register_operand" "f") +- (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] +- "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (mode)" +- "fnmadd.\t%0,%1,%2,%3" +- [(set_attr "type" "fmadd") +- (set_attr "mode" "")]) + + ;; + ;; .................... +@@ -1118,144 +825,315 @@ + ;; .................... + ;; + ++;; Float division and modulus. + (define_expand "div3" + [(set (match_operand:ANYF 0 "register_operand") +- (div:ANYF (match_operand:ANYF 1 "reg_or_1_operand") ++ (div:ANYF (match_operand:ANYF 1 "register_operand") + (match_operand:ANYF 2 "register_operand")))] +- "" ++ "" + { +- if (const_1_operand (operands[1], mode)) +- if (!(ISA_HAS_FP_RECIP_RSQRT (mode) +- && flag_unsafe_math_optimizations)) +- operands[1] = force_reg (mode, operands[1]); ++ if (mode == SFmode ++ && TARGET_RECIP_DIV ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math ++ && flag_unsafe_math_optimizations) ++ { ++ loongarch_emit_swdivsf (operands[0], operands[1], ++ operands[2], SFmode); ++ DONE; ++ } + }) + +-;; These patterns work around the early SB-1 rev2 core "F1" erratum: +-;; +-;; If an mftg1 or dmftg1 happens to access the floating point register +-;; file at the same time a long latency operation (div, sqrt, recip, +-;; sqrt) iterates an intermediate result back through the floating +-;; point register file bypass, then instead returning the correct +-;; register value the mftg1 or dmftg1 operation returns the intermediate +-;; result of the long latency operation. +-;; +-;; The workaround is to insert an unconditional 'mov' from/to the +-;; long latency op destination register. +- + (define_insn "*div3" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (div:ANYF (match_operand:ANYF 1 "register_operand" "f") + (match_operand:ANYF 2 "register_operand" "f")))] +- "" +-{ +- return "fdiv.\t%0,%1,%2"; +-} ++ "" ++ "fdiv.\t%0,%1,%2" + [(set_attr "type" "fdiv") + (set_attr "mode" "") + (set_attr "insn_count" "1")]) + ++;; In 3A5000, the reciprocal operation is the same as the division operation. ++ + (define_insn "*recip3" + [(set (match_operand:ANYF 0 "register_operand" "=f") + (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") + (match_operand:ANYF 2 "register_operand" "f")))] +- "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" +-{ +- return "frecip.\t%0,%2"; +-} ++ "" ++ "frecip.\t%0,%2" + [(set_attr "type" "frdiv") + (set_attr "mode" "") + (set_attr "insn_count" "1")]) + ++;; In 3A6000, frecipe calculates the approximate value of the reciprocal operation ++ ++(define_insn "recipe2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_RECIPE))] ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations && TARGET_RECIP_DIV" ++ "frecipe.\t%0,%1" ++ [(set_attr "type" "frsqrte") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ + ;; Integer division and modulus. 
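Context for the div3/recipe2 patterns above, before the integer division patterns: the SFmode expander only takes the loongarch_emit_swdivsf software path when the whole flag set it tests is on; otherwise a plain fdiv.s is emitted. A hedged sketch of a build that should satisfy those conditions, treating -mrecip as the switch behind TARGET_RECIP_DIV (an assumption):

    /* Build with something like: gcc -O2 -ffast-math -mrecip -S
       -ffast-math implies the finite-math-only, no-trapping-math and
       unsafe-math-optimizations tests in the expander above; the
       frecipe.s estimate (3A6000) then seeds the refined sequence.  */
    float ratio (float x, float y)
    {
      return x / y;        /* plain fdiv.s without the flags above  */
    }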
++(define_expand "3" ++ [(set (match_operand:GPR 0 "register_operand") ++ (any_div:GPR (match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "register_operand")))] ++ "" ++{ ++ if (GET_MODE (operands[0]) == SImode) ++ { ++ rtx reg1 = gen_reg_rtx (DImode); ++ rtx reg2 = gen_reg_rtx (DImode); ++ ++ operands[1] = gen_rtx_SIGN_EXTEND (word_mode, operands[1]); ++ operands[2] = gen_rtx_SIGN_EXTEND (word_mode, operands[2]); ++ ++ emit_insn (gen_rtx_SET (reg1, operands[1])); ++ emit_insn (gen_rtx_SET (reg2, operands[2])); + +-(define_insn "div3" ++ emit_insn (gen_di3_fake (operands[0], reg1, reg2)); ++ DONE; ++ } ++}) ++ ++(define_insn "*3" + [(set (match_operand:GPR 0 "register_operand" "=&r") + (any_div:GPR (match_operand:GPR 1 "register_operand" "r") + (match_operand:GPR 2 "register_operand" "r")))] + "" +- { +- return loongarch_output_division ("div.\t%0,%1,%2", operands); +- } +- [(set_attr "type" "idiv3") ++{ ++ return loongarch_output_division (".\t%0,%1,%2", operands); ++} ++ [(set_attr "type" "idiv") + (set_attr "mode" "")]) + +-(define_insn "mod3" +- [(set (match_operand:GPR 0 "register_operand" "=&r") +- (any_mod:GPR (match_operand:GPR 1 "register_operand" "r") +- (match_operand:GPR 2 "register_operand" "r")))] ++(define_insn "di3_fake" ++ [(set (match_operand:SI 0 "register_operand" "=&r") ++ (any_div:SI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "register_operand" "r")))] + "" +- { +- return loongarch_output_division ("mod.\t%0,%1,%2", operands); +- } +- [(set_attr "type" "idiv3") +- (set_attr "mode" "")]) +- +-;; +-;; .................... +-;; +-;; SQUARE ROOT +-;; +-;; .................... +- +-;; These patterns work around the early SB-1 rev2 core "F1" erratum (see +-;; "*div[sd]f3" comment for details). +- +-(define_insn "sqrt2" +- [(set (match_operand:ANYF 0 "register_operand" "=f") +- (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] +- "" + { +- return "fsqrt.\t%0,%1"; ++ return loongarch_output_division (".w\t%0,%1,%2", operands); + } +- [(set_attr "type" "fsqrt") +- (set_attr "mode" "") +- (set_attr "insn_count" "1")]) ++ [(set_attr "type" "idiv") ++ (set_attr "mode" "SI")]) + +-(define_insn "*rsqrta" +- [(set (match_operand:ANYF 0 "register_operand" "=f") +- (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") +- (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] +- "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" +-{ +- return "frsqrt.\t%0,%2"; +-} +- [(set_attr "type" "frsqrt") +- (set_attr "mode" "") +- (set_attr "insn_count" "1")]) ++;; Floating point multiply accumulate instructions. + +-(define_insn "*rsqrtb" ++;; a * b + c ++(define_insn "fma4" + [(set (match_operand:ANYF 0 "register_operand" "=f") +- (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") +- (match_operand:ANYF 2 "register_operand" "f"))))] +- "ISA_HAS_FP_RECIP_RSQRT (mode) && flag_unsafe_math_optimizations" +-{ +- return "frsqrt.\t%0,%2"; +-} +- [(set_attr "type" "frsqrt") +- (set_attr "mode" "") +- (set_attr "insn_count" "1")]) +- +-;; +-;; .................... +-;; +-;; ABSOLUTE VALUE +-;; +-;; .................... +- +-;; Do not use the integer abs macro instruction, since that signals an +-;; exception on -2147483648 (sigh). +- +-;; The "legacy" (as opposed to "2008") form of ABS.fmt is an arithmetic +-;; instruction that treats all NaN inputs as invalid; it does not clear +-;; their sign bit. We therefore can't use that form if the signs of +-;; NaNs matter. 
++ (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f") ++ (match_operand:ANYF 3 "register_operand" "f")))] ++ "" ++ "fmadd.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) + +-(define_insn "abs2" ++;; a * b - c ++(define_insn "fms4" + [(set (match_operand:ANYF 0 "register_operand" "=f") +- (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ (fma:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f") ++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] + "" +- "fabs.\t%0,%1" +- [(set_attr "type" "fabs") ++ "fmsub.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; fnma is defined in GCC as (fma (neg op1) op2 op3) ++;; (-op1 * op2) + op3 ==> -(op1 * op2) + op3 ==> -((op1 * op2) - op3) ++;; The loongarch nmsub instructions implement -((op1 * op2) - op3) ++;; This transformation means we may return the wrong signed zero ++;; so we check HONOR_SIGNED_ZEROS. ++ ++;; -a * b + c ++(define_insn "fnma4" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (fma:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) ++ (match_operand:ANYF 2 "register_operand" "f") ++ (match_operand:ANYF 3 "register_operand" "f")))] ++ "!HONOR_SIGNED_ZEROS (mode)" ++ "fnmsub.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; fnms is defined as: (fma (neg op1) op2 (neg op3)) ++;; ((-op1) * op2) - op3 ==> -(op1 * op2) - op3 ==> -((op1 * op2) + op3) ++;; The loongarch nmadd instructions implement -((op1 * op2) + op3) ++;; This transformation means we may return the wrong signed zero ++;; so we check HONOR_SIGNED_ZEROS. ++ ++;; -a * b - c ++(define_insn "fnms4" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (fma:ANYF ++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) ++ (match_operand:ANYF 2 "register_operand" "f") ++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] ++ "!HONOR_SIGNED_ZEROS (mode)" ++ "fnmadd.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; -(-a * b - c), modulo signed zeros ++(define_insn "*fma4" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF ++ (fma:ANYF ++ (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")) ++ (match_operand:ANYF 2 "register_operand" " f") ++ (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))] ++ "!HONOR_SIGNED_ZEROS (mode)" ++ "fmadd.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; -(-a * b + c), modulo signed zeros ++(define_insn "*fms4" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF ++ (fma:ANYF ++ (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")) ++ (match_operand:ANYF 2 "register_operand" " f") ++ (match_operand:ANYF 3 "register_operand" " f"))))] ++ "!HONOR_SIGNED_ZEROS (mode)" ++ "fmsub.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; -(a * b + c) ++(define_insn "*fnms4" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF ++ (fma:ANYF ++ (match_operand:ANYF 1 "register_operand" " f") ++ (match_operand:ANYF 2 "register_operand" " f") ++ (match_operand:ANYF 3 "register_operand" " f"))))] ++ "" ++ "fnmadd.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; -(a * b - c) ++(define_insn "*fnma4" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (neg:ANYF ++ (fma:ANYF ++ (match_operand:ANYF 1 "register_operand" " f") ++ (match_operand:ANYF 2 
"register_operand" " f") ++ (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))] ++ "" ++ "fnmsub.\t%0,%1,%2,%3" ++ [(set_attr "type" "fmadd") ++ (set_attr "mode" "")]) ++ ++;; ++;; .................... ++;; ++;; SQUARE ROOT ++;; ++;; .................... ++ ++(define_insn "*sqrt2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ "" ++ "fsqrt.\t%0,%1" ++ [(set_attr "type" "fsqrt") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ ++(define_expand "sqrt2" ++ [(set (match_operand:ANYF 0 "register_operand") ++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand")))] ++ "" ++{ ++ if (mode == SFmode ++ && TARGET_RECIP_SQRT ++ && flag_unsafe_math_optimizations ++ && !optimize_insn_for_size_p () ++ && flag_finite_math_only && !flag_trapping_math) ++ { ++ loongarch_emit_swrsqrtsf (operands[0], operands[1], SFmode, 0); ++ DONE; ++ } ++}) ++ ++(define_expand "rsqrt2" ++ [(set (match_operand:ANYF 0 "register_operand") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand")] ++ UNSPEC_RSQRT))] ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" ++{ ++ if (mode == SFmode ++ && TARGET_RECIP_RSQRT ++ && flag_unsafe_math_optimizations ++ && !optimize_insn_for_size_p () ++ && flag_finite_math_only && !flag_trapping_math) ++ { ++ loongarch_emit_swrsqrtsf (operands[0], operands[1], SFmode, 1); ++ DONE; ++ } ++}) ++ ++(define_insn "*rsqrt2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_RSQRT))] ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" ++ "frsqrt.\t%0,%1" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "")]) ++ ++(define_insn "rsqrte" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_RSQRTE))] ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations && TARGET_RECIP_SQRT" ++ "frsqrte.\t%0,%1" ++ [(set_attr "type" "frsqrte") ++ (set_attr "mode" "")]) ++ ++(define_insn "*rsqrta" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))] ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" ++ "frsqrt.\t%0,%2" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ ++(define_insn "*rsqrtb" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "") ++ (match_operand:ANYF 2 "register_operand" "f"))))] ++ "flag_unsafe_math_optimizations" ++ "frsqrt.\t%0,%2" ++ [(set_attr "type" "frsqrt") ++ (set_attr "mode" "") ++ (set_attr "insn_count" "1")]) ++ ++;; ++;; .................... ++;; ++;; ABSOLUTE VALUE ++;; ++;; .................... ++ ++(define_insn "abs2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] ++ "" ++ "fabs.\t%0,%1" ++ [(set_attr "type" "fabs") + (set_attr "mode" "")]) + + ;; +@@ -1290,7 +1168,54 @@ + [(set_attr "type" "clz") + (set_attr "mode" "")]) + ++;; ++;; .................... ++;; ++;; MIN/MAX ++;; ++;; .................... 
++ ++(define_insn "smax3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] ++ "" ++ "fmax.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "smin3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] ++ "" ++ "fmin.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) + ++(define_insn "smaxa3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (if_then_else:ANYF ++ (gt (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")) ++ (abs:ANYF (match_operand:ANYF 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "" ++ "fmaxa.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) ++ ++(define_insn "smina3" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (if_then_else:ANYF ++ (lt (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")) ++ (abs:ANYF (match_operand:ANYF 2 "register_operand" "f"))) ++ (match_dup 1) ++ (match_dup 2)))] ++ "" ++ "fmina.\t%0,%1,%2" ++ [(set_attr "type" "fmove") ++ (set_attr "mode" "")]) + + ;; + ;; .................... +@@ -1299,28 +1224,21 @@ + ;; + ;; .................... + +-(define_insn "negsi2" +- [(set (match_operand:SI 0 "register_operand" "=r") +- (neg:SI (match_operand:SI 1 "register_operand" "r")))] ++(define_insn "neg2" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (neg:GPR (match_operand:GPR 1 "register_operand" "r")))] + "" +-{ +- return "sub.w\t%0,%.,%1"; +-} ++ "sub.\t%0,%.,%1" + [(set_attr "alu_type" "sub") +- (set_attr "mode" "SI")]) +- +-(define_insn "negdi2" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (neg:DI (match_operand:DI 1 "register_operand" "r")))] +- "TARGET_64BIT" +- "sub.d\t%0,%.,%1" +- [(set_attr "alu_type" "sub") +- (set_attr "mode" "DI")]) ++ (set_attr "mode" "")]) + +-;; The "legacy" (as opposed to "2008") form of NEG.fmt is an arithmetic +-;; instruction that treats all NaN inputs as invalid; it does not flip +-;; their sign bit. We therefore can't use that form if the signs of +-;; NaNs matter. ++(define_insn "one_cmpl2" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (not:GPR (match_operand:GPR 1 "register_operand" "r")))] ++ "" ++ "nor\t%0,%.,%1" ++ [(set_attr "alu_type" "not") ++ (set_attr "mode" "")]) + + (define_insn "neg2" + [(set (match_operand:ANYF 0 "register_operand" "=f") +@@ -1329,17 +1247,6 @@ + "fneg.\t%0,%1" + [(set_attr "type" "fneg") + (set_attr "mode" "")]) +- +-(define_insn "one_cmpl2" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (not:GPR (match_operand:GPR 1 "register_operand" "r")))] +- "" +-{ +- return "nor\t%0,%.,%1"; +-} +- [(set_attr "alu_type" "not") +- (set_attr "compression" "*") +- (set_attr "mode" "")]) + + + ;; +@@ -1350,133 +1257,58 @@ + ;; .................... + ;; + +- +-(define_expand "and3" +- [(set (match_operand:GPR 0 "register_operand") +- (and:GPR (match_operand:GPR 1 "register_operand") +- (match_operand:GPR 2 "and_reg_operand")))]) +- +-;; The middle-end is not allowed to convert ANDing with 0xffff_ffff into a +-;; zero_extendsidi2 because of TARGET_TRULY_NOOP_TRUNCATION, so handle these +-;; here. Note that this variant does not trigger for SI mode because we +-;; require a 64-bit HOST_WIDE_INT and 0xffff_ffff wouldn't be a canonical +-;; sign-extended SImode value. 
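A usage note on the smax3/smin3 (and smaxa/smina) patterns defined above: GCC only forms MAX_EXPR/MIN_EXPR from floating-point selects when NaNs and signed zeros can be ignored, so reaching fmax.d/fmin.d from plain C needs the matching flags. A sketch under those assumptions:

    /* With -O2 -ffinite-math-only -fno-signed-zeros these selects are
       assumed to become MAX_EXPR/MIN_EXPR and then fmax.d/fmin.d via
       the patterns above.  */
    double dmax (double a, double b) { return a > b ? a : b; }
    double dmin (double a, double b) { return a < b ? a : b; }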
+-;; +-;; These are possible combinations for operand 1 and 2. +-;; (r=register, mem=memory, x=match, S=split): +-;; +-;; \ op1 r/EXT r/!EXT mem +-;; op2 +-;; +-;; andi x x +-;; 0xff x x x +-;; 0xffff x x x +-;; 0xffff_ffff x S x +-;; low-bitmask x +-;; register x x +-;; register =op1 +- +-(define_insn "*and3" +- [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r") +- (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "o,o,W,r,r,r,r") +- (match_operand:GPR 2 "and_operand" "Yb,Yh,Yw,K,Yx,Yw,r")))] +- " and_operands_ok (mode, operands[1], operands[2])" +-{ +- int len; +- +- switch (which_alternative) +- { +- case 0: +- operands[1] = gen_lowpart (QImode, operands[1]); +- return "ld.bu\t%0,%1"; +- case 1: +- operands[1] = gen_lowpart (HImode, operands[1]); +- return "ld.hu\t%0,%1"; +- case 2: +- operands[1] = gen_lowpart (SImode, operands[1]); +- if (loongarch_14bit_shifted_offset_address_p (XEXP (operands[1], 0), SImode)) +- return "ldptr.w\t%0,%1\n\tbstrins.d\t%0,$r0,63,32"; +- else if (loongarch_12bit_offset_address_p (XEXP (operands[1], 0), SImode)) +- return "ld.wu\t%0,%1"; +- else +- gcc_unreachable (); +- case 3: +- return "andi\t%0,%1,%x2"; +- case 4: +- len = low_bitmask_len (mode, INTVAL (operands[2])); +- operands[2] = GEN_INT (len-1); +- return "bstrpick.\t%0,%1,%2,0"; +- case 5: +- return "#"; +- case 6: +- return "and\t%0,%1,%2"; +- default: +- gcc_unreachable (); +- } +-} +- [(set_attr "move_type" "load,load,load,andi,pick_ins,shift_shift,logical") +- (set_attr "compression" "*,*,*,*,*,*,*") ++(define_insn "3" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (any_bitwise:GPR (match_operand:GPR 1 "register_operand" "r,r") ++ (match_operand:GPR 2 "uns_arith_operand" "r,K")))] ++ "" ++ "%i2\t%0,%1,%2" ++ [(set_attr "type" "logical") + (set_attr "mode" "")]) + +-(define_expand "ior3" +- [(set (match_operand:GPR 0 "register_operand") +- (ior:GPR (match_operand:GPR 1 "register_operand") +- (match_operand:GPR 2 "uns_arith_operand")))] ++(define_insn "and3_extended" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "r") ++ (match_operand:GPR 2 "low_bitmask_operand" "Yx")))] + "" + { +-}) ++ int len; + +-(define_insn "*ior3" +- [(set (match_operand:GPR 0 "register_operand" "=r,r") +- (ior:GPR (match_operand:GPR 1 "register_operand" "r,r") +- (match_operand:GPR 2 "uns_arith_operand" "r,K")))] +- "" +- "@ +- or\t%0,%1,%2 +- ori\t%0,%1,%x2" +- [(set_attr "alu_type" "or") +- (set_attr "compression" "*,*") ++ len = low_bitmask_len (mode, INTVAL (operands[2])); ++ operands[2] = GEN_INT (len-1); ++ return "bstrpick.\t%0,%1,%2,0"; ++} ++ [(set_attr "move_type" "pick_ins") + (set_attr "mode" "")]) + + (define_insn "*iorhi3" + [(set (match_operand:HI 0 "register_operand" "=r,r") +- (ior:HI (match_operand:HI 1 "register_operand" "r,r") +- (match_operand:HI 2 "uns_arith_operand" "K,r")))] ++ (ior:HI (match_operand:HI 1 "register_operand" "%r,r") ++ (match_operand:HI 2 "uns_arith_operand" "r,K")))] + "" +- "@ +- ori\t%0,%1,%x2 +- or\t%0,%1,%2" +- [(set_attr "alu_type" "or") ++ "or%i2\t%0,%1,%2" ++ [(set_attr "type" "logical") + (set_attr "mode" "HI")]) + +-(define_expand "xor3" +- [(set (match_operand:GPR 0 "register_operand") +- (xor:GPR (match_operand:GPR 1 "register_operand") +- (match_operand:GPR 2 "uns_arith_operand")))] +- "" +- "") +- +-(define_insn "*xor3" +- [(set (match_operand:GPR 0 "register_operand" "=r,r") +- (xor:GPR (match_operand:GPR 1 "register_operand" "r,r") +- (match_operand:GPR 2 
"uns_arith_operand" "r,K")))] +- "" +- "@ +- xor\t%0,%1,%2 +- xori\t%0,%1,%x2" +- [(set_attr "alu_type" "xor") +- (set_attr "compression" "*,*") +- (set_attr "mode" "")]) +- +- + (define_insn "*nor3" + [(set (match_operand:GPR 0 "register_operand" "=r") +- (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "r")) ++ (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "%r")) + (not:GPR (match_operand:GPR 2 "register_operand" "r"))))] + "" + "nor\t%0,%1,%2" +- [(set_attr "alu_type" "nor") ++ [(set_attr "type" "logical") + (set_attr "mode" "")]) ++ ++(define_insn "n" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (neg_bitwise:GPR ++ (not:GPR (match_operand:GPR 1 "register_operand" "r")) ++ (match_operand:GPR 2 "register_operand" "r")))] ++ "" ++ "n\t%0,%2,%1" ++ [(set_attr "type" "logical") ++ (set_attr "mode" "")]) ++ + + ;; + ;; .................... +@@ -1485,163 +1317,109 @@ + ;; + ;; .................... + +- +- +-(define_insn "truncdfsf2" +- [(set (match_operand:SF 0 "register_operand" "=f") +- (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" +- "fcvt.s.d\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "cnv_mode" "D2S") +- (set_attr "mode" "SF")]) +- +-;; Integer truncation patterns. Truncating SImode values to smaller +-;; modes is a no-op, as it is for most other GCC ports. Truncating +-;; DImode values to SImode is not a no-op for TARGET_64BIT since we +-;; need to make sure that the lower 32 bits are properly sign-extended +-;; (see TARGET_TRULY_NOOP_TRUNCATION). Truncating DImode values into modes +-;; smaller than SImode is equivalent to two separate truncations: +-;; +-;; A B +-;; DI ---> HI == DI ---> SI ---> HI +-;; DI ---> QI == DI ---> SI ---> QI +-;; +-;; Step A needs a real instruction but step B does not. +- +-(define_insn "truncdisi2" +- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,ZC,m") +- (truncate:SI (match_operand:DI 1 "register_operand" "r,r,r")))] +- "TARGET_64BIT" +- "@ +- slli.w\t%0,%1,0 +- stptr.w\t%1,%0 +- st.w\t%1,%0" +- [(set_attr "move_type" "sll0,store,store") +- (set_attr "mode" "SI")]) +- + (define_insn "truncdi2" +- [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,m") +- (truncate:SHORT (match_operand:DI 1 "register_operand" "r,r")))] ++ [(set (match_operand:SUBDI 0 "nonimmediate_operand" "=r,m,k") ++ (truncate:SUBDI (match_operand:DI 1 "register_operand" "r,r,r")))] + "TARGET_64BIT" + "@ + slli.w\t%0,%1,0 +- st.\t%1,%0" +- [(set_attr "move_type" "sll0,store") ++ st.\t%1,%0 ++ stx.\t%1,%0" ++ [(set_attr "move_type" "sll0,store,store") + (set_attr "mode" "SI")]) + +-;; Combiner patterns to optimize shift/truncate combinations. 
+- +-(define_insn "*ashr_trunc" +- [(set (match_operand:SUBDI 0 "register_operand" "=r") +- (truncate:SUBDI +- (ashiftrt:DI (match_operand:DI 1 "register_operand" "r") +- (match_operand:DI 2 "const_arith_operand" ""))))] +- "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)" +- "srai.d\t%0,%1,%2" +- [(set_attr "type" "shift") +- (set_attr "mode" "")]) ++(define_insn "truncdfsf2" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] ++ "TARGET_DOUBLE_FLOAT" ++ "fcvt.s.d\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "cnv_mode" "D2S") ++ (set_attr "mode" "SF")]) + +-(define_insn "*lshr32_trunc" +- [(set (match_operand:SUBDI 0 "register_operand" "=r") +- (truncate:SUBDI +- (lshiftrt:DI (match_operand:DI 1 "register_operand" "r") +- (const_int 32))))] +- "TARGET_64BIT" +- "srai.d\t%0,%1,32" +- [(set_attr "type" "shift") +- (set_attr "mode" "")]) ++;;(define_insn "truncdisi2_extended" ++;; [(set (match_operand:SI 0 "nonimmediate_operand" "=ZC") ++;; (truncate:SI (match_operand:DI 1 "register_operand" "r")))] ++;; "TARGET_64BIT" ++;; "stptr.w\t%1,%0" ++;; [(set_attr "move_type" "store") ++;; (set_attr "mode" "SI")]) + + +- + ;; + ;; .................... + ;; + ;; ZERO EXTENSION + ;; + ;; .................... +- +-;; Extension insns. +- + (define_expand "zero_extendsidi2" + [(set (match_operand:DI 0 "register_operand") +- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] ++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))] + "TARGET_64BIT") + +-(define_insn "*zero_extendsidi2_dext" +- [(set (match_operand:DI 0 "register_operand" "=r,r,r") +- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,ZC,W")))] ++(define_insn_and_split "*zero_extendsidi2_internal" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") ++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,ZC,k")))] + "TARGET_64BIT" + "@ + bstrpick.d\t%0,%1,31,0 +- ldptr.w\t%0,%1\n\tlu32i.d\t%0,0 +- ld.wu\t%0,%1" +- [(set_attr "move_type" "arith,load,load") +- (set_attr "mode" "DI") +- (set_attr "insn_count" "1,2,1")]) +- +-;; See the comment before the *and3 pattern why this is generated by +-;; combine. 
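To make the alternatives of *zero_extendsidi2_internal above concrete: a register source should cost one bstrpick.d, while memory sources prefer ld.wu/ldx.wu, with the ldptr-only-reachable case split after reload into a plain load plus an lu32i-style clear of the high word. A minimal sketch, with the instruction choice read off the constraints (an assumption, not checked output):

    /* Register input: assumed to emit bstrpick.d %r,%r,31,0.  */
    unsigned long widen_reg (unsigned int x)
    {
      return x;
    }

    /* Memory input: assumed to emit ld.wu (or ldx.wu when indexed).  */
    unsigned long widen_mem (const unsigned int *p)
    {
      return *p;
    }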
+- +-(define_expand "zero_extend2" +- [(set (match_operand:GPR 0 "register_operand") +- (zero_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] +- "" +-{ +-}) +- +-(define_insn "*zero_extend2" +- [(set (match_operand:GPR 0 "register_operand" "=r,r") +- (zero_extend:GPR +- (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] +- "" +-{ +- switch (which_alternative) +- { +- case 0: +- return "bstrpick.\t%0,%1,,0"; +- case 1: +- return "ld.u\t%0,%1"; +- default: +- gcc_unreachable (); ++ ld.wu\t%0,%1 ++ # ++ ldx.wu\t%0,%1" ++ "&& reload_completed ++ && MEM_P (operands[1]) ++ && (loongarch_14bit_shifted_offset_address_p (XEXP (operands[1], 0), SImode) ++ && !loongarch_12bit_offset_address_p (XEXP (operands[1], 0), SImode)) ++ && !paradoxical_subreg_p (operands[0])" ++ [(set (match_dup 3) (match_dup 1)) ++ (set (match_dup 0) ++ (ior:DI (zero_extend:DI (subreg:SI (match_dup 0) 0)) ++ (match_dup 2)))] ++ { ++ operands[1] = gen_lowpart (SImode, operands[1]); ++ operands[3] = gen_lowpart (SImode, operands[0]); ++ operands[2] = const0_rtx; + } +-} +- [(set_attr "move_type" "pick_ins,load") +- (set_attr "compression" "*,*") +- (set_attr "mode" "")]) +- ++ [(set_attr "move_type" "arith,load,load,load") ++ (set_attr "mode" "DI")]) + +-(define_expand "zero_extendqihi2" +- [(set (match_operand:HI 0 "register_operand") +- (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] ++(define_insn "zero_extend2" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r,r") ++ (zero_extend:GPR ++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m,k")))] + "" +-{ +-}) ++ "@ ++ bstrpick.w\t%0,%1,,0 ++ ld.u\t%0,%1 ++ ldx.u\t%0,%1" ++ [(set_attr "move_type" "pick_ins,load,load") ++ (set_attr "mode" "")]) + +-(define_insn "*zero_extendqihi2" +- [(set (match_operand:HI 0 "register_operand" "=r,r") +- (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] ++(define_insn "zero_extendqihi2" ++ [(set (match_operand:HI 0 "register_operand" "=r,r,r") ++ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "r,k,m")))] + "" + "@ +- andi\t%0,%1,0x00ff ++ andi\t%0,%1,0xff ++ ldx.bu\t%0,%1 + ld.bu\t%0,%1" +- [(set_attr "move_type" "andi,load") ++ [(set_attr "move_type" "andi,load,load") + (set_attr "mode" "HI")]) + + ;; Combiner patterns to optimize truncate/zero_extend combinations. + + (define_insn "*zero_extend_trunc" + [(set (match_operand:GPR 0 "register_operand" "=r") +- (zero_extend:GPR ++ (zero_extend:GPR + (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] + "TARGET_64BIT" +- "bstrpick.\t%0,%1,,0" ++ "bstrpick.w\t%0,%1,,0" + [(set_attr "move_type" "pick_ins") + (set_attr "mode" "")]) + + (define_insn "*zero_extendhi_truncqi" + [(set (match_operand:HI 0 "register_operand" "=r") +- (zero_extend:HI ++ (zero_extend:HI + (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] + "TARGET_64BIT" + "andi\t%0,%1,0xff" +@@ -1655,142 +1433,77 @@ + ;; + ;; .................... + +-;; Extension insns. +-;; Those for integer source operand are ordered widest source type first. +- +-;; When TARGET_64BIT, all SImode integer and accumulator registers +-;; should already be in sign-extended form (see TARGET_TRULY_NOOP_TRUNCATION +-;; and truncdisi2). We can therefore get rid of register->register +-;; instructions if we constrain the source to be in the same register as +-;; the destination. +-;; +-;; Only the pre-reload scheduler sees the type of the register alternatives; +-;; we split them into nothing before the post-reload scheduler runs. 
+-;; These alternatives therefore have type "move" in order to reflect +-;; what happens if the two pre-reload operands cannot be tied, and are +-;; instead allocated two separate GPRs. We don't distinguish between +-;; the GPR and LO cases because we don't usually know during pre-reload +-;; scheduling whether an operand will be LO or not. + (define_insn_and_split "extendsidi2" +- [(set (match_operand:DI 0 "register_operand" "=r,r,r") +- (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m")))] ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") ++ (sign_extend:DI ++ (match_operand:SI 1 "nonimmediate_operand" "0,ZC,m,k")))] + "TARGET_64BIT" +- "@ +- # +- ldptr.w\t%0,%1 +- ld.w\t%0,%1" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ return "#"; ++ case 1: ++ { ++ rtx offset = XEXP (operands[1], 0); ++ if (GET_CODE (offset) == PLUS) ++ offset = XEXP (offset, 1); ++ else ++ offset = const0_rtx; ++ if (const_arith_operand (offset, Pmode) || (offset == const0_rtx)) ++ return "ld.w\t%0,%1"; ++ else ++ return "ldptr.w\t%0,%1"; ++ } ++ case 2: ++ return "ld.w\t%0,%1"; ++ case 3: ++ return "ldx.w\t%0,%1"; ++ default: ++ gcc_unreachable (); ++ } ++} + "&& reload_completed && register_operand (operands[1], VOIDmode)" + [(const_int 0)] + { + emit_note (NOTE_INSN_DELETED); + DONE; + } +- [(set_attr "move_type" "move,load,load") ++ [(set_attr "move_type" "move,load,load,load") + (set_attr "mode" "DI")]) + +-(define_expand "extend2" +- [(set (match_operand:GPR 0 "register_operand") +- (sign_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))] +- "") +- +- +-(define_insn "*extend2_se" +- [(set (match_operand:GPR 0 "register_operand" "=r,r") +- (sign_extend:GPR +- (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] ++(define_insn "extend2" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r,r") ++ (sign_extend:GPR ++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m,k")))] + "" + "@ + ext.w.\t%0,%1 +- ld.\t%0,%1" +- [(set_attr "move_type" "signext,load") ++ ld.\t%0,%1 ++ ldx.\t%0,%1" ++ [(set_attr "move_type" "signext,load,load") + (set_attr "mode" "")]) + +-(define_expand "extendqihi2" +- [(set (match_operand:HI 0 "register_operand") +- (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))] +- "") +- +-(define_insn "*extendqihi2_seb" +- [(set (match_operand:HI 0 "register_operand" "=r,r") +- (sign_extend:HI +- (match_operand:QI 1 "nonimmediate_operand" "r,m")))] ++(define_insn "extendqihi2" ++ [(set (match_operand:HI 0 "register_operand" "=r,r,r") ++ (sign_extend:HI ++ (match_operand:QI 1 "nonimmediate_operand" "r,m,k")))] + "" + "@ + ext.w.b\t%0,%1 +- ld.b\t%0,%1" +- [(set_attr "move_type" "signext,load") +- (set_attr "mode" "SI")]) +- +-;; Combiner patterns for truncate/sign_extend combinations. The SI versions +-;; use the shift/truncate patterns. 
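Two properties of the sign-extension patterns above are worth spelling out: SImode values are kept sign-extended in 64-bit registers, so the register alternative of extendsidi2 splits away to nothing after reload, while QI/HI sources still need a real ext.w.b/ext.w.h. A sketch under those assumptions:

    /* Assumed to cost zero instructions once x is in a register
       (the "0" alternative of extendsidi2 deletes itself).  */
    long widen_si (int x) { return x; }

    /* Assumed to emit ext.w.b (register) or ld.b (memory).  */
    int widen_qi (signed char c) { return c; }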
+- +-(define_insn_and_split "*extenddi_truncate" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (sign_extend:DI +- (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] +- "TARGET_64BIT" +- "#" +- "&& reload_completed" +- [(set (match_dup 2) +- (ashift:DI (match_dup 1) +- (match_dup 3))) +- (set (match_dup 0) +- (ashiftrt:DI (match_dup 2) +- (match_dup 3)))] +-{ +- operands[2] = gen_lowpart (DImode, operands[0]); +- operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); +-} +- [(set_attr "move_type" "shift_shift") +- (set_attr "mode" "DI")]) +- +-(define_insn_and_split "*extendsi_truncate" +- [(set (match_operand:SI 0 "register_operand" "=r") +- (sign_extend:SI +- (truncate:SHORT (match_operand:DI 1 "register_operand" "r"))))] +- "TARGET_64BIT" +- "#" +- "&& reload_completed" +- [(set (match_dup 2) +- (ashift:DI (match_dup 1) +- (match_dup 3))) +- (set (match_dup 0) +- (truncate:SI (ashiftrt:DI (match_dup 2) +- (match_dup 3))))] +-{ +- operands[2] = gen_lowpart (DImode, operands[0]); +- operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (mode)); +-} +- [(set_attr "move_type" "shift_shift") +- (set_attr "mode" "SI")]) +- +-(define_insn_and_split "*extendhi_truncateqi" +- [(set (match_operand:HI 0 "register_operand" "=r") +- (sign_extend:HI +- (truncate:QI (match_operand:DI 1 "register_operand" "r"))))] +- "TARGET_64BIT" +- "#" +- "&& reload_completed" +- [(set (match_dup 2) +- (ashift:DI (match_dup 1) +- (const_int 56))) +- (set (match_dup 0) +- (truncate:HI (ashiftrt:DI (match_dup 2) +- (const_int 56))))] +-{ +- operands[2] = gen_lowpart (DImode, operands[0]); +-} +- [(set_attr "move_type" "shift_shift") ++ ld.b\t%0,%1 ++ ldx.b\t%0,%1" ++ [(set_attr "move_type" "signext,load,load") + (set_attr "mode" "SI")]) + + (define_insn "extendsfdf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + "fcvt.d.s\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "cnv_mode" "S2D") +- (set_attr "mode" "DF")]) ++ [(set_attr "type" "fcvt") ++ (set_attr "cnv_mode" "S2D") ++ (set_attr "mode" "DF")]) + + ;; + ;; .................... +@@ -1799,104 +1512,60 @@ + ;; + ;; .................... 
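The conversion patterns rewritten in this region reduce to three instruction families: ftintrz.{w,l}.{s,d} for float-to-integer truncation, ffint.{s,d}.{w,l} for integer-to-float, and fcvt for precision changes. A compact sketch — the per-line instruction names are inferred from the templates, not verified output:

    long   to_int (double d) { return (long) d; }    /* ftintrz.l.d  */
    double to_fp  (long n)   { return (double) n; }  /* ffint.d.l    */
    float  narrow (double d) { return (float) d; }   /* fcvt.s.d     */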
+ +-(define_expand "fix_truncdfsi2" +- [(set (match_operand:SI 0 "register_operand") +- (fix:SI (match_operand:DF 1 "register_operand")))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" +-"" +-) +- +-(define_insn "fix_truncdfsi2_insn" +- [(set (match_operand:SI 0 "register_operand" "=f") +- (fix:SI (match_operand:DF 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" +- "ftintrz.w.d %0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "DF") +- (set_attr "cnv_mode" "D2I")]) +- +- +-(define_expand "fix_truncsfsi2" +- [(set (match_operand:SI 0 "register_operand") +- (fix:SI (match_operand:SF 1 "register_operand")))] +- "TARGET_HARD_FLOAT" +-"" +-) +- +-(define_insn "fix_truncsfsi2_insn" +- [(set (match_operand:SI 0 "register_operand" "=f") +- (fix:SI (match_operand:SF 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT" +- "ftintrz.w.s %0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "SF") +- (set_attr "cnv_mode" "S2I")]) +- +- +-(define_insn "fix_truncdfdi2" +- [(set (match_operand:DI 0 "register_operand" "=f") +- (fix:DI (match_operand:DF 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" +- "ftintrz.l.d %0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "DF") +- (set_attr "cnv_mode" "D2I")]) ++;; conversion of a floating-point value to a integer + ++(define_insn "fix_trunc2" ++ [(set (match_operand:GPR 0 "register_operand" "=f") ++ (fix:GPR (match_operand:ANYF 1 "register_operand" "f")))] ++ "" ++ "ftintrz..\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "")]) + +-(define_insn "fix_truncsfdi2" +- [(set (match_operand:DI 0 "register_operand" "=f") +- (fix:DI (match_operand:SF 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" +- "ftintrz.l.s %0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "SF") +- (set_attr "cnv_mode" "S2I")]) +- ++;; conversion of an integral (or boolean) value to a floating-point value + + (define_insn "floatsidf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:SI 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + "ffint.d.w\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "DF") ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "DF") + (set_attr "cnv_mode" "I2D")]) + +- + (define_insn "floatdidf2" + [(set (match_operand:DF 0 "register_operand" "=f") + (float:DF (match_operand:DI 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + "ffint.d.l\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "DF") +- (set_attr "cnv_mode" "I2D")]) +- ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "DF") ++ (set_attr "cnv_mode" "I2D")]) + + (define_insn "floatsisf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (float:SF (match_operand:SI 1 "register_operand" "f")))] + "TARGET_HARD_FLOAT" + "ffint.s.w\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "SF") ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "SF") + (set_attr "cnv_mode" "I2S")]) + +- + (define_insn "floatdisf2" + [(set (match_operand:SF 0 "register_operand" "=f") + (float:SF (match_operand:DI 1 "register_operand" "f")))] +- "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + "ffint.s.l\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "SF") ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "SF") + (set_attr "cnv_mode" "I2S")]) + ++;; Convert a floating-point value to an 
unsigned integer. + + (define_expand "fixuns_truncdfsi2" + [(set (match_operand:SI 0 "register_operand") + (unsigned_fix:SI (match_operand:DF 1 "register_operand")))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + { + rtx reg1 = gen_reg_rtx (DFmode); + rtx reg2 = gen_reg_rtx (DFmode); +@@ -1908,41 +1577,38 @@ + + real_2expN (&offset, 31, DFmode); + +- if (reg1) /* Turn off complaints about unreached code. */ +- { +- loongarch_emit_move (reg1, const_double_from_real_value (offset, DFmode)); +- do_pending_stack_adjust (); ++ loongarch_emit_move (reg1, ++ const_double_from_real_value (offset, DFmode)); ++ do_pending_stack_adjust (); + +- test = gen_rtx_GE (VOIDmode, operands[1], reg1); +- emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); ++ test = gen_rtx_GE (VOIDmode, operands[1], reg1); ++ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); + +- emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1])); +- emit_jump_insn (gen_rtx_SET (pc_rtx, +- gen_rtx_LABEL_REF (VOIDmode, label2))); +- emit_barrier (); ++ emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1])); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, ++ gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); + +- emit_label (label1); +- loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); +- loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode +- (BITMASK_HIGH, SImode))); ++ emit_label (label1); ++ loongarch_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); ++ loongarch_emit_move (reg3, GEN_INT (trunc_int_for_mode ++ (BITMASK_HIGH, SImode))); + +- emit_insn (gen_fix_truncdfsi2 (operands[0], reg2)); +- emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); ++ emit_insn (gen_fix_truncdfsi2 (operands[0], reg2)); ++ emit_insn (gen_iorsi3 (operands[0], operands[0], reg3)); + +- emit_label (label2); ++ emit_label (label2); + +- /* Allow REG_NOTES to be set on last insn (labels don't have enough +- fields, and can't be used for REG_NOTES anyway). */ +- emit_use (stack_pointer_rtx); +- DONE; +- } ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). */ ++ emit_use (stack_pointer_rtx); ++ DONE; + }) + +- + (define_expand "fixuns_truncdfdi2" + [(set (match_operand:DI 0 "register_operand") + (unsigned_fix:DI (match_operand:DF 1 "register_operand")))] +- "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + { + rtx reg1 = gen_reg_rtx (DFmode); + rtx reg2 = gen_reg_rtx (DFmode); +@@ -1980,7 +1646,6 @@ + DONE; + }) + +- + (define_expand "fixuns_truncsfsi2" + [(set (match_operand:SI 0 "register_operand") + (unsigned_fix:SI (match_operand:SF 1 "register_operand")))] +@@ -2022,11 +1687,10 @@ + DONE; + }) + +- + (define_expand "fixuns_truncsfdi2" + [(set (match_operand:DI 0 "register_operand") + (unsigned_fix:DI (match_operand:SF 1 "register_operand")))] +- "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT" ++ "TARGET_DOUBLE_FLOAT" + { + rtx reg1 = gen_reg_rtx (SFmode); + rtx reg2 = gen_reg_rtx (SFmode); +@@ -2067,35 +1731,35 @@ + ;; + ;; .................... + ;; +-;; DATA MOVEMENT ++;; EXTRACT AND INSERT + ;; + ;; .................... 
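Since the fixuns_trunc expanders above open-code the unsigned conversion, a C rendering may help: values below 2^31 (or 2^63 for the DI variants) take the plain signed ftintrz path; anything larger is rebased by that power of two and the top bit is OR-ed back in afterwards. Illustrative only, as the real sequence is emitted as RTL with branches and labels:

    /* C model of the fixuns_truncdfsi2 expansion above.  */
    unsigned int fix_uns (double x)
    {
      const double two31 = 2147483648.0;          /* 2^31 */
      if (x < two31)
        return (unsigned int) (int) x;            /* ftintrz.w.d  */
      return ((unsigned int) (int) (x - two31)) | 0x80000000u;
    }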
+ + (define_expand "extzv" +- [(set (match_operand:GPR 0 "register_operand") +- (zero_extract:GPR (match_operand:GPR 1 "register_operand") +- (match_operand 2 "const_int_operand") +- (match_operand 3 "const_int_operand")))] ++ [(set (match_operand:X 0 "register_operand") ++ (zero_extract:X (match_operand:X 1 "register_operand") ++ (match_operand 2 "const_int_operand") ++ (match_operand 3 "const_int_operand")))] + "" + { + if (!loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), +- INTVAL (operands[3]))) ++ INTVAL (operands[3]))) + FAIL; + }) + + (define_insn "*extzv" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r") +- (match_operand 2 "const_int_operand" "") +- (match_operand 3 "const_int_operand" "")))] ++ [(set (match_operand:X 0 "register_operand" "=r") ++ (zero_extract:X (match_operand:X 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand" "") ++ (match_operand 3 "const_int_operand" "")))] + "loongarch_use_ins_ext_p (operands[1], INTVAL (operands[2]), +- INTVAL (operands[3]))" ++ INTVAL (operands[3]))" + { +- operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]) -1 ); ++ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]) - 1); + return "bstrpick.\t%0,%1,%2,%3"; + } +- [(set_attr "type" "arith") +- (set_attr "mode" "")]) ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) + + (define_expand "insv" + [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand") +@@ -2105,7 +1769,7 @@ + "" + { + if (!loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), +- INTVAL (operands[2]))) ++ INTVAL (operands[2]))) + FAIL; + }) + +@@ -2115,26 +1779,20 @@ + (match_operand:SI 2 "const_int_operand" "")) + (match_operand:GPR 3 "reg_or_0_operand" "rJ"))] + "loongarch_use_ins_ext_p (operands[0], INTVAL (operands[1]), +- INTVAL (operands[2]))" ++ INTVAL (operands[2]))" + { +- operands[1] = GEN_INT (INTVAL (operands[1]) + INTVAL (operands[2]) -1 ); ++ operands[1] = GEN_INT (INTVAL (operands[1]) + INTVAL (operands[2]) - 1); + return "bstrins.\t%0,%z3,%1,%2"; + } +- [(set_attr "type" "arith") +- (set_attr "mode" "")]) +- +-;; Allow combine to split complex const_int load sequences, using operand 2 +-;; to store the intermediate results. See move_operand for details. +-(define_split +- [(set (match_operand:GPR 0 "register_operand") +- (match_operand:GPR 1 "splittable_const_int_operand")) +- (clobber (match_operand:GPR 2 "register_operand"))] +- "" +- [(const_int 0)] +-{ +- loongarch_move_integer (operands[2], operands[0], INTVAL (operands[1])); +- DONE; +-}) ++ [(set_attr "type" "arith") ++ (set_attr "mode" "")]) ++ ++;; ++;; .................... ++;; ++;; DATA MOVEMENT ++;; ++;; .................... 
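The extzv/insv patterns above are what give C bit-field accesses their single-instruction bstrpick/bstrins forms, gated by loongarch_use_ins_ext_p. A small sketch, where the field layout and the exact .w/.d variant are assumptions:

    struct fields { unsigned long lo : 12, mid : 20, hi : 32; };

    /* Extraction: assumed to emit one bstrpick.d.  */
    unsigned long get_mid (struct fields f)      { return f.mid; }

    /* Insertion: assumed to emit one bstrins.d.  */
    void set_mid (struct fields *f, unsigned v)  { f->mid = v; }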
+ + ;; 64-bit integer moves + +@@ -2151,152 +1809,46 @@ + DONE; + }) + +- + (define_insn "*movdi_32bit" +- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") +- (match_operand:DI 1 "move_operand" "r,i,ZC,r,m,r,*J*r,*m,*f,*f"))] ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m") ++ (match_operand:DI 1 "move_operand" "r,i,w,r,*J*r,*m,*f,*f"))] + "!TARGET_64BIT + && (register_operand (operands[0], DImode) + || reg_or_0_operand (operands[1], DImode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") +- (set (attr "mode") +- (if_then_else (eq_attr "move_type" "imul") +- (const_string "SI") +- (const_string "DI")))]) +- ++ [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore") ++ (set_attr "mode" "DI")]) + + (define_insn "*movdi_64bit" +- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m") +- (match_operand:DI 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f"))] ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m") ++ (match_operand:DI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f"))] + "TARGET_64BIT + && (register_operand (operands[0], DImode) +- || reg_or_0_operand (operands[1], DImode)) +- && !((GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == LABEL_REF) +- && symbolic_operand (operands[1], VOIDmode) +- && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME))" ++ || reg_or_0_operand (operands[1], DImode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore") ++ [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore") + (set_attr "mode" "DI")]) + +-(define_insn "movdi_extreme" +- [(parallel [(set (match_operand:DI 0 "register_operand" "=r") +- (unspec_volatile:DI [(match_operand:DI 1 "symbolic_operand" "")] +- UNSPECV_MOVE_EXTREME)) +- (use (match_operand:DI 2 "register_operand" "=&r"))])] +- "TARGET_64BIT && (loongarch_cmodel_var == LARCH_CMODEL_EXTREME)" +- { +- if (!loongarch_global_symbol_p (operands[1]) +- || loongarch_symbol_binds_local_p (operands[1])) +- return "la.local\t%0,%2,%1"; +- else +- return "la.global\t%0,%2,%1"; +- } +- [(set_attr "move_type" "const") +- (set_attr "mode" "DI")]) + ;; 32-bit Integer moves + +-;; Unlike most other insns, the move insns can't be split with +-;; different predicates, because register spilling and other parts of +-;; the compiler, have memoized the insn number already. +- +-(define_expand "mov" +- [(set (match_operand:IMOVE32 0 "") +- (match_operand:IMOVE32 1 ""))] +- "" +-{ +- if (loongarch_legitimize_move (mode, operands[0], operands[1])) +- DONE; +-}) +- +-;; The difference between these two is whether or not ints are allowed +-;; in FP registers (off by default, use -mdebugh to enable). 
+- +-(define_insn "*mov_internal" +- [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,ZC,r,m,*f,*f,*r,*m,*r,*z") +- (match_operand:IMOVE32 1 "move_operand" "r,Yd,ZC,rJ,m,rJ,*r*J,*m,*f,*f,*z,*r"))] +- "(register_operand (operands[0], mode) +- || reg_or_0_operand (operands[1], mode))" +- { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,load,store,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") +- (set_attr "compression" "all,*,*,*,*,*,*,*,*,*,*,*") +- (set_attr "mode" "SI")]) +- +- +- +-;; LARCH supports loading and storing a floating point register from +-;; the sum of two general registers. We use two versions for each of +-;; these four instructions: one where the two general registers are +-;; SImode, and one where they are DImode. This is because general +-;; registers will be in SImode when they hold 32-bit values, but, +-;; since the 32-bit values are always sign extended, the [ls][wd]xc1 +-;; instructions will still work correctly. +- +-;; ??? Perhaps it would be better to support these instructions by +-;; modifying TARGET_LEGITIMATE_ADDRESS_P and friends. However, since +-;; these instructions can only be used to load and store floating +-;; point registers, that would probably cause trouble in reload. +- +-(define_insn "*_" +- [(set (match_operand:ANYF 0 "register_operand" "=f") +- (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "register_operand" "r"))))] +- "" +- "\t%0,%1,%2" +- [(set_attr "type" "fpidxload") +- (set_attr "mode" "")]) +- +-(define_insn "*_" +- [(set (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "register_operand" "r"))) +- (match_operand:ANYF 0 "register_operand" "f"))] +- "TARGET_HARD_FLOAT" +- "\t%0,%1,%2" +- [(set_attr "type" "fpidxstore") +- (set_attr "mode" "")]) +- +-;; Loongson index address load and store. +-(define_insn "*_" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (mem:GPR +- (plus:P (match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "register_operand" "r"))))] +- "" +- "\t%0,%1,%2" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) +- +-(define_insn "*_" +- [(set (mem:GPR (plus:P (match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "register_operand" "r"))) +- (match_operand:GPR 0 "register_operand" "r"))] +- "" +- "\t%0,%1,%2" +- [(set_attr "type" "store") +- (set_attr "mode" "")]) +- +-;; SHORT mode sign_extend. 
+-(define_insn "*extend__" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (sign_extend:GPR +- (mem:SHORT +- (plus:P (match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "register_operand" "r")))))] ++(define_expand "movsi" ++ [(set (match_operand:SI 0 "") ++ (match_operand:SI 1 ""))] + "" +- "\t%0,%1,%2" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++{ ++ if (loongarch_legitimize_move (SImode, operands[0], operands[1])) ++ DONE; ++}) + +-(define_insn "*extend_" +- [(set (mem:SHORT (plus:P (match_operand:P 1 "register_operand" "r") +- (match_operand:P 2 "register_operand" "r"))) +- (match_operand:SHORT 0 "register_operand" "r"))] +- "" +- "\t%0,%1,%2" +- [(set_attr "type" "store") ++(define_insn "*movsi_internal" ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m,*r,*z") ++ (match_operand:SI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f,*z,*r"))] ++ "(register_operand (operands[0], SImode) ++ || reg_or_0_operand (operands[1], SImode))" ++ { return loongarch_output_move (operands[0], operands[1]); } ++ [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf") + (set_attr "mode" "SI")]) + +- + ;; 16-bit Integer moves + + ;; Unlike most other insns, the move insns can't be split with +@@ -2314,13 +1866,12 @@ + }) + + (define_insn "*movhi_internal" +- [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m") +- (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ"))] ++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m,r,k") ++ (match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ,k,rJ"))] + "(register_operand (operands[0], HImode) + || reg_or_0_operand (operands[1], HImode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,const,load,store") +- (set_attr "compression" "all,all,*,*,*") ++ [(set_attr "move_type" "move,const,const,load,store,load,store") + (set_attr "mode" "HI")]) + + ;; 8-bit Integer moves +@@ -2340,13 +1891,12 @@ + }) + + (define_insn "*movqi_internal" +- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m") +- (match_operand:QI 1 "move_operand" "r,I,m,rJ"))] ++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,r,k") ++ (match_operand:QI 1 "move_operand" "r,I,m,rJ,k,rJ"))] + "(register_operand (operands[0], QImode) + || reg_or_0_operand (operands[1], QImode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,load,store") +- (set_attr "compression" "all,*,*,*") ++ [(set_attr "move_type" "move,const,load,store,load,store") + (set_attr "mode" "QI")]) + + ;; 32-bit floating point moves +@@ -2361,13 +1911,13 @@ + }) + + (define_insn "*movsf_hardfloat" +- [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") +- (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))] ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m") ++ (match_operand:SF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*G*r,*m,*r"))] + "TARGET_HARD_FLOAT + && (register_operand (operands[0], SFmode) + || reg_or_0_operand (operands[1], SFmode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") ++ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store") + (set_attr "mode" "SF")]) + + (define_insn "*movsf_softfloat" +@@ -2380,7 +1930,6 @@ + [(set_attr "move_type" "move,load,store") + (set_attr "mode" "SF")]) + +- + ;; 64-bit 
floating point moves + + (define_expand "movdf" +@@ -2393,13 +1942,13 @@ + }) + + (define_insn "*movdf_hardfloat" +- [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m") +- (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m") ++ (match_operand:DF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*r*G,*m,*r"))] ++ "TARGET_DOUBLE_FLOAT + && (register_operand (operands[0], DFmode) + || reg_or_0_operand (operands[1], DFmode))" + { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,store,mgtf,mftg,move,load,store") ++ [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store") + (set_attr "mode" "DF")]) + + (define_insn "*movdf_softfloat" +@@ -2433,11 +1982,10 @@ + { return loongarch_output_move (operands[0], operands[1]); } + [(set_attr "move_type" "move,const,load,store") + (set (attr "mode") +- (if_then_else (eq_attr "move_type" "imul") ++ (if_then_else (eq_attr "move_type" "imul") + (const_string "SI") + (const_string "TI")))]) + +- + ;; 128-bit floating point moves + + (define_expand "movtf" +@@ -2460,11 +2008,10 @@ + [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore") + (set_attr "mode" "TF")]) + +- + (define_split + [(set (match_operand:MOVE64 0 "nonimmediate_operand") + (match_operand:MOVE64 1 "move_operand"))] +- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" + [(const_int 0)] + { + loongarch_split_move_insn (operands[0], operands[1], curr_insn); +@@ -2474,7 +2021,7 @@ + (define_split + [(set (match_operand:MOVE128 0 "nonimmediate_operand") + (match_operand:MOVE128 1 "move_operand"))] +- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" + [(const_int 0)] + { + loongarch_split_move_insn (operands[0], operands[1], curr_insn); +@@ -2484,7 +2031,7 @@ + ;; Emit a doubleword move in which exactly one of the operands is + ;; a floating-point register. We can't just emit two normal moves + ;; because of the constraints imposed by the FPU register model; +-;; see loongarch_cannot_change_mode_class for details. Instead, we keep ++;; see loongarch_can_change_mode_class for details. Instead, we keep + ;; the FPR whole and use special patterns to refer to each word of + ;; the other operand. + +@@ -2516,6 +2063,108 @@ + DONE; + }) + ++;; Clear one FCC register ++ ++(define_insn "movfcc" ++ [(set (match_operand:FCC 0 "register_operand" "=z") ++ (const_int 0))] ++ "" ++ "movgr2cf\t%0,$r0") ++ ++;; Conditional move instructions. ++ ++(define_insn "*sel_using_" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (if_then_else:GPR ++ (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "r,r") ++ (const_int 0)) ++ (match_operand:GPR 2 "reg_or_0_operand" "r,J") ++ (match_operand:GPR 3 "reg_or_0_operand" "J,r")))] ++ "register_operand (operands[2], mode) ++ != register_operand (operands[3], mode)" ++ "@ ++ \t%0,%2,%1 ++ \t%0,%3,%1" ++ [(set_attr "type" "condmove") ++ (set_attr "mode" "")]) ++ ++;; fsel copies the 3rd argument when the 1st is non-zero and the 2nd ++;; argument if the 1st is zero. This means operand 2 and 3 are ++;; inverted in the instruction. 
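On the conditional moves here: *sel_using_ above handles exactly the case where one arm is zero (a lone maskeqz or masknez), and the movcc expanders below are assumed to build the general select from a maskeqz/masknez pair plus an or via loongarch_expand_conditional_move_la464. A sketch:

    /* One arm zero: assumed to emit a single maskeqz.  */
    long gate (long c, long a)
    {
      return c ? a : 0;
    }

    /* General form: assumed to expand to maskeqz + masknez + or.  */
    long pick (long c, long a, long b)
    {
      return c ? a : b;
    }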
++ ++(define_insn "*sel" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (if_then_else:ANYF ++ (equality_op:FCC (match_operand:FCC 1 "register_operand" "z") ++ (const_int 0)) ++ (match_operand:ANYF 2 "reg_or_0_operand" "f") ++ (match_operand:ANYF 3 "reg_or_0_operand" "f")))] ++ "TARGET_HARD_FLOAT" ++ "fsel\t%0,,%1" ++ [(set_attr "type" "condmove") ++ (set_attr "mode" "")]) ++ ++;; These are the main define_expand's used to make conditional moves. ++ ++(define_expand "movcc" ++ [(set (match_operand:GPR 0 "register_operand") ++ (if_then_else:GPR (match_operator 1 "comparison_operator" ++ [(match_operand:GPR 2 "reg_or_0_operand") ++ (match_operand:GPR 3 "reg_or_0_operand")])))] ++ "TARGET_COND_MOVE_INT" ++{ ++ if(loongarch_expand_conditional_move_la464 (operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_expand "movcc" ++ [(set (match_operand:ANYF 0 "register_operand") ++ (if_then_else:ANYF (match_operator 1 "comparison_operator" ++ [(match_operand:ANYF 2 "reg_or_0_operand") ++ (match_operand:ANYF 3 "reg_or_0_operand")])))] ++ "TARGET_COND_MOVE_FLOAT" ++{ ++ ++ if(loongarch_expand_conditional_move_la464 (operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_insn "lu32i_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ior:DI ++ (zero_extend:DI ++ (subreg:SI (match_operand:DI 1 "register_operand" "0") 0)) ++ (match_operand:DI 2 "const_lu32i_operand" "u")))] ++ "TARGET_64BIT" ++ "lu32i.d\t%0,%X2>>32" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI")]) ++ ++(define_insn "lu52i_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ior:DI ++ (and:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "lu52i_mask_operand")) ++ (match_operand 3 "const_lu52i_operand" "v")))] ++ "TARGET_64BIT" ++ "lu52i.d\t%0,%1,%X3>>52" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI")]) ++ ++;; Convert floating-point numbers to integers ++(define_insn "frint_" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_FRINT))] ++ "" ++ "frint.\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "")]) ++ + ;; Load the low word of operand 0 with operand 1. 
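Before the SPLITF word-access patterns below, a note on lu32i_d/lu52i_d above: together with lu12i.w and ori they let any 64-bit constant be materialized in at most four instructions, each pattern owning one slice of the value (lu32i.d bits 32..51, lu52i.d bits 52..63). A sketch of a constant that should need the full sequence — the exact instruction choice is the compiler's, so this is an assumption:

    /* Assumed worst-case sequence:
         lu12i.w  -> bits 12..31
         ori      -> bits 0..11
         lu32i.d  -> bits 32..51
         lu52i.d  -> bits 52..63  */
    unsigned long big_const (void)
    {
      return 0x123456789abcdef0UL;
    }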
+ (define_insn "load_low" + [(set (match_operand:SPLITF 0 "register_operand" "=f,f") +@@ -2559,47 +2208,149 @@ + [(set_attr "move_type" "mftg,fpstore") + (set_attr "mode" "")]) + +-;; Move operand 1 to the high word of operand 0 using movgr2frh, preserving the ++;; Thread-Local Storage ++ ++(define_insn "got_load_tls_gd" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_GD))] ++ "" ++ "la.tls.gd\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "got_load_tls_ld" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_LD))] ++ "" ++ "la.tls.ld\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "got_load_tls_le" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_LE))] ++ "" ++ "la.tls.le\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++(define_insn "got_load_tls_ie" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P ++ [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_TLS_IE))] ++ "" ++ "la.tls.ie\t%0,%1" ++ [(set_attr "got" "load") ++ (set_attr "mode" "")]) ++ ++;; Move operand 1 to the high word of operand 0 using movgr2frh.w, preserving the + ;; value in the low word. + (define_insn "movgr2frh" + [(set (match_operand:SPLITF 0 "register_operand" "=f") + (unspec:SPLITF [(match_operand: 1 "reg_or_0_operand" "rJ") +- (match_operand:SPLITF 2 "register_operand" "0")] +- UNSPEC_MOVGR2FRH))] +- "TARGET_HARD_FLOAT && TARGET_FLOAT64" +- "movgr2frh.w\t%z1,%0" ++ (match_operand:SPLITF 2 "register_operand" "0")] ++ UNSPEC_MOVGR2FRH))] ++ "TARGET_DOUBLE_FLOAT" ++ "movgr2frh.w\t%0,%z1" + [(set_attr "move_type" "mgtf") + (set_attr "mode" "")]) + +-;; Move high word of operand 1 to operand 0 using movfrh2gr. ++(define_insn "movsgr2fr" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_MOVGR2FR))] ++ "TARGET_DOUBLE_FLOAT" ++ "movgr2fr.w\t%0,%1" ++ ) ++(define_insn "movdgr2fr" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:DI 1 "register_operand" "r")] ++ UNSPEC_MOVGR2FR))] ++ "TARGET_DOUBLE_FLOAT" ++ "movgr2fr.d\t%0,%1" ++ ) ++ ++;; Move high word of operand 1 to operand 0 using movfrh2gr.s. 
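Stepping back to the TLS insns added above: each got_load_tls_* pattern backs one TLS access model, and the standard GCC tls_model attribute can pin a variable to a model, which makes the mapping easy to inspect in generated assembly (a sketch; the default model still depends on -fpic and on whether the definition is local):

    __thread int tls_default;   /* model chosen from -fpic/visibility */
    __thread int tls_le __attribute__ ((tls_model ("local-exec")));  /* la.tls.le */

    int
    sum_tls (void)
    {
      return tls_default + tls_le;
    }
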
+ (define_insn "movfrh2gr" + [(set (match_operand: 0 "register_operand" "=r") + (unspec: [(match_operand:SPLITF 1 "register_operand" "f")] + UNSPEC_MOVFRH2GR))] +- "TARGET_HARD_FLOAT && TARGET_FLOAT64" ++ "TARGET_DOUBLE_FLOAT" + "movfrh2gr.s\t%0,%1" + [(set_attr "move_type" "mftg") + (set_attr "mode" "")]) + ++(define_insn "movsfr2gr" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (unspec:GPR [(match_operand:SF 1 "register_operand" "f")] ++ UNSPEC_MOVFR2GR))] ++ "TARGET_DOUBLE_FLOAT" ++ "movfr2gr.s\t%0,%1" ++ ) ++(define_insn "movdfr2gr" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (unspec:GPR [(match_operand:DF 1 "register_operand" "f")] ++ UNSPEC_MOVFR2GR))] ++ "TARGET_DOUBLE_FLOAT" ++ "movfr2gr.d\t%0,%1" ++ ) ++ ++(define_insn "movfr2fcc" ++ [(set (match_operand:FCC 0 "register_operand" "=z") ++ (unspec:FCC [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_MOVFR2FCC))] ++ "TARGET_HARD_FLOAT" ++ "movfr2cf\t%0,%1" ++ [(set_attr "mode" "")]) ++ ++(define_insn "movgr2fcc" ++ [(set (match_operand:FCC 0 "register_operand" "=z") ++ (unspec:FCC [(match_operand:GPR 1 "register_operand" "r")] ++ UNSPEC_MOVGR2FCC))] ++ "TARGET_HARD_FLOAT" ++ "movgr2cf\t%0,%1" ++ [(set_attr "mode" "")]) ++ ++(define_insn "movfcc2gr" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (unspec:GPR [(match_operand:FCC 1 "register_operand" "z")] ++ UNSPEC_MOVFCC2GR))] ++ "TARGET_HARD_FLOAT" ++ "movcf2gr\t%0,%1" ++ [ (set_attr "mode" "")]) ++ ++ + ;; Expand in-line code to clear the instruction cache between operand[0] and + ;; operand[1]. + (define_expand "clear_cache" + [(match_operand 0 "pmode_register_operand") + (match_operand 1 "pmode_register_operand")] + "" +- " + { +- emit_insn (gen_ibar (const0_rtx)); ++ emit_insn (gen_loongarch_ibar (const0_rtx)); + DONE; +-}") ++}) + +-(define_insn "ibar" +- [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_IBAR)] ++(define_insn "loongarch_ibar" ++ [(unspec_volatile:SI ++ [(match_operand 0 "const_uimm15_operand")] ++ UNSPECV_IBAR) ++ (clobber (mem:BLK (scratch)))] + "" + "ibar\t%0") + +-(define_insn "dbar" +- [(unspec_volatile:SI [(match_operand 0 "const_uimm15_operand")] UNSPEC_DBAR)] ++(define_insn "loongarch_dbar" ++ [(unspec_volatile:SI ++ [(match_operand 0 "const_uimm15_operand")] ++ UNSPECV_DBAR) ++ (clobber (mem:BLK (scratch)))] + "" + "dbar\t%0") + +@@ -2607,118 +2358,142 @@ + + ;; Privileged state instruction + +-(define_insn "cpucfg" ++(define_insn "loongarch_cpucfg" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")] +- UNSPEC_CPUCFG))] ++ UNSPECV_CPUCFG))] + "" + "cpucfg\t%0,%1" +- [(set_attr "type" "load") +- (set_attr "mode" "SI")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "SI")]) ++ ++(define_insn "loongarch_syscall" ++ [(unspec_volatile:SI ++ [(match_operand 0 "const_uimm15_operand")] ++ UNSPECV_SYSCALL) ++ (clobber (mem:BLK (scratch)))] ++ "" ++ "syscall\t%0") ++ ++(define_insn "loongarch_break" ++ [(unspec_volatile:SI ++ [(match_operand 0 "const_uimm15_operand")] ++ UNSPECV_BREAK) ++ (clobber (mem:BLK (scratch)))] ++ "" ++ "break\t%0") + +-(define_insn "asrtle_d" +- [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") +- (match_operand:DI 1 "register_operand" "r")] +- UNSPEC_ASRTLE_D)] ++(define_insn "loongarch_asrtle_d" ++ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") ++ (match_operand:DI 1 "register_operand" "r")] ++ UNSPECV_ASRTLE_D)] + "TARGET_64BIT" + "asrtle.d\t%0,%1" +- [(set_attr 
"type" "load") +- (set_attr "mode" "DI")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "DI")]) + +-(define_insn "asrtgt_d" +- [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") +- (match_operand:DI 1 "register_operand" "r")] +- UNSPEC_ASRTGT_D)] ++(define_insn "loongarch_asrtgt_d" ++ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "r") ++ (match_operand:DI 1 "register_operand" "r")] ++ UNSPECV_ASRTGT_D)] + "TARGET_64BIT" + "asrtgt.d\t%0,%1" +- [(set_attr "type" "load") +- (set_attr "mode" "DI")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "DI")]) + +-(define_insn "
csrrd" ++(define_insn "loongarch_csrrd_" + [(set (match_operand:GPR 0 "register_operand" "=r") + (unspec_volatile:GPR [(match_operand 1 "const_uimm14_operand")] +- UNSPEC_CSRRD))] ++ UNSPECV_CSRRD)) ++ (clobber (mem:BLK (scratch)))] + "" + "csrrd\t%0,%1" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + +-(define_insn "
csrwr" ++(define_insn "loongarch_csrwr_" + [(set (match_operand:GPR 0 "register_operand" "=r") +- (unspec_volatile:GPR +- [(match_operand:GPR 1 "register_operand" "0") +- (match_operand 2 "const_uimm14_operand")] +- UNSPEC_CSRWR))] ++ (unspec_volatile:GPR ++ [(match_operand:GPR 1 "register_operand" "0") ++ (match_operand 2 "const_uimm14_operand")] ++ UNSPECV_CSRWR)) ++ (clobber (mem:BLK (scratch)))] + "" + "csrwr\t%0,%2" +- [(set_attr "type" "store") +- (set_attr "mode" "")]) ++ [(set_attr "type" "store") ++ (set_attr "mode" "")]) + +-(define_insn "
csrxchg" ++(define_insn "loongarch_csrxchg_" + [(set (match_operand:GPR 0 "register_operand" "=r") +- (unspec_volatile:GPR +- [(match_operand:GPR 1 "register_operand" "0") +- (match_operand:GPR 2 "register_operand" "q") +- (match_operand 3 "const_uimm14_operand")] +- UNSPEC_CSRXCHG))] ++ (unspec_volatile:GPR ++ [(match_operand:GPR 1 "register_operand" "0") ++ (match_operand:GPR 2 "register_operand" "q") ++ (match_operand 3 "const_uimm14_operand")] ++ UNSPECV_CSRXCHG)) ++ (clobber (mem:BLK (scratch)))] + "" + "csrxchg\t%0,%2,%3" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + +-(define_insn "iocsrrd_" ++(define_insn "loongarch_iocsrrd_" + [(set (match_operand:QHWD 0 "register_operand" "=r") +- (unspec_volatile:QHWD [(match_operand:SI 1 "register_operand" "r")] +- UNSPEC_IOCSRRD))] ++ (unspec_volatile:QHWD [(match_operand:SI 1 "register_operand" "r")] ++ UNSPECV_IOCSRRD)) ++ (clobber (mem:BLK (scratch)))] + "" + "iocsrrd.\t%0,%1" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + +-(define_insn "iocsrwr_" ++(define_insn "loongarch_iocsrwr_" + [(unspec_volatile:QHWD [(match_operand:QHWD 0 "register_operand" "r") +- (match_operand:SI 1 "register_operand" "r")] +- UNSPEC_IOCSRWR)] ++ (match_operand:SI 1 "register_operand" "r")] ++ UNSPECV_IOCSRWR) ++ (clobber (mem:BLK (scratch)))] + "" + "iocsrwr.\t%0,%1" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + +-(define_insn "
cacop" ++(define_insn "loongarch_cacop_" + [(unspec_volatile:X [(match_operand 0 "const_uimm5_operand") +- (match_operand:X 1 "register_operand" "r") +- (match_operand 2 "const_imm12_operand")] +- UNSPEC_CACOP)] ++ (match_operand:X 1 "register_operand" "r") ++ (match_operand 2 "const_imm12_operand")] ++ UNSPECV_CACOP) ++ (clobber (mem:BLK (scratch)))] + "" + "cacop\t%0,%1,%2" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + +-(define_insn "
lddir" ++(define_insn "loongarch_lddir_" + [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") +- (match_operand:X 1 "register_operand" "r") +- (match_operand 2 "const_uimm5_operand")] +- UNSPEC_LDDIR)] ++ (match_operand:X 1 "register_operand" "r") ++ (match_operand 2 "const_uimm5_operand")] ++ UNSPECV_LDDIR) ++ (clobber (mem:BLK (scratch)))] + "" + "lddir\t%0,%1,%2" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + +-(define_insn "
ldpte" ++(define_insn "loongarch_ldpte_" + [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r") + (match_operand 1 "const_uimm5_operand")] +- UNSPEC_LDPTE)] ++ UNSPECV_LDPTE) ++ (clobber (mem:BLK (scratch)))] + "" + "ldpte\t%0,%1" +- [(set_attr "type" "load") +- (set_attr "mode" "")]) ++ [(set_attr "type" "load") ++ (set_attr "mode" "")]) + + + ;; Block moves, see loongarch.c for more details. +-;; Argument 0 is the destination +-;; Argument 1 is the source +-;; Argument 2 is the length +-;; Argument 3 is the alignment ++;; Argument 0 is the destination. ++;; Argument 1 is the source. ++;; Argument 2 is the length. ++;; Argument 3 is the alignment. + + (define_expand "movmemsi" + [(parallel [(set (match_operand:BLK 0 "general_operand") +@@ -2740,30 +2515,19 @@ + ;; + ;; .................... + +-(define_expand "3" +- [(set (match_operand:GPR 0 "register_operand") +- (any_shift:GPR (match_operand:GPR 1 "register_operand") +- (match_operand:SI 2 "arith_operand")))] +- "" +-{ +-}) +- +-(define_insn "*3" ++(define_insn "3" + [(set (match_operand:GPR 0 "register_operand" "=r") + (any_shift:GPR (match_operand:GPR 1 "register_operand" "r") + (match_operand:SI 2 "arith_operand" "rI")))] + "" + { + if (CONST_INT_P (operands[2])) +- { + operands[2] = GEN_INT (INTVAL (operands[2]) + & (GET_MODE_BITSIZE (mode) - 1)); +- return "i.\t%0,%1,%2"; +- } else +- return ".\t%0,%1,%2"; ++ ++ return "%i2.\t%0,%1,%2"; + } + [(set_attr "type" "shift") +- (set_attr "compression" "none") + (set_attr "mode" "")]) + + (define_insn "*si3_extend" +@@ -2774,86 +2538,68 @@ + "TARGET_64BIT" + { + if (CONST_INT_P (operands[2])) +- { + operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); +- return "i.w\t%0,%1,%2"; +- } else +- return ".w\t%0,%1,%2"; ++ ++ return "%i2.w\t%0,%1,%2"; + } + [(set_attr "type" "shift") + (set_attr "mode" "SI")]) + +-(define_insn "zero_extend_ashift1" +- [ (set (match_operand:DI 0 "register_operand" "=r") +- (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) +- (match_operand 2 "const_immlsa_operand" "")) +- (match_operand 3 "shift_mask_operand" "")))] +-"" +-"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" +-[(set_attr "type" "arith") +- (set_attr "mode" "DI") +- (set_attr "insn_count" "2")]) +- +-(define_insn "zero_extend_ashift2" +- [ (set (match_operand:DI 0 "register_operand" "=r") +- (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") +- (match_operand 2 "const_immlsa_operand" "")) +- (match_operand 3 "shift_mask_operand" "")))] +-"" +-"bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" +-[(set_attr "type" "arith") +- (set_attr "mode" "DI") +- (set_attr "insn_count" "2")]) +- +-(define_insn "alsl_paired1" +- [(set (match_operand:DI 0 "register_operand" "=&r") +- (plus:DI (and:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0) +- (match_operand 2 "const_immlsa_operand" "")) +- (match_operand 3 "shift_mask_operand" "")) +- (match_operand:DI 4 "register_operand" "r")))] +- "" +- "bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,%4,%2" +- [(set_attr "type" "arith") +- (set_attr "mode" "DI") +- (set_attr "insn_count" "2")]) +- +-(define_insn "alsl_paired2" +- [(set (match_operand:DI 0 "register_operand" "=&r") +- (plus:DI (match_operand:DI 1 "register_operand" "r") +- (and:DI (ashift:DI (match_operand:DI 2 "register_operand" "r") +- (match_operand 3 "const_immlsa_operand" "")) +- (match_operand 4 "shift_mask_operand" ""))))] +- "" +- "bstrpick.d\t%0,%2,31,0\n\talsl.d\t%0,%0,%1,%3" +- [(set_attr "type" "arith") +- (set_attr "mode" "DI") +- 
(set_attr "insn_count" "2")]) +- +-(define_insn "alsl_" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (plus:GPR (ashift:GPR (match_operand:GPR 1 "register_operand" "r") +- (match_operand 2 "const_immlsa_operand" "")) +- (match_operand:GPR 3 "register_operand" "r")))] +- "ISA_HAS_LSA" +- "alsl.\t%0,%1,%3,%2" +- [(set_attr "type" "arith") +- (set_attr "mode" "")]) +- + (define_insn "rotr3" ++ [(set (match_operand:GPR 0 "register_operand" "=r,r") ++ (rotatert:GPR (match_operand:GPR 1 "register_operand" "r,r") ++ (match_operand:SI 2 "arith_operand" "r,I")))] ++ "" ++ "rotr%i2.\t%0,%1,%2" ++ [(set_attr "type" "shift,shift") ++ (set_attr "mode" "")]) ++ ++;; The following templates were added to generate "bstrpick.d + alsl.d" ++;; instruction pairs. ++;; It is required that the values of const_immalsl_operand and ++;; immediate_operand must have the following correspondence: ++;; ++;; (immediate_operand >> const_immalsl_operand) == 0xffffffff ++ ++(define_insn "zero_extend_ashift" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_immalsl_operand" "")) ++ (match_operand 3 "immediate_operand" "")))] ++ "TARGET_64BIT ++ && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)" ++ "bstrpick.d\t%0,%1,31,0\n\talsl.d\t%0,%0,$r0,%2" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "2")]) ++ ++(define_insn "bstrpick_alsl_paired" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ++ (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (and:DI (ashift:DI (match_operand:DI 2 "register_operand" "r") ++ (match_operand 3 "const_immalsl_operand" "")) ++ (match_operand 4 "immediate_operand" ""))))] ++ "TARGET_64BIT ++ && ((INTVAL (operands[4]) >> INTVAL (operands[3])) == 0xffffffff)" ++ "bstrpick.d\t%0,%2,31,0\n\talsl.d\t%0,%0,%1,%3" ++ [(set_attr "type" "arith") ++ (set_attr "mode" "DI") ++ (set_attr "insn_count" "2")]) ++ ++(define_insn "alsl3" + [(set (match_operand:GPR 0 "register_operand" "=r") +- (rotatert:GPR (match_operand:GPR 1 "register_operand" "r") +- (match_operand:SI 2 "arith_operand" "rI")))] ++ (plus:GPR (ashift:GPR (match_operand:GPR 1 "register_operand" "r") ++ (match_operand 2 "const_immalsl_operand" "")) ++ (match_operand:GPR 3 "register_operand" "r")))] + "" +-{ +- if (CONST_INT_P (operands[2])) +- { +- return "rotri.\t%0,%1,%2"; +- } else +- return "rotr.\t%0,%1,%2"; +-} +- [(set_attr "type" "shift") ++ "alsl.\t%0,%1,%3,%2" ++ [(set_attr "type" "arith") + (set_attr "mode" "")]) + ++ ++ ++;; Reverse the order of bytes of operand 1 and store the result in operand 0. 
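Before the byte-reverse patterns, here is a plausible C trigger for bstrpick_alsl_paired (an illustration, not from the patch): indexing with an unsigned 32-bit value scaled by a small power of two produces exactly the and-of-ashift form with the mask/shift correspondence documented above, so the zero-extension becomes the bstrpick.d and the scaled add becomes the alsl.d:

    /* base + ((unsigned long) idx << 3): the mask 0xffffffff << 3
       satisfies (mask >> shift) == 0xffffffff.  */
    long
    load_scaled (long *base, unsigned int idx)
    {
      return base[idx];
    }
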
++ + (define_insn "bswaphi2" + [(set (match_operand:HI 0 "register_operand" "=r") + (bswap:HI (match_operand:HI 1 "register_operand" "r")))] +@@ -2867,7 +2613,7 @@ + "" + "#" + "" +- [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_WSBH)) ++ [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_REVB_2H)) + (set (match_dup 0) (rotatert:SI (match_dup 0) (const_int 16)))] + "" + [(set_attr "insn_count" "2")]) +@@ -2878,28 +2624,28 @@ + "TARGET_64BIT" + "#" + "" +- [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_DSBH)) +- (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_DSHD))] ++ [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_REVB_4H)) ++ (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_REVH_D))] + "" + [(set_attr "insn_count" "2")]) + +-(define_insn "wsbh" ++(define_insn "revb_2h" + [(set (match_operand:SI 0 "register_operand" "=r") +- (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_WSBH))] ++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_REVB_2H))] + "" + "revb.2h\t%0,%1" + [(set_attr "type" "shift")]) + +-(define_insn "dsbh" ++(define_insn "revb_4h" + [(set (match_operand:DI 0 "register_operand" "=r") +- (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSBH))] ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_REVB_4H))] + "TARGET_64BIT" + "revb.4h\t%0,%1" + [(set_attr "type" "shift")]) + +-(define_insn "dshd" ++(define_insn "revh_d" + [(set (match_operand:DI 0 "register_operand" "=r") +- (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_DSHD))] ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_REVH_D))] + "TARGET_64BIT" + "revh.d\t%0,%1" + [(set_attr "type" "shift")]) +@@ -2911,37 +2657,37 @@ + ;; + ;; .................... + +-;; Conditional branches on floating-point equality tests. 
++;; Conditional branches + +-(define_insn "*branch_fp_fcc" ++(define_insn "*branch_fp_FCCmode" + [(set (pc) +- (if_then_else +- (match_operator 1 "equality_operator" +- [(match_operand:FCC 2 "register_operand" "z") +- (const_int 0)]) +- (label_ref (match_operand 0 "" "")) +- (pc)))] ++ (if_then_else ++ (match_operator 1 "equality_operator" ++ [(match_operand:FCC 2 "register_operand" "z") ++ (const_int 0)]) ++ (label_ref (match_operand 0 "" "")) ++ (pc)))] + "TARGET_HARD_FLOAT" + { + return loongarch_output_conditional_branch (insn, operands, +- LARCH_BRANCH ("b%F1", "%Z2%0"), +- LARCH_BRANCH ("b%W1", "%Z2%0")); ++ LARCH_BRANCH ("b%F1", "%Z2%0"), ++ LARCH_BRANCH ("b%W1", "%Z2%0")); + } + [(set_attr "type" "branch")]) + +-(define_insn "*branch_fp_inverted_fcc" ++(define_insn "*branch_fp_inverted_FCCmode" + [(set (pc) +- (if_then_else +- (match_operator 1 "equality_operator" +- [(match_operand:FCC 2 "register_operand" "z") +- (const_int 0)]) +- (pc) +- (label_ref (match_operand 0 "" ""))))] ++ (if_then_else ++ (match_operator 1 "equality_operator" ++ [(match_operand:FCC 2 "register_operand" "z") ++ (const_int 0)]) ++ (pc) ++ (label_ref (match_operand 0 "" ""))))] + "TARGET_HARD_FLOAT" + { + return loongarch_output_conditional_branch (insn, operands, +- LARCH_BRANCH ("b%W1", "%Z2%0"), +- LARCH_BRANCH ("b%F1", "%Z2%0")); ++ LARCH_BRANCH ("b%W1", "%Z2%0"), ++ LARCH_BRANCH ("b%F1", "%Z2%0")); + } + [(set_attr "type" "branch")]) + +@@ -2951,28 +2697,26 @@ + [(set (pc) + (if_then_else + (match_operator 1 "order_operator" +- [(match_operand:GPR 2 "register_operand" "r,r") +- (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) ++ [(match_operand:X 2 "register_operand" "r,r") ++ (match_operand:X 3 "reg_or_0_operand" "J,r")]) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + { return loongarch_output_order_conditional_branch (insn, operands, false); } + [(set_attr "type" "branch") +- (set_attr "compact_form" "maybe,always") + (set_attr "hazard" "forbidden_slot")]) + + (define_insn "*branch_order_inverted" + [(set (pc) + (if_then_else + (match_operator 1 "order_operator" +- [(match_operand:GPR 2 "register_operand" "r,r") +- (match_operand:GPR 3 "reg_or_0_operand" "J,r")]) ++ [(match_operand:X 2 "register_operand" "r,r") ++ (match_operand:X 3 "reg_or_0_operand" "J,r")]) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + { return loongarch_output_order_conditional_branch (insn, operands, true); } + [(set_attr "type" "branch") +- (set_attr "compact_form" "maybe,always") + (set_attr "hazard" "forbidden_slot")]) + + ;; Conditional branch on equality comparison. 
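Before the equality-branch patterns, note that the FCC branch insns above pair an fcmp comparison, which sets a condition-flag register, with a branch on that flag. Ordinary floating-point control flow in C is enough to exercise them (a sketch; the condition code chosen depends on the comparison and on -ffast-math settings):

    double
    clamp_positive (double x)
    {
      if (x < 0.0)   /* fcmp sets an FCC register; branch tests it */
        return 0.0;
      return x;
    }
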
+@@ -2981,14 +2725,13 @@ + [(set (pc) + (if_then_else + (match_operator 1 "equality_operator" +- [(match_operand:GPR 2 "register_operand" "r") +- (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) ++ [(match_operand:X 2 "register_operand" "r") ++ (match_operand:X 3 "reg_or_0_operand" "rJ")]) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + { return loongarch_output_equal_conditional_branch (insn, operands, false); } + [(set_attr "type" "branch") +- (set_attr "compact_form" "maybe") + (set_attr "hazard" "forbidden_slot")]) + + +@@ -2996,22 +2739,21 @@ + [(set (pc) + (if_then_else + (match_operator 1 "equality_operator" +- [(match_operand:GPR 2 "register_operand" "r") +- (match_operand:GPR 3 "reg_or_0_operand" "rJ")]) ++ [(match_operand:X 2 "register_operand" "r") ++ (match_operand:X 3 "reg_or_0_operand" "rJ")]) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + { return loongarch_output_equal_conditional_branch (insn, operands, true); } + [(set_attr "type" "branch") +- (set_attr "compact_form" "maybe") + (set_attr "hazard" "forbidden_slot")]) + + + (define_expand "cbranch4" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" +- [(match_operand:GPR 1 "register_operand") +- (match_operand:GPR 2 "nonmemory_operand")]) ++ [(match_operand:GPR 1 "register_operand") ++ (match_operand:GPR 2 "nonmemory_operand")]) + (label_ref (match_operand 3 "")) + (pc)))] + "" +@@ -3023,8 +2765,8 @@ + (define_expand "cbranch4" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" +- [(match_operand:SCALARF 1 "register_operand") +- (match_operand:SCALARF 2 "register_operand")]) ++ [(match_operand:ANYF 1 "register_operand") ++ (match_operand:ANYF 2 "register_operand")]) + (label_ref (match_operand 3 "")) + (pc)))] + "" +@@ -3062,71 +2804,63 @@ + DONE; + }) + +-(define_insn "*seq_zero_" +- [(set (match_operand:GPR2 0 "register_operand" "=r") +- (eq:GPR2 (match_operand:GPR 1 "register_operand" "r") ++(define_insn "*seq_zero_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (eq:GPR (match_operand:X 1 "register_operand" "r") + (const_int 0)))] + "" + "sltui\t%0,%1,1" + [(set_attr "type" "slt") +- (set_attr "mode" "")]) ++ (set_attr "mode" "")]) + + +-(define_insn "*sne_zero_" +- [(set (match_operand:GPR2 0 "register_operand" "=r") +- (ne:GPR2 (match_operand:GPR 1 "register_operand" "r") ++(define_insn "*sne_zero_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (ne:GPR (match_operand:X 1 "register_operand" "r") + (const_int 0)))] + "" + "sltu\t%0,%.,%1" + [(set_attr "type" "slt") +- (set_attr "mode" "")]) ++ (set_attr "mode" "")]) + +-(define_insn "*sgt_" +- [(set (match_operand:GPR2 0 "register_operand" "=r") +- (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r") +- (match_operand:GPR 2 "reg_or_0_operand" "rJ")))] ++(define_insn "*sgt_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (any_gt:GPR (match_operand:X 1 "register_operand" "r") ++ (match_operand:X 2 "reg_or_0_operand" "rJ")))] + "" + "slt\t%0,%z2,%1" + [(set_attr "type" "slt") +- (set_attr "mode" "")]) ++ (set_attr "mode" "")]) + +- +-(define_insn "*sge_" +- [(set (match_operand:GPR2 0 "register_operand" "=r") +- (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r") ++(define_insn "*sge_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (any_ge:GPR (match_operand:X 1 "register_operand" "r") + (const_int 1)))] + "" + "slti\t%0,%.,%1" + [(set_attr "type" "slt") +- (set_attr "mode" "")]) ++ (set_attr "mode" "")]) + +-(define_insn "*slt_" +- [(set (match_operand:GPR2 0 
"register_operand" "=r") +- (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r") +- (match_operand:GPR 2 "arith_operand" "rI")))] ++(define_insn "*slt_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (any_lt:GPR (match_operand:X 1 "register_operand" "r") ++ (match_operand:X 2 "arith_operand" "rI")))] + "" +-{ +- if (CONST_INT_P (operands[2])) +- { +- return "slti\t%0,%1,%2"; +- } else +- return "slt\t%0,%1,%2"; +-} ++ "slt%i2\t%0,%1,%2"; + [(set_attr "type" "slt") +- (set_attr "mode" "")]) ++ (set_attr "mode" "")]) + +- +-(define_insn "*sle_" +- [(set (match_operand:GPR2 0 "register_operand" "=r") +- (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r") +- (match_operand:GPR 2 "sle_operand" "")))] ++(define_insn "*sle_" ++ [(set (match_operand:GPR 0 "register_operand" "=r") ++ (any_le:GPR (match_operand:X 1 "register_operand" "r") ++ (match_operand:X 2 "sle_operand" "")))] + "" + { + operands[2] = GEN_INT (INTVAL (operands[2]) + 1); + return "slti\t%0,%1,%2"; + } + [(set_attr "type" "slt") +- (set_attr "mode" "")]) ++ (set_attr "mode" "")]) + + + ;; +@@ -3136,23 +2870,15 @@ + ;; + ;; .................... + +-(define_insn "s__using_fcc" ++(define_insn "s__using_FCCmode" + [(set (match_operand:FCC 0 "register_operand" "=z") +- (fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") +- (match_operand:SCALARF 2 "register_operand" "f")))] ++ (fcond:FCC (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] + "" + "fcmp..\t%Z0%1,%2" + [(set_attr "type" "fcmp") + (set_attr "mode" "FCC")]) + +-(define_insn "s__using_fcc" +- [(set (match_operand:FCC 0 "register_operand" "=z") +- (swapped_fcond:FCC (match_operand:SCALARF 1 "register_operand" "f") +- (match_operand:SCALARF 2 "register_operand" "f")))] +- "" +- "fcmp..\t%Z0%2,%1" +- [(set_attr "type" "fcmp") +- (set_attr "mode" "FCC")]) + + ;; + ;; .................... +@@ -3170,24 +2896,20 @@ + (define_insn "*jump_absolute" + [(set (pc) + (label_ref (match_operand 0)))] +- "TARGET_ABSOLUTE_JUMPS" ++ "!flag_pic" + { +- return LARCH_ABSOLUTE_JUMP ("b\t%l0"); ++ return "b\t%l0"; + } +- [(set_attr "type" "branch") +- (set_attr "compact_form" "maybe")]) ++ [(set_attr "type" "branch")]) + + (define_insn "*jump_pic" + [(set (pc) + (label_ref (match_operand 0)))] +- "!TARGET_ABSOLUTE_JUMPS" ++ "flag_pic" + { + return "b\t%0"; + } +- [(set_attr "type" "branch") +- (set_attr "compact_form" "maybe")]) +- +- ++ [(set_attr "type" "branch")]) + + (define_expand "indirect_jump" + [(set (pc) (match_operand 0 "register_operand"))] +@@ -3198,12 +2920,10 @@ + DONE; + }) + +-(define_insn "indirect_jump_" ++(define_insn "indirect_jump" + [(set (pc) (match_operand:P 0 "register_operand" "r"))] + "" +- { +- return "jr\t%0"; +- } ++ "jr\t%0" + [(set_attr "type" "jump") + (set_attr "mode" "none")]) + +@@ -3214,25 +2934,25 @@ + "" + { + if (flag_pic) +- operands[0] = expand_simple_binop (Pmode, PLUS, operands[0], +- gen_rtx_LABEL_REF (Pmode, operands[1]), +- NULL_RTX, 0, OPTAB_DIRECT); ++ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0], ++ gen_rtx_LABEL_REF (Pmode, ++ operands[1]), ++ NULL_RTX, 0, OPTAB_DIRECT); + emit_jump_insn (PMODE_INSN (gen_tablejump, (operands[0], operands[1]))); + DONE; + }) + +-(define_insn "tablejump_" ++(define_insn "tablejump" + [(set (pc) + (match_operand:P 0 "register_operand" "r")) + (use (label_ref (match_operand 1 "" "")))] + "" +- { +- return "jr\t%0"; +- } ++ "jr\t%0" + [(set_attr "type" "jump") + (set_attr "mode" "none")]) + + ++ + ;; + ;; .................... 
+ ;; +@@ -3254,22 +2974,25 @@ + ;; saved or used to pass arguments. + + (define_insn "blockage" +- [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)] ++ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)] + "" + "" + [(set_attr "type" "ghost") + (set_attr "mode" "none")]) + +-(define_insn "probe_stack_range_" ++(define_insn "probe_stack_range" + [(set (match_operand:P 0 "register_operand" "=r") + (unspec_volatile:P [(match_operand:P 1 "register_operand" "0") + (match_operand:P 2 "register_operand" "r") +- (match_operand:P 3 "register_operand" "r")] +- UNSPEC_PROBE_STACK_RANGE))] ++ (match_operand:P 3 "register_operand" "r")] ++ UNSPECV_PROBE_STACK_RANGE))] + "" +- { return loongarch_output_probe_stack_range (operands[0], operands[2], operands[3]); } ++{ ++ return loongarch_output_probe_stack_range (operands[0], ++ operands[2], ++ operands[3]); ++} + [(set_attr "type" "unknown") +- (set_attr "can_delay" "no") + (set_attr "mode" "")]) + + (define_expand "epilogue" +@@ -3304,12 +3027,12 @@ + (define_insn "*" + [(any_return)] + "" +- { +- operands[0] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); +- return "jr\t%0"; +- } +- [(set_attr "type" "jump") +- (set_attr "mode" "none")]) ++{ ++ operands[0] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM); ++ return "jr\t%0"; ++} ++ [(set_attr "type" "jump") ++ (set_attr "mode" "none")]) + + ;; Normal return. + +@@ -3317,46 +3040,18 @@ + [(any_return) + (use (match_operand 0 "pmode_register_operand" ""))] + "" +- { +- return "jr\t%0"; +- } +- [(set_attr "type" "jump") +- (set_attr "mode" "none")]) +- +-;; Exception return. +-(define_insn "loongarch_ertn" +- [(return) +- (unspec_volatile [(const_int 0)] UNSPEC_ERTN)] +- "" +- "ertn" +- [(set_attr "type" "trap") +- (set_attr "mode" "none")]) +- +-;; Disable interrupts. +-(define_insn "loongarch_di" +- [(unspec_volatile [(const_int 0)] UNSPEC_DI)] +- "" +- "di" +- [(set_attr "type" "trap") +- (set_attr "mode" "none")]) +- +-;; Execution hazard barrier. +-(define_insn "loongarch_ehb" +- [(unspec_volatile [(const_int 0)] UNSPEC_EHB)] +- "" +- "ehb" +- [(set_attr "type" "trap") +- (set_attr "mode" "none")]) ++ "jr\t%0" ++ [(set_attr "type" "jump") ++ (set_attr "mode" "none")]) + +-;; Read GPR from previous shadow register set. +-(define_insn "loongarch_rdpgpr_" +- [(set (match_operand:P 0 "register_operand" "=r") +- (unspec_volatile:P [(match_operand:P 1 "register_operand" "r")] +- UNSPEC_RDPGPR))] ++;; Exception return. ++(define_insn "loongarch_ertn" ++ [(return) ++ (unspec_volatile [(const_int 0)] UNSPECV_ERTN)] + "" +- "rdpgpr\t%0,%1" +- [(set_attr "type" "move") +- (set_attr "mode" "")]) ++ "ertn" ++ [(set_attr "type" "trap") ++ (set_attr "mode" "none")]) + + ;; This is used in compiling the unwind routines. + (define_expand "eh_return" +@@ -3366,22 +3061,22 @@ + if (GET_MODE (operands[0]) != word_mode) + operands[0] = convert_to_mode (word_mode, operands[0], 0); + if (TARGET_64BIT) +- emit_insn (gen_eh_set_lr_di (operands[0])); ++ emit_insn (gen_eh_set_ra_di (operands[0])); + else +- emit_insn (gen_eh_set_lr_si (operands[0])); ++ emit_insn (gen_eh_set_ra_si (operands[0])); + DONE; + }) + + ;; Clobber the return address on the stack. We can't expand this + ;; until we know where it will be put in the stack frame. + +-(define_insn "eh_set_lr_si" ++(define_insn "eh_set_ra_si" + [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN) + (clobber (match_scratch:SI 1 "=&r"))] + "! 
TARGET_64BIT" + "#") + +-(define_insn "eh_set_lr_di" ++(define_insn "eh_set_ra_di" + [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN) + (clobber (match_scratch:DI 1 "=&r"))] + "TARGET_64BIT" +@@ -3406,23 +3101,14 @@ + ;; + ;; .................... + +- + ;; Sibling calls. All these patterns use jump instructions. + +-;; If TARGET_SIBCALLS, call_insn_operand will only accept constant +-;; addresses if a direct jump is acceptable. Since the 'S' constraint +-;; is defined in terms of call_insn_operand, the same is true of the +-;; constraints. +- +-;; When we use an indirect jump, we need a register that will be +-;; preserved by the epilogue. +- + (define_expand "sibcall" + [(parallel [(call (match_operand 0 "") + (match_operand 1 "")) + (use (match_operand 2 "")) ;; next_arg_reg + (use (match_operand 3 ""))])] ;; struct_value_size_rtx +- "TARGET_SIBCALLS" ++ "" + { + rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0)); + +@@ -3433,172 +3119,170 @@ + (define_insn "sibcall_internal" + [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,a,t,h")) + (match_operand 1 "" ""))] +- "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" ++ "SIBLING_CALL_P (insn)" + { + switch (which_alternative) + { + case 0: + return "jr\t%0"; + case 1: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" +- "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t" ++ "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.local\t$r12,$r13,%0\n\tjr\t$r12"; + else +- return "b\t%0"; ++ return "b\t%0"; + case 2: +- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) +- return "b\t%0"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; ++ if (TARGET_CMODEL_TINY_STATIC) ++ return "b\t%0"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; + else +- return "la.global\t$r12,%0\n\tjr\t$r12"; ++ return "la.global\t$r12,%0\n\tjr\t$r12"; + case 3: +- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; ++ if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%0\n\tjr\t$r12"; + else +- return "la.global\t$r12,%0\n\tjr\t$r12"; ++ return "la.global\t$r12,%0\n\tjr\t$r12"; + case 4: +- if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) +- return "b\t%%plt(%0)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; ++ if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) ++ return "b\t%%plt(%0)"; ++ else if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; + else +- sorry ("cmodel extreme and tiny static not support plt."); ++ /* Code model "extreme" and "tiny-static" do not support plt. 
*/ ++ gcc_unreachable (); + default: + gcc_unreachable (); + } + } +- [(set_attr "jal" "indirect,direct,direct,direct,direct")]) ++ [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) + + (define_expand "sibcall_value" + [(parallel [(set (match_operand 0 "") + (call (match_operand 1 "") + (match_operand 2 ""))) + (use (match_operand 3 ""))])] ;; next_arg_reg +- "TARGET_SIBCALLS" ++ "" + { + rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); + +- /* Handle return values created by loongarch_return_fpr_pair. */ ++ /* Handle return values created by loongarch_pass_fpr_pair. */ + if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) + { +- emit_call_insn (gen_sibcall_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), +- target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); ++ rtx arg1 = XEXP (XVECEXP (operands[0],0, 0), 0); ++ rtx arg2 = XEXP (XVECEXP (operands[0],0, 1), 0); ++ ++ emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target, ++ operands[2], ++ arg2)); + } + else + { +- /* Handle return values created by loongarch_return_fpr_single. */ ++ /* Handle return values created by loongarch_return_fpr_single. */ + if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) +- operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); +- +- emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2])); ++ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); ++ ++ emit_call_insn (gen_sibcall_value_internal (operands[0], target, ++ operands[2])); + } + DONE; + }) + + (define_insn "sibcall_value_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) +- (match_operand 2 "" "")))] +- "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" ++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) ++ (match_operand 2 "" "")))] ++ "SIBLING_CALL_P (insn)" + { + switch (which_alternative) + { + case 0: + return "jr\t%1"; + case 1: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.local\t$r12,$r13,%1\n\t" +- "jr\t$r12"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.local\t$r12,$r13,%1\n\tjr\t$r12"; + else +- return "b\t%1"; ++ return "b\t%1"; + case 2: +- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) +- return "b\t%1"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\t" +- "jr\t$r12"; ++ if (TARGET_CMODEL_TINY_STATIC) ++ return "b\t%1"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; + else +- return "la.global\t$r12,%1\n\t" +- "jr\t$r12"; ++ return "la.global\t$r12,%1\n\tjr\t$r12"; + case 3: +- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\t" +- "jr\t$r12"; ++ if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; + else +- return "la.global\t$r12,%1\n\t" +- "jr\t$r12"; ++ return "la.global\t$r12,%1\n\tjr\t$r12"; + case 4: +- if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) +- return " b\t%%plt(%1)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return 
"pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) ++ return " b\t%%plt(%1)"; ++ else if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; + else +- sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ /* Code model "extreme" and "tiny-static" do not support plt. */ ++ gcc_unreachable (); + default: + gcc_unreachable (); + } + } +- [(set_attr "jal" "indirect,direct,direct,direct,direct")]) ++ [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) + + (define_insn "sibcall_value_multiple_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) +- (match_operand 2 "" ""))) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h")) ++ (match_operand 2 "" ""))) + (set (match_operand 3 "register_operand" "") + (call (mem:SI (match_dup 1)) + (match_dup 2)))] +- "TARGET_SIBCALLS && SIBLING_CALL_P (insn)" ++ "SIBLING_CALL_P (insn)" + { + switch (which_alternative) + { + case 0: + return "jr\t%1"; + case 1: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.local\t$r12,$r13,%1\n\t" +- "jr\t$r12"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.local\t$r12,$r13,%1\n\tjr\t$r12"; + else +- return "b\t%1"; ++ return "b\t%1"; + case 2: +- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) +- return "b\t%1"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\t" +- "jr\t$r12"; ++ if (TARGET_CMODEL_TINY_STATIC) ++ return "b\t%1"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; + else +- return "la.global\t$r12,%1\n\t" +- "jr\t$r12"; ++ return "la.global\t$r12,%1\n\tjr\t$r12"; + case 3: +- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r12,$r13,%1\n\t" +- "jr\t$r12"; ++ if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r12,$r13,%1\n\tjr\t$r12"; + else +- return "la.global\t$r12,%1\n\t" +- "jr\t$r12"; ++ return "la.global\t$r12,%1\n\tjr\t$r12"; + case 4: +- if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) +- return "b\t%%plt(%1)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) ++ return "b\t%%plt(%1)"; ++ else if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; + else +- sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ /* Code model "extreme" and "tiny-static" do not support plt. 
*/ ++ gcc_unreachable (); + default: + gcc_unreachable (); + } + } +- [(set_attr "jal" "indirect,direct,direct,direct,direct")]) ++ [(set_attr "jirl" "indirect,direct,direct,direct,direct")]) + + (define_expand "call" + [(parallel [(call (match_operand 0 "") +@@ -3612,22 +3296,6 @@ + emit_call_insn (gen_call_internal (target, operands[1])); + DONE; + }) +-;; In the last case, we can generate the individual instructions with +-;; a define_split. There are several things to be wary of: +-;; +-;; - We can't expose the load of $gp before reload. If we did, +-;; it might get removed as dead, but reload can introduce new +-;; uses of $gp by rematerializing constants. +-;; +-;; - We shouldn't restore $gp after calls that never return. +-;; It isn't valid to insert instructions between a noreturn +-;; call and the following barrier. +-;; +-;; - The splitter deliberately changes the liveness of $gp. The unsplit +-;; instruction preserves $gp and so have no effect on its liveness. +-;; But once we generate the separate insns, it becomes obvious that +-;; $gp is not live on entry to the call. +-;; + + (define_insn "call_internal" + [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,a,t,h")) +@@ -3640,46 +3308,41 @@ + case 0: + return "jirl\t$r1,%0,0"; + case 1: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.local\t$r1,$r12,%0\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.local\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; + else +- return "bl\t%0"; ++ return "bl\t%0"; + case 2: +- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) +- return "bl\t%0"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%0\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_TINY_STATIC) ++ return "bl\t%0"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; + else +- return "la.global\t$r1,%0\n\t" +- "jirl\t$r1,$r1,0"; ++ return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0"; + case 3: +- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%0\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0"; + else +- return "la.global\t$r1,%0\n\t" +- "jirl\t$r1,$r1,0"; ++ return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0"; + case 4: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) +- return "bl\t%%plt(%0)"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)"; ++ else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) ++ return "bl\t%%plt(%0)"; + else +- sorry ("cmodel extreme and tiny-static not support plt."); ++ /* Code model "extreme" and "tiny-static" do not support plt. 
*/ ++ gcc_unreachable (); + default: + gcc_unreachable (); + } + } +- [(set_attr "jal" "indirect,direct,direct,direct,direct") ++ [(set_attr "jirl" "indirect,direct,direct,direct,direct") + (set_attr "insn_count" "1,2,3,3,2")]) + +- + (define_expand "call_value" + [(parallel [(set (match_operand 0 "") + (call (match_operand 1 "") +@@ -3688,26 +3351,31 @@ + "" + { + rtx target = loongarch_legitimize_call_address (XEXP (operands[1], 0)); +- /* Handle return values created by loongarch_return_fpr_pair. */ ++ /* Handle return values created by loongarch_pass_fpr_pair. */ + if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 2) +- emit_call_insn (gen_call_value_multiple_internal (XEXP (XVECEXP (operands[0], 0, 0), 0), +- target, operands[2], XEXP (XVECEXP (operands[0], 0, 1), 0))); ++ { ++ rtx arg1 = XEXP (XVECEXP (operands[0], 0, 0), 0); ++ rtx arg2 = XEXP (XVECEXP (operands[0], 0, 1), 0); ++ ++ emit_call_insn (gen_call_value_multiple_internal (arg1, target, ++ operands[2], arg2)); ++ } + else + { +- /* Handle return values created by loongarch_return_fpr_single. */ ++ /* Handle return values created by loongarch_return_fpr_single. */ + if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1) +- operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); +- +- emit_call_insn (gen_call_value_internal (operands[0], target, operands[2])); ++ operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0); ++ ++ emit_call_insn (gen_call_value_internal (operands[0], target, ++ operands[2])); + } + DONE; + }) + +-;; See comment for call_internal. + (define_insn "call_value_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) +- (match_operand 2 "" ""))) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) ++ (match_operand 2 "" ""))) + (clobber (reg:SI RETURN_ADDR_REGNUM))] + "" + { +@@ -3716,50 +3384,45 @@ + case 0: + return "jirl\t$r1,%1,0"; + case 1: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.local\t$r1,$r12,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; + else +- return "bl\t%1"; ++ return "bl\t%1"; + case 2: +- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) +- return "bl\t%1"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_TINY_STATIC) ++ return "bl\t%1"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; + else +- return "la.global\t$r1,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; + case 3: +- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; + else +- return "la.global\t$r1,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; + case 4: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; +- else if (loongarch_cmodel_var 
== LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) +- return "bl\t%%plt(%1)"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) ++ return "bl\t%%plt(%1)"; + else +- sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ /* Code model "extreme" and "tiny-static" do not support plt. */ ++ gcc_unreachable (); + default: + gcc_unreachable (); + } + } +- [(set_attr "jal" "indirect,direct,direct,direct,direct") ++ [(set_attr "jirl" "indirect,direct,direct,direct,direct") + (set_attr "insn_count" "1,2,3,3,2")]) + +-;; See comment for call_internal. + (define_insn "call_value_multiple_internal" + [(set (match_operand 0 "register_operand" "") +- (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) +- (match_operand 2 "" ""))) ++ (call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h")) ++ (match_operand 2 "" ""))) + (set (match_operand 3 "register_operand" "") + (call (mem:SI (match_dup 1)) + (match_dup 2))) +@@ -3771,48 +3434,43 @@ + case 0: + return "jirl\t$r1,%1,0"; + case 1: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.local\t$r1,$r12,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; + else +- return "bl\t%1"; ++ return "bl\t%1"; + case 2: +- if (loongarch_cmodel_var == LARCH_CMODEL_TINY_STATIC) +- return "bl\t%1"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\t" +- "jirl\t$r1,$r1,0 "; ++ if (TARGET_CMODEL_TINY_STATIC) ++ return "bl\t%1"; ++ else if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0 "; + else +- return "la.global\t$r1,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; + case 3: +- if (loongarch_cmodel_var == LARCH_CMODEL_EXTREME) +- return "la.global\t$r1,$r12,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ if (TARGET_CMODEL_EXTREME) ++ return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0"; + else +- return "la.global\t$r1,%1\n\t" +- "jirl\t$r1,$r1,0"; ++ return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0"; + case 4: +- if (loongarch_cmodel_var == LARCH_CMODEL_LARGE) +- return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" +- "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; +- else if (loongarch_cmodel_var == LARCH_CMODEL_NORMAL || loongarch_cmodel_var == LARCH_CMODEL_TINY) +- return "bl\t%%plt(%1)"; ++ if (TARGET_CMODEL_LARGE) ++ return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t" ++ "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)"; ++ else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY) ++ return "bl\t%%plt(%1)"; + else +- sorry ("loongarch cmodel extreme and tiny-static not support plt."); ++ /* Code model "extreme" and "tiny-static" do not support plt. */ ++ gcc_unreachable (); + default: + gcc_unreachable (); + } + } +- [(set_attr "jal" "indirect,direct,direct,direct,direct") ++ [(set_attr "jirl" "indirect,direct,direct,direct,direct") + (set_attr "insn_count" "1,2,3,3,2")]) + + + ;; Call subroutine returning any type. 
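The code-model branches in the call patterns above are easy to observe from C: an external call compiled under the normal model is a single bl through the PLT, while the large model forces the two-instruction pcaddu18i/jirl pair (a sketch; the option spelling is assumed to follow this port's -mcmodel= convention):

    extern int f (int);

    int
    call_f (int x)
    {
      return f (x) + 1;   /* bl %plt(f) vs. pcaddu18i + jirl */
    }
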
+- + (define_expand "untyped_call" + [(parallel [(call (match_operand 0 "") + (const_int 0)) +@@ -3842,105 +3500,109 @@ + ;; .................... + ;; + ++(define_insn "prefetch" ++ [(prefetch (match_operand 0 "address_operand" "p") ++ (match_operand 1 "const_int_operand" "n") ++ (match_operand 2 "const_int_operand" "n"))] ++ "" ++ { ++ operands[1] = loongarch_prefetch_cookie (operands[1], operands[2]); ++ return "preld\t%1,%a0"; ++ } ++ [(set_attr "type" "prefetch")]) + + (define_insn "*prefetch_indexed_" +- [(prefetch (plus:P (match_operand:P 0 "register_operand" "r") +- (match_operand:P 1 "register_operand" "r")) +- (match_operand 2 "const_int_operand" "n") +- (match_operand 3 "const_int_operand" "n"))] +- "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT" +-{ +- operands[2] = loongarch_prefetch_cookie (operands[2], operands[3]); +- return "prefx\t%2,%1(%0)"; +-} ++ [(prefetch (plus:P (match_operand 0 "register_operand" "r") ++ (match_operand 1 "register_operand" "r")) ++ (match_operand 2 "const_int_operand" "n") ++ (match_operand 3 "const_int_operand" "n"))] ++ "" ++ { ++ operands[2] = loongarch_prefetch_cookie (operands[2], operands[3]); ++ return "preldx\t%2,%1,%0"; ++ } + [(set_attr "type" "prefetchx")]) + + (define_insn "nop" + [(const_int 0)] + "" + "nop" +- [(set_attr "type" "nop") +- (set_attr "mode" "none")]) +- +-;; Like nop, but commented out when outside a .set noreorder block. +-(define_insn "hazard_nop" +- [(const_int 1)] +- "" +- { +- return "#nop"; +- } +- [(set_attr "type" "nop")]) ++ [(set_attr "type" "nop") ++ (set_attr "mode" "none")]) + +-;; The `.insn' pseudo-op. +-(define_insn "insn_pseudo" +- [(unspec_volatile [(const_int 0)] UNSPEC_INSN_PSEUDO)] +- "" +- ".insn" +- [(set_attr "mode" "none") +- (set_attr "insn_count" "0")]) +- +-;; Conditional move instructions. ++;; __builtin_loongarch_movfcsr2gr: move the FCSR into operand 0. ++(define_insn "loongarch_movfcsr2gr" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand")] ++ UNSPECV_MOVFCSR2GR))] ++ "TARGET_HARD_FLOAT" ++ "movfcsr2gr\t%0,$r%1") + +-(define_insn "*sel_using_" +- [(set (match_operand:GPR 0 "register_operand" "=r,r") +- (if_then_else:GPR +- (equality_op:GPR2 (match_operand:GPR2 1 "register_operand" "r,r") +- (const_int 0)) +- (match_operand:GPR 2 "reg_or_0_operand" "r,J") +- (match_operand:GPR 3 "reg_or_0_operand" "J,r")))] +- "register_operand (operands[2], mode) +- != register_operand (operands[3], mode)" +- "@ +- \t%0,%2,%1 +- \t%0,%3,%1" +- [(set_attr "type" "condmove") +- (set_attr "mode" "")]) ++;; __builtin_loongarch_movgr2fcsr: move operand 0 into the FCSR. ++(define_insn "loongarch_movgr2fcsr" ++ [(unspec_volatile [(match_operand 0 "const_uimm5_operand") ++ (match_operand:SI 1 "register_operand" "r")] ++ UNSPECV_MOVGR2FCSR)] ++ "TARGET_HARD_FLOAT" ++ "movgr2fcsr\t$r%0,%1") + +-;; sel.fmt copies the 3rd argument when the 1st is non-zero and the 2nd +-;; argument if the 1st is zero. This means operand 2 and 3 are +-;; inverted in the instruction. 
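The prefetch pattern introduced in this hunk is reachable from portable C through __builtin_prefetch; its rw/locality arguments are folded by loongarch_prefetch_cookie into the preld hint field (hint encodings are target-defined, so the mapping shown is illustrative only):

    void
    warm_cache (const char *p)
    {
      /* Read prefetch, high temporal locality -> preld with a read hint.  */
      __builtin_prefetch (p, 0, 3);
    }
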
++(define_insn "fclass_" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_FCLASS))] ++ "TARGET_HARD_FLOAT" ++ "fclass.\t%0,%1" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "")]) + +-(define_insn "*sel" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (if_then_else:SCALARF +- (ne:FCC (match_operand:FCC 1 "register_operand" "z") +- (const_int 0)) +- (match_operand:SCALARF 2 "reg_or_0_operand" "f") +- (match_operand:SCALARF 3 "reg_or_0_operand" "f")))] ++(define_insn "bytepick_w" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:SI 1 "register_operand" "r") ++ (match_operand:SI 2 "register_operand" "r") ++ (match_operand:SI 3 "const_0_to_3_operand" "n")] ++ UNSPEC_BYTEPICK_W))] + "" +- "fsel\t%0,%3,%2,%1" +- [(set_attr "type" "condmove") +- (set_attr "mode" "")]) ++ "bytepick.w\t%0,%1,%2,%z3" ++ [(set_attr "mode" "SI")]) + +-;; These are the main define_expand's used to make conditional moves. ++(define_insn "bytepick_d" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "register_operand" "r") ++ (match_operand:DI 3 "const_0_to_7_operand" "n")] ++ UNSPEC_BYTEPICK_D))] ++ "" ++ "bytepick.d\t%0,%1,%2,%z3" ++ [(set_attr "mode" "DI")]) + +-(define_expand "movcc" +- [(set (match_operand:GPR 0 "register_operand") +- (if_then_else:GPR (match_operator 1 "comparison_operator" +- [(match_operand:GPR 2 "reg_or_0_operand") +- (match_operand:GPR 3 "reg_or_0_operand")])))] +- "TARGET_COND_MOVE_INT" +-{ +- if (!INTEGRAL_MODE_P (GET_MODE (XEXP (operands[1], 0)))) +- FAIL; ++(define_insn "bitrev_4b" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] ++ UNSPEC_BITREV_4B))] ++ "" ++ "bitrev.4b\t%0,%1" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "SI")]) + +- loongarch_expand_conditional_move (operands); +- DONE; +-}) ++(define_insn "bitrev_8b" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r")] ++ UNSPEC_BITREV_8B))] ++ "" ++ "bitrev.8b\t%0,%1" ++ [(set_attr "type" "unknown") ++ (set_attr "mode" "DI")]) + +-(define_expand "movcc" +- [(set (match_operand:SCALARF 0 "register_operand") +- (if_then_else:SCALARF (match_operator 1 "comparison_operator" +- [(match_operand:SCALARF 2 "reg_or_0_operand") +- (match_operand:SCALARF 3 "reg_or_0_operand")])))] +- "TARGET_COND_MOVE_FLOAT" +-{ +- if (!FLOAT_MODE_P (GET_MODE (XEXP (operands[1], 0)))) +- FAIL; ++(define_insn "stack_tie" ++ [(set (mem:BLK (scratch)) ++ (unspec:BLK [(match_operand:X 0 "register_operand" "r") ++ (match_operand:X 1 "register_operand" "r")] ++ UNSPEC_TIE))] ++ "" ++ "" ++ [(set_attr "length" "0") ++ (set_attr "type" "ghost")]) + +- loongarch_expand_conditional_move (operands); +- DONE; +-}) + + (define_split + [(match_operand 0 "small_data_pattern")] +@@ -3948,97 +3610,30 @@ + [(match_dup 0)] + { operands[0] = loongarch_rewrite_small_data (operands[0]); }) + +-;; Thread-Local Storage +- +-(define_insn "got_load_tls_gd" +- [(set (match_operand:P 0 "register_operand" "=r") +- (unspec:P +- [(match_operand:P 1 "symbolic_operand" "")] +- UNSPEC_TLS_GD))] +- "" +- "la.tls.gd\t%0,%1" +- [(set_attr "got" "load") +- (set_attr "mode" "")]) +- +-(define_insn "got_load_tls_ld" +- [(set (match_operand:P 0 "register_operand" "=r") +- (unspec:P +- [(match_operand:P 1 "symbolic_operand" "")] +- UNSPEC_TLS_LD))] +- "" +- 
"la.tls.ld\t%0,%1" +- [(set_attr "got" "load") +- (set_attr "mode" "")]) +- +-(define_insn "got_load_tls_le" +- [(set (match_operand:P 0 "register_operand" "=r") +- (unspec:P +- [(match_operand:P 1 "symbolic_operand" "")] +- UNSPEC_TLS_LE))] +- "" +- "la.tls.le\t%0,%1" +- [(set_attr "got" "load") +- (set_attr "mode" "")]) +- +-(define_insn "got_load_tls_ie" +- [(set (match_operand:P 0 "register_operand" "=r") +- (unspec:P +- [(match_operand:P 1 "symbolic_operand" "")] +- UNSPEC_TLS_IE))] +- "" +- "la.tls.ie\t%0,%1" +- [(set_attr "got" "load") +- (set_attr "mode" "")]) +- +-(define_insn "loongarch_movfcsr2gr" +- [(set (match_operand:SI 0 "register_operand" "=r") +- (unspec_volatile:SI [(match_operand 1 "const_uimm5_operand")] UNSPEC_MOVFCSR2GR))] +- "TARGET_HARD_FLOAT" +- "movfcsr2gr\t%0,$r%1") +- +-(define_insn "loongarch_movgr2fcsr" +- [(unspec_volatile [(match_operand 0 "const_uimm5_operand") +- (match_operand:SI 1 "register_operand" "r")] +- UNSPEC_MOVGR2FCSR)] +- "TARGET_HARD_FLOAT" +- "movgr2fcsr\t$r%0,%1") +- + + ;; Match paired HI/SI/SF/DFmode load/stores. + (define_insn "*join2_load_store" +- [(set (match_operand:JOIN_MODE 0 "nonimmediate_operand" "=r,f,m,m,r,ZC") ++ [(set (match_operand:JOIN_MODE 0 "nonimmediate_operand" ++ "=&r,f,m,m,&r,ZC") + (match_operand:JOIN_MODE 1 "nonimmediate_operand" "m,m,r,f,ZC,r")) +- (set (match_operand:JOIN_MODE 2 "nonimmediate_operand" "=r,f,m,m,r,ZC") ++ (set (match_operand:JOIN_MODE 2 "nonimmediate_operand" ++ "=r,f,m,m,r,ZC") + (match_operand:JOIN_MODE 3 "nonimmediate_operand" "m,m,r,f,ZC,r"))] + "reload_completed" + { +- bool load_p = (which_alternative == 0 || which_alternative == 1); +- /* Reg-renaming pass reuses base register if it is dead after bonded loads. +- Hardware does not bond those loads, even when they are consecutive. +- However, order of the loads need to be checked for correctness. */ +- if (!load_p || !reg_overlap_mentioned_p (operands[0], operands[1])) +- { +- output_asm_insn (loongarch_output_move (operands[0], operands[1]), +- operands); +- output_asm_insn (loongarch_output_move (operands[2], operands[3]), +- &operands[2]); +- } +- else +- { +- output_asm_insn (loongarch_output_move (operands[2], operands[3]), +- &operands[2]); +- output_asm_insn (loongarch_output_move (operands[0], operands[1]), +- operands); +- } ++ /* The load destination does not overlap the source. */ ++ gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[1])); ++ output_asm_insn (loongarch_output_move (operands[0], operands[1]), ++ operands); ++ output_asm_insn (loongarch_output_move (operands[2], operands[3]), ++ &operands[2]); + return ""; + } +- [(set_attr "move_type" "load,fpload,store,fpstore,load,store") ++ [(set_attr "move_type" ++ "load,fpload,store,fpstore,load,store") + (set_attr "insn_count" "2,2,2,2,2,2")]) + +-;; 2 HI/SI/SF/DF loads are joined. +-;; P5600 does not support bonding of two LBs, hence QI mode is not included. +-;; The loads must be non-volatile as they might be reordered at the time of asm +-;; generation. ++;; 2 HI/SI/SF/DF loads are bonded. + (define_peephole2 + [(set (match_operand:JOIN_MODE 0 "register_operand") + (match_operand:JOIN_MODE 1 "non_volatile_mem_operand")) +@@ -4051,8 +3646,7 @@ + (match_dup 3))])] + "") + +-;; 2 HI/SI/SF/DF stores are joined. +-;; P5600 does not support bonding of two SBs, hence QI mode is not included. ++;; 2 HI/SI/SF/DF stores are bonded. 
+ (define_peephole2 + [(set (match_operand:JOIN_MODE 0 "memory_operand") + (match_operand:JOIN_MODE 1 "register_operand")) +@@ -4067,25 +3661,16 @@ + + ;; Match paired HImode loads. + (define_insn "*join2_loadhi" +- [(set (match_operand:SI 0 "register_operand" "=r") ++ [(set (match_operand:SI 0 "register_operand" "=&r") + (any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand" "m"))) + (set (match_operand:SI 2 "register_operand" "=r") + (any_extend:SI (match_operand:HI 3 "non_volatile_mem_operand" "m")))] + "reload_completed" + { +- /* Reg-renaming pass reuses base register if it is dead after bonded loads. +- Hardware does not bond those loads, even when they are consecutive. +- However, order of the loads need to be checked for correctness. */ +- if (!reg_overlap_mentioned_p (operands[0], operands[1])) +- { +- output_asm_insn ("ld.h\t%0,%1", operands); +- output_asm_insn ("ld.h\t%2,%3", operands); +- } +- else +- { +- output_asm_insn ("ld.h\t%2,%3", operands); +- output_asm_insn ("ld.h\t%0,%1", operands); +- } ++ /* The load destination does not overlap the source. */ ++ gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[1])); ++ output_asm_insn ("ld.h\t%0,%1", operands); ++ output_asm_insn ("ld.h\t%2,%3", operands); + + return ""; + } +@@ -4093,7 +3678,7 @@ + (set_attr "insn_count" "2")]) + + +-;; 2 HI loads are joined. ++;; 2 HI loads are bonded. + (define_peephole2 + [(set (match_operand:SI 0 "register_operand") + (any_extend:SI (match_operand:HI 1 "non_volatile_mem_operand"))) +@@ -4107,153 +3692,10 @@ + "") + + +-;; Logical AND NOT. +-(define_insn "loongson_gsandn" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (and:GPR +- (not:GPR (match_operand:GPR 1 "register_operand" "r")) +- (match_operand:GPR 2 "register_operand" "r")))] +- "" +- "andn\t%0,%2,%1" +- [(set_attr "type" "logical")]) +- +-;; Logical AND NOT. 
+-(define_insn "loongson_gsorn" +- [(set (match_operand:GPR 0 "register_operand" "=r") +- (ior:GPR +- (not:GPR (match_operand:GPR 1 "register_operand" "r")) +- (match_operand:GPR 2 "register_operand" "r")))] +- "" +- "orn\t%0,%2,%1" +- [(set_attr "type" "logical")]) +- +-(define_insn "smax3" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (smax:SCALARF (match_operand:SCALARF 1 "register_operand" "f") +- (match_operand:SCALARF 2 "register_operand" "f")))] +- "TARGET_HARD_FLOAT" +- "fmax.\t%0,%1,%2" +- [(set_attr "type" "fmove") +- (set_attr "mode" "")]) +- +-(define_insn "smin3" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (smin:SCALARF (match_operand:SCALARF 1 "register_operand" "f") +- (match_operand:SCALARF 2 "register_operand" "f")))] +- "TARGET_HARD_FLOAT" +- "fmin.\t%0,%1,%2" +- [(set_attr "type" "fmove") +- (set_attr "mode" "")]) +- +-(define_insn "smaxa3" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (if_then_else:SCALARF +- (gt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) +- (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) +- (match_dup 1) +- (match_dup 2)))] +- "TARGET_HARD_FLOAT" +- "fmaxa.\t%0,%1,%2" +- [(set_attr "type" "fmove") +- (set_attr "mode" "")]) +- +-(define_insn "smina3" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (if_then_else:SCALARF +- (lt (abs:SCALARF (match_operand:SCALARF 1 "register_operand" "f")) +- (abs:SCALARF (match_operand:SCALARF 2 "register_operand" "f"))) +- (match_dup 1) +- (match_dup 2)))] +- "TARGET_HARD_FLOAT" +- "fmina.\t%0,%1,%2" +- [(set_attr "type" "fmove") +- (set_attr "mode" "")]) +- +-(define_insn "frint_" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] +- UNSPEC_FRINT))] +- "" +- "frint.\t%0,%1" +- [(set_attr "type" "fcvt") +- (set_attr "mode" "")]) +- +-(define_insn "fclass_" +- [(set (match_operand:SCALARF 0 "register_operand" "=f") +- (unspec:SCALARF [(match_operand:SCALARF 1 "register_operand" "f")] +- UNSPEC_FCLASS))] +- "" +- "fclass.\t%0,%1" +- [(set_attr "type" "unknown") +- (set_attr "mode" "")]) +- +-(define_insn "bytepick_w" +- [(set (match_operand:SI 0 "register_operand" "=r") +- (unspec:SI [(match_operand:SI 1 "register_operand" "r") +- (match_operand:SI 2 "register_operand" "r") +- (match_operand:SI 3 "const_0_to_3_operand" "n")] +- UNSPEC_BYTEPICK_W))] +- "" +- "bytepick.w\t%0,%1,%2,%z3" +- [(set_attr "type" "dspalu") +- (set_attr "mode" "SI")]) +- +-(define_insn "bytepick_d" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (unspec:DI [(match_operand:DI 1 "register_operand" "r") +- (match_operand:DI 2 "register_operand" "r") +- (match_operand:DI 3 "const_0_to_7_operand" "n")] +- UNSPEC_BYTEPICK_D))] +- "" +- "bytepick.d\t%0,%1,%2,%z3" +- [(set_attr "type" "dspalu") +- (set_attr "mode" "DI")]) +- +-(define_insn "bitrev_4b" +- [(set (match_operand:SI 0 "register_operand" "=r") +- (unspec:SI [(match_operand:SI 1 "register_operand" "r")] +- UNSPEC_BITREV_4B))] +- "" +- "bitrev.4b\t%0,%1" +- [(set_attr "type" "unknown") +- (set_attr "mode" "SI")]) +- +-(define_insn "bitrev_8b" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (unspec:DI [(match_operand:DI 1 "register_operand" "r")] +- UNSPEC_BITREV_8B))] +- "" +- "bitrev.8b\t%0,%1" +- [(set_attr "type" "unknown") +- (set_attr "mode" "DI")]) +- +- +- +-(define_insn "lu32i_d" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (ior:DI +- (zero_extend:DI +- (subreg:SI (match_operand:DI 1 
"register_operand" "0") 0)) +- (match_operand:DI 2 "const_lu32i_operand" "u")))] +- "TARGET_64BIT" +- "lu32i.d\t%0,%X2>>32" +- [(set_attr "type" "arith") +- (set_attr "mode" "DI")]) +- +-(define_insn "lu52i_d" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (ior:DI +- (and:DI (match_operand:DI 1 "register_operand" "r") +- (match_operand 2 "lu52i_mask_operand")) +- (match_operand 3 "const_lu52i_operand" "v")))] +- "TARGET_64BIT" +- "lu52i.d\t%0,%1,%X3>>52" +- [(set_attr "type" "arith") +- (set_attr "mode" "DI")]) + + (define_mode_iterator QHSD [QI HI SI DI]) + +-(define_insn "crc_w__w" ++(define_insn "loongarch_crc_w__w" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")] +@@ -4263,7 +3705,7 @@ + [(set_attr "type" "unknown") + (set_attr "mode" "")]) + +-(define_insn "crcc_w__w" ++(define_insn "loongarch_crcc_w__w" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(match_operand:QHSD 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")] +@@ -4277,6 +3719,9 @@ + + (include "sync.md") + ++(include "generic.md") ++(include "la464.md") ++ + ; The LoongArch SX Instructions. + (include "lsx.md") + +@@ -4286,35 +3731,6 @@ + ; The LoongArch ASX Instructions. + (include "lasx.md") + +-;; Is copying of this instruction disallowed? +-(define_attr "cannot_copy" "no,yes" (const_string "no")) +- +-(define_insn "stack_tie" +- [(set (mem:BLK (scratch)) +- (unspec:BLK [(match_operand:X 0 "register_operand" "r") +- (match_operand:X 1 "register_operand" "r")] +- UNSPEC_TIE))] +- "" +- "" +- [(set_attr "length" "0")] +-) +- +-(define_insn "gpr_save" +- [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_SAVE) +- (clobber (reg:SI T0_REGNUM)) +- (clobber (reg:SI T1_REGNUM))] +- "" +- { return loongarch_output_gpr_save (INTVAL (operands[0])); }) +- +-(define_insn "gpr_restore" +- [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)] +- "" +- "tail\t__loongarch_restore_%0") +- +-(define_insn "gpr_restore_return" +- [(return) +- (use (match_operand 0 "pmode_register_operand" "")) +- (const_int 0)] +- "" +- "") +- ++(define_c_enum "unspec" [ ++ UNSPEC_ADDRESS_FIRST ++]) +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 660de3674..075a2d6c7 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -1,6 +1,14 @@ +- ++; Generated by "genstr" from the template "loongarch.opt.in" ++; and definitions from "loongarch-strings". ++; ++; Please do not edit this file directly. ++; It will be automatically updated during a gcc build ++; if you change "loongarch.opt.in" or "loongarch-strings". ++; ++; Generated by "genstr" from the template "loongarch.opt.in" ++; and definitions from "loongarch-strings". + ; +-; Copyright (C) 2005-2018 Free Software Foundation, Inc. ++; Copyright (C) 2020-2022 Free Software Foundation, Inc. + ; + ; This file is part of GCC. + ; +@@ -17,155 +25,225 @@ + ; You should have received a copy of the GNU General Public License + ; along with GCC; see the file COPYING3. If not see + ; . ++; + + HeaderInclude + config/loongarch/loongarch-opts.h + +-mabi= +-Target RejectNegative Joined Enum(loongarch_abi) Var(loongarch_abi) Init(LARCH_ABI_DEFAULT) +--mabi=ABI Generate code that conforms to the given ABI. 
++HeaderInclude ++config/loongarch/loongarch-str.h + ++TargetVariable ++unsigned int recip_mask = 0 ++ ++; ISA related options ++;; Base ISA + Enum +-Name(loongarch_abi) Type(int) +-Known Loongarch ABIs (for use with the -mabi= option): ++Name(isa_base) Type(int) ++Basic ISAs of LoongArch: + + EnumValue +-Enum(loongarch_abi) String(lp32) Value(ABILP32) ++Enum(isa_base) String(la64) Value(ISA_BASE_LA64V100) ++ ++;; ISA extensions / adjustments ++Enum ++Name(isa_ext_fpu) Type(int) ++FPU types of LoongArch: + + EnumValue +-Enum(loongarch_abi) String(lpx32) Value(ABILPX32) ++Enum(isa_ext_fpu) String(none) Value(ISA_EXT_NONE) + + EnumValue +-Enum(loongarch_abi) String(lp64) Value(ABILP64) ++Enum(isa_ext_fpu) String(32) Value(ISA_EXT_FPU32) + +-march= +-Target RejectNegative Joined Var(loongarch_arch_option) ToLower Enum(loongarch_arch_opt_value) +--march=ISA Generate code for the given ISA. ++EnumValue ++Enum(isa_ext_fpu) String(64) Value(ISA_EXT_FPU64) + +-mbranch-cost= +-Target RejectNegative Joined UInteger Var(loongarch_branch_cost) +--mbranch-cost=COST Set the cost of branches to roughly COST instructions. ++mfpu= ++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) ++-mfpu=FPU Generate code for the given FPU. + +-mcheck-zero-division +-Target Report Mask(CHECK_ZERO_DIV) +-Trap on integer divide by zero. ++mfpu=0 ++Target RejectNegative Alias(mfpu=,none) ++ ++msoft-float ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(msingle-float) ++Prevent the use of all hardware floating-point instructions. ++ ++msingle-float ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(mdouble-float) ++Restrict the use of hardware floating-point instructions to 32-bit operations. + + mdouble-float +-Target Report RejectNegative InverseMask(SINGLE_FLOAT, DOUBLE_FLOAT) ++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(msoft-float) + Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations. + +-mflush-func= +-Target RejectNegative Joined Var(loongarch_cache_flush_func) Init(CACHE_FLUSH_FUNC) +--mflush-func=FUNC Use FUNC to flush the cache before calling stack trampolines. ++Enum ++Name(isa_ext_simd) Type(int) ++SIMD extension levels of LoongArch: ++ ++EnumValue ++Enum(isa_ext_simd) String(none) Value(ISA_EXT_NONE) ++ ++EnumValue ++Enum(isa_ext_simd) String(lsx) Value(ISA_EXT_SIMD_LSX) + +-Mask(64BIT) ++EnumValue ++Enum(isa_ext_simd) String(lasx) Value(ISA_EXT_SIMD_LASX) + +-Mask(FLOAT64) ++msimd= ++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) ++-msimd=SIMD Generate code for the given SIMD extension. + +-mhard-float +-Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI) +-Allow the use of hardware floating-point ABI and instructions. ++mlsx ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch SIMD Extension (LSX, 128-bit). + +-mlong-calls +-Target Report Var(TARGET_LONG_CALLS) +-Use indirect calls. ++mlasx ++Target Driver Defer Var(la_deferred_options) ++Enable LoongArch Advanced SIMD Extension (LASX, 256-bit). + +-mmemcpy +-Target Report Mask(MEMCPY) +-Don't optimize block moves. ++;; Base target models (implies ISA & tune parameters) ++Enum ++Name(cpu_type) Type(int) ++LoongArch CPU types: + +-mno-float +-Target Report RejectNegative Var(TARGET_NO_FLOAT) Condition(TARGET_SUPPORTS_NO_FLOAT) +-Prevent the use of all floating-point operations. 
++EnumValue ++Enum(cpu_type) String(native) Value(CPU_NATIVE) + +-mno-flush-func +-Target RejectNegative +-Do not use a cache-flushing function before calling stack trampolines. ++EnumValue ++Enum(cpu_type) String(abi-default) Value(CPU_ABI_DEFAULT) + +-mrelax-pic-calls +-Target Report Mask(RELAX_PIC_CALLS) +-Try to allow the linker to turn PIC calls into direct calls. ++EnumValue ++Enum(cpu_type) String(loongarch64) Value(CPU_LOONGARCH64) + +-mshared +-Target Report Var(TARGET_SHARED) Init(1) +-When generating -mabicalls code, make the code suitable for use in shared libraries. ++EnumValue ++Enum(cpu_type) String(la664) Value(CPU_LA664) + +-msingle-float +-Target Report RejectNegative Mask(SINGLE_FLOAT) +-Restrict the use of hardware floating-point instructions to 32-bit operations. ++EnumValue ++Enum(cpu_type) String(la464) Value(CPU_LA464) + +-msoft-float +-Target Report RejectNegative Mask(SOFT_FLOAT_ABI) +-Prevent the use of all hardware floating-point instructions. ++EnumValue ++Enum(cpu_type) String(la264) Value(CPU_LA264) ++ ++EnumValue ++Enum(cpu_type) String(la364) Value(CPU_LA364) + +-mlra +-Target Report Var(loongarch_lra_flag) Init(1) Save +-Use LRA instead of reload. ++march= ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) ++-march=PROCESSOR Generate code for the given PROCESSOR ISA. + + mtune= +-Target RejectNegative Joined Var(loongarch_tune_option) ToLower Enum(loongarch_arch_opt_value) +--mtune=PROCESSOR Optimize the output for PROCESSOR. ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) ++-mtune=PROCESSOR Generate optimized code for PROCESSOR. + +-mframe-header-opt +-Target Report Var(flag_frame_header_optimization) Optimization +-Optimize frame header. + +-noasmopt +-Driver ++; ABI related options ++; (ISA constraints on ABI are handled dynamically) + +-mstrict-align +-Target Report Mask(STRICT_ALIGN) Save +-Do not generate unaligned memory accesses. ++;; Base ABI ++Enum ++Name(abi_base) Type(int) ++Base ABI types for LoongArch: + +-mlsx +-Target Report Mask(LSX) +-Use LoongArch SX Extension instructions. ++EnumValue ++Enum(abi_base) String(lp64d) Value(ABI_BASE_LP64D) + +-mlasx +-Target Report Var(TARGET_LASX) +-Use LoongArch ASX Extension instructions. ++EnumValue ++Enum(abi_base) String(lp64f) Value(ABI_BASE_LP64F) + +-malign-llsc-target +-Target Report Var(TARGET_ALIGN_LLSC_TARGET) +-Target align llsc target. ++EnumValue ++Enum(abi_base) String(lp64s) Value(ABI_BASE_LP64S) + +-mmax-inline-memcpy-size= +-Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) +--mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. ++mabi= ++Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET) ++-mabi=BASEABI Generate code that conforms to the given BASEABI. ++ ++;; Legacy option: -mabi=lp64 ++mabi=lp64 ++Target RejectNegative Mask(LP64) ++-mabi=lp64 Legacy option that enables the lp64 integer ABI. ++ ++;; ABI Extension ++Variable ++int la_opt_abi_ext = M_OPT_UNSET ++ ++mbranch-cost= ++Target RejectNegative Joined UInteger Var(loongarch_branch_cost) ++-mbranch-cost=COST Set the cost of branches to roughly COST instructions. + + mvecarg + Target Report Var(TARGET_VECARG) Init(1) + Target pass vect arg uses vector register. + ++mmemvec-cost= ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++mmemvec-cost=COST Set the cost of vector memory access instructions. 
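Note how the selectors above are declared with Init(M_OPT_UNSET): nothing is decided at option-parse time, and -mtune= conventionally falls back to the -march= choice afterwards. A minimal C sketch of that late resolution (the CPU_* values mirror the EnumValue stanzas above; the value of M_OPT_UNSET and the native-probe helper are assumptions for illustration, not this port's actual code):

#define M_OPT_UNSET (-1)                /* assumed sentinel value */

enum { CPU_NATIVE, CPU_ABI_DEFAULT, CPU_LOONGARCH64, CPU_LA664,
       CPU_LA464, CPU_LA264, CPU_LA364 };

extern int probe_native_cpu (void);     /* hypothetical host probe */

static int
resolve_tune (int la_opt_cpu_arch, int la_opt_cpu_tune)
{
  int arch = (la_opt_cpu_arch == CPU_NATIVE)
	     ? probe_native_cpu () : la_opt_cpu_arch;
  /* When -mtune= was not given, tune for the selected architecture.  */
  return (la_opt_cpu_tune == M_OPT_UNSET) ? arch : la_opt_cpu_tune;
}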
++ ++mveclibabi= ++Target RejectNegative Joined Var(loongarch_veclibabi_name) ++Vector library ABI to use. ++ ++mstackrealign ++Target Var(loongarch_stack_realign) Init(1) ++Realign stack in prologue. ++ ++mforce-drap ++Target Var(loongarch_force_drap) Init(0) ++Always use Dynamic Realigned Argument Pointer (DRAP) to realign stack. ++ ++mcheck-zero-division ++Target Mask(CHECK_ZERO_DIV) ++Trap on integer divide by zero. ++ + mcond-move-int +-Target Report Var(TARGET_COND_MOVE_INT) Init(1) ++Target Var(TARGET_COND_MOVE_INT) Init(1) + Conditional moves for integral are enabled. + + mcond-move-float +-Target Report Var(TARGET_COND_MOVE_FLOAT) Init(1) ++Target Var(TARGET_COND_MOVE_FLOAT) Init(1) + Conditional moves for float are enabled. + +-; The code model option names for -mcmodel. ++mmemcpy ++Target Mask(MEMCPY) ++Prevent optimizing block moves, which is also the default behavior of -Os. + ++mstrict-align ++Target Var(TARGET_STRICT_ALIGN) Init(0) ++Do not generate unaligned memory accesses. ++ ++mmax-inline-memcpy-size= ++Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) ++-mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. ++ ++mrecip ++Target Report RejectNegative Var(loongarch_recip) ++Generate reciprocals instead of divss and sqrtss. ++ ++mrecip= ++Target Report RejectNegative Joined Var(loongarch_recip_name) ++Control generation of reciprocal estimates. ++ ++; The code model option names for -mcmodel. + Enum +-Name(cmodel) Type(enum loongarch_code_model) ++Name(cmodel) Type(int) + The code model option names for -mcmodel: + + EnumValue +-Enum(cmodel) String(normal) Value(LARCH_CMODEL_NORMAL) ++Enum(cmodel) String(normal) Value(CMODEL_NORMAL) + + EnumValue +-Enum(cmodel) String(tiny) Value(LARCH_CMODEL_TINY) ++Enum(cmodel) String(tiny) Value(CMODEL_TINY) + + EnumValue +-Enum(cmodel) String(tiny-static) Value(LARCH_CMODEL_TINY_STATIC) ++Enum(cmodel) String(tiny-static) Value(CMODEL_TINY_STATIC) + + EnumValue +-Enum(cmodel) String(large) Value(LARCH_CMODEL_LARGE) ++Enum(cmodel) String(large) Value(CMODEL_LARGE) + + EnumValue +-Enum(cmodel) String(extreme) Value(LARCH_CMODEL_EXTREME) ++Enum(cmodel) String(extreme) Value(CMODEL_EXTREME) + + mcmodel= +-Target RejectNegative Joined Enum(cmodel) Var(loongarch_cmodel_var) Init(LARCH_CMODEL_NORMAL) Save ++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) + Specify the code model. +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index 1f7034366..2b1d6f109 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -168,6 +168,9 @@ + ;; As ILSX but excludes V16QI. + (define_mode_iterator ILSX_DWH [V2DI V4SI V8HI]) + ++;; As LSX but excludes V16QI. ++(define_mode_iterator LSX_DWH [V2DF V4SF V2DI V4SI V8HI]) ++ + ;; As ILSX but excludes V2DI. 
+ (define_mode_iterator ILSX_WHB [V4SI V8HI V16QI]) + +@@ -291,6 +294,10 @@ + (V2DI "d") + (V4SI "s")]) + ++(define_mode_attr flsxfrint ++ [(V2DF "d") ++ (V4SF "s")]) ++ + (define_mode_attr ilsxfmt + [(V2DF "l") + (V4SF "w")]) +@@ -327,6 +334,38 @@ + (V4SI "uimm5") + (V2DI "uimm6")]) + ++ ++(define_int_iterator FRINT_S [UNSPEC_LSX_VFRINTRP_S ++ UNSPEC_LSX_VFRINTRZ_S ++ UNSPEC_LSX_VFRINT ++ UNSPEC_LSX_VFRINTRM_S]) ++ ++(define_int_iterator FRINT_D [UNSPEC_LSX_VFRINTRP_D ++ UNSPEC_LSX_VFRINTRZ_D ++ UNSPEC_LSX_VFRINT ++ UNSPEC_LSX_VFRINTRM_D]) ++ ++(define_int_attr frint_pattern_s ++ [(UNSPEC_LSX_VFRINTRP_S "ceil") ++ (UNSPEC_LSX_VFRINTRZ_S "btrunc") ++ (UNSPEC_LSX_VFRINT "rint") ++ (UNSPEC_LSX_VFRINTRM_S "floor")]) ++ ++(define_int_attr frint_pattern_d ++ [(UNSPEC_LSX_VFRINTRP_D "ceil") ++ (UNSPEC_LSX_VFRINTRZ_D "btrunc") ++ (UNSPEC_LSX_VFRINT "rint") ++ (UNSPEC_LSX_VFRINTRM_D "floor")]) ++ ++(define_int_attr frint_suffix ++ [(UNSPEC_LSX_VFRINTRP_S "rp") ++ (UNSPEC_LSX_VFRINTRP_D "rp") ++ (UNSPEC_LSX_VFRINTRZ_S "rz") ++ (UNSPEC_LSX_VFRINTRZ_D "rz") ++ (UNSPEC_LSX_VFRINT "") ++ (UNSPEC_LSX_VFRINTRM_S "rm") ++ (UNSPEC_LSX_VFRINTRM_D "rm")]) ++ + (define_expand "vec_init" + [(match_operand:LSX 0 "register_operand") + (match_operand:LSX 1 "")] +@@ -513,12 +552,12 @@ + DONE; + }) + +-(define_insn "lsx_vinsgr2vr_" +- [(set (match_operand:LSX 0 "register_operand" "=f") +- (vec_merge:LSX +- (vec_duplicate:LSX ++(define_insn "lsx_vinsgr2vr_" ++ [(set (match_operand:ILSX 0 "register_operand" "=f") ++ (vec_merge:ILSX ++ (vec_duplicate:ILSX + (match_operand: 1 "reg_or_0_operand" "rJ")) +- (match_operand:LSX 2 "register_operand" "0") ++ (match_operand:ILSX 2 "register_operand" "0") + (match_operand 3 "const__operand" "")))] + "ISA_HAS_LSX" + { +@@ -688,11 +727,23 @@ + DONE; + }) + +-(define_insn "lsx_vshuf_" +- [(set (match_operand:ILSX_DWH 0 "register_operand" "=f") +- (unspec:ILSX_DWH [(match_operand:ILSX_DWH 1 "register_operand" "0") +- (match_operand:ILSX_DWH 2 "register_operand" "f") +- (match_operand:ILSX_DWH 3 "register_operand" "f")] ++(define_expand "vec_perm" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "register_operand") ++ (match_operand:LSX 2 "register_operand") ++ (match_operand:LSX 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ loongarch_expand_vec_perm (operands[0], operands[1], ++ operands[2], operands[3]); ++ DONE; ++}) ++ ++(define_insn "lsx_vshuf_" ++ [(set (match_operand:LSX_DWH 0 "register_operand" "=f") ++ (unspec:LSX_DWH [(match_operand:LSX_DWH 1 "register_operand" "0") ++ (match_operand:LSX_DWH 2 "register_operand" "f") ++ (match_operand:LSX_DWH 3 "register_operand" "f")] + UNSPEC_LSX_VSHUF))] + "ISA_HAS_LSX" + "vshuf.\t%w0,%w2,%w3" +@@ -731,7 +782,7 @@ + [(set (match_operand:LSX 0 "nonimmediate_operand") + (match_operand:LSX 1 "move_operand"))] + "reload_completed && ISA_HAS_LSX +- && loongarch_split_move_insn_p (operands[0], operands[1], insn)" ++ && loongarch_split_move_insn_p (operands[0], operands[1])" + [(const_int 0)] + { + loongarch_split_move_insn (operands[0], operands[1], curr_insn); +@@ -996,7 +1047,25 @@ + [(set_attr "type" "simd_fmul") + (set_attr "mode" "")]) + +-(define_insn "div3" ++(define_expand "div3" ++ [(set (match_operand:FLSX 0 "register_operand") ++ (div:FLSX (match_operand:FLSX 1 "register_operand") ++ (match_operand:FLSX 2 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ if (mode == V4SFmode ++ && TARGET_RECIP_VEC_DIV ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math ++ && 
flag_unsafe_math_optimizations) ++ { ++ loongarch_emit_swdivsf (operands[0], operands[1], ++ operands[2], V4SFmode); ++ DONE; ++ } ++}) ++ ++(define_insn "*div3" + [(set (match_operand:FLSX 0 "register_operand" "=f") + (div:FLSX (match_operand:FLSX 1 "register_operand" "f") + (match_operand:FLSX 2 "register_operand" "f")))] +@@ -1025,7 +1094,23 @@ + [(set_attr "type" "simd_fmadd") + (set_attr "mode" "")]) + +-(define_insn "sqrt2" ++(define_expand "sqrt2" ++ [(set (match_operand:FLSX 0 "register_operand") ++ (sqrt:FLSX (match_operand:FLSX 1 "register_operand")))] ++ "ISA_HAS_LSX" ++{ ++ if (mode == V4SFmode ++ && TARGET_RECIP_VEC_SQRT ++ && flag_unsafe_math_optimizations ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math) ++ { ++ loongarch_emit_swrsqrtsf (operands[0], operands[1], V4SFmode, 0); ++ DONE; ++ } ++}) ++ ++(define_insn "*sqrt2" + [(set (match_operand:FLSX 0 "register_operand" "=f") + (sqrt:FLSX (match_operand:FLSX 1 "register_operand" "f")))] + "ISA_HAS_LSX" +@@ -1362,8 +1447,8 @@ + (V2DF "V4SI")]) + + (define_insn "lsx_vreplgr2vr_" +- [(set (match_operand:LSX 0 "register_operand" "=f,f") +- (vec_duplicate:LSX ++ [(set (match_operand:ILSX 0 "register_operand" "=f,f") ++ (vec_duplicate:ILSX + (match_operand: 1 "reg_or_0_operand" "r,J")))] + "ISA_HAS_LSX" + { +@@ -1389,7 +1474,7 @@ + DONE; + }) + +-(define_insn "lsx_vflogb_" ++(define_insn "logb2" + [(set (match_operand:FLSX 0 "register_operand" "=f") + (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] + UNSPEC_LSX_VFLOGB))] +@@ -1449,6 +1534,15 @@ + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) + ++(define_insn "lsx_vfrecipe_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_RECIPE))] ++ "ISA_HAS_LSX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_DIV" ++ "vfrecipe.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ + (define_insn "lsx_vfrint_" + [(set (match_operand:FLSX 0 "register_operand" "=f") + (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] +@@ -1467,6 +1561,42 @@ + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) + ++(define_insn "lsx_vfrsqrte_" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_RSQRTE))] ++ "ISA_HAS_LSX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_SQRT" ++ "vfrsqrte.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++(define_expand "rsqrt2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRSQRT))] ++ "ISA_HAS_LSX" ++{ ++ if (mode == V4SFmode ++ && TARGET_RECIP_VEC_RSQRT ++ && flag_unsafe_math_optimizations ++ && optimize_insn_for_speed_p () ++ && flag_finite_math_only && !flag_trapping_math) ++ { ++ loongarch_emit_swrsqrtsf (operands[0], operands[1], V4SFmode, 1); ++ DONE; ++ } ++}) ++ ++(define_insn "*rsqrt2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRSQRT))] ++ "ISA_HAS_LSX" ++ "vfrsqrt.\t%w0,%w1" ++ [(set_attr "type" "simd_fdiv") ++ (set_attr "mode" "")]) ++ ++ + (define_insn "lsx_vftint_s__" + [(set (match_operand: 0 "register_operand" "=f") + (unspec: [(match_operand:FLSX 1 "register_operand" "f")] +@@ -2172,8 +2302,8 @@ + + (define_insn "lsx_vreplvei__scalar" + [(set (match_operand:FLSX 0 "register_operand" "=f") +- (unspec:FLSX 
[(match_operand: 1 "register_operand" "f")] +- UNSPEC_LSX_VREPLVEI))] ++ (vec_duplicate:FLSX ++ (match_operand: 1 "register_operand" "f")))] + "ISA_HAS_LSX" + "vreplvei.\t%w0,%w1,0" + [(set_attr "type" "simd_splat") +@@ -2285,8 +2415,7 @@ + "vset.\t%Z3%w1\n\tbcnez\t%Z3%0"); + } + [(set_attr "type" "simd_branch") +- (set_attr "mode" "") +- (set_attr "compact_form" "never")]) ++ (set_attr "mode" "")]) + + (define_insn "lsx__v_" + [(set (pc) (if_then_else +@@ -2304,8 +2433,7 @@ + "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0"); + } + [(set_attr "type" "simd_branch") +- (set_attr "mode" "TI") +- (set_attr "compact_form" "never")]) ++ (set_attr "mode" "TI")]) + + ;; vec_concate + (define_expand "vec_concatv2di" +@@ -2923,8 +3051,8 @@ + (set_attr "mode" "V4SF")]) + + (define_insn "lsx_vfrintrne_s" +- [(set (match_operand:V4SI 0 "register_operand" "=f") +- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRNE_S))] + "ISA_HAS_LSX" + "vfrintrne.s\t%w0,%w1" +@@ -2932,8 +3060,8 @@ + (set_attr "mode" "V4SF")]) + + (define_insn "lsx_vfrintrne_d" +- [(set (match_operand:V2DI 0 "register_operand" "=f") +- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRNE_D))] + "ISA_HAS_LSX" + "vfrintrne.d\t%w0,%w1" +@@ -2941,8 +3069,8 @@ + (set_attr "mode" "V2DF")]) + + (define_insn "lsx_vfrintrz_s" +- [(set (match_operand:V4SI 0 "register_operand" "=f") +- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRZ_S))] + "ISA_HAS_LSX" + "vfrintrz.s\t%w0,%w1" +@@ -2950,8 +3078,8 @@ + (set_attr "mode" "V4SF")]) + + (define_insn "lsx_vfrintrz_d" +- [(set (match_operand:V2DI 0 "register_operand" "=f") +- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRZ_D))] + "ISA_HAS_LSX" + "vfrintrz.d\t%w0,%w1" +@@ -2959,8 +3087,8 @@ + (set_attr "mode" "V2DF")]) + + (define_insn "lsx_vfrintrp_s" +- [(set (match_operand:V4SI 0 "register_operand" "=f") +- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRP_S))] + "ISA_HAS_LSX" + "vfrintrp.s\t%w0,%w1" +@@ -2968,8 +3096,8 @@ + (set_attr "mode" "V4SF")]) + + (define_insn "lsx_vfrintrp_d" +- [(set (match_operand:V2DI 0 "register_operand" "=f") +- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRP_D))] + "ISA_HAS_LSX" + "vfrintrp.d\t%w0,%w1" +@@ -2977,8 +3105,8 @@ + (set_attr "mode" "V2DF")]) + + (define_insn "lsx_vfrintrm_s" +- [(set (match_operand:V4SI 0 "register_operand" "=f") +- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")] ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRM_S))] + "ISA_HAS_LSX" + "vfrintrm.s\t%w0,%w1" +@@ -2986,14 +3114,44 @@ + (set_attr "mode" "V4SF")]) + + (define_insn "lsx_vfrintrm_d" +- 
[(set (match_operand:V2DI 0 "register_operand" "=f") +- (unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")] ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] + UNSPEC_LSX_VFRINTRM_D))] + "ISA_HAS_LSX" + "vfrintrm.d\t%w0,%w1" + [(set_attr "type" "simd_shift") + (set_attr "mode" "V2DF")]) + ++;; Vector versions of the floating-point frint patterns. ++;; Expands to btrunc, ceil, floor, rint. ++(define_insn "v4sf2" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")] ++ FRINT_S))] ++ "ISA_HAS_LSX" ++ "vfrint.s\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V4SF")]) ++ ++(define_insn "v2df2" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")] ++ FRINT_D))] ++ "ISA_HAS_LSX" ++ "vfrint.d\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "V2DF")]) ++ ++;; Expands to round. ++(define_insn "round2" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] ++ UNSPEC_LSX_VFRINT))] ++ "ISA_HAS_LSX" ++ "vfrint.\t%w0,%w1" ++ [(set_attr "type" "simd_shift") ++ (set_attr "mode" "")]) ++ + ;; Offset load and broadcast + (define_expand "lsx_vldrepl_" + [(match_operand:LSX 0 "register_operand") +@@ -3019,6 +3177,18 @@ + (set_attr "mode" "") + (set_attr "length" "4")]) + ++(define_insn "lsx_vldrepl__insn_0" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (vec_duplicate:LSX ++ (mem: (match_operand:DI 1 "register_operand" "r"))))] ++ "ISA_HAS_LSX" ++{ ++ return "vldrepl.\t%w0,%1,0"; ++} ++ [(set_attr "type" "simd_load") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ + ;; Offset store by sel + (define_expand "lsx_vstelm_" + [(match_operand:LSX 0 "register_operand") +@@ -3047,6 +3217,20 @@ + (set_attr "mode" "") + (set_attr "length" "4")]) + ++;; Offset is "0" ++(define_insn "lsx_vstelm__insn_0" ++ [(set (mem: (match_operand:DI 0 "register_operand" "r")) ++ (vec_select: ++ (match_operand:LSX 1 "register_operand" "f") ++ (parallel [(match_operand:SI 2 "const__operand")])))] ++ "ISA_HAS_LSX" ++{ ++ return "vstelm.\t%w1,%0,0,%2"; ++} ++ [(set_attr "type" "simd_store") ++ (set_attr "mode" "") ++ (set_attr "length" "4")]) ++ + (define_expand "lsx_vld" + [(match_operand:V16QI 0 "register_operand") + (match_operand 1 "pmode_register_operand") +@@ -3179,3 +3363,101 @@ + } + [(set_attr "type" "simd_fcmp") + (set_attr "mode" "FCC")]) ++ ++;; Vector reduction operation ++(define_expand "reduc_plus_scal_v2di" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:V2DI 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (V2DImode); ++ emit_insn (gen_lsx_vhaddw_q_d (tmp, operands[1], operands[1])); ++ emit_insn (gen_vec_extractv2didi (operands[0], tmp, const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_v4si" ++ [(match_operand:SI 0 "register_operand") ++ (match_operand:V4SI 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (V2DImode); ++ rtx tmp1 = gen_reg_rtx (V2DImode); ++ emit_insn (gen_lsx_vhaddw_d_w (tmp, operands[1], operands[1])); ++ emit_insn (gen_lsx_vhaddw_q_d (tmp1, tmp, tmp)); ++ emit_insn (gen_vec_extractv4sisi (operands[0], gen_lowpart(V4SImode,tmp1), const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_plus_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:FLSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp 
= gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc__scal_" ++ [(any_bitwise: ++ (match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand"))] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_smin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:LSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umax_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "reduc_umin_scal_" ++ [(match_operand: 0 "register_operand") ++ (match_operand:ILSX 1 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx tmp = gen_reg_rtx (mode); ++ loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]); ++ emit_insn (gen_vec_extract (operands[0], tmp, ++ const0_rtx)); ++ DONE; ++}) +diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h +index fe3043e3d..2d1598536 100644 +--- a/gcc/config/loongarch/lsxintrin.h ++++ b/gcc/config/loongarch/lsxintrin.h +@@ -3291,65 +3291,65 @@ __m128i __lsx_vftintrneh_l_s(__m128 _1) + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V4SI, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrne_s(__m128 _1) ++__m128 __lsx_vfrintrne_s(__m128 _1) + { +- return (__m128i)__builtin_lsx_vfrintrne_s((v4f32)_1); ++ return (__m128)__builtin_lsx_vfrintrne_s((v4f32)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V2DI, V2DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrne_d(__m128d _1) ++__m128d __lsx_vfrintrne_d(__m128d _1) + { +- return (__m128i)__builtin_lsx_vfrintrne_d((v2f64)_1); ++ return (__m128d)__builtin_lsx_vfrintrne_d((v2f64)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V4SI, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrz_s(__m128 _1) ++__m128 __lsx_vfrintrz_s(__m128 _1) + { +- return (__m128i)__builtin_lsx_vfrintrz_s((v4f32)_1); ++ return (__m128)__builtin_lsx_vfrintrz_s((v4f32)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V2DI, V2DF. 
*/ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrz_d(__m128d _1) ++__m128d __lsx_vfrintrz_d(__m128d _1) + { +- return (__m128i)__builtin_lsx_vfrintrz_d((v2f64)_1); ++ return (__m128d)__builtin_lsx_vfrintrz_d((v2f64)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V4SI, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrp_s(__m128 _1) ++__m128 __lsx_vfrintrp_s(__m128 _1) + { +- return (__m128i)__builtin_lsx_vfrintrp_s((v4f32)_1); ++ return (__m128)__builtin_lsx_vfrintrp_s((v4f32)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V2DI, V2DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrp_d(__m128d _1) ++__m128d __lsx_vfrintrp_d(__m128d _1) + { +- return (__m128i)__builtin_lsx_vfrintrp_d((v2f64)_1); ++ return (__m128d)__builtin_lsx_vfrintrp_d((v2f64)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V4SI, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrm_s(__m128 _1) ++__m128 __lsx_vfrintrm_s(__m128 _1) + { +- return (__m128i)__builtin_lsx_vfrintrm_s((v4f32)_1); ++ return (__m128)__builtin_lsx_vfrintrm_s((v4f32)_1); + } + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V2DI, V2DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vfrintrm_d(__m128d _1) ++__m128d __lsx_vfrintrm_d(__m128d _1) + { +- return (__m128i)__builtin_lsx_vfrintrm_d((v2f64)_1); ++ return (__m128d)__builtin_lsx_vfrintrm_d((v2f64)_1); + } + + /* Assembly instruction format: vd, rj, si8, idx. */ +@@ -4154,19 +4154,19 @@ __m128i __lsx_vsub_q(__m128i _1, __m128i _2) + + /* Assembly instruction format: vd, rj, si12. */ + /* Data types in instruction templates: V16QI, CVPOINTER, SI. */ +-#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vldrepl_b((void *)(_1), (_2))) ++#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vldrepl_b((void const *)(_1), (_2))) + + /* Assembly instruction format: vd, rj, si11. */ + /* Data types in instruction templates: V8HI, CVPOINTER, SI. */ +-#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m128i)__builtin_lsx_vldrepl_h((void *)(_1), (_2))) ++#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) ((__m128i)__builtin_lsx_vldrepl_h((void const *)(_1), (_2))) + + /* Assembly instruction format: vd, rj, si10. */ + /* Data types in instruction templates: V4SI, CVPOINTER, SI. */ +-#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m128i)__builtin_lsx_vldrepl_w((void *)(_1), (_2))) ++#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) ((__m128i)__builtin_lsx_vldrepl_w((void const *)(_1), (_2))) + + /* Assembly instruction format: vd, rj, si9. */ + /* Data types in instruction templates: V2DI, CVPOINTER, SI. */ +-#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m128i)__builtin_lsx_vldrepl_d((void *)(_1), (_2))) ++#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) ((__m128i)__builtin_lsx_vldrepl_d((void const *)(_1), (_2))) + + /* Assembly instruction format: vd, vj. */ + /* Data types in instruction templates: V16QI, V16QI. */ +@@ -4470,7 +4470,7 @@ __m128i __lsx_vextl_q_d(__m128i _1) + + /* Assembly instruction format: vd, rj, si12. 
*/ + /* Data types in instruction templates: V16QI, CVPOINTER, SI. */ +-#define __lsx_vld(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vld((void *)(_1), (_2))) ++#define __lsx_vld(/*void **/ _1, /*si12*/ _2) ((__m128i)__builtin_lsx_vld((void const *)(_1), (_2))) + + /* Assembly instruction format: vd, rj, si12. */ + /* Data types in instruction templates: VOID, V16QI, CVPOINTER, SI. */ +@@ -4547,9 +4547,9 @@ __m128i __lsx_vshuf_b(__m128i _1, __m128i _2, __m128i _3) + /* Assembly instruction format: vd, rj, rk. */ + /* Data types in instruction templates: V16QI, CVPOINTER, DI. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +-__m128i __lsx_vldx(void * _1, long int _2) ++__m128i __lsx_vldx(void const * _1, long int _2) + { +- return (__m128i)__builtin_lsx_vldx((void *)_1, (long int)_2); ++ return (__m128i)__builtin_lsx_vldx((void const *)_1, (long int)_2); + } + + /* Assembly instruction format: vd, rj, rk. */ +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 20638559d..daacaf003 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -1,5 +1,7 @@ +-;; Predicate definitions for LARCH. +-;; Copyright (C) 2004-2018 Free Software Foundation, Inc. ++;; Predicate definitions for LoongArch target. ++;; Copyright (C) 2020-2022 Free Software Foundation, Inc. ++;; Contributed by Loongson Co. Ltd. ++;; Based on MIPS target for GNU compiler. + ;; + ;; This file is part of GCC. + ;; +@@ -19,7 +21,7 @@ + + (define_predicate "const_uns_arith_operand" + (and (match_code "const_int") +- (match_test "SMALL_OPERAND_UNSIGNED (INTVAL (op))"))) ++ (match_test "IMM12_OPERAND_UNSIGNED (INTVAL (op))"))) + + (define_predicate "uns_arith_operand" + (ior (match_operand 0 "const_uns_arith_operand") +@@ -45,7 +47,7 @@ + (ior (match_operand 0 "const_arith_operand") + (match_operand 0 "register_operand"))) + +-(define_predicate "const_immlsa_operand" ++(define_predicate "const_immalsl_operand" + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 1, 4)"))) + +@@ -69,9 +71,6 @@ + (and (match_code "const_int") + (match_test "UIMM6_OPERAND (INTVAL (op))"))) + +-(define_predicate "const_uimm7_operand" +- (and (match_code "const_int") +- (match_test "IN_RANGE (INTVAL (op), 0, 127)"))) + + (define_predicate "const_uimm8_operand" + (and (match_code "const_int") +@@ -85,10 +84,6 @@ + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 0, 32767)"))) + +-(define_predicate "const_imm5_operand" +- (and (match_code "const_int") +- (match_test "IN_RANGE (INTVAL (op), -16, 15)"))) +- + (define_predicate "const_imm10_operand" + (and (match_code "const_int") + (match_test "IMM10_OPERAND (INTVAL (op))"))) +@@ -101,10 +96,6 @@ + (and (match_code "const_int") + (match_test "IMM13_OPERAND (INTVAL (op))"))) + +-(define_predicate "reg_imm10_operand" +- (ior (match_operand 0 "const_imm10_operand") +- (match_operand 0 "register_operand"))) +- + (define_predicate "aq8b_operand" + (and (match_code "const_int") + (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) +@@ -137,6 +128,7 @@ + (and (match_code "const_int") + (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)"))) + ++ + (define_predicate "aq12b_operand" + (and (match_code "const_int") + (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)"))) +@@ -155,7 +147,7 @@ + + (define_predicate "sle_operand" + (and (match_code "const_int") +- (match_test "SMALL_OPERAND (INTVAL (op) + 1)"))) ++ (match_test 
"IMM12_OPERAND (INTVAL (op) + 1)"))) + + (define_predicate "sleu_operand" + (and (match_operand 0 "sle_operand") +@@ -223,179 +215,40 @@ + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) + +-(define_predicate "const_4_to_7_operand" ++(define_predicate "const_4_to_7_operand" + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 4, 7)"))) +- ++ + (define_predicate "const_8_to_15_operand" + (and (match_code "const_int") + (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) +- +-(define_predicate "const_16_to_31_operand" +- (and (match_code "const_int") +- (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) +- +-(define_predicate "qi_mask_operand" +- (and (match_code "const_int") +- (match_test "UINTVAL (op) == 0xff"))) + +-(define_predicate "hi_mask_operand" ++(define_predicate "const_8_to_11_operand" + (and (match_code "const_int") +- (match_test "UINTVAL (op) == 0xffff"))) ++ (match_test "IN_RANGE (INTVAL (op), 8, 11)"))) + +-(define_predicate "lu52i_mask_operand" ++(define_predicate "const_12_to_15_operand" + (and (match_code "const_int") +- (match_test "UINTVAL (op) == 0xfffffffffffff"))) ++ (match_test "IN_RANGE (INTVAL (op), 12, 15)"))) + +-(define_predicate "shift_mask_operand" ++(define_predicate "const_16_to_31_operand" + (and (match_code "const_int") +- (ior (match_test "UINTVAL (op) == 0x3fffffffc") +- (match_test "UINTVAL (op) == 0x1fffffffe") +- (match_test "UINTVAL (op) == 0x7fffffff8") +- (match_test "UINTVAL (op) == 0xffffffff0")))) +- +- ++ (match_test "IN_RANGE (INTVAL (op), 0, 7)"))) + +-(define_predicate "si_mask_operand" ++(define_predicate "lu52i_mask_operand" + (and (match_code "const_int") +- (match_test "UINTVAL (op) == 0xffffffff"))) +- +-(define_predicate "and_load_operand" +- (ior (match_operand 0 "qi_mask_operand") +- (match_operand 0 "hi_mask_operand") +- (match_operand 0 "si_mask_operand"))) ++ (match_test "UINTVAL (op) == 0xfffffffffffff"))) + + (define_predicate "low_bitmask_operand" + (and (match_code "const_int") + (match_test "low_bitmask_len (mode, INTVAL (op)) > 12"))) + +-(define_predicate "and_reg_operand" +- (ior (match_operand 0 "register_operand") +- (match_operand 0 "const_uns_arith_operand") +- (match_operand 0 "low_bitmask_operand") +- (match_operand 0 "si_mask_operand"))) +- +-(define_predicate "and_operand" +- (ior (match_operand 0 "and_load_operand") +- (match_operand 0 "and_reg_operand"))) +- +-(define_predicate "d_operand" +- (and (match_code "reg") +- (match_test "GP_REG_P (REGNO (op))"))) +- +-(define_predicate "lwsp_swsp_operand" +- (and (match_code "mem") +- (match_test "lwsp_swsp_address_p (XEXP (op, 0), mode)"))) +- +-(define_predicate "db4_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)"))) +- +-(define_predicate "db7_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)"))) +- +-(define_predicate "db8_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)"))) +- +-(define_predicate "ib3_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)"))) +- +-(define_predicate "sb4_operand" +- (and (match_code "const_int") +- (match_test "loongarch_signed_immediate_p (INTVAL (op), 4, 0)"))) +- +-(define_predicate "sb5_operand" +- (and (match_code "const_int") +- (match_test "loongarch_signed_immediate_p (INTVAL (op), 5, 0)"))) +- +-(define_predicate "sb8_operand" +- (and 
(match_code "const_int") +- (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)"))) +- +-(define_predicate "sd8_operand" +- (and (match_code "const_int") +- (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)"))) +- +-(define_predicate "ub4_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 0)"))) +- +-(define_predicate "ub8_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 0)"))) +- +-(define_predicate "uh4_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 1)"))) +- +-(define_predicate "uw4_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 2)"))) +- +-(define_predicate "uw5_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 5, 2)"))) +- +-(define_predicate "uw6_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 6, 2)"))) +- +-(define_predicate "uw8_operand" +- (and (match_code "const_int") +- (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 2)"))) +- +-(define_predicate "addiur2_operand" +- (and (match_code "const_int") +- (ior (match_test "INTVAL (op) == -1") +- (match_test "INTVAL (op) == 1") +- (match_test "INTVAL (op) == 4") +- (match_test "INTVAL (op) == 8") +- (match_test "INTVAL (op) == 12") +- (match_test "INTVAL (op) == 16") +- (match_test "INTVAL (op) == 20") +- (match_test "INTVAL (op) == 24")))) +- +-(define_predicate "addiusp_operand" +- (and (match_code "const_int") +- (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))") +- (match_test "(IN_RANGE (INTVAL (op), -258, -3))")))) +- +-(define_predicate "andi16_operand" +- (and (match_code "const_int") +- (ior (match_test "IN_RANGE (INTVAL (op), 1, 4)") +- (match_test "IN_RANGE (INTVAL (op), 7, 8)") +- (match_test "IN_RANGE (INTVAL (op), 15, 16)") +- (match_test "IN_RANGE (INTVAL (op), 31, 32)") +- (match_test "IN_RANGE (INTVAL (op), 63, 64)") +- (match_test "INTVAL (op) == 255") +- (match_test "INTVAL (op) == 32768") +- (match_test "INTVAL (op) == 65535")))) +- +-(define_predicate "movep_src_register" +- (and (match_code "reg") +- (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)")) +- (match_test ("IN_RANGE (REGNO (op), 16, 20)"))))) +- +-(define_predicate "movep_src_operand" +- (ior (match_operand 0 "const_0_operand") +- (match_operand 0 "movep_src_register"))) +- +-(define_predicate "fcc_reload_operand" +- (and (match_code "reg,subreg") +- (match_test "ST_REG_P (true_regnum (op))"))) +- +-(define_predicate "muldiv_target_operand" +- (match_operand 0 "register_operand")) +- + (define_predicate "const_call_insn_operand" + (match_code "const,symbol_ref,label_ref") + { + enum loongarch_symbol_type symbol_type; + +- if (!loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_CALL, &symbol_type)) ++ if (!loongarch_symbolic_constant_p (op, &symbol_type)) + return false; + + switch (symbol_type) +@@ -403,9 +256,9 @@ + case SYMBOL_GOT_DISP: + /* Without explicit relocs, there is no special syntax for + loading the address of a call destination into a register. +- Using "la $25,foo; jal $25" would prevent the lazy binding +- of "foo", so keep the address of global symbols with the +- jal macro. */ ++ Using "la.global JIRL_REGS,foo; jirl JIRL_REGS" would prevent the lazy ++ binding of "foo", so keep the address of global symbols with the jirl ++ macro. 
*/ + return 1; + + default: +@@ -420,7 +273,7 @@ + (define_predicate "is_const_call_local_symbol" + (and (match_operand 0 "const_call_insn_operand") + (ior (match_test "loongarch_global_symbol_p (op) == 0") +- (match_test "loongarch_symbol_binds_local_p (op) != 0")) ++ (match_test "loongarch_symbol_binds_local_p (op) != 0")) + (match_test "CONSTANT_P (op)"))) + + (define_predicate "is_const_call_weak_symbol" +@@ -446,7 +299,6 @@ + (define_predicate "splittable_const_int_operand" + (match_code "const_int") + { +- + /* Don't handle multi-word moves this way; we don't want to introduce + the individual word-mode moves until after reload. */ + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) +@@ -454,9 +306,8 @@ + + /* Otherwise check whether the constant can be loaded in a single + instruction. */ +-// return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op); +- return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op) +- && !LU52I_INT (op); ++ return !LU12I_INT (op) && !IMM12_INT (op) && !IMM12_INT_UNSIGNED (op) ++ && !LU52I_INT (op); + }) + + (define_predicate "move_operand" +@@ -504,73 +355,34 @@ + case CONST: + case SYMBOL_REF: + case LABEL_REF: +- return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)); ++ return (loongarch_symbolic_constant_p (op, &symbol_type)); + default: + return true; + } + }) + +-(define_predicate "consttable_operand" +- (match_test "CONSTANT_P (op)")) +- + (define_predicate "symbolic_operand" + (match_code "const,symbol_ref,label_ref") + { + enum loongarch_symbol_type type; +- return loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type); +-}) +- +-(define_predicate "force_to_mem_operand" +- (match_code "const,symbol_ref,label_ref") +-{ +- enum loongarch_symbol_type symbol_type; +- return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type) +- && loongarch_use_pcrel_pool_p[(int) symbol_type]); +-}) +- +-(define_predicate "got_disp_operand" +- (match_code "const,symbol_ref,label_ref") +-{ +- enum loongarch_symbol_type type; +- return (loongarch_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type) +- && type == SYMBOL_GOT_DISP); ++ return loongarch_symbolic_constant_p (op, &type); + }) + +-(define_predicate "symbol_ref_operand" +- (match_code "symbol_ref")) +- +-(define_predicate "stack_operand" +- (and (match_code "mem") +- (match_test "loongarch_stack_address_p (XEXP (op, 0), GET_MODE (op))"))) +- +- +- + (define_predicate "equality_operator" + (match_code "eq,ne")) + +-(define_predicate "extend_operator" +- (match_code "zero_extend,sign_extend")) +- +-(define_predicate "trap_comparison_operator" +- (match_code "eq,ne,lt,ltu,ge,geu")) +- + (define_predicate "order_operator" + (match_code "lt,ltu,le,leu,ge,geu,gt,gtu")) + + ;; For NE, cstore uses sltu instructions in which the first operand is $0. + + (define_predicate "loongarch_cstore_operator" +- (ior (match_code "eq,gt,gtu,ge,geu,lt,ltu,le,leu") +- (match_code "ne"))) ++ (match_code "ne,eq,gt,gtu,ge,geu,lt,ltu,le,leu")) + + (define_predicate "small_data_pattern" + (and (match_code "set,parallel,unspec,unspec_volatile,prefetch") + (match_test "loongarch_small_data_pattern_p (op)"))) + +-(define_predicate "mem_noofs_operand" +- (and (match_code "mem") +- (match_code "reg" "0"))) +- + ;; Return 1 if the operand is in non-volatile memory. 
+ (define_predicate "non_volatile_mem_operand" + (and (match_operand 0 "memory_operand") +@@ -606,12 +418,6 @@ + return loongarch_const_vector_same_int_p (op, mode, 0, 63); + }) + +-(define_predicate "const_vector_same_uimm8_operand" +- (match_code "const_vector") +-{ +- return loongarch_const_vector_same_int_p (op, mode, 0, 255); +-}) +- + (define_predicate "par_const_vector_shf_set_operand" + (match_code "parallel") + { +diff --git a/gcc/config/loongarch/rtems.h b/gcc/config/loongarch/rtems.h +deleted file mode 100644 +index bbb70b040..000000000 +--- a/gcc/config/loongarch/rtems.h ++++ /dev/null +@@ -1,39 +0,0 @@ +-/* Definitions for rtems targeting a LARCH using ELF. +- Copyright (C) 1996-2018 Free Software Foundation, Inc. +- Contributed by Joel Sherrill (joel@OARcorp.com). +- +- This file is part of GCC. +- +- GCC is free software; you can redistribute it and/or modify it +- under the terms of the GNU General Public License as published +- by the Free Software Foundation; either version 3, or (at your +- option) any later version. +- +- GCC is distributed in the hope that it will be useful, but WITHOUT +- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +- License for more details. +- +- Under Section 7 of GPL version 3, you are granted additional +- permissions described in the GCC Runtime Library Exception, version +- 3.1, as published by the Free Software Foundation. +- +- You should have received a copy of the GNU General Public License and +- a copy of the GCC Runtime Library Exception along with this program; +- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +- . */ +- +-/* Specify predefined symbols in preprocessor. */ +- +-#define TARGET_OS_CPP_BUILTINS() \ +-do { \ +- builtin_define ("__rtems__"); \ +- builtin_define ("__USE_INIT_FINI__"); \ +- builtin_assert ("system=rtems"); \ +-} while (0) +- +-/* No sdata. +- * The RTEMS BSPs expect -G0 +- */ +-#undef LARCH_DEFAULT_GVALUE +-#define LARCH_DEFAULT_GVALUE 0 +diff --git a/gcc/config/loongarch/sde.opt b/gcc/config/loongarch/sde.opt +deleted file mode 100644 +index 321217d51..000000000 +--- a/gcc/config/loongarch/sde.opt ++++ /dev/null +@@ -1,28 +0,0 @@ +-; LARCH SDE options. +-; +-; Copyright (C) 2010-2018 Free Software Foundation, Inc. +-; +-; This file is part of GCC. +-; +-; GCC is free software; you can redistribute it and/or modify it under +-; the terms of the GNU General Public License as published by the Free +-; Software Foundation; either version 3, or (at your option) any later +-; version. +-; +-; GCC is distributed in the hope that it will be useful, but WITHOUT +-; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +-; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +-; License for more details. +-; +-; You should have received a copy of the GNU General Public License +-; along with GCC; see the file COPYING3. If not see +-; . +- +-; -mcode-xonly is a traditional alias for -mcode-readable=pcrel and +-; -mno-data-in-code is a traditional alias for -mcode-readable=no. +- +-mno-data-in-code +-Target RejectNegative Alias(mcode-readable=, no) +- +-mcode-xonly +-Target RejectNegative Alias(mcode-readable=, pcrel) +diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md +index 5a16c4fa3..abc401339 100644 +--- a/gcc/config/loongarch/sync.md ++++ b/gcc/config/loongarch/sync.md +@@ -1,7 +1,7 @@ +-;; Machine description for LARCH atomic operations. 
+-;; Copyright (C) 2011-2018 Free Software Foundation, Inc. +-;; Contributed by Andrew Waterman (andrew@sifive.com). +-;; Based on LARCH target for GNU compiler. ++;; Machine description for LoongArch atomic operations. ++;; Copyright (C) 2020-2022 Free Software Foundation, Inc. ++;; Contributed by Loongson Co. Ltd. ++;; Based on MIPS and RISC-V target for GNU compiler. + + ;; This file is part of GCC. + +@@ -29,6 +29,7 @@ + UNSPEC_COMPARE_AND_SWAP_NAND + UNSPEC_SYNC_OLD_OP + UNSPEC_SYNC_EXCHANGE ++ UNSPEC_ATOMIC_LOAD + UNSPEC_ATOMIC_STORE + UNSPEC_MEMORY_BARRIER + ]) +@@ -37,21 +38,25 @@ + (define_code_attr atomic_optab + [(plus "add") (ior "or") (xor "xor") (and "and")]) + ++(define_mode_iterator AMO_BHWD [(QI "TARGET_uARCH_LA664") ++ (HI "TARGET_uARCH_LA664") ++ SI DI]) ++ + ;; This attribute gives the format suffix for atomic memory operations. +-(define_mode_attr amo [(SI "w") (DI "d")]) ++(define_mode_attr amo [(QI "b") (HI "h") (SI "w") (DI "d")]) ++ ++;; expands to the name of the atomic operand that implements a ++;; particular code. ++(define_code_attr amop [(ior "or") (xor "xor") (and "and") (plus "add")]) + +-;; expands to the name of the atomic operand that implements a particular code. +-(define_code_attr amop [(ior "or") +- (xor "xor") +- (and "and") +- (plus "add")]) + ;; Memory barriers. + + (define_expand "mem_thread_fence" + [(match_operand:SI 0 "const_int_operand" "")] ;; model + "" + { +- if (INTVAL (operands[0]) != MEMMODEL_RELAXED) ++ enum memmodel model = memmodel_from_int (INTVAL (operands[0])); ++ if (!is_mm_relaxed (model)) + { + rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); + MEM_VOLATILE_P (mem) = 1; +@@ -60,37 +65,79 @@ + DONE; + }) + +-;; Until the LARCH memory model (hence its mapping from C++) is finalized, ++;; Until the LoongArch memory model (hence its mapping from C++) is finalized, + ;; conservatively emit a full FENCE. + (define_insn "mem_thread_fence_1" + [(set (match_operand:BLK 0 "" "") + (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER)) + (match_operand:SI 1 "const_int_operand" "")] ;; model + "" +- "dbar\t0") ++{ ++ enum memmodel model = memmodel_from_int (INTVAL (operands[1])); ++ if (is_mm_consume (model)) ++ return "dbar\t0x700"; ++ else if (is_mm_acquire (model)) ++ return "dbar\t0x14"; ++ else ++ return "dbar\t0x10"; ++}) + + ;; Atomic memory operations. + ++(define_insn "atomic_load" ++ [(set (match_operand:QHWD 0 "register_operand" "=r") ++ (unspec_volatile:QHWD ++ [(match_operand:QHWD 1 "memory_operand" "+m") ++ (match_operand:SI 2 "const_int_operand")] ;; model ++ UNSPEC_ATOMIC_LOAD))] ++ "" ++{ ++ enum memmodel model = memmodel_from_int (INTVAL (operands[2])); ++ if (is_mm_relaxed (model) || is_mm_release (model)) ++ return "ld.\t%0,%1"; ++ if (is_mm_consume (model)) ++ return "ld.\t%0,%1\n\tdbar\t0x700"; ++ else ++ return "ld.\t%0,%1\n\tdbar\t0x14"; ++}) ++ + ;; Implement atomic stores with amoswap. Fall back to fences for atomic loads. 
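
In C11 terms, the atomic_load pattern above and the atomic_store pattern that follows implement the usual acquire/release mapping: a relaxed access is a bare ld/st, and stronger orders add a dbar with the hint values shown in the templates (0x14 after an acquire load, 0x12 before a release store, 0x700 for consume). A minimal user-level illustration of the code that feeds these expanders:

    #include <stdint.h>

    int64_t
    load_acquire (const int64_t *p)
    {
      /* Expands via atomic_load: ld.d followed by a dbar hint.  */
      return __atomic_load_n (p, __ATOMIC_ACQUIRE);
    }

    void
    store_release (int64_t *p, int64_t v)
    {
      /* Expands via atomic_store: dbar hint, then st.d.  */
      __atomic_store_n (p, v, __ATOMIC_RELEASE);
    }
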
+ (define_insn "atomic_store" +- [(set (match_operand:GPR 0 "memory_operand" "+ZB") +- (unspec_volatile:GPR +- [(match_operand:GPR 1 "reg_or_0_operand" "rJ") ++ [(set (match_operand:QHWD 0 "memory_operand" "+m") ++ (unspec_volatile:QHWD ++ [(match_operand:QHWD 1 "reg_or_0_operand" "rJ") + (match_operand:SI 2 "const_int_operand")] ;; model + UNSPEC_ATOMIC_STORE))] + "" +- "amswap%A2.\t$zero,%z1,%0" ++{ ++ enum memmodel model = memmodel_from_int (INTVAL (operands[2])); ++ if (is_mm_relaxed (model) || is_mm_acquire (model) || is_mm_consume (model)) ++ return "st.\t%z1,%0"; ++ else ++ return "dbar\t0x12\n\tst.\t%z1,%0"; ++} + [(set (attr "length") (const_int 8))]) + + (define_insn "atomic_" + [(set (match_operand:GPR 0 "memory_operand" "+ZB") + (unspec_volatile:GPR + [(any_atomic:GPR (match_dup 0) +- (match_operand:GPR 1 "reg_or_0_operand" "rJ")) ++ (match_operand:GPR 1 "reg_or_0_operand" "rJ")) + (match_operand:SI 2 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" +- "am%A2.\t$zero,%z1,%0" ++ "%J2\n\tam%A2.\t$zero,%z1,%0\n\t%K2" ++ [(set (attr "length") (const_int 8))]) ++ ++(define_insn "atomic_add" ++ [(set (match_operand:SHORT 0 "memory_operand" "+ZB") ++ (unspec_volatile:SHORT ++ [(plus:SHORT (match_dup 0) ++ (match_operand:SHORT 1 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 2 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "TARGET_uARCH_LA664" ++ "%J2\n\tamadd%A2.\t$zero,%z1,%0\n\t%K2" + [(set (attr "length") (const_int 8))]) + + (define_insn "atomic_fetch_" +@@ -99,11 +146,11 @@ + (set (match_dup 1) + (unspec_volatile:GPR + [(any_atomic:GPR (match_dup 1) +- (match_operand:GPR 2 "reg_or_0_operand" "rJ")) ++ (match_operand:GPR 2 "reg_or_0_operand" "rJ")) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" +- "am%A3.\t%0,%z2,%1" ++ "%J3\n\tam%A3.\t%0,%z2,%1\n\t%K3" + [(set (attr "length") (const_int 8))]) + + (define_insn "atomic_exchange" +@@ -115,35 +162,90 @@ + (set (match_dup 1) + (match_operand:GPR 2 "register_operand" "r"))] + "" +- "amswap%A3.\t%0,%z2,%1" ++ "%J3\n\tamswap%A3.\t%0,%z2,%1\n\t%K3" ++ [(set (attr "length") (const_int 8))]) ++ ++(define_insn "atomic_exchange_1" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (unspec_volatile:SHORT ++ [(match_operand:SHORT 1 "memory_operand" "+ZB") ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_EXCHANGE)) ++ (set (match_dup 1) ++ (match_operand:SHORT 2 "register_operand" "r"))] ++ "" ++ "%J3\n\tamswap%A3.\t%0,%z2,%1\n\t%K3" + [(set (attr "length") (const_int 8))]) + + (define_insn "atomic_cas_value_strong" + [(set (match_operand:GPR 0 "register_operand" "=&r") +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") + (match_operand:GPR 3 "reg_or_0_operand" "rJ") +- (match_operand:SI 4 "const_int_operand") ;; mod_s +- (match_operand:SI 5 "const_int_operand")] ;; mod_f ++ (match_operand:SI 4 "const_int_operand")] ;; mod_s + UNSPEC_COMPARE_AND_SWAP)) +- (clobber (match_scratch:GPR 6 "=&r"))] ++ (clobber (match_scratch:GPR 5 "=&r"))] + "" + { +- return "%G5\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "bne\t%0,%z2,2f\n\t" +- "or%i3\t%6,$zero,%3\n\t" +- "sc.\t%6,%1\n\t" +- "beq\t$zero,%6,1b\n\t" +- "b\t3f\n\t" +- "2:\n\t" +- "dbar\t0x700\n\t" +- "3:\n\t"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[4])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || 
is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("bne\t%0,%z2,2f", operands); ++ output_asm_insn ("or%i3\t%5,$zero,%3", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%5,%1", operands); ++ else ++ output_asm_insn ("screl.\t%5,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%5,1b", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ { ++ output_asm_insn ("b\t3f", operands); ++ output_asm_insn ("2:", operands); ++ output_asm_insn ("dbar\t0x700", operands); ++ output_asm_insn ("3:", operands); ++ } ++ else ++ output_asm_insn ("2:", operands); ++ return ""; ++ } ++ else ++ return "%G4\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "bne\\t%0,%z2,2f\\n\\t" ++ "or%i3\\t%5,$zero,%3\\n\\t" ++ "sc.\\t%5,%1\\n\\t" ++ "beq\\t$zero,%5,1b\\n\\t" ++ "b\\t3f\\n\\t" ++ "2:\\n\\t" ++ "dbar\\t0x700\\n\\t" ++ "3:\\n\\t"; + } + [(set (attr "length") (const_int 32))]) + ++(define_insn "atomic_cas_value_strong_3a6000" ++ [(set (match_operand:AMO_BHWD 0 "register_operand" "=&r") ++ (match_operand:AMO_BHWD 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:AMO_BHWD [(match_operand:AMO_BHWD 2 "reg_or_0_operand" "rJ") ++ (match_operand:AMO_BHWD 3 "reg_or_0_operand" "rJ") ++ (match_operand:SI 4 "const_int_operand")] ;; mod_s ++ UNSPEC_COMPARE_AND_SWAP))] ++ "TARGET_uARCH_LA664" ++ "ori\t%0,%z2,0\n\t%J4\n\tamcas%A4.\t%0,%z3,%1\n\t%K4" ++ [(set (attr "length") (const_int 32))]) ++ + (define_expand "atomic_compare_and_swap" + [(match_operand:SI 0 "register_operand" "") ;; bool output + (match_operand:GPR 1 "register_operand" "") ;; val output +@@ -155,9 +257,29 @@ + (match_operand:SI 7 "const_int_operand" "")] ;; mod_f + "" + { +- emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2], +- operands[3], operands[4], +- operands[6], operands[7])); ++ rtx mod_s, mod_f; ++ ++ mod_s = operands[6]; ++ mod_f = operands[7]; ++ ++ /* Normally the succ memory model must be stronger than fail, but in the ++ unlikely event of fail being ACQUIRE and succ being RELEASE we need to ++ promote succ to ACQ_REL so that we don't lose the acquire semantics. */ ++ ++ if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f))) ++ && is_mm_release (memmodel_from_int (INTVAL (mod_s)))) ++ mod_s = GEN_INT (MEMMODEL_ACQ_REL); ++ ++ operands[6] = mod_s; ++ ++ if (TARGET_uARCH_LA664) ++ emit_insn (gen_atomic_cas_value_strong_3a6000 (operands[1], operands[2], ++ operands[3], operands[4], ++ operands[6])); ++ else ++ emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2], ++ operands[3], operands[4], ++ operands[6])); + + rtx compare = operands[1]; + if (operands[3] != const0_rtx) +@@ -174,7 +296,8 @@ + compare = reg; + } + +- emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); ++ emit_insn (gen_rtx_SET (operands[0], ++ gen_rtx_EQ (SImode, compare, const0_rtx))); + DONE; + }) + +@@ -185,7 +308,7 @@ + "" + { + /* We have no QImode atomics, so use the address LSBs to form a mask, +- then use an aligned SImode atomic. */ ++ then use an aligned SImode atomic. 
*/ + rtx result = operands[0]; + rtx mem = operands[1]; + rtx model = operands[2]; +@@ -221,11 +344,9 @@ + DONE; + }) + +- +- + (define_insn "atomic_cas_value_cmp_and_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") + (match_operand:GPR 3 "reg_or_0_operand" "rJ") +@@ -236,23 +357,56 @@ + (clobber (match_scratch:GPR 7 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%2\n\t" +- "bne\t%7,%z4,2f\n\t" +- "and\t%7,%0,%z3\n\t" +- "or%i5\t%7,%7,%5\n\t" +- "sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b\n\t" +- "b\t3f\n\t" +- "2:\n\t" +- "dbar\t0x700\n\t" +- "3:\n\t"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%2", operands); ++ output_asm_insn ("bne\t%7,%z4,2f", operands); ++ output_asm_insn ("and\t%7,%0,%z3", operands); ++ output_asm_insn ("or%i5\t%7,%7,%5", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ { ++ output_asm_insn ("b\t3f", operands); ++ output_asm_insn ("2:", operands); ++ output_asm_insn ("dbar\t0x700", operands); ++ output_asm_insn ("3:", operands); ++ } ++ else ++ output_asm_insn ("2:", operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%2\\n\\t" ++ "bne\\t%7,%z4,2f\\n\\t" ++ "and\\t%7,%0,%z3\\n\\t" ++ "or%i5\\t%7,%7,%5\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b\\n\\t" ++ "b\\t3f\\n\\t" ++ "2:\\n\\t" ++ "dbar\\t0x700\\n\\t" ++ "3:\\n\\t"; + } + [(set (attr "length") (const_int 40))]) + +- + (define_expand "atomic_compare_and_swap" + [(match_operand:SI 0 "register_operand" "") ;; bool output + (match_operand:SHORT 1 "register_operand" "") ;; val output +@@ -264,43 +418,59 @@ + (match_operand:SI 7 "const_int_operand" "")] ;; mod_f + "" + { +- union loongarch_gen_fn_ptrs generator; +- generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[1], +- operands[2], +- operands[3], +- operands[4], +- operands[7]); ++ rtx mod_s, mod_f; + +- rtx compare = operands[1]; +- if (operands[3] != const0_rtx) +- { +- machine_mode mode = GET_MODE (operands[3]); +- rtx op1 = convert_modes (SImode, mode, operands[1], true); +- rtx op3 = convert_modes (SImode, mode, operands[3], true); +- rtx difference = gen_rtx_MINUS (SImode, op1, op3); +- compare = gen_reg_rtx (SImode); +- emit_insn (gen_rtx_SET (compare, difference)); +- } ++ mod_s = operands[6]; ++ mod_f = operands[7]; + +- if (word_mode != mode) ++ /* Normally the succ memory model must be stronger than fail, but in the ++ unlikely event of fail being ACQUIRE and succ being RELEASE we need to ++ promote succ to ACQ_REL so that we don't lose the acquire semantics. 
*/ ++ ++ if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f))) ++ && is_mm_release (memmodel_from_int (INTVAL (mod_s)))) ++ mod_s = GEN_INT (MEMMODEL_ACQ_REL); ++ ++ operands[6] = mod_s; ++ ++ if (TARGET_uARCH_LA664) ++ emit_insn (gen_atomic_cas_value_strong_3a6000 (operands[1], operands[2], ++ operands[3], operands[4], ++ operands[6])); ++ else + { +- rtx reg = gen_reg_rtx (word_mode); +- emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); +- compare = reg; ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si; ++ loongarch_expand_atomic_qihi (generator, operands[1], operands[2], ++ operands[3], operands[4], operands[6]); + } + +- emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx))); ++ rtx compare = operands[1]; ++ if (operands[3] != const0_rtx) ++ { ++ machine_mode mode = GET_MODE (operands[3]); ++ rtx op1 = convert_modes (SImode, mode, operands[1], true); ++ rtx op3 = convert_modes (SImode, mode, operands[3], true); ++ rtx difference = gen_rtx_MINUS (SImode, op1, op3); ++ compare = gen_reg_rtx (SImode); ++ emit_insn (gen_rtx_SET (compare, difference)); ++ } ++ ++ if (word_mode != mode) ++ { ++ rtx reg = gen_reg_rtx (word_mode); ++ emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare))); ++ compare = reg; ++ } ++ ++ emit_insn (gen_rtx_SET (operands[0], ++ gen_rtx_EQ (SImode, compare, const0_rtx))); + DONE; + }) + +- +- +- + (define_insn "atomic_cas_value_add_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask + (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask +@@ -312,24 +482,46 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%3\n\t" +- "add.w\t%8,%0,%z5\n\t" +- "and\t%8,%8,%z2\n\t" +- "or%i8\t%7,%7,%8\n\t" +- "sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%3", operands); ++ output_asm_insn ("add.w\t%8,%0,%z5", operands); ++ output_asm_insn ("and\t%8,%8,%z2", operands); ++ output_asm_insn ("or%i8\t%7,%7,%8", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b",operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%3\\n\\t" ++ "add.w\\t%8,%0,%z5\\n\\t" ++ "and\\t%8,%8,%z2\\n\\t" ++ "or%i8\\t%7,%7,%8\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b"; + } + + [(set (attr "length") (const_int 32))]) + +- +- + (define_insn "atomic_cas_value_sub_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask + (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask +@@ -341,23 +533,45 @@ + (clobber 
(match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%3\n\t" +- "sub.w\t%8,%0,%z5\n\t" +- "and\t%8,%8,%z2\n\t" +- "or%i8\t%7,%7,%8\n\t" +- "sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%3", operands); ++ output_asm_insn ("sub.w\t%8,%0,%z5", operands); ++ output_asm_insn ("and\t%8,%8,%z2", operands); ++ output_asm_insn ("or%i8\t%7,%7,%8", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b", operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%3\\n\\t" ++ "sub.w\\t%8,%0,%z5\\n\\t" ++ "and\\t%8,%8,%z2\\n\\t" ++ "or%i8\\t%7,%7,%8\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b"; + } + [(set (attr "length") (const_int 32))]) + +- +- + (define_insn "atomic_cas_value_and_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask + (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask +@@ -369,21 +583,45 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%3\n\t" +- "and\t%8,%0,%z5\n\t" +- "and\t%8,%8,%z2\n\t" +- "or%i8\t%7,%7,%8\n\t" +- "sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%3", operands); ++ output_asm_insn ("and\t%8,%0,%z5", operands); ++ output_asm_insn ("and\t%8,%8,%z2", operands); ++ output_asm_insn ("or%i8\t%7,%7,%8", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b", operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%3\\n\\t" ++ "and\\t%8,%0,%z5\\n\\t" ++ "and\\t%8,%8,%z2\\n\\t" ++ "or%i8\\t%7,%7,%8\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b"; + } + [(set (attr "length") (const_int 32))]) + + (define_insn "atomic_cas_value_xor_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask + (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask +@@ -395,22 +633,46 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%3\n\t" +- "xor\t%8,%0,%z5\n\t" +- "and\t%8,%8,%z2\n\t" +- "or%i8\t%7,%7,%8\n\t" +- 
"sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%3", operands); ++ output_asm_insn ("xor\t%8,%0,%z5", operands); ++ output_asm_insn ("and\t%8,%8,%z2", operands); ++ output_asm_insn ("or%i8\t%7,%7,%8", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b", operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%3\\n\\t" ++ "xor\\t%8,%0,%z5\\n\\t" ++ "and\\t%8,%8,%z2\\n\\t" ++ "or%i8\\t%7,%7,%8\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b"; + } + + [(set (attr "length") (const_int 32))]) + + (define_insn "atomic_cas_value_or_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask + (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask +@@ -422,22 +684,46 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%3\n\t" +- "or\t%8,%0,%z5\n\t" +- "and\t%8,%8,%z2\n\t" +- "or%i8\t%7,%7,%8\n\t" +- "sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%3", operands); ++ output_asm_insn ("or\t%8,%0,%z5", operands); ++ output_asm_insn ("and\t%8,%8,%z2", operands); ++ output_asm_insn ("or%i8\t%7,%7,%8", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b", operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%3\\n\\t" ++ "or\\t%8,%0,%z5\\n\\t" ++ "and\\t%8,%8,%z2\\n\\t" ++ "or%i8\\t%7,%7,%8\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b"; + } + + [(set (attr "length") (const_int 32))]) + + (define_insn "atomic_cas_value_nand_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") ;; mask + (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; inverted_mask +@@ -449,21 +735,45 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\n\t" +- "1:\n\t" +- "ll.\t%0,%1\n\t" +- "and\t%7,%0,%3\n\t" +- "and\t%8,%0,%z5\n\t" +- "xor\t%8,%8,%z2\n\t" +- "or%i8\t%7,%7,%8\n\t" +- "sc.\t%7,%1\n\t" +- "beq\t$zero,%7,1b"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed 
(model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%3", operands); ++ output_asm_insn ("and\t%8,%0,%z5", operands); ++ output_asm_insn ("xor\t%8,%8,%z2", operands); ++ output_asm_insn ("or%i8\t%7,%7,%8", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beq\t$zero,%7,1b", operands); ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%3\\n\\t" ++ "and\\t%8,%0,%z5\\n\\t" ++ "xor\\t%8,%8,%z2\\n\\t" ++ "or%i8\\t%7,%7,%8\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beq\\t$zero,%7,1b"; + } + [(set (attr "length") (const_int 32))]) + + (define_insn "atomic_cas_value_exchange_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") +- (match_operand:GPR 1 "memory_operand" "+ZC")) ++ (match_operand:GPR 1 "memory_operand" "+ZB")) + (set (match_dup 1) + (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ") + (match_operand:GPR 3 "reg_or_0_operand" "rJ") +@@ -474,13 +784,36 @@ + (clobber (match_scratch:GPR 7 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" +- "ll.\\t%0,%1\\n\\t" +- "and\\t%7,%0,%z3\\n\\t" +- "or%i5\\t%7,%7,%5\\n\\t" +- "sc.\\t%7,%1\\n\\t" +- "beqz\\t%7,1b\\n\\t"; ++ if (TARGET_uARCH_LA664) ++ { ++ enum memmodel model = memmodel_from_int (INTVAL (operands[6])); ++ output_asm_insn ("1:",operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model)) ++ output_asm_insn ("ll.\t%0,%1", operands); ++ else ++ output_asm_insn ("llacq.\t%0,%1", operands); ++ ++ output_asm_insn ("and\t%7,%0,%z3", operands); ++ output_asm_insn ("or%i5\t%7,%7,%5", operands); ++ ++ if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model)) ++ output_asm_insn ("sc.\t%7,%1", operands); ++ else ++ output_asm_insn ("screl.\t%7,%1", operands); ++ ++ output_asm_insn ("beqz\t%7,1b", operands); ++ ++ return ""; ++ } ++ else ++ return "%G6\\n\\t" ++ "1:\\n\\t" ++ "ll.\\t%0,%1\\n\\t" ++ "and\\t%7,%0,%z3\\n\\t" ++ "or%i5\\t%7,%7,%5\\n\\t" ++ "sc.\\t%7,%1\\n\\t" ++ "beqz\\t%7,1b\\n\\t"; + } + [(set (attr "length") (const_int 20))]) + +@@ -494,17 +827,30 @@ + (match_operand:SHORT 2 "register_operand"))] + "" + { +- union loongarch_gen_fn_ptrs generator; +- generator.fn_7 = gen_atomic_cas_value_exchange_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- const0_rtx, +- operands[2], +- operands[3]); ++ if (TARGET_uARCH_LA664) ++ emit_insn (gen_atomic_exchange_1 (operands[0], operands[1], operands[2], operands[3])); ++ else ++ { ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_exchange_7_si; ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ const0_rtx, operands[2], operands[3]); ++ } + DONE; + }) + ++(define_insn "atomic_fetch_add_1" ++ [(set (match_operand:SHORT 0 "register_operand" "=&r") ++ (match_operand:SHORT 1 "memory_operand" "+ZB")) ++ (set (match_dup 1) ++ (unspec_volatile:SHORT ++ [(plus:SHORT (match_dup 1) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_SYNC_OLD_OP))] ++ "" ++ "%J3\n\tamadd%A3.\t%0,%z2,%1\n\t%K3" ++ [(set (attr "length") (const_int 8))]) + + (define_expand "atomic_fetch_add" + [(set (match_operand:SHORT 0 "register_operand" "=&r") 
+@@ -512,19 +858,21 @@ + (set (match_dup 1) + (unspec_volatile:SHORT + [(plus:SHORT (match_dup 1) +- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" + { +- union loongarch_gen_fn_ptrs generator; +- generator.fn_7 = gen_atomic_cas_value_add_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- operands[1], +- operands[2], +- operands[3]); ++ if (TARGET_uARCH_LA664) ++ emit_insn (gen_atomic_fetch_add_1 (operands[0], operands[1], ++ operands[2], operands[3])); ++ else ++ { ++ union loongarch_gen_fn_ptrs generator; ++ generator.fn_7 = gen_atomic_cas_value_add_7_si; ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ operands[1], operands[2], operands[3]); ++ } + DONE; + }) + +@@ -534,19 +882,15 @@ + (set (match_dup 1) + (unspec_volatile:SHORT + [(minus:SHORT (match_dup 1) +- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" + { + union loongarch_gen_fn_ptrs generator; + generator.fn_7 = gen_atomic_cas_value_sub_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- operands[1], +- operands[2], +- operands[3]); ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ operands[1], operands[2], operands[3]); + DONE; + }) + +@@ -556,19 +900,15 @@ + (set (match_dup 1) + (unspec_volatile:SHORT + [(and:SHORT (match_dup 1) +- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" + { + union loongarch_gen_fn_ptrs generator; + generator.fn_7 = gen_atomic_cas_value_and_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- operands[1], +- operands[2], +- operands[3]); ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ operands[1], operands[2], operands[3]); + DONE; + }) + +@@ -578,19 +918,15 @@ + (set (match_dup 1) + (unspec_volatile:SHORT + [(xor:SHORT (match_dup 1) +- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" + { + union loongarch_gen_fn_ptrs generator; + generator.fn_7 = gen_atomic_cas_value_xor_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- operands[1], +- operands[2], +- operands[3]); ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ operands[1], operands[2], operands[3]); + DONE; + }) + +@@ -600,19 +936,15 @@ + (set (match_dup 1) + (unspec_volatile:SHORT + [(ior:SHORT (match_dup 1) +- (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ")) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" + { + union loongarch_gen_fn_ptrs generator; + generator.fn_7 = gen_atomic_cas_value_or_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- operands[1], +- operands[2], +- operands[3]); ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ operands[1], operands[2], operands[3]); + DONE; + }) + +@@ -622,18 +954,14 @@ + (set (match_dup 1) + (unspec_volatile:SHORT + [(not:SHORT (and:SHORT (match_dup 1) +- (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))) 
++ (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))) + (match_operand:SI 3 "const_int_operand")] ;; model + UNSPEC_SYNC_OLD_OP))] + "" + { + union loongarch_gen_fn_ptrs generator; + generator.fn_7 = gen_atomic_cas_value_nand_7_si; +- loongarch_expand_atomic_qihi (generator, +- operands[0], +- operands[1], +- operands[1], +- operands[2], +- operands[3]); ++ loongarch_expand_atomic_qihi (generator, operands[0], operands[1], ++ operands[1], operands[2], operands[3]); + DONE; + }) +diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux +index 58f27f89d..5ecf814fa 100644 +--- a/gcc/config/loongarch/t-linux ++++ b/gcc/config/loongarch/t-linux +@@ -16,8 +16,65 @@ + # along with GCC; see the file COPYING3. If not see + # . + +-MULTILIB_OSDIRNAMES := ../lib64$(call if_multiarch,:loongarch64-linux-gnu) +-MULTIARCH_DIRNAME := $(call if_multiarch,loongarch64-linux-gnu) ++# Multilib ++MULTILIB_OPTIONS = mabi=lp64d/mabi=lp64f/mabi=lp64s ++MULTILIB_DIRNAMES = . base/lp64f base/lp64s ++ ++# The GCC driver always gets all abi-related options on the command line. ++# (see loongarch-driver.c:driver_get_normalized_m_opts) ++comma=, ++MULTILIB_REQUIRED = $(foreach mlib,$(subst $(comma), ,$(TM_MULTILIB_CONFIG)),\ ++ $(firstword $(subst /, ,$(mlib)))) + +-# haven't supported lp32 yet +-MULTILIB_EXCEPTIONS = mabi=lp32 ++SPECS = specs.install ++ ++# temporary self_spec when building libraries (e.g. libgcc) ++gen_mlib_spec = $(if $(word 2,$1),\ ++ %{$(firstword $1):$(patsubst %,-%,$(wordlist 2,$(words $1),$1))}) ++ ++# clean up the result of DRIVER_SELF_SPEC to avoid conflict ++lib_build_self_spec = % $@ ++ ++# Remove lib_build_self_specs before regression tests. ++.PHONY: remove-lib-specs ++check check-host check-target $(CHECK_TARGETS) $(lang_checks): remove-lib-specs ++remove-lib-specs: ++ -mv -f specs.install specs 2>/dev/null ++ ++# Multiarch ++ifneq ($(call if_multiarch,yes),yes) ++ # Define LA_DISABLE_MULTIARCH if multiarch is disabled. ++ tm_defines += LA_DISABLE_MULTIARCH ++else ++ # Only define MULTIARCH_DIRNAME when multiarch is enabled, ++ # or it would always introduce ${target} into the search path. ++ MULTIARCH_DIRNAME = $(LA_MULTIARCH_TRIPLET) ++endif ++ ++# Don't define MULTILIB_OSDIRNAMES if multilib is disabled. ++ifeq ($(filter LA_DISABLE_MULTILIB,$(tm_defines)),) ++ ++ MULTILIB_OSDIRNAMES = \ ++ mabi.lp64d=../lib64$\ ++ $(call if_multiarch,:loongarch64-linux-gnu) ++ ++ MULTILIB_OSDIRNAMES += \ ++ mabi.lp64f=../lib64/f32$\ ++ $(call if_multiarch,:loongarch64-linux-gnuf32) ++ ++ MULTILIB_OSDIRNAMES += \ ++ mabi.lp64s=../lib64/sf$\ ++ $(call if_multiarch,:loongarch64-linux-gnusf) ++else ++ MULTILIB_OSDIRNAMES := ../lib64 ++endif +diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch +index 5689da44a..9d32fbcf6 100644 +--- a/gcc/config/loongarch/t-loongarch ++++ b/gcc/config/loongarch/t-loongarch +@@ -16,14 +16,20 @@ + # along with GCC; see the file COPYING3. If not see + # . 
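
Returning briefly to the sync.md changes above: on cores without the byte/halfword AM* instructions, the SHORT-mode expanders fall back to loongarch_expand_atomic_qihi, i.e. the atomic_cas_value_*_7_ patterns that operate on the containing aligned word with a mask, an inverted mask and an LL/SC retry loop. A rough C model of that fallback -- our own sketch, modelling LL/SC with a word-sized compare-and-swap, not the GCC code:

    #include <stdint.h>

    static uint8_t
    fetch_add_u8 (uint8_t *p, uint8_t add)
    {
      uintptr_t a = (uintptr_t) p;
      uint32_t *word = (uint32_t *) (a & ~(uintptr_t) 3);
      unsigned shift = (unsigned) (a & 3) * 8;   /* little-endian lane */
      uint32_t mask = (uint32_t) 0xff << shift;

      uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
      for (;;)
        {
          uint32_t lane = (old & mask) >> shift;
          uint32_t repl = (old & ~mask)
                          | (((lane + add) & 0xffu) << shift);
          /* __atomic_compare_exchange_n refreshes OLD on failure,
             so the loop retries with the newly observed word.  */
          if (__atomic_compare_exchange_n (word, &old, repl, 0,
                                           __ATOMIC_SEQ_CST,
                                           __ATOMIC_RELAXED))
            return (uint8_t) lane;
        }
    }
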
+ +-$(srcdir)/config/loongarch/loongarch-tables.opt: $(srcdir)/config/loongarch/genopt.sh \ +- $(srcdir)/config/loongarch/loongarch-cpus.def +- $(SHELL) $(srcdir)/config/loongarch/genopt.sh $(srcdir)/config/loongarch > \ +- $(srcdir)/config/loongarch/loongarch-tables.opt ++# Canonical target triplet from config.gcc ++LA_MULTIARCH_TRIPLET = $(patsubst LA_MULTIARCH_TRIPLET=%,%,$\ ++$(filter LA_MULTIARCH_TRIPLET=%,$(tm_defines))) + +-frame-header-opt.o: $(srcdir)/config/loongarch/frame-header-opt.c +- $(COMPILE) $< +- $(POSTCOMPILE) ++# String definition header ++LA_STR_H = $(srcdir)/config/loongarch/loongarch-str.h ++$(LA_STR_H): s-loongarch-str ; @true ++s-loongarch-str: $(srcdir)/config/loongarch/genopts/genstr.sh \ ++ $(srcdir)/config/loongarch/genopts/loongarch-strings ++ $(SHELL) $(srcdir)/config/loongarch/genopts/genstr.sh header \ ++ $(srcdir)/config/loongarch/genopts/loongarch-strings > \ ++ tmp-loongarch-str.h ++ $(SHELL) $(srcdir)/../move-if-change tmp-loongarch-str.h $(LA_STR_H) ++ $(STAMP) s-loongarch-str + + loongarch-c.o: $(srcdir)/config/loongarch/loongarch-c.c $(CONFIG_H) $(SYSTEM_H) \ + coretypes.h $(TM_H) $(TREE_H) output.h $(C_COMMON_H) $(TARGET_H) +@@ -31,15 +37,32 @@ loongarch-c.o: $(srcdir)/config/loongarch/loongarch-c.c $(CONFIG_H) $(SYSTEM_H) + $(srcdir)/config/loongarch/loongarch-c.c + + loongarch-builtins.o: $(srcdir)/config/loongarch/loongarch-builtins.c $(CONFIG_H) \ +- $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \ +- $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/loongarch/loongarch-ftypes.def \ +- $(srcdir)/config/loongarch/loongarch-modes.def ++ $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \ ++ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/loongarch/loongarch-ftypes.def \ ++ $(srcdir)/config/loongarch/loongarch-modes.def + $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ +- $(srcdir)/config/loongarch/loongarch-builtins.c +-loongarch-d.o: $(srcdir)/config/loongarch/loongarch-d.c +- $(COMPILE) $< +- $(POSTCOMPILE) +- +-comma=, +-MULTILIB_OPTIONS = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG)))) +-MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG)) ++ $(srcdir)/config/loongarch/loongarch-builtins.c ++ ++loongarch-driver.o : $(srcdir)/config/loongarch/loongarch-driver.c $(LA_STR_H) \ ++ $(CONFIG_H) $(SYSTEM_H) ++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< ++ ++loongarch-opts.o: $(srcdir)/config/loongarch/loongarch-opts.c $(LA_STR_H) ++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< ++ ++loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.c $(LA_STR_H) ++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< ++ ++loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.c $(LA_STR_H) ++ $(CC) -c $(ALL_CFLAGS) $(INCLUDES) $< ++ ++$(srcdir)/config/loongarch/loongarch.opt: s-loongarch-opt ; @true ++s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \ ++ $(srcdir)/config/loongarch/genopts/loongarch.opt.in ++ $(SHELL) $(srcdir)/config/loongarch/genopts/genstr.sh opt \ ++ $(srcdir)/config/loongarch/genopts/loongarch.opt.in \ ++ > tmp-loongarch.opt ++ $(SHELL) $(srcdir)/../move-if-change tmp-loongarch.opt \ ++ $(srcdir)/config/loongarch/loongarch.opt ++ $(STAMP) s-loongarch-opt ++ +diff --git a/gcc/config/loongarch/x-native b/gcc/config/loongarch/x-native +deleted file mode 100644 +index 827d21f1a..000000000 +--- a/gcc/config/loongarch/x-native ++++ /dev/null 
+@@ -1,3 +0,0 @@ +-driver-native.o : $(srcdir)/config/loongarch/driver-native.c \ +- $(CONFIG_H) $(SYSTEM_H) +- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< +diff --git a/libgcc/config/loongarch/crtfastmath.c b/libgcc/config/loongarch/crtfastmath.c +index d7371de6d..5f7b298ac 100644 +--- a/libgcc/config/loongarch/crtfastmath.c ++++ b/libgcc/config/loongarch/crtfastmath.c +@@ -1,30 +1,32 @@ +-/* Copyright (C) 2010-2018 Free Software Foundation, Inc. ++/* Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ Contributed by Loongson Ltd. ++ Based on MIPS target for GNU compiler. + +- This file is part of GCC. ++This file is part of GCC. + +- GCC is free software; you can redistribute it and/or modify it +- under the terms of the GNU General Public License as published by +- the Free Software Foundation; either version 3, or (at your option) +- any later version. ++GCC is free software; you can redistribute it and/or modify it ++under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. + +- GCC is distributed in the hope that it will be useful, but WITHOUT +- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +- License for more details. ++GCC is distributed in the hope that it will be useful, but WITHOUT ++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++License for more details. + +- Under Section 7 of GPL version 3, you are granted additional +- permissions described in the GCC Runtime Library Exception, version +- 3.1, as published by the Free Software Foundation. ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. + +- You should have received a copy of the GNU General Public License +- and a copy of the GCC Runtime Library Exception along with this +- program; see the files COPYING3 and COPYING.RUNTIME respectively. +- If not, see . */ ++You should have received a copy of the GNU General Public License ++and a copy of the GCC Runtime Library Exception along with this ++program; see the files COPYING3 and COPYING.RUNTIME respectively. ++If not, see . */ + + #ifdef __loongarch_hard_float + + /* Rounding control. */ +-#define _FPU_RC_NEAREST 0x000 /* RECOMMENDED */ ++#define _FPU_RC_NEAREST 0x000 /* RECOMMENDED. */ + #define _FPU_RC_ZERO 0x100 + #define _FPU_RC_UP 0x200 + #define _FPU_RC_DOWN 0x300 +@@ -33,18 +35,18 @@ + #define _FPU_IEEE 0x0000001F + + /* Macros for accessing the hardware control word. */ +-#define _FPU_GETCW(cw) __asm__ ("movgr2fcsr %0,$r1" : "=r" (cw)) +-#define _FPU_SETCW(cw) __asm__ ("movfcsr2gr %0,$r1" : : "r" (cw)) ++#define _FPU_GETCW(cw) __asm__ volatile ("movfcsr2gr %0,$r0" : "=r" (cw)) ++#define _FPU_SETCW(cw) __asm__ volatile ("movgr2fcsr $r0,%0" : : "r" (cw)) + + static void __attribute__((constructor)) + set_fast_math (void) + { + unsigned int fcr; + +- /* round to nearest, IEEE exceptions disabled. */ ++ /* Flush to zero, round to nearest, IEEE exceptions disabled. 
*/ + fcr = _FPU_RC_NEAREST; + +- _FPU_SETCW(fcr); ++ _FPU_SETCW (fcr); + } + +-#endif /* __loongarch_hard_float */ ++#endif /* __loongarch_hard_float */ +diff --git a/libgcc/config/loongarch/crti.S b/libgcc/config/loongarch/crti.S +deleted file mode 100644 +index dcd05afea..000000000 +--- a/libgcc/config/loongarch/crti.S ++++ /dev/null +@@ -1,43 +0,0 @@ +-/* Copyright (C) 2001-2018 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify it under +-the terms of the GNU General Public License as published by the Free +-Software Foundation; either version 3, or (at your option) any later +-version. +- +-GCC is distributed in the hope that it will be useful, but WITHOUT ANY +-WARRANTY; without even the implied warranty of MERCHANTABILITY or +-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-for more details. +- +-Under Section 7 of GPL version 3, you are granted additional +-permissions described in the GCC Runtime Library Exception, version +-3.1, as published by the Free Software Foundation. +- +-You should have received a copy of the GNU General Public License and +-a copy of the GCC Runtime Library Exception along with this program; +-see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +-. */ +- +-/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack. +- Return spill offset of 40 and 20. Aligned to 16 bytes for n32. */ +- +- .section .init,"ax",@progbits +- .globl _init +- .type _init,@function +-_init: +- addi.d $r3,$r3,-48 +- st.d $r1,$r3,40 +- addi.d $r3,$r3,48 +- jirl $r0,$r1,0 +- +- .section .fini,"ax",@progbits +- .globl _fini +- .type _fini,@function +-_fini: +- addi.d $r3,$r3,-48 +- st.d $r1,$r3,40 +- addi.d $r3,$r3,48 +- jirl $r0,$r1,0 +diff --git a/libgcc/config/loongarch/crtn.S b/libgcc/config/loongarch/crtn.S +deleted file mode 100644 +index 91d9d5e7f..000000000 +--- a/libgcc/config/loongarch/crtn.S ++++ /dev/null +@@ -1,39 +0,0 @@ +-/* Copyright (C) 2001-2018 Free Software Foundation, Inc. +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify it under +-the terms of the GNU General Public License as published by the Free +-Software Foundation; either version 3, or (at your option) any later +-version. +- +-GCC is distributed in the hope that it will be useful, but WITHOUT ANY +-WARRANTY; without even the implied warranty of MERCHANTABILITY or +-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-for more details. +- +-Under Section 7 of GPL version 3, you are granted additional +-permissions described in the GCC Runtime Library Exception, version +-3.1, as published by the Free Software Foundation. +- +-You should have received a copy of the GNU General Public License and +-a copy of the GCC Runtime Library Exception along with this program; +-see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +-. */ +- +-/* 4 slots for argument spill area. 1 for cpreturn, 1 for stack. +- Return spill offset of 40 and 20. Aligned to 16 bytes for n32. 
*/ +- +- +- .section .init,"ax",@progbits +-init: +- ld.d $r1,$r3,40 +- addi.d $r3,$r3,48 +- jirl $r0,$r1,0 +- +- .section .fini,"ax",@progbits +-fini: +- ld.d $r1,$r3,40 +- addi.d $r3,$r3,48 +- jirl $r0,$r1,0 +- +diff --git a/libgcc/config/loongarch/gthr-loongnixsde.h b/libgcc/config/loongarch/gthr-loongnixsde.h +deleted file mode 100644 +index f62b57318..000000000 +--- a/libgcc/config/loongarch/gthr-loongnixsde.h ++++ /dev/null +@@ -1,237 +0,0 @@ +-/* LARCH SDE threads compatibility routines for libgcc2 and libobjc. */ +-/* Compile this one with gcc. */ +-/* Copyright (C) 2006-2018 Free Software Foundation, Inc. +- Contributed by Nigel Stephens +- +-This file is part of GCC. +- +-GCC is free software; you can redistribute it and/or modify it under +-the terms of the GNU General Public License as published by the Free +-Software Foundation; either version 3, or (at your option) any later +-version. +- +-GCC is distributed in the hope that it will be useful, but WITHOUT ANY +-WARRANTY; without even the implied warranty of MERCHANTABILITY or +-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-for more details. +- +-Under Section 7 of GPL version 3, you are granted additional +-permissions described in the GCC Runtime Library Exception, version +-3.1, as published by the Free Software Foundation. +- +-You should have received a copy of the GNU General Public License and +-a copy of the GCC Runtime Library Exception along with this program; +-see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +-. */ +- +-#ifndef GCC_GTHR_LARCHSDE_H +-#define GCC_GTHR_LARCHSDE_H +- +-/* LARCH SDE threading API specific definitions. +- Easy, since the interface is pretty much one-to-one. */ +- +-#define __GTHREADS 1 +- +-#include +-#include +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-typedef __sdethread_key_t __gthread_key_t; +-typedef __sdethread_once_t __gthread_once_t; +-typedef __sdethread_mutex_t __gthread_mutex_t; +- +-typedef struct { +- long depth; +- __sdethread_t owner; +- __sdethread_mutex_t actual; +-} __gthread_recursive_mutex_t; +- +-#define __GTHREAD_MUTEX_INIT __SDETHREAD_MUTEX_INITIALIZER("gthr") +-#define __GTHREAD_ONCE_INIT __SDETHREAD_ONCE_INIT +-static inline int +-__gthread_recursive_mutex_init_function(__gthread_recursive_mutex_t *__mutex); +-#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function +- +-#if SUPPORTS_WEAK && GTHREAD_USE_WEAK +-# define __gthrw(name) \ +- static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name))); +-# define __gthrw_(name) __gthrw_ ## name +-#else +-# define __gthrw(name) +-# define __gthrw_(name) name +-#endif +- +-__gthrw(__sdethread_once) +-__gthrw(__sdethread_key_create) +-__gthrw(__sdethread_key_delete) +-__gthrw(__sdethread_getspecific) +-__gthrw(__sdethread_setspecific) +- +-__gthrw(__sdethread_self) +- +-__gthrw(__sdethread_mutex_lock) +-__gthrw(__sdethread_mutex_trylock) +-__gthrw(__sdethread_mutex_unlock) +- +-__gthrw(__sdethread_mutex_init) +- +-__gthrw(__sdethread_threading) +- +-#if SUPPORTS_WEAK && GTHREAD_USE_WEAK +- +-static inline int +-__gthread_active_p (void) +-{ +- return !!(void *)&__sdethread_threading; +-} +- +-#else /* not SUPPORTS_WEAK */ +- +-static inline int +-__gthread_active_p (void) +-{ +- return 1; +-} +- +-#endif /* SUPPORTS_WEAK */ +- +-static inline int +-__gthread_once (__gthread_once_t *__once, void (*__func) (void)) +-{ +- if (__gthread_active_p ()) +- return __gthrw_(__sdethread_once) (__once, __func); +- else +- return -1; 
+-} +- +-static inline int +-__gthread_key_create (__gthread_key_t *__key, void (*__dtor) (void *)) +-{ +- return __gthrw_(__sdethread_key_create) (__key, __dtor); +-} +- +-static inline int +-__gthread_key_delete (__gthread_key_t __key) +-{ +- return __gthrw_(__sdethread_key_delete) (__key); +-} +- +-static inline void * +-__gthread_getspecific (__gthread_key_t __key) +-{ +- return __gthrw_(__sdethread_getspecific) (__key); +-} +- +-static inline int +-__gthread_setspecific (__gthread_key_t __key, const void *__ptr) +-{ +- return __gthrw_(__sdethread_setspecific) (__key, __ptr); +-} +- +-static inline int +-__gthread_mutex_destroy (__gthread_mutex_t * UNUSED(__mutex)) +-{ +- return 0; +-} +- +-static inline int +-__gthread_mutex_lock (__gthread_mutex_t *__mutex) +-{ +- if (__gthread_active_p ()) +- return __gthrw_(__sdethread_mutex_lock) (__mutex); +- else +- return 0; +-} +- +-static inline int +-__gthread_mutex_trylock (__gthread_mutex_t *__mutex) +-{ +- if (__gthread_active_p ()) +- return __gthrw_(__sdethread_mutex_trylock) (__mutex); +- else +- return 0; +-} +- +-static inline int +-__gthread_mutex_unlock (__gthread_mutex_t *__mutex) +-{ +- if (__gthread_active_p ()) +- return __gthrw_(__sdethread_mutex_unlock) (__mutex); +- else +- return 0; +-} +- +-static inline int +-__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex) +-{ +- __mutex->depth = 0; +- __mutex->owner = __gthrw_(__sdethread_self) (); +- return __gthrw_(__sdethread_mutex_init) (&__mutex->actual, NULL); +-} +- +-static inline int +-__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *__mutex) +-{ +- if (__gthread_active_p ()) +- { +- __sdethread_t __me = __gthrw_(__sdethread_self) (); +- +- if (__mutex->owner != __me) +- { +- __gthrw_(__sdethread_mutex_lock) (&__mutex->actual); +- __mutex->owner = __me; +- } +- +- __mutex->depth++; +- } +- return 0; +-} +- +-static inline int +-__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *__mutex) +-{ +- if (__gthread_active_p ()) +- { +- __sdethread_t __me = __gthrw_(__sdethread_self) (); +- +- if (__mutex->owner != __me) +- { +- if (__gthrw_(__sdethread_mutex_trylock) (&__mutex->actual)) +- return 1; +- __mutex->owner = __me; +- } +- +- __mutex->depth++; +- } +- return 0; +-} +- +-static inline int +-__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex) +-{ +- if (__gthread_active_p ()) +- { +- if (--__mutex->depth == 0) +- { +- __mutex->owner = (__sdethread_t) 0; +- __gthrw_(__sdethread_mutex_unlock) (&__mutex->actual); +- } +- } +- return 0; +-} +- +-static inline int +-__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t +- * UNUSED(__mutex)) +-{ +- return 0; +-} +- +-#ifdef __cplusplus +-} +-#endif +- +-#endif /* ! GCC_GTHR_LARCHSDE_H */ +diff --git a/libgcc/config/loongarch/linux-unwind.h b/libgcc/config/loongarch/linux-unwind.h +index d77dfb058..30603e44f 100644 +--- a/libgcc/config/loongarch/linux-unwind.h ++++ b/libgcc/config/loongarch/linux-unwind.h +@@ -1,5 +1,5 @@ +-/* DWARF2 EH unwinding support for LARCH Linux. +- Copyright (C) 2004-2018 Free Software Foundation, Inc. ++/* DWARF2 EH unwinding support for LoongArch Linux. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + +@@ -34,26 +34,27 @@ see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see + + static _Unwind_Reason_Code + loongarch_fallback_frame_state (struct _Unwind_Context *context, +- _Unwind_FrameState *fs) ++ _Unwind_FrameState *fs) + { + u_int32_t *pc = (u_int32_t *) context->ra; + struct sigcontext *sc; + _Unwind_Ptr new_cfa; + int i; + +- /* 03822c0b dli a7, 0x8b (sigreturn) */ +- /* 002b0000 syscall 0 */ ++ /* 03822c0b li.d a7, 0x8b (sigreturn) */ ++ /* 002b0000 syscall 0 */ + if (pc[1] != 0x002b0000) + return _URC_END_OF_STACK; + if (pc[0] == 0x03822c0b) + { +- struct rt_sigframe { ++ struct rt_sigframe ++ { + u_int32_t ass[4]; /* Argument save space for o32. */ + u_int32_t trampoline[2]; + siginfo_t info; + ucontext_t uc; + } *rt_ = context->cfa; +- sc = &rt_->uc.uc_mcontext; ++ sc = (struct sigcontext *) (void *) &rt_->uc.uc_mcontext; + } + else + return _URC_END_OF_STACK; +@@ -63,17 +64,17 @@ loongarch_fallback_frame_state (struct _Unwind_Context *context, + fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__; + fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; + +- for (i = 0; i < 32; i++) { +- fs->regs.reg[i].how = REG_SAVED_OFFSET; +- fs->regs.reg[i].loc.offset +- = (_Unwind_Ptr)&(sc->sc_regs[i]) - new_cfa; +- } ++ for (i = 0; i < 32; i++) ++ { ++ fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.reg[i].loc.offset = (_Unwind_Ptr) & (sc->sc_regs[i]) - new_cfa; ++ } + + fs->signal_frame = 1; + fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].how + = REG_SAVED_VAL_OFFSET; + fs->regs.reg[__LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__].loc.offset +- = (_Unwind_Ptr)(sc->sc_pc) - new_cfa; ++ = (_Unwind_Ptr) (sc->sc_pc) - new_cfa; + fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__; + + return _URC_NO_REASON; +diff --git a/libgcc/config/loongarch/sfp-machine.h b/libgcc/config/loongarch/sfp-machine.h +index f7800a003..420f94274 100644 +--- a/libgcc/config/loongarch/sfp-machine.h ++++ b/libgcc/config/loongarch/sfp-machine.h +@@ -1,5 +1,5 @@ +-/* softfp machine description for LARCH. +- Copyright (C) 2009-2018 Free Software Foundation, Inc. ++/* softfp machine description for LoongArch. ++ Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + +@@ -23,49 +23,49 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + + #ifdef __loongarch64 +-#define _FP_W_TYPE_SIZE 64 +-#define _FP_W_TYPE unsigned long long +-#define _FP_WS_TYPE signed long long +-#define _FP_I_TYPE long long ++#define _FP_W_TYPE_SIZE 64 ++#define _FP_W_TYPE unsigned long long ++#define _FP_WS_TYPE signed long long ++#define _FP_I_TYPE long long + + typedef int TItype __attribute__ ((mode (TI))); + typedef unsigned int UTItype __attribute__ ((mode (TI))); + #define TI_BITS (__CHAR_BIT__ * (int) sizeof (TItype)) + +-#define _FP_MUL_MEAT_S(R,X,Y) \ +- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) +-#define _FP_MUL_MEAT_D(R,X,Y) \ +- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) +-#define _FP_MUL_MEAT_Q(R,X,Y) \ +- _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) ++#define _FP_MUL_MEAT_S(R, X, Y) \ ++ _FP_MUL_MEAT_1_wide (_FP_WFRACBITS_S, R, X, Y, umul_ppmm) ++#define _FP_MUL_MEAT_D(R, X, Y) \ ++ _FP_MUL_MEAT_1_wide (_FP_WFRACBITS_D, R, X, Y, umul_ppmm) ++#define _FP_MUL_MEAT_Q(R, X, Y) \ ++ _FP_MUL_MEAT_2_wide (_FP_WFRACBITS_Q, R, X, Y, umul_ppmm) + +-#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) +-#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) +-#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) ++#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_udiv_norm (S, R, X, Y) ++#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_1_udiv_norm (D, R, X, Y) ++#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_2_udiv (Q, R, X, Y) + +-# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) +-# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1) +-# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1 ++#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) ++#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1) ++#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1 + #else +-#define _FP_W_TYPE_SIZE 32 +-#define _FP_W_TYPE unsigned int +-#define _FP_WS_TYPE signed int +-#define _FP_I_TYPE int +- +-#define _FP_MUL_MEAT_S(R,X,Y) \ +- _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) +-#define _FP_MUL_MEAT_D(R,X,Y) \ +- _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) +-#define _FP_MUL_MEAT_Q(R,X,Y) \ +- _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) +- +-#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y) +-#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) +-#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y) +- +-# define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) +-# define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 +-# define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 ++#define _FP_W_TYPE_SIZE 32 ++#define _FP_W_TYPE unsigned int ++#define _FP_WS_TYPE signed int ++#define _FP_I_TYPE int ++ ++#define _FP_MUL_MEAT_S(R, X, Y) \ ++ _FP_MUL_MEAT_1_wide (_FP_WFRACBITS_S, R, X, Y, umul_ppmm) ++#define _FP_MUL_MEAT_D(R, X, Y) \ ++ _FP_MUL_MEAT_2_wide (_FP_WFRACBITS_D, R, X, Y, umul_ppmm) ++#define _FP_MUL_MEAT_Q(R, X, Y) \ ++ _FP_MUL_MEAT_4_wide (_FP_WFRACBITS_Q, R, X, Y, umul_ppmm) ++ ++#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_udiv_norm (S, R, X, Y) ++#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_2_udiv (D, R, X, Y) ++#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_4_udiv (Q, R, X, Y) ++ ++#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) ++#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 ++#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 + #endif + + /* The type of the result of a floating point comparison. 
This must +@@ -73,76 +73,80 @@ typedef unsigned int UTItype __attribute__ ((mode (TI))); + typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__))); + #define CMPtype __gcc_CMPtype + +-#define _FP_NANSIGN_S 0 +-#define _FP_NANSIGN_D 0 +-#define _FP_NANSIGN_Q 0 ++#define _FP_NANSIGN_S 0 ++#define _FP_NANSIGN_D 0 ++#define _FP_NANSIGN_Q 0 + + #define _FP_KEEPNANFRACP 1 +-# define _FP_QNANNEGATEDP 0 ++#define _FP_QNANNEGATEDP 0 + + /* NaN payloads should be preserved for NAN2008. */ +-# define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ +- do \ +- { \ +- R##_s = X##_s; \ +- _FP_FRAC_COPY_##wc (R, X); \ +- R##_c = FP_CLS_NAN; \ +- } \ ++#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ ++ do \ ++ { \ ++ R##_s = X##_s; \ ++ _FP_FRAC_COPY_##wc (R, X); \ ++ R##_c = FP_CLS_NAN; \ ++ } \ + while (0) + + #ifdef __loongarch_hard_float +-#define FP_EX_INVALID 0x100000 +-#define FP_EX_DIVZERO 0x080000 +-#define FP_EX_OVERFLOW 0x040000 +-#define FP_EX_UNDERFLOW 0x020000 +-#define FP_EX_INEXACT 0x010000 ++#define FP_EX_INVALID 0x100000 ++#define FP_EX_DIVZERO 0x080000 ++#define FP_EX_OVERFLOW 0x040000 ++#define FP_EX_UNDERFLOW 0x020000 ++#define FP_EX_INEXACT 0x010000 + #define FP_EX_ALL \ +- (FP_EX_INVALID | FP_EX_DIVZERO | FP_EX_OVERFLOW | FP_EX_UNDERFLOW \ +- | FP_EX_INEXACT) ++ (FP_EX_INVALID | FP_EX_DIVZERO | FP_EX_OVERFLOW | FP_EX_UNDERFLOW \ ++ | FP_EX_INEXACT) + +-#define FP_EX_ENABLE_SHIFT 16 +-#define FP_EX_CAUSE_SHIFT 8 ++#define FP_EX_ENABLE_SHIFT 16 ++#define FP_EX_CAUSE_SHIFT 8 + +-#define FP_RND_NEAREST 0x000 +-#define FP_RND_ZERO 0x100 +-#define FP_RND_PINF 0x200 +-#define FP_RND_MINF 0x300 +-#define FP_RND_MASK 0x300 ++#define FP_RND_NEAREST 0x000 ++#define FP_RND_ZERO 0x100 ++#define FP_RND_PINF 0x200 ++#define FP_RND_MINF 0x300 ++#define FP_RND_MASK 0x300 + + #define _FP_DECL_EX \ + unsigned long int _fcsr __attribute__ ((unused)) = FP_RND_NEAREST + +-#define FP_INIT_ROUNDMODE \ +- do { \ +- _fcsr = __builtin_loongarch_movfcsr2gr (0); \ +- } while (0) ++#define FP_INIT_ROUNDMODE \ ++ do \ ++ { \ ++ _fcsr = __builtin_loongarch_movfcsr2gr (0); \ ++ } \ ++ while (0) + + #define FP_ROUNDMODE (_fcsr & FP_RND_MASK) + + #define FP_TRAPPING_EXCEPTIONS ((_fcsr << FP_EX_ENABLE_SHIFT) & FP_EX_ALL) + +-#define FP_HANDLE_EXCEPTIONS \ +- do { \ +- _fcsr &= ~(FP_EX_ALL << FP_EX_CAUSE_SHIFT); \ +- _fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \ +- __builtin_loongarch_movgr2fcsr (0, _fcsr); \ +- } while (0) ++#define FP_HANDLE_EXCEPTIONS \ ++ do \ ++ { \ ++ _fcsr &= ~(FP_EX_ALL << FP_EX_CAUSE_SHIFT); \ ++ _fcsr |= _fex | (_fex << FP_EX_CAUSE_SHIFT); \ ++ __builtin_loongarch_movgr2fcsr (0, _fcsr); \ ++ } \ ++ while (0) + + #else +-#define FP_EX_INVALID (1 << 4) +-#define FP_EX_DIVZERO (1 << 3) +-#define FP_EX_OVERFLOW (1 << 2) +-#define FP_EX_UNDERFLOW (1 << 1) +-#define FP_EX_INEXACT (1 << 0) ++#define FP_EX_INVALID (1 << 4) ++#define FP_EX_DIVZERO (1 << 3) ++#define FP_EX_OVERFLOW (1 << 2) ++#define FP_EX_UNDERFLOW (1 << 1) ++#define FP_EX_INEXACT (1 << 0) + #endif + + #define _FP_TININESS_AFTER_ROUNDING 1 + +-#define __LITTLE_ENDIAN 1234 ++#define __LITTLE_ENDIAN 1234 + +-# define __BYTE_ORDER __LITTLE_ENDIAN ++#define __BYTE_ORDER __LITTLE_ENDIAN + + /* Define ALIASNAME as a strong alias for NAME. 
*/ +-# define strong_alias(name, aliasname) _strong_alias(name, aliasname) +-# define _strong_alias(name, aliasname) \ ++#define strong_alias(name, aliasname) _strong_alias (name, aliasname) ++#define _strong_alias(name, aliasname) \ + extern __typeof (name) aliasname __attribute__ ((alias (#name))); +diff --git a/libgcc/config/loongarch/t-elf b/libgcc/config/loongarch/t-elf +deleted file mode 100644 +index 651f10a53..000000000 +--- a/libgcc/config/loongarch/t-elf ++++ /dev/null +@@ -1,3 +0,0 @@ +-# We must build libgcc2.a with -G 0, in case the user wants to link +-# without the $gp register. +-HOST_LIBGCC2_CFLAGS += -G 0 +diff --git a/libgcc/config/loongarch/t-loongarch b/libgcc/config/loongarch/t-loongarch +index 9508cb2fc..2a7dbf6ca 100644 +--- a/libgcc/config/loongarch/t-loongarch ++++ b/libgcc/config/loongarch/t-loongarch +@@ -5,5 +5,3 @@ softfp_int_modes := si di + softfp_extensions := + softfp_truncations := + softfp_exclude_libgcc2 := n +- +-LIB2ADD_ST += $(srcdir)/config/loongarch/lib2funcs.c +diff --git a/libgcc/config/loongarch/t-sdemtk b/libgcc/config/loongarch/t-sdemtk +deleted file mode 100644 +index a06e828b5..000000000 +--- a/libgcc/config/loongarch/t-sdemtk ++++ /dev/null +@@ -1,3 +0,0 @@ +-# Don't build FPBIT and DPBIT; we'll be using the SDE soft-float library. +-FPBIT = +-DPBIT = +diff --git a/libgcc/config/loongarch/t-vr b/libgcc/config/loongarch/t-vr +deleted file mode 100644 +index e69de29bb..000000000 +-- +2.39.3 + diff --git a/gcc.spec b/gcc.spec index c77ee69..174a380 100644 --- a/gcc.spec +++ b/gcc.spec @@ -39,32 +39,32 @@ %else %global build_libquadmath 0 %endif -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 loongarch64 %global build_libasan 1 %else %global build_libasan 0 %endif -%ifarch x86_64 ppc64 ppc64le aarch64 +%ifarch x86_64 ppc64 ppc64le aarch64 loongarch64 %global build_libtsan 1 %else %global build_libtsan 0 %endif -%ifarch x86_64 ppc64 ppc64le aarch64 +%ifarch x86_64 ppc64 ppc64le aarch64 loongarch64 %global build_liblsan 1 %else %global build_liblsan 0 %endif -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 loongarch64 %global build_libubsan 1 %else %global build_libubsan 0 %endif -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} loongarch64 %global build_libatomic 1 %else %global build_libatomic 0 %endif -%ifarch %{ix86} x86_64 %{arm} alpha ppc ppc64 ppc64le ppc64p7 s390 s390x aarch64 +%ifarch %{ix86} x86_64 %{arm} alpha ppc ppc64 ppc64le ppc64p7 s390 s390x aarch64 loongarch64 %global build_libitm 1 %else %global build_libitm 0 @@ -80,7 +80,7 @@ %endif %global build_isl 1 %global build_libstdcxx_docs 1 -%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} +%ifarch %{ix86} x86_64 ppc ppc64 ppc64le ppc64p7 s390 s390x %{arm} aarch64 %{mips} loongarch64 %global attr_ifunc 1 %else %global attr_ifunc 0 @@ -316,6 +316,18 @@ Patch1002: nvptx-tools-glibc.patch Patch10000: 0001-gcc-anolis-Rebrand-for-OpenAnolis.patch +Patch1003: 0001-Add-LoongArch-support-for-anolis-a8-gcc.patch +Patch1004: 0002-loongarch-fix-multilib-osdirnames-to-lib64.patch +Patch1005: 0001-LoongArch-Fixup-configure-file-error.patch +Patch1006: 0002-LoongArch-Rename-config-file-for-loongarch.patch +Patch1007: 
LoongArch-Fix-atomic_exchange-expanding-PR107713.patch +Patch1008: Sync-to-gcc-8-vec-36.patch +Patch1009: LoongArch-Remove-NOOP_TRUNCATION-and-fix-extendsidi2.patch +Patch1010: Improve-specs-processing-to-allow-in-function-argume.patch +Patch1011: LoongArch-Add-sanitizer-support.patch +Patch1012: libitm-Add-LoongArch-support.patch +Patch1013: LoongArch-Add-missing-headers.patch +Patch1014: Fix-dwarf2cfi-error.patch # On ARM EABI systems, we do want -gnueabi to be part of the # target triple. @@ -977,6 +989,19 @@ rm -f gcc/testsuite/go.test/test/chan/goroutines.go %patch10000 -p1 +%patch1003 -p1 +%patch1004 -p1 +%patch1005 -p1 +%patch1006 -p1 +%patch1007 -p1 +%patch1008 -p1 +%patch1009 -p1 +%patch1010 -p1 +%patch1011 -p1 +%patch1012 -p1 +%patch1013 -p1 +%patch1014 -p1 + %build # Undo the broken autoconf change in recent Fedora versions @@ -1061,7 +1086,7 @@ CONFIGURE_OPTS="\ %ifarch ppc64le --enable-targets=powerpcle-linux \ %endif -%ifarch ppc64le %{mips} riscv64 s390x +%ifarch ppc64le %{mips} riscv64 s390x loongarch64 --disable-multilib \ %else %if 0%{?anolis} @@ -1174,6 +1199,17 @@ CONFIGURE_OPTS="\ %endif %ifnarch sparc sparcv9 ppc --build=%{gcc_target_platform} \ +%endif +%ifarch loongarch64 + --with-arch=loongarch64 \ + --with-abi=lp64 \ + --enable-tls \ + --with-long-double-128 \ + --enable-initfini-array \ + --enable-gnu-indirect-function \ + --disable-emultls \ + --disable-multilib \ + --with-linker-hash-style=gnu \ %endif " @@ -1294,6 +1330,8 @@ then echo "Unpacking annobin sources" rm -fr annobin-* tar xvf %{annobin_source_dir}/latest-annobin.tar.xz + %_update_config_guess + %_update_config_sub # Setting this as a local symbol because using %%global does not appear to work. annobin_dir=$(find . -maxdepth 1 -type d -name "annobin*") @@ -2545,6 +2583,17 @@ fi %{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/htmxlintrin.h %{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/vecintrin.h %endif +%ifarch loongarch64 +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/larchintrin.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/lasxintrin.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/include/lsxintrin.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-tune.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-def.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-protos.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-opts.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-str.h +%{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/plugin/include/config/loongarch/loongarch-driver.h +%endif %if %{build_libmpx} %{_prefix}/lib/gcc/%{gcc_target_platform}/%{gcc_major}/libmpx.spec %endif diff --git a/libitm-Add-LoongArch-support.patch b/libitm-Add-LoongArch-support.patch new file mode 100644 index 0000000..8cb1d50 --- /dev/null +++ b/libitm-Add-LoongArch-support.patch @@ -0,0 +1,285 @@ +From 59b72352ab2e4e16f28d5e242f83ff37257c5301 Mon Sep 17 00:00:00 2001 +From: Xing Li +Date: Fri, 6 Jan 2023 10:44:00 +0800 +Subject: [PATCH 1/2] libitm: Add LoongArch support. 
+ +Signed-off-by: Xing Li +Signed-off-by: Yang Yujie +--- + libitm/config/loongarch/asm.h | 54 +++++++++++++ + libitm/config/loongarch/sjlj.S | 130 +++++++++++++++++++++++++++++++ + libitm/config/loongarch/target.h | 50 ++++++++++++ + libitm/configure.tgt | 2 + + 4 files changed, 236 insertions(+) + create mode 100644 libitm/config/loongarch/asm.h + create mode 100644 libitm/config/loongarch/sjlj.S + create mode 100644 libitm/config/loongarch/target.h + +diff --git a/libitm/config/loongarch/asm.h b/libitm/config/loongarch/asm.h +new file mode 100644 +index 000000000..e7f881b03 +--- /dev/null ++++ b/libitm/config/loongarch/asm.h +@@ -0,0 +1,54 @@ ++/* Copyright (C) 2014-2018 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. ++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#ifndef _LA_ASM_H ++#define _LA_ASM_H ++ ++#if defined(__loongarch64) ++# define GPR_L ld.d ++# define GPR_S st.d ++# define SZ_GPR 8 ++# define ADDSP(si) addi.d $sp, $sp, si ++#elif defined(__loongarch32) ++# define GPR_L ld.w ++# define GPR_S st.w ++# define SZ_GPR 4 ++# define ADDSP(si) addi.w $sp, $sp, si ++#else ++# error Unsupported GPR size (must be 64-bit or 32-bit). ++#endif ++ ++#if defined(__loongarch_hard_float) ++# define FPR_L fld.d ++# define FPR_S fst.d ++# define SZ_FPR 8 ++#elif defined(__loongarch_single_float) ++# define FPR_L fld.s ++# define FPR_S fst.s ++# define SZ_FPR 4 ++#else ++# define SZ_FPR 0 ++#endif ++ ++#endif /* _LA_ASM_H */ +diff --git a/libitm/config/loongarch/sjlj.S b/libitm/config/loongarch/sjlj.S +new file mode 100644 +index 000000000..e8610f9b5 +--- /dev/null ++++ b/libitm/config/loongarch/sjlj.S +@@ -0,0 +1,130 @@ ++/* Copyright (C) 2014-2018 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. ++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. 
++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++#include "asmcfi.h" ++#include "asm.h" ++ ++ .text ++ .align 2 ++ .global _ITM_beginTransaction ++ .type _ITM_beginTransaction, @function ++ ++_ITM_beginTransaction: ++ cfi_startproc ++ move $r5, $sp ++ ADDSP(-(12*SZ_GPR+8*SZ_FPR)) ++ cfi_adjust_cfa_offset(12*SZ_GPR+8*SZ_FPR) ++ ++ /* Frame Pointer */ ++ GPR_S $fp, $sp, 0*SZ_GPR ++ cfi_rel_offset(22, 0) ++ ++ /* Return Address */ ++ GPR_S $r1, $sp, 1*SZ_GPR ++ cfi_rel_offset(1, SZ_GPR) ++ ++ /* Caller's $sp */ ++ GPR_S $r5, $sp, 2*SZ_GPR ++ ++ /* Callee-saved scratch GPRs (r23-r31) */ ++ GPR_S $s0, $sp, 3*SZ_GPR ++ GPR_S $s1, $sp, 4*SZ_GPR ++ GPR_S $s2, $sp, 5*SZ_GPR ++ GPR_S $s3, $sp, 6*SZ_GPR ++ GPR_S $s4, $sp, 7*SZ_GPR ++ GPR_S $s5, $sp, 8*SZ_GPR ++ GPR_S $s6, $sp, 9*SZ_GPR ++ GPR_S $s7, $sp, 10*SZ_GPR ++ GPR_S $s8, $sp, 11*SZ_GPR ++ ++#if !defined(__loongarch_soft_float) ++ /* Callee-saved scratch FPRs (f24-f31) */ ++ FPR_S $f24, $sp, 12*SZ_GPR + 0*SZ_FPR ++ FPR_S $f25, $sp, 12*SZ_GPR + 1*SZ_FPR ++ FPR_S $f26, $sp, 12*SZ_GPR + 2*SZ_FPR ++ FPR_S $f27, $sp, 12*SZ_GPR + 3*SZ_FPR ++ FPR_S $f28, $sp, 12*SZ_GPR + 4*SZ_FPR ++ FPR_S $f29, $sp, 12*SZ_GPR + 5*SZ_FPR ++ FPR_S $f30, $sp, 12*SZ_GPR + 6*SZ_FPR ++ FPR_S $f31, $sp, 12*SZ_GPR + 7*SZ_FPR ++#endif ++ move $fp, $sp ++ ++ /* Invoke GTM_begin_transaction with the struct we've just built. */ ++ move $r5, $sp ++ bl %plt(GTM_begin_transaction) ++ ++ /* Return. (no call-saved scratch reg needs to be restored here) */ ++ GPR_L $fp, $sp, 0*SZ_GPR ++ cfi_restore(22) ++ GPR_L $r1, $sp, 1*SZ_GPR ++ cfi_restore(1) ++ ++ ADDSP(12*SZ_GPR+8*SZ_FPR) ++ cfi_adjust_cfa_offset(-(12*SZ_GPR+8*SZ_FPR)) ++ ++ jr $r1 ++ cfi_endproc ++ .size _ITM_beginTransaction, . - _ITM_beginTransaction ++ ++ .align 2 ++ .global GTM_longjmp ++ .hidden GTM_longjmp ++ .type GTM_longjmp, @function ++ ++GTM_longjmp: ++ cfi_startproc ++ GPR_L $s0, $r5, 3*SZ_GPR ++ GPR_L $s1, $r5, 4*SZ_GPR ++ GPR_L $s2, $r5, 5*SZ_GPR ++ GPR_L $s3, $r5, 6*SZ_GPR ++ GPR_L $s4, $r5, 7*SZ_GPR ++ GPR_L $s5, $r5, 8*SZ_GPR ++ GPR_L $s6, $r5, 9*SZ_GPR ++ GPR_L $s7, $r5, 10*SZ_GPR ++ GPR_L $s8, $r5, 11*SZ_GPR ++ ++#if !defined(__loongarch_soft_float) ++ /* Callee-saved scratch FPRs (f24-f31) */ ++ FPR_L $f24, $r5, 12*SZ_GPR + 0*SZ_FPR ++ FPR_L $f25, $r5, 12*SZ_GPR + 1*SZ_FPR ++ FPR_L $f26, $r5, 12*SZ_GPR + 2*SZ_FPR ++ FPR_L $f27, $r5, 12*SZ_GPR + 3*SZ_FPR ++ FPR_L $f28, $r5, 12*SZ_GPR + 4*SZ_FPR ++ FPR_L $f29, $r5, 12*SZ_GPR + 5*SZ_FPR ++ FPR_L $f30, $r5, 12*SZ_GPR + 6*SZ_FPR ++ FPR_L $f31, $r5, 12*SZ_GPR + 7*SZ_FPR ++#endif ++ ++ GPR_L $r7, $r5, 2*SZ_GPR ++ GPR_L $fp, $r5, 0*SZ_GPR ++ GPR_L $r1, $r5, 1*SZ_GPR ++ cfi_def_cfa(5, 0) ++ move $sp, $r7 ++ jr $r1 ++ cfi_endproc ++ .size GTM_longjmp, . - GTM_longjmp ++ ++#ifdef __linux__ ++.section .note.GNU-stack, "", @progbits ++#endif +diff --git a/libitm/config/loongarch/target.h b/libitm/config/loongarch/target.h +new file mode 100644 +index 000000000..2853bf203 +--- /dev/null ++++ b/libitm/config/loongarch/target.h +@@ -0,0 +1,50 @@ ++/* Copyright (C) 2014-2018 Free Software Foundation, Inc. ++ Contributed by Loongson Co. Ltd. ++ ++ This file is part of the GNU Transactional Memory Library (libitm). 
++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++namespace GTM HIDDEN { ++ ++typedef struct gtm_jmpbuf ++ { ++ long int fp; /* Frame Pointer: r22 */ ++ long int pc; /* Return Address: r1 */ ++ void *cfa; /* CFA: r3 */ ++ long int gpr[9]; /* Callee-saved scratch GPRs: r23(s0)-r31(s8) */ ++ ++ /* Callee-saved scratch FPRs: f24-f31 */ ++#if defined(__loongarch_double_float) ++ double fpr[8]; ++#elif defined(__loongarch_single_float) ++ float fpr[8]; ++#endif ++ } gtm_jmpbuf; ++ ++#define HW_CACHELINE_SIZE 128 ++ ++static inline void ++cpu_relax (void) ++{ ++ __asm__ volatile ("" : : : "memory"); ++} ++ ++} // namespace GTM +diff --git a/libitm/configure.tgt b/libitm/configure.tgt +index 0cbb0974d..18a06e45f 100644 +--- a/libitm/configure.tgt ++++ b/libitm/configure.tgt +@@ -69,6 +69,8 @@ case "${target_cpu}" in + ARCH=x86 + ;; + ++ loongarch*) ARCH=loongarch ;; ++ + sh*) ARCH=sh ;; + + sparc) +-- +2.39.3 + -- Gitee From 4dc35e1f1c2091f486b0eb4f895a4df2d47c0557 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Wed, 24 Jan 2024 11:10:36 +0800 Subject: [PATCH 4/4] Separate LoongArch's supported patches. Signed-off-by: Peng Fan --- gcc.spec | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gcc.spec b/gcc.spec index 174a380..da8d09d 100644 --- a/gcc.spec +++ b/gcc.spec @@ -989,6 +989,7 @@ rm -f gcc/testsuite/go.test/test/chan/goroutines.go %patch10000 -p1 +%ifarch loongarch64 %patch1003 -p1 %patch1004 -p1 %patch1005 -p1 @@ -1001,6 +1002,7 @@ rm -f gcc/testsuite/go.test/test/chan/goroutines.go %patch1012 -p1 %patch1013 -p1 %patch1014 -p1 +%endif %build @@ -3400,6 +3402,7 @@ fi %changelog * Wed Feb 12 2025 Xue haolin 8.5.0-23.0.1 - Rebrand for Anolis OS. +- Separate LoongArch's supported patches.(fanpeng@loongson.cn) * Fri Feb 7 2025 Marek Polacek 8.5.0-23 - rebuild for CVE-2020-11023 (RHEL-78274) -- Gitee
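
For reference, the layout contract between libitm's target.h and sjlj.S in the patch above can be checked mechanically: _ITM_beginTransaction stores s0-s8 at offsets 3*SZ_GPR through 11*SZ_GPR and f24-f31 starting at 12*SZ_GPR, and GTM_longjmp reloads from the same slots, so the gtm_jmpbuf struct must match that layout byte for byte. The following is a minimal compile-time sketch of that invariant, not code from the patch; it assumes the __loongarch64 double-float configuration (SZ_GPR == 8, SZ_FPR == 8) and redefines those names locally.

    /* Sketch only -- mirrors the gtm_jmpbuf layout from the libitm patch
       and checks that its field offsets line up with the sjlj.S save
       slots.  SZ_GPR/SZ_FPR are assumed values for the __loongarch64
       double-float ABI, not taken from the real asm.h.  */
    #include <stddef.h>

    #define SZ_GPR 8   /* GPR save slot size: st.d/ld.d  */
    #define SZ_FPR 8   /* FPR save slot size: fst.d/fld.d */

    typedef struct gtm_jmpbuf
    {
      long int fp;       /* frame pointer r22, saved at 0*SZ_GPR */
      long int pc;       /* return address r1, saved at 1*SZ_GPR */
      void *cfa;         /* caller's $sp (r3), saved at 2*SZ_GPR */
      long int gpr[9];   /* s0-s8 (r23-r31), saved at 3*SZ_GPR..11*SZ_GPR */
      double fpr[8];     /* f24-f31, saved at 12*SZ_GPR + n*SZ_FPR */
    } gtm_jmpbuf;

    _Static_assert (offsetof (gtm_jmpbuf, pc)  == 1 * SZ_GPR,  "pc slot");
    _Static_assert (offsetof (gtm_jmpbuf, cfa) == 2 * SZ_GPR,  "cfa slot");
    _Static_assert (offsetof (gtm_jmpbuf, gpr) == 3 * SZ_GPR,  "first GPR slot");
    _Static_assert (offsetof (gtm_jmpbuf, fpr) == 12 * SZ_GPR, "first FPR slot");
    _Static_assert (sizeof (gtm_jmpbuf) == 12 * SZ_GPR + 8 * SZ_FPR,
                    "matches ADDSP(-(12*SZ_GPR+8*SZ_FPR))");

Compiling this translation unit with any C11 compiler (e.g. cc -std=c11 -c check.c, a hypothetical filename) is enough to exercise the checks; a mismatch between the struct and the assembly offsets then shows up as a failed _Static_assert rather than as silent register corruption when GTM_longjmp restores a transaction's context.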